Merge "Separate create server schema for admin password"
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index ee90aa1..f80fc1b 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -391,12 +391,14 @@
# A list of enabled compute extensions with a special entry
# all which indicates every extension is enabled. Each
-# extension should be specified with alias name (list value)
+# extension should be specified with alias name. Empty list
+# indicates all extensions are disabled (list value)
#api_extensions=all
# A list of enabled v3 extensions with a special entry all
# which indicates every extension is enabled. Each extension
-# should be specified with alias name (list value)
+# should be specified with alias name. Empty list indicates
+# all extensions are disabled (list value)
#api_v3_extensions=all
# Does the test environment support changing the admin
@@ -755,7 +757,8 @@
#ipv6=true
# A list of enabled network extensions with a special entry
-# all which indicates every extension is enabled (list value)
+# all which indicates every extension is enabled. Empty list
+# indicates all extensions are disabled (list value)
#api_extensions=all
# Allow the execution of IPv6 subnet tests that use the
@@ -1110,7 +1113,8 @@
#snapshot=true
# A list of enabled volume extensions with a special entry all
-# which indicates every extension is enabled (list value)
+# which indicates every extension is enabled. Empty list
+# indicates all extensions are disabled (list value)
#api_extensions=all
# Is the v1 volume API enabled (boolean value)
diff --git a/tempest/README.rst b/tempest/README.rst
index 18c7cf3..fb25151 100644
--- a/tempest/README.rst
+++ b/tempest/README.rst
@@ -23,9 +23,8 @@
belongs in each directory, the rules and examples for good tests, are
documented in a README.rst file in the directory.
-
-api
----
+:ref:`api_field_guide`
+----------------------
API tests are validation tests for the OpenStack API. They should not
use the existing python clients for OpenStack, but should instead use
@@ -39,8 +38,8 @@
frameworks.
-cli
----
+:ref:`cli_field_guide`
+----------------------
CLI tests use the openstack CLI to interact with the OpenStack
cloud. CLI testing in unit tests is somewhat difficult because unlike
@@ -49,8 +48,8 @@
prereqs having a running OpenStack cloud.
-scenario
---------
+:ref:`scenario_field_guide`
+---------------------------
Scenario tests are complex "through path" tests for OpenStack
functionality. They are typically a series of steps where complicated
@@ -59,18 +58,26 @@
Scenario tests can and should use the OpenStack python clients.
-stress
-------
+:ref:`stress_field_guide`
+-------------------------
Stress tests are designed to stress an OpenStack environment by running a high
workload against it and seeing what breaks. The stress test framework runs
several test jobs in parallel and can run any existing test in Tempest as a
stress job.
-thirdparty
-----------
+:ref:`third_party_field_guide`
+------------------------------
Many openstack components include 3rdparty API support. It is
completely legitimate for Tempest to include tests of 3rdparty APIs,
but those should be kept separate from the normal OpenStack
validation.
+
+:ref:`unit_tests_field_guide`
+-----------------------------
+
+Unit tests are the self-checks for Tempest. They provide functional
+verification and regression checking for the internal components of Tempest.
+They should be used just to verify that the individual pieces of Tempest are
+working as expected.
diff --git a/tempest/api/README.rst b/tempest/api/README.rst
index 9eac19d..91e6ad6 100644
--- a/tempest/api/README.rst
+++ b/tempest/api/README.rst
@@ -1,3 +1,5 @@
+.. _api_field_guide:
+
Tempest Field Guide to API tests
================================
diff --git a/tempest/api/compute/admin/test_quotas_negative.py b/tempest/api/compute/admin/test_quotas_negative.py
index f147b9c..599b058 100644
--- a/tempest/api/compute/admin/test_quotas_negative.py
+++ b/tempest/api/compute/admin/test_quotas_negative.py
@@ -111,7 +111,9 @@
security_groups=default_sg_quota)
# Check we cannot create anymore
- self.assertRaises(exceptions.OverLimit,
+ # A 403 Forbidden or 413 Overlimit (old behaviour) exception
+ # will be raised when out of quota
+ self.assertRaises((exceptions.Unauthorized, exceptions.OverLimit),
self.sg_client.create_security_group,
"sg-overlimit", "sg-desc")
@@ -147,7 +149,9 @@
ip_protocol = 'tcp'
# Check we cannot create SG rule anymore
- self.assertRaises(exceptions.OverLimit,
+ # A 403 Forbidden or 413 Overlimit (old behaviour) exception
+ # will be raised when out of quota
+ self.assertRaises((exceptions.OverLimit, exceptions.Unauthorized),
self.sg_client.create_security_group_rule,
secgroup_id, ip_protocol, 1025, 1025)
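
As background for the assertion changes above: assertRaises accepts a tuple
of exception classes, so the check passes whether the service returns a 403
(Unauthorized) or a 413 (OverLimit). A minimal, self-contained sketch of the
same idiom, using made-up exception names rather than Tempest's:

    import unittest

    class QuotaError(Exception):
        pass

    class OverLimitError(Exception):
        pass

    def create_resource():
        # Stand-in for a client call that is out of quota; either
        # exception type may be raised depending on the API behaviour.
        raise QuotaError("quota exceeded")

    class TupleAssertRaisesExample(unittest.TestCase):
        def test_out_of_quota(self):
            # A tuple of exception classes means any one of them
            # satisfies the assertion.
            self.assertRaises((QuotaError, OverLimitError), create_resource)
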
diff --git a/tempest/api/compute/admin/test_security_group_default_rules.py b/tempest/api/compute/admin/test_security_group_default_rules.py
new file mode 100644
index 0000000..07408a8
--- /dev/null
+++ b/tempest/api/compute/admin/test_security_group_default_rules.py
@@ -0,0 +1,127 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import testtools
+
+from tempest.api.compute import base
+from tempest import config
+from tempest import exceptions
+from tempest import test
+
+CONF = config.CONF
+
+
+class SecurityGroupDefaultRulesTest(base.BaseV2ComputeAdminTest):
+
+ @classmethod
+    # TODO(GMann): Once Bug# 1311500 is fixed, these tests can run
+ # for Neutron also.
+ @testtools.skipIf(CONF.service_available.neutron,
+ "Skip as this functionality is not yet "
+ "implemented in Neutron. Related Bug#1311500")
+ @test.safe_setup
+ def setUpClass(cls):
+ # A network and a subnet will be created for these tests
+ cls.set_network_resources(network=True, subnet=True)
+ super(SecurityGroupDefaultRulesTest, cls).setUpClass()
+ cls.adm_client = cls.os_adm.security_group_default_rules_client
+
+ def _create_security_group_default_rules(self, ip_protocol='tcp',
+ from_port=22, to_port=22,
+ cidr='10.10.0.0/24'):
+ # Create Security Group default rule
+ _, rule = self.adm_client.create_security_default_group_rule(
+ ip_protocol,
+ from_port,
+ to_port,
+ cidr=cidr)
+ self.assertEqual(ip_protocol, rule['ip_protocol'])
+ self.assertEqual(from_port, rule['from_port'])
+ self.assertEqual(to_port, rule['to_port'])
+ self.assertEqual(cidr, rule['ip_range']['cidr'])
+ return rule
+
+ @test.attr(type='smoke')
+ def test_create_delete_security_group_default_rules(self):
+ # Create and delete Security Group default rule
+ ip_protocols = {'tcp', 'udp', 'icmp'}
+ for ip_protocol in ip_protocols:
+ rule = self._create_security_group_default_rules(ip_protocol)
+ # Delete Security Group default rule
+ self.adm_client.delete_security_group_default_rule(rule['id'])
+ self.assertRaises(exceptions.NotFound,
+ self.adm_client.get_security_group_default_rule,
+ rule['id'])
+
+ @test.attr(type='smoke')
+ def test_create_security_group_default_rule_without_cidr(self):
+ ip_protocol = 'udp'
+ from_port = 80
+ to_port = 80
+ _, rule = self.adm_client.create_security_default_group_rule(
+ ip_protocol,
+ from_port,
+ to_port)
+ self.addCleanup(self.adm_client.delete_security_group_default_rule,
+ rule['id'])
+ self.assertNotEqual(0, rule['id'])
+ self.assertEqual('0.0.0.0/0', rule['ip_range']['cidr'])
+
+ @test.attr(type='smoke')
+ def test_create_security_group_default_rule_with_blank_cidr(self):
+ ip_protocol = 'icmp'
+ from_port = 10
+ to_port = 10
+ cidr = ''
+ _, rule = self.adm_client.create_security_default_group_rule(
+ ip_protocol,
+ from_port,
+ to_port,
+ cidr=cidr)
+ self.addCleanup(self.adm_client.delete_security_group_default_rule,
+ rule['id'])
+ self.assertNotEqual(0, rule['id'])
+ self.assertEqual('0.0.0.0/0', rule['ip_range']['cidr'])
+
+ @test.attr(type='smoke')
+ def test_security_group_default_rules_list(self):
+ ip_protocol = 'tcp'
+ from_port = 22
+ to_port = 22
+ cidr = '10.10.0.0/24'
+ rule = self._create_security_group_default_rules(ip_protocol,
+ from_port,
+ to_port,
+ cidr)
+ self.addCleanup(self.adm_client.delete_security_group_default_rule,
+ rule['id'])
+ _, rules = self.adm_client.list_security_group_default_rules()
+ self.assertNotEqual(0, len(rules))
+ self.assertIn(rule, rules)
+
+ @test.attr(type='smoke')
+ def test_default_security_group_default_rule_show(self):
+ ip_protocol = 'tcp'
+ from_port = 22
+ to_port = 22
+ cidr = '10.10.0.0/24'
+ rule = self._create_security_group_default_rules(ip_protocol,
+ from_port,
+ to_port,
+ cidr)
+ self.addCleanup(self.adm_client.delete_security_group_default_rule,
+ rule['id'])
+ _, fetched_rule = self.adm_client.get_security_group_default_rule(
+ rule['id'])
+ self.assertEqual(rule, fetched_rule)
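
The assertions in the new tests above imply a default-rule body of roughly
the following shape; the values are illustrative only and are not taken from
an API reference:

    # Illustrative shape of a security group default rule, as implied by
    # the assertions above (values are made up).
    rule = {
        'id': 1,
        'ip_protocol': 'tcp',
        'from_port': 22,
        'to_port': 22,
        'ip_range': {'cidr': '10.10.0.0/24'},
    }
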
diff --git a/tempest/api/compute/admin/test_servers_negative.py b/tempest/api/compute/admin/test_servers_negative.py
index cccaf13..f4d010e 100644
--- a/tempest/api/compute/admin/test_servers_negative.py
+++ b/tempest/api/compute/admin/test_servers_negative.py
@@ -127,6 +127,8 @@
self.client.migrate_server,
str(uuid.uuid4()))
+ @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+ 'Resize not available.')
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
@test.attr(type=['negative', 'gate'])
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 70a9604..a3295eb 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -83,6 +83,8 @@
cls.hypervisor_client = cls.os.hypervisor_client
cls.certificates_client = cls.os.certificates_client
cls.migrations_client = cls.os.migrations_client
+ cls.security_group_default_rules_client = (
+ cls.os.security_group_default_rules_client)
elif cls._api_version == 3:
if not CONF.compute_feature_enabled.api_v3:
diff --git a/tempest/api/compute/servers/test_list_server_filters.py b/tempest/api/compute/servers/test_list_server_filters.py
index f66020c..9d39c9f 100644
--- a/tempest/api/compute/servers/test_list_server_filters.py
+++ b/tempest/api/compute/servers/test_list_server_filters.py
@@ -203,11 +203,13 @@
params = {'status': 'active'}
resp, body = self.client.list_servers_with_detail(params)
servers = body['servers']
+ test_ids = [s['id'] for s in (self.s1, self.s2, self.s3)]
self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
- self.assertEqual(['ACTIVE'] * 3, [x['status'] for x in servers])
+ self.assertEqual(['ACTIVE'] * 3, [x['status'] for x in servers
+ if x['id'] in test_ids])
@test.attr(type='gate')
def test_list_servers_filtered_by_name_wildcard(self):
diff --git a/tempest/api/compute/servers/test_server_metadata_negative.py b/tempest/api/compute/servers/test_server_metadata_negative.py
index b55833c..fbda401 100644
--- a/tempest/api/compute/servers/test_server_metadata_negative.py
+++ b/tempest/api/compute/servers/test_server_metadata_negative.py
@@ -119,20 +119,22 @@
@test.attr(type=['negative', 'gate'])
def test_metadata_items_limit(self):
- # Raise a 413 OverLimit exception while exceeding metadata items limit
- # for tenant.
+ # A 403 Forbidden or 413 Overlimit (old behaviour) exception
+ # will be raised while exceeding metadata items limit for
+ # tenant.
_, quota_set = self.quotas.get_quota_set(self.tenant_id)
quota_metadata = quota_set['metadata_items']
req_metadata = {}
for num in range(1, quota_metadata + 2):
req_metadata['key' + str(num)] = 'val' + str(num)
- self.assertRaises(exceptions.OverLimit,
+ self.assertRaises((exceptions.OverLimit, exceptions.Unauthorized),
self.client.set_server_metadata,
self.server_id, req_metadata)
- # Raise a 413 OverLimit exception while exceeding metadata items limit
- # for tenant (update).
- self.assertRaises(exceptions.OverLimit,
+        # A 403 Forbidden or 413 Overlimit (old behaviour) exception
+        # will be raised while exceeding metadata items limit for
+        # tenant (update).
+ self.assertRaises((exceptions.Unauthorized, exceptions.OverLimit),
self.client.update_server_metadata,
self.server_id, req_metadata)
diff --git a/tempest/api/data_processing/base.py b/tempest/api/data_processing/base.py
index ab0e83a..cfb5a3d 100644
--- a/tempest/api/data_processing/base.py
+++ b/tempest/api/data_processing/base.py
@@ -75,16 +75,16 @@
object. All resources created in this method will be automatically
removed in tearDownClass method.
"""
- resp, body = cls.client.create_node_group_template(name, plugin_name,
- hadoop_version,
- node_processes,
- flavor_id,
- node_configs,
- **kwargs)
+ _, resp_body = cls.client.create_node_group_template(name, plugin_name,
+ hadoop_version,
+ node_processes,
+ flavor_id,
+ node_configs,
+ **kwargs)
# store id of created node group template
- cls._node_group_templates.append(body['id'])
+ cls._node_group_templates.append(resp_body['id'])
- return resp, body
+ return resp_body
@classmethod
def create_cluster_template(cls, name, plugin_name, hadoop_version,
@@ -95,15 +95,15 @@
object. All resources created in this method will be automatically
removed in tearDownClass method.
"""
- resp, body = cls.client.create_cluster_template(name, plugin_name,
- hadoop_version,
- node_groups,
- cluster_configs,
- **kwargs)
+ _, resp_body = cls.client.create_cluster_template(name, plugin_name,
+ hadoop_version,
+ node_groups,
+ cluster_configs,
+ **kwargs)
# store id of created cluster template
- cls._cluster_templates.append(body['id'])
+ cls._cluster_templates.append(resp_body['id'])
- return resp, body
+ return resp_body
@classmethod
def create_data_source(cls, name, type, url, **kwargs):
@@ -113,11 +113,11 @@
object. All resources created in this method will be automatically
removed in tearDownClass method.
"""
- resp, body = cls.client.create_data_source(name, type, url, **kwargs)
+ _, resp_body = cls.client.create_data_source(name, type, url, **kwargs)
# store id of created data source
- cls._data_sources.append(body['id'])
+ cls._data_sources.append(resp_body['id'])
- return resp, body
+ return resp_body
@classmethod
def create_job_binary_internal(cls, name, data):
@@ -126,11 +126,11 @@
It returns created object. All resources created in this method will
be automatically removed in tearDownClass method.
"""
- resp, body = cls.client.create_job_binary_internal(name, data)
+ _, resp_body = cls.client.create_job_binary_internal(name, data)
# store id of created job binary internal
- cls._job_binary_internals.append(body['id'])
+ cls._job_binary_internals.append(resp_body['id'])
- return resp, body
+ return resp_body
def create_job_binary(cls, name, url, extra=None, **kwargs):
"""Creates watched job binary with specified params.
@@ -139,8 +139,8 @@
object. All resources created in this method will be automatically
removed in tearDownClass method.
"""
- resp, body = cls.client.create_job_binary(name, url, extra, **kwargs)
+ _, resp_body = cls.client.create_job_binary(name, url, extra, **kwargs)
# store id of created job binary
- cls._job_binaries.append(body['id'])
+ cls._job_binaries.append(resp_body['id'])
- return resp, body
+ return resp_body
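
The docstrings above all describe the same convention: each create helper
registers the id of the new resource on a class-level list so tearDownClass
can delete everything afterwards, and (after this change) returns only the
response body. A minimal standalone sketch of that pattern, using a
hypothetical client rather than the Sahara one:

    class FakeClient(object):
        """Hypothetical client, for illustration only."""
        def __init__(self):
            self.store = {}
            self._next_id = 0

        def create_data_source(self, name):
            self._next_id += 1
            body = {'id': self._next_id, 'name': name}
            self.store[body['id']] = body
            return body

        def delete_data_source(self, source_id):
            self.store.pop(source_id, None)

    class BaseExampleTest(object):
        _data_sources = []
        client = FakeClient()

        @classmethod
        def create_data_source(cls, name):
            resp_body = cls.client.create_data_source(name)
            # store id of created data source so tearDownClass can remove it
            cls._data_sources.append(resp_body['id'])
            return resp_body

        @classmethod
        def tearDownClass(cls):
            for source_id in cls._data_sources:
                cls.client.delete_data_source(source_id)
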
diff --git a/tempest/api/data_processing/test_cluster_templates.py b/tempest/api/data_processing/test_cluster_templates.py
index ad9ed2a..ff67c1c 100644
--- a/tempest/api/data_processing/test_cluster_templates.py
+++ b/tempest/api/data_processing/test_cluster_templates.py
@@ -39,7 +39,7 @@
}
}
}
- resp_body = cls.create_node_group_template(**node_group_template)[1]
+ resp_body = cls.create_node_group_template(**node_group_template)
node_group_template_id = resp_body['id']
cls.full_cluster_template = {
@@ -95,23 +95,22 @@
def _create_cluster_template(self, template_name=None):
"""Creates Cluster Template with optional name specified.
- It creates template and ensures response status, template name and
- response body. Returns id and name of created template.
+        It creates a template and checks the template name and response body.
+        Returns the id and name of the created template.
"""
if not template_name:
# generate random name if it's not specified
template_name = data_utils.rand_name('sahara-cluster-template')
# create cluster template
- resp, body = self.create_cluster_template(template_name,
- **self.full_cluster_template)
+ resp_body = self.create_cluster_template(template_name,
+ **self.full_cluster_template)
# ensure that template created successfully
- self.assertEqual(202, resp.status)
- self.assertEqual(template_name, body['name'])
- self.assertDictContainsSubset(self.cluster_template, body)
+ self.assertEqual(template_name, resp_body['name'])
+ self.assertDictContainsSubset(self.cluster_template, resp_body)
- return body['id'], template_name
+ return resp_body['id'], template_name
@test.attr(type='smoke')
def test_cluster_template_create(self):
@@ -122,8 +121,7 @@
template_info = self._create_cluster_template()
# check for cluster template in list
- resp, templates = self.client.list_cluster_templates()
- self.assertEqual(200, resp.status)
+ _, templates = self.client.list_cluster_templates()
templates_info = [(template['id'], template['name'])
for template in templates]
self.assertIn(template_info, templates_info)
@@ -133,16 +131,14 @@
template_id, template_name = self._create_cluster_template()
# check cluster template fetch by id
- resp, template = self.client.get_cluster_template(template_id)
- self.assertEqual(200, resp.status)
+ _, template = self.client.get_cluster_template(template_id)
self.assertEqual(template_name, template['name'])
self.assertDictContainsSubset(self.cluster_template, template)
@test.attr(type='smoke')
def test_cluster_template_delete(self):
- template_id = self._create_cluster_template()[0]
+ template_id, _ = self._create_cluster_template()
# delete the cluster template by id
- resp = self.client.delete_cluster_template(template_id)[0]
- self.assertEqual(204, resp.status)
+ self.client.delete_cluster_template(template_id)
# TODO(ylobankov): check that cluster template is really deleted
diff --git a/tempest/api/data_processing/test_data_sources.py b/tempest/api/data_processing/test_data_sources.py
index 345153b..aae56c4 100644
--- a/tempest/api/data_processing/test_data_sources.py
+++ b/tempest/api/data_processing/test_data_sources.py
@@ -48,65 +48,59 @@
def _create_data_source(self, source_body, source_name=None):
"""Creates Data Source with optional name specified.
- It creates a link to input-source file (it may not exist) and ensures
- response status and source name. Returns id and name of created source.
+        It creates a link to an input-source file (which may not exist) and
+        checks the source name and response body. Returns the id and name of
+        the created source.
"""
if not source_name:
# generate random name if it's not specified
source_name = data_utils.rand_name('sahara-data-source')
# create data source
- resp, body = self.create_data_source(source_name, **source_body)
+ resp_body = self.create_data_source(source_name, **source_body)
# ensure that source created successfully
- self.assertEqual(202, resp.status)
- self.assertEqual(source_name, body['name'])
+ self.assertEqual(source_name, resp_body['name'])
if source_body['type'] == 'swift':
source_body = self.swift_data_source
- self.assertDictContainsSubset(source_body, body)
+ self.assertDictContainsSubset(source_body, resp_body)
- return body['id'], source_name
+ return resp_body['id'], source_name
def _list_data_sources(self, source_info):
# check for data source in list
- resp, sources = self.client.list_data_sources()
- self.assertEqual(200, resp.status)
+ _, sources = self.client.list_data_sources()
sources_info = [(source['id'], source['name']) for source in sources]
self.assertIn(source_info, sources_info)
def _get_data_source(self, source_id, source_name, source_body):
# check data source fetch by id
- resp, source = self.client.get_data_source(source_id)
- self.assertEqual(200, resp.status)
+ _, source = self.client.get_data_source(source_id)
self.assertEqual(source_name, source['name'])
self.assertDictContainsSubset(source_body, source)
- def _delete_data_source(self, source_id):
- # delete the data source by id
- resp = self.client.delete_data_source(source_id)[0]
- self.assertEqual(204, resp.status)
-
@test.attr(type='smoke')
def test_swift_data_source_create(self):
self._create_data_source(self.swift_data_source_with_creds)
@test.attr(type='smoke')
def test_swift_data_source_list(self):
- source_info = self._create_data_source(
- self.swift_data_source_with_creds)
+ source_info = (
+ self._create_data_source(self.swift_data_source_with_creds))
self._list_data_sources(source_info)
@test.attr(type='smoke')
def test_swift_data_source_get(self):
- source_id, source_name = self._create_data_source(
- self.swift_data_source_with_creds)
+ source_id, source_name = (
+ self._create_data_source(self.swift_data_source_with_creds))
self._get_data_source(source_id, source_name, self.swift_data_source)
@test.attr(type='smoke')
def test_swift_data_source_delete(self):
- source_id = self._create_data_source(
- self.swift_data_source_with_creds)[0]
- self._delete_data_source(source_id)
+ source_id, _ = (
+ self._create_data_source(self.swift_data_source_with_creds))
+
+ # delete the data source by id
+ self.client.delete_data_source(source_id)
@test.attr(type='smoke')
def test_local_hdfs_data_source_create(self):
@@ -119,15 +113,17 @@
@test.attr(type='smoke')
def test_local_hdfs_data_source_get(self):
- source_id, source_name = self._create_data_source(
- self.local_hdfs_data_source)
+ source_id, source_name = (
+ self._create_data_source(self.local_hdfs_data_source))
self._get_data_source(
source_id, source_name, self.local_hdfs_data_source)
@test.attr(type='smoke')
def test_local_hdfs_data_source_delete(self):
- source_id = self._create_data_source(self.local_hdfs_data_source)[0]
- self._delete_data_source(source_id)
+ source_id, _ = self._create_data_source(self.local_hdfs_data_source)
+
+ # delete the data source by id
+ self.client.delete_data_source(source_id)
@test.attr(type='smoke')
def test_external_hdfs_data_source_create(self):
@@ -140,12 +136,14 @@
@test.attr(type='smoke')
def test_external_hdfs_data_source_get(self):
- source_id, source_name = self._create_data_source(
- self.external_hdfs_data_source)
+ source_id, source_name = (
+ self._create_data_source(self.external_hdfs_data_source))
self._get_data_source(
source_id, source_name, self.external_hdfs_data_source)
@test.attr(type='smoke')
def test_external_hdfs_data_source_delete(self):
- source_id = self._create_data_source(self.external_hdfs_data_source)[0]
- self._delete_data_source(source_id)
+ source_id, _ = self._create_data_source(self.external_hdfs_data_source)
+
+ # delete the data source by id
+ self.client.delete_data_source(source_id)
diff --git a/tempest/api/data_processing/test_job_binaries.py b/tempest/api/data_processing/test_job_binaries.py
index 689c1fe..15ee145 100644
--- a/tempest/api/data_processing/test_job_binaries.py
+++ b/tempest/api/data_processing/test_job_binaries.py
@@ -40,8 +40,8 @@
name = data_utils.rand_name('sahara-internal-job-binary')
cls.job_binary_data = 'Some script may be data'
- job_binary_internal = cls.create_job_binary_internal(
- name, cls.job_binary_data)[1]
+ job_binary_internal = (
+ cls.create_job_binary_internal(name, cls.job_binary_data))
cls.internal_db_job_binary = {
'url': 'internal-db://%s' % job_binary_internal['id'],
'description': 'Test job binary',
@@ -50,26 +50,25 @@
def _create_job_binary(self, binary_body, binary_name=None):
"""Creates Job Binary with optional name specified.
- It creates a link to data (jar, pig files, etc.) and ensures response
- status, job binary name and response body. Returns id and name of
- created job binary. Data may not exist when using Swift
- as data storage. In other cases data must exist in storage.
+        It creates a link to data (jar, pig files, etc.) and checks the job
+        binary name and response body. Returns the id and name of the created
+        job binary. The data may not exist when Swift is used as the data
+        storage; in other cases the data must exist in storage.
"""
if not binary_name:
# generate random name if it's not specified
binary_name = data_utils.rand_name('sahara-job-binary')
# create job binary
- resp, body = self.create_job_binary(binary_name, **binary_body)
+ resp_body = self.create_job_binary(binary_name, **binary_body)
# ensure that binary created successfully
- self.assertEqual(202, resp.status)
- self.assertEqual(binary_name, body['name'])
+ self.assertEqual(binary_name, resp_body['name'])
if 'swift' in binary_body['url']:
binary_body = self.swift_job_binary
- self.assertDictContainsSubset(binary_body, body)
+ self.assertDictContainsSubset(binary_body, resp_body)
- return body['id'], binary_name
+ return resp_body['id'], binary_name
@test.attr(type='smoke')
def test_swift_job_binary_create(self):
@@ -80,30 +79,27 @@
binary_info = self._create_job_binary(self.swift_job_binary_with_extra)
# check for job binary in list
- resp, binaries = self.client.list_job_binaries()
- self.assertEqual(200, resp.status)
+ _, binaries = self.client.list_job_binaries()
binaries_info = [(binary['id'], binary['name']) for binary in binaries]
self.assertIn(binary_info, binaries_info)
@test.attr(type='smoke')
def test_swift_job_binary_get(self):
- binary_id, binary_name = self._create_job_binary(
- self.swift_job_binary_with_extra)
+ binary_id, binary_name = (
+ self._create_job_binary(self.swift_job_binary_with_extra))
# check job binary fetch by id
- resp, binary = self.client.get_job_binary(binary_id)
- self.assertEqual(200, resp.status)
+ _, binary = self.client.get_job_binary(binary_id)
self.assertEqual(binary_name, binary['name'])
self.assertDictContainsSubset(self.swift_job_binary, binary)
@test.attr(type='smoke')
def test_swift_job_binary_delete(self):
- binary_id = self._create_job_binary(
- self.swift_job_binary_with_extra)[0]
+ binary_id, _ = (
+ self._create_job_binary(self.swift_job_binary_with_extra))
# delete the job binary by id
- resp = self.client.delete_job_binary(binary_id)[0]
- self.assertEqual(204, resp.status)
+ self.client.delete_job_binary(binary_id)
@test.attr(type='smoke')
def test_internal_db_job_binary_create(self):
@@ -114,35 +110,31 @@
binary_info = self._create_job_binary(self.internal_db_job_binary)
# check for job binary in list
- resp, binaries = self.client.list_job_binaries()
- self.assertEqual(200, resp.status)
+ _, binaries = self.client.list_job_binaries()
binaries_info = [(binary['id'], binary['name']) for binary in binaries]
self.assertIn(binary_info, binaries_info)
@test.attr(type='smoke')
def test_internal_db_job_binary_get(self):
- binary_id, binary_name = self._create_job_binary(
- self.internal_db_job_binary)
+ binary_id, binary_name = (
+ self._create_job_binary(self.internal_db_job_binary))
# check job binary fetch by id
- resp, binary = self.client.get_job_binary(binary_id)
- self.assertEqual(200, resp.status)
+ _, binary = self.client.get_job_binary(binary_id)
self.assertEqual(binary_name, binary['name'])
self.assertDictContainsSubset(self.internal_db_job_binary, binary)
@test.attr(type='smoke')
def test_internal_db_job_binary_delete(self):
- binary_id = self._create_job_binary(self.internal_db_job_binary)[0]
+ binary_id, _ = self._create_job_binary(self.internal_db_job_binary)
# delete the job binary by id
- resp = self.client.delete_job_binary(binary_id)[0]
- self.assertEqual(204, resp.status)
+ self.client.delete_job_binary(binary_id)
@test.attr(type='smoke')
def test_job_binary_get_data(self):
- binary_id = self._create_job_binary(self.internal_db_job_binary)[0]
+ binary_id, _ = self._create_job_binary(self.internal_db_job_binary)
# get data of job binary by id
- resp, data = self.client.get_job_binary_data(binary_id)
- self.assertEqual(200, resp.status)
+ _, data = self.client.get_job_binary_data(binary_id)
self.assertEqual(data, self.job_binary_data)
diff --git a/tempest/api/data_processing/test_job_binary_internals.py b/tempest/api/data_processing/test_job_binary_internals.py
index 6d59177..45e1140 100644
--- a/tempest/api/data_processing/test_job_binary_internals.py
+++ b/tempest/api/data_processing/test_job_binary_internals.py
@@ -29,23 +29,22 @@
def _create_job_binary_internal(self, binary_name=None):
"""Creates Job Binary Internal with optional name specified.
- It puts data into Sahara database and ensures response status and
- job binary internal name. Returns id and name of created job binary
- internal.
+        It puts data into the Sahara database and checks the job binary
+        internal name. Returns the id and name of the created job binary
+        internal.
"""
if not binary_name:
# generate random name if it's not specified
binary_name = data_utils.rand_name('sahara-job-binary-internal')
# create job binary internal
- resp, body = self.create_job_binary_internal(
- binary_name, self.job_binary_internal_data)
+ resp_body = (
+ self.create_job_binary_internal(binary_name,
+ self.job_binary_internal_data))
# ensure that job binary internal created successfully
- self.assertEqual(202, resp.status)
- self.assertEqual(binary_name, body['name'])
+ self.assertEqual(binary_name, resp_body['name'])
- return body['id'], binary_name
+ return resp_body['id'], binary_name
@test.attr(type='smoke')
def test_job_binary_internal_create(self):
@@ -56,8 +55,7 @@
binary_info = self._create_job_binary_internal()
# check for job binary internal in list
- resp, binaries = self.client.list_job_binary_internals()
- self.assertEqual(200, resp.status)
+ _, binaries = self.client.list_job_binary_internals()
binaries_info = [(binary['id'], binary['name']) for binary in binaries]
self.assertIn(binary_info, binaries_info)
@@ -66,23 +64,20 @@
binary_id, binary_name = self._create_job_binary_internal()
# check job binary internal fetch by id
- resp, binary = self.client.get_job_binary_internal(binary_id)
- self.assertEqual(200, resp.status)
+ _, binary = self.client.get_job_binary_internal(binary_id)
self.assertEqual(binary_name, binary['name'])
@test.attr(type='smoke')
def test_job_binary_internal_delete(self):
- binary_id = self._create_job_binary_internal()[0]
+ binary_id, _ = self._create_job_binary_internal()
# delete the job binary internal by id
- resp = self.client.delete_job_binary_internal(binary_id)[0]
- self.assertEqual(204, resp.status)
+ self.client.delete_job_binary_internal(binary_id)
@test.attr(type='smoke')
def test_job_binary_internal_get_data(self):
- binary_id = self._create_job_binary_internal()[0]
+ binary_id, _ = self._create_job_binary_internal()
# get data of job binary internal by id
- resp, data = self.client.get_job_binary_internal_data(binary_id)
- self.assertEqual(200, resp.status)
+ _, data = self.client.get_job_binary_internal_data(binary_id)
self.assertEqual(data, self.job_binary_internal_data)
diff --git a/tempest/api/data_processing/test_node_group_templates.py b/tempest/api/data_processing/test_node_group_templates.py
index 04f98b4..c2c0075 100644
--- a/tempest/api/data_processing/test_node_group_templates.py
+++ b/tempest/api/data_processing/test_node_group_templates.py
@@ -43,7 +43,7 @@
def _create_node_group_template(self, template_name=None):
"""Creates Node Group Template with optional name specified.
- It creates template and ensures response status and template name.
+        It creates a template and checks the template name and response body.
Returns id and name of created template.
"""
if not template_name:
@@ -51,15 +51,14 @@
template_name = data_utils.rand_name('sahara-ng-template')
# create node group template
- resp, body = self.create_node_group_template(
- template_name, **self.node_group_template)
+ resp_body = self.create_node_group_template(template_name,
+ **self.node_group_template)
# ensure that template created successfully
- self.assertEqual(202, resp.status)
- self.assertEqual(template_name, body['name'])
- self.assertDictContainsSubset(self.node_group_template, body)
+ self.assertEqual(template_name, resp_body['name'])
+ self.assertDictContainsSubset(self.node_group_template, resp_body)
- return body['id'], template_name
+ return resp_body['id'], template_name
@test.attr(type='smoke')
def test_node_group_template_create(self):
@@ -70,8 +69,7 @@
template_info = self._create_node_group_template()
# check for node group template in list
- resp, templates = self.client.list_node_group_templates()
- self.assertEqual(200, resp.status)
+ _, templates = self.client.list_node_group_templates()
templates_info = [(template['id'], template['name'])
for template in templates]
self.assertIn(template_info, templates_info)
@@ -81,15 +79,13 @@
template_id, template_name = self._create_node_group_template()
# check node group template fetch by id
- resp, template = self.client.get_node_group_template(template_id)
- self.assertEqual(200, resp.status)
+ _, template = self.client.get_node_group_template(template_id)
self.assertEqual(template_name, template['name'])
self.assertDictContainsSubset(self.node_group_template, template)
@test.attr(type='smoke')
def test_node_group_template_delete(self):
- template_id = self._create_node_group_template()[0]
+ template_id, _ = self._create_node_group_template()
# delete the node group template by id
- resp = self.client.delete_node_group_template(template_id)[0]
- self.assertEqual(204, resp.status)
+ self.client.delete_node_group_template(template_id)
diff --git a/tempest/api/data_processing/test_plugins.py b/tempest/api/data_processing/test_plugins.py
index d643f23..9fd7a17 100644
--- a/tempest/api/data_processing/test_plugins.py
+++ b/tempest/api/data_processing/test_plugins.py
@@ -20,10 +20,9 @@
def _list_all_plugin_names(self):
"""Returns all enabled plugin names.
- It ensures response status and main plugins availability.
+        It ensures that the main plugins are available.
"""
- resp, plugins = self.client.list_plugins()
- self.assertEqual(200, resp.status)
+ _, plugins = self.client.list_plugins()
plugins_names = [plugin['name'] for plugin in plugins]
self.assertIn('vanilla', plugins_names)
self.assertIn('hdp', plugins_names)
@@ -37,14 +36,12 @@
@test.attr(type='smoke')
def test_plugin_get(self):
for plugin_name in self._list_all_plugin_names():
- resp, plugin = self.client.get_plugin(plugin_name)
- self.assertEqual(200, resp.status)
+ _, plugin = self.client.get_plugin(plugin_name)
self.assertEqual(plugin_name, plugin['name'])
for plugin_version in plugin['versions']:
- resp, detailed_plugin = self.client.get_plugin(plugin_name,
- plugin_version)
- self.assertEqual(200, resp.status)
+ _, detailed_plugin = self.client.get_plugin(plugin_name,
+ plugin_version)
self.assertEqual(plugin_name, detailed_plugin['name'])
# check that required image tags contains name and version
diff --git a/tempest/api/identity/admin/v3/test_list_projects.py b/tempest/api/identity/admin/v3/test_list_projects.py
new file mode 100644
index 0000000..a3944e2
--- /dev/null
+++ b/tempest/api/identity/admin/v3/test_list_projects.py
@@ -0,0 +1,73 @@
+# Copyright 2014 Hewlett-Packard Development Company, L.P
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.identity import base
+from tempest.common.utils import data_utils
+from tempest import test
+
+
+class ListProjectsTestJSON(base.BaseIdentityV3AdminTest):
+ _interface = 'json'
+
+ @classmethod
+ def setUpClass(cls):
+ super(ListProjectsTestJSON, cls).setUpClass()
+ cls.project_ids = list()
+ cls.data.setup_test_domain()
+ # Create project with domain
+ cls.p1_name = data_utils.rand_name('project')
+ _, cls.p1 = cls.client.create_project(
+ cls.p1_name, enabled=False, domain_id=cls.data.domain['id'])
+ cls.data.projects.append(cls.p1)
+ cls.project_ids.append(cls.p1['id'])
+ # Create default project
+ p2_name = data_utils.rand_name('project')
+ _, cls.p2 = cls.client.create_project(p2_name)
+ cls.data.projects.append(cls.p2)
+ cls.project_ids.append(cls.p2['id'])
+
+ @test.attr(type='gate')
+ def test_projects_list(self):
+ # List projects
+ resp, list_projects = self.client.list_projects()
+
+ for p in self.project_ids:
+ _, get_project = self.client.get_project(p)
+ self.assertIn(get_project, list_projects)
+
+ @test.attr(type='gate')
+ def test_list_projects_with_domains(self):
+ # List projects with domain
+ self._list_projects_with_params(
+ {'domain_id': self.data.domain['id']}, 'domain_id')
+
+ @test.attr(type='gate')
+ def test_list_projects_with_enabled(self):
+        # List projects filtered by enabled status
+ self._list_projects_with_params({'enabled': False}, 'enabled')
+
+ @test.attr(type='gate')
+ def test_list_projects_with_name(self):
+ # List projects with name
+ self._list_projects_with_params({'name': self.p1_name}, 'name')
+
+ def _list_projects_with_params(self, params, key):
+ resp, body = self.client.list_projects(params)
+ self.assertIn(self.p1[key], map(lambda x: x[key], body))
+ self.assertNotIn(self.p2[key], map(lambda x: x[key], body))
+
+
+class ListProjectsTestXML(ListProjectsTestJSON):
+ _interface = 'xml'
diff --git a/tempest/api/identity/admin/v3/test_projects.py b/tempest/api/identity/admin/v3/test_projects.py
index 77acd57..5890eab 100644
--- a/tempest/api/identity/admin/v3/test_projects.py
+++ b/tempest/api/identity/admin/v3/test_projects.py
@@ -13,35 +13,14 @@
# License for the specific language governing permissions and limitations
# under the License.
-from six import moves
-
from tempest.api.identity import base
from tempest.common.utils import data_utils
-from tempest import exceptions
from tempest import test
class ProjectsTestJSON(base.BaseIdentityV3AdminTest):
_interface = 'json'
- def _delete_project(self, project_id):
- self.client.delete_project(project_id)
- self.assertRaises(
- exceptions.NotFound, self.client.get_project, project_id)
-
- @test.attr(type='gate')
- def test_project_list_delete(self):
- # Create several projects and delete them
- for _ in moves.xrange(3):
- _, project = self.client.create_project(
- data_utils.rand_name('project-new'))
- self.addCleanup(self._delete_project, project['id'])
-
- _, list_projects = self.client.list_projects()
-
- _, get_project = self.client.get_project(project['id'])
- self.assertIn(get_project, list_projects)
-
@test.attr(type='gate')
def test_project_create_with_description(self):
# Create project with a description
@@ -60,6 +39,21 @@
'to be set')
@test.attr(type='gate')
+ def test_project_create_with_domain(self):
+ # Create project with a domain
+ self.data.setup_test_domain()
+ project_name = data_utils.rand_name('project')
+ resp, project = self.client.create_project(
+ project_name, domain_id=self.data.domain['id'])
+ self.data.projects.append(project)
+ project_id = project['id']
+ self.assertEqual(project_name, project['name'])
+ self.assertEqual(self.data.domain['id'], project['domain_id'])
+ _, body = self.client.get_project(project_id)
+ self.assertEqual(project_name, body['name'])
+ self.assertEqual(self.data.domain['id'], body['domain_id'])
+
+ @test.attr(type='gate')
def test_project_create_enabled(self):
# Create a project that is enabled
project_name = data_utils.rand_name('project-')
diff --git a/tempest/api/identity/admin/v3/test_tokens.py b/tempest/api/identity/admin/v3/test_tokens.py
index e61b738..bd08614 100644
--- a/tempest/api/identity/admin/v3/test_tokens.py
+++ b/tempest/api/identity/admin/v3/test_tokens.py
@@ -48,6 +48,7 @@
self.assertRaises(exceptions.NotFound, self.client.get_token,
subject_token)
+ @test.skip_because(bug="1351026")
@test.attr(type='gate')
def test_rescope_token(self):
"""Rescope a token.
diff --git a/tempest/api/identity/admin/v3/test_trusts.py b/tempest/api/identity/admin/v3/test_trusts.py
index fed5171..1561a6e 100644
--- a/tempest/api/identity/admin/v3/test_trusts.py
+++ b/tempest/api/identity/admin/v3/test_trusts.py
@@ -150,7 +150,6 @@
self.assertNotIn('v3/roles/%s' % self.not_delegated_role_id,
role['links']['self'])
- @test.skip_because(bug='1334368')
def check_trust_roles(self):
# Check we find the delegated role
_, roles_get = self.trustor_client.get_trust_roles(
@@ -164,12 +163,6 @@
_, role_get = self.trustor_client.check_trust_role(
self.trust_id, self.delegated_role_id)
- # This tempest two-step change conflicted with the change
- # moving response checking to the client. This test should be
- # re-enabled by removing the following assert and changing
- # the response code in tempest/services/identity/v3/json/
- # identity_client.py in the check_trust_role_method.
- # self.assertEqual('200', resp['status'])
# And that we don't find not_delegated_role
self.assertRaises(exceptions.NotFound,
diff --git a/tempest/api/identity/admin/v3/test_users.py b/tempest/api/identity/admin/v3/test_users.py
index 558575e..3c25819 100644
--- a/tempest/api/identity/admin/v3/test_users.py
+++ b/tempest/api/identity/admin/v3/test_users.py
@@ -65,6 +65,28 @@
self.assertEqual('false', str(new_user_get['enabled']).lower())
@test.attr(type='gate')
+ def test_update_user_password(self):
+        # Create a user to verify the password update
+ u_name = data_utils.rand_name('user')
+ original_password = data_utils.rand_name('pass')
+ _, user = self.client.create_user(
+ u_name, password=original_password)
+        # Delete the user at the end of the test
+ self.addCleanup(self.client.delete_user, user['id'])
+ # Update user with new password
+ new_password = data_utils.rand_name('pass1')
+ self.client.update_user_password(user['id'], new_password,
+ original_password)
+ resp, body = self.token.auth(user['id'], new_password)
+ self.assertEqual(201, resp.status)
+ subject_token = resp['x-subject-token']
+ # Perform GET Token to verify and confirm password is updated
+ _, token_details = self.client.get_token(subject_token)
+ self.assertEqual(resp['x-subject-token'], subject_token)
+ self.assertEqual(token_details['user']['id'], user['id'])
+ self.assertEqual(token_details['user']['name'], u_name)
+
+ @test.attr(type='gate')
def test_list_user_projects(self):
# List the projects that a user has access upon
assigned_project_ids = list()
diff --git a/tempest/api/image/base.py b/tempest/api/image/base.py
index 02d391b..c875b2f 100644
--- a/tempest/api/image/base.py
+++ b/tempest/api/image/base.py
@@ -100,7 +100,7 @@
cls.alt_tenant_id = cls.alt_img_cli.tenant_id
def _create_image(self):
- image_file = StringIO.StringIO('*' * 1024)
+ image_file = StringIO.StringIO(data_utils.random_bytes())
resp, image = self.create_image(container_format='bare',
disk_format='raw',
is_public=False,
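
data_utils.random_bytes is assumed here to return a string of random byte
values, defaulting to 1024 bytes when no size is given (which keeps the
existing 1024-byte size assertions valid). A rough, illustrative equivalent:

    import random

    def random_bytes(size=1024):
        # Return `size` random byte values as a string (Python 2 style),
        # instead of the old constant '*' * size payload.
        return ''.join(chr(random.randint(0, 255)) for _ in range(size))
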
diff --git a/tempest/api/image/v1/test_images.py b/tempest/api/image/v1/test_images.py
index 8528e42..bf55b89 100644
--- a/tempest/api/image/v1/test_images.py
+++ b/tempest/api/image/v1/test_images.py
@@ -44,7 +44,7 @@
self.assertEqual(val, body.get('properties')[key])
# Now try uploading an image file
- image_file = StringIO.StringIO(('*' * 1024))
+ image_file = StringIO.StringIO(data_utils.random_bytes())
_, body = self.client.update_image(image_id, data=image_file)
self.assertIn('size', body)
self.assertEqual(1024, body.get('size'))
@@ -157,7 +157,7 @@
image. Note that the size of the new image is a random number between
1024 and 4096
"""
- image_file = StringIO.StringIO('*' * size)
+ image_file = StringIO.StringIO(data_utils.random_bytes(size))
name = 'New Standard Image %s' % name
_, image = cls.create_image(name=name,
container_format=container_format,
@@ -338,10 +338,9 @@
disk_format, size):
"""
Create a new standard image and return the ID of the newly-registered
- image. Note that the size of the new image is a random number between
- 1024 and 4096
+ image.
"""
- image_file = StringIO.StringIO('*' * size)
+ image_file = StringIO.StringIO(data_utils.random_bytes(size))
name = 'New Standard Image %s' % name
_, image = cls.create_image(name=name,
container_format=container_format,
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index ae777eb..a974ebb 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -52,7 +52,7 @@
self.assertEqual('queued', body['status'])
# Now try uploading an image file
- file_content = '*' * 1024
+ file_content = data_utils.random_bytes()
image_file = StringIO.StringIO(file_content)
self.client.store_image(image_id, image_file)
@@ -86,7 +86,8 @@
# Verifying deletion
_, images = self.client.image_list()
- self.assertNotIn(image_id, images)
+ images_id = [item['id'] for item in images]
+ self.assertNotIn(image_id, images_id)
@test.attr(type='gate')
def test_update_image(self):
@@ -103,8 +104,7 @@
image_id = body['id']
# Now try uploading an image file
- file_content = '*' * 1024
- image_file = StringIO.StringIO(file_content)
+ image_file = StringIO.StringIO(data_utils.random_bytes())
self.client.store_image(image_id, image_file)
# Update Image
@@ -145,7 +145,8 @@
image. Note that the size of the new image is a random number between
1024 and 4096
"""
- image_file = StringIO.StringIO('*' * random.randint(1024, 4096))
+ size = random.randint(1024, 4096)
+ image_file = StringIO.StringIO(data_utils.random_bytes(size))
name = data_utils.rand_name('image-')
_, body = cls.create_image(name=name,
container_format=container_format,
diff --git a/tempest/api/network/admin/test_quotas.py b/tempest/api/network/admin/test_quotas.py
index d1a8faf..9fa54b1 100644
--- a/tempest/api/network/admin/test_quotas.py
+++ b/tempest/api/network/admin/test_quotas.py
@@ -46,8 +46,7 @@
raise cls.skipException(msg)
cls.identity_admin_client = cls.os_adm.identity_client
- @test.attr(type='gate')
- def test_quotas(self):
+ def _check_quotas(self, new_quotas):
# Add a tenant to conduct the test
test_tenant = data_utils.rand_name('test_tenant_')
test_description = data_utils.rand_name('desc_')
@@ -56,14 +55,15 @@
description=test_description)
tenant_id = tenant['id']
self.addCleanup(self.identity_admin_client.delete_tenant, tenant_id)
+
# Change quotas for tenant
- new_quotas = {'network': 0, 'security_group': 0}
resp, quota_set = self.admin_client.update_quotas(tenant_id,
**new_quotas)
self.assertEqual('200', resp['status'])
self.addCleanup(self.admin_client.reset_quotas, tenant_id)
- self.assertEqual(0, quota_set['network'])
- self.assertEqual(0, quota_set['security_group'])
+ for key, value in new_quotas.iteritems():
+ self.assertEqual(value, quota_set[key])
+
# Confirm our tenant is listed among tenants with non default quotas
resp, non_default_quotas = self.admin_client.list_quotas()
self.assertEqual('200', resp['status'])
@@ -72,12 +72,14 @@
if qs['tenant_id'] == tenant_id:
found = True
self.assertTrue(found)
- # Confirm from APi quotas were changed as requested for tenant
+
+ # Confirm from API quotas were changed as requested for tenant
resp, quota_set = self.admin_client.show_quotas(tenant_id)
quota_set = quota_set['quota']
self.assertEqual('200', resp['status'])
- self.assertEqual(0, quota_set['network'])
- self.assertEqual(0, quota_set['security_group'])
+ for key, value in new_quotas.iteritems():
+ self.assertEqual(value, quota_set[key])
+
# Reset quotas to default and confirm
resp, body = self.admin_client.reset_quotas(tenant_id)
self.assertEqual('204', resp['status'])
@@ -86,49 +88,14 @@
for q in non_default_quotas['quotas']:
self.assertNotEqual(tenant_id, q['tenant_id'])
+ @test.attr(type='gate')
+ def test_quotas(self):
+ new_quotas = {'network': 0, 'security_group': 0}
+ self._check_quotas(new_quotas)
+
@test.requires_ext(extension='lbaas', service='network')
@test.attr(type='gate')
def test_lbaas_quotas(self):
- # Add a tenant to conduct the test
- test_tenant = data_utils.rand_name('test_tenant_')
- test_description = data_utils.rand_name('desc_')
- _, tenant = self.identity_admin_client.create_tenant(
- name=test_tenant,
- description=test_description)
- tenant_id = tenant['id']
- self.addCleanup(self.identity_admin_client.delete_tenant, tenant_id)
- # Change lbaas quotas for tenant
new_quotas = {'vip': 1, 'pool': 2,
'member': 3, 'health_monitor': 4}
-
- resp, quota_set = self.admin_client.update_quotas(tenant_id,
- **new_quotas)
- self.assertEqual('200', resp['status'])
- self.addCleanup(self.admin_client.reset_quotas, tenant_id)
- self.assertEqual(1, quota_set['vip'])
- self.assertEqual(2, quota_set['pool'])
- self.assertEqual(3, quota_set['member'])
- self.assertEqual(4, quota_set['health_monitor'])
- # Confirm our tenant is listed among tenants with non default quotas
- resp, non_default_quotas = self.admin_client.list_quotas()
- self.assertEqual('200', resp['status'])
- found = False
- for qs in non_default_quotas['quotas']:
- if qs['tenant_id'] == tenant_id:
- found = True
- self.assertTrue(found)
- # Confirm from APi quotas were changed as requested for tenant
- resp, quota_set = self.admin_client.show_quotas(tenant_id)
- quota_set = quota_set['quota']
- self.assertEqual('200', resp['status'])
- self.assertEqual(1, quota_set['vip'])
- self.assertEqual(2, quota_set['pool'])
- self.assertEqual(3, quota_set['member'])
- self.assertEqual(4, quota_set['health_monitor'])
- # Reset quotas to default and confirm
- resp, body = self.admin_client.reset_quotas(tenant_id)
- self.assertEqual('204', resp['status'])
- resp, non_default_quotas = self.admin_client.list_quotas()
- self.assertEqual('200', resp['status'])
- for q in non_default_quotas['quotas']:
- self.assertNotEqual(tenant_id, q['tenant_id'])
+ self._check_quotas(new_quotas)
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index 087b87a..d75339c 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -85,57 +85,58 @@
@classmethod
def tearDownClass(cls):
- # Clean up ipsec policies
- for ipsecpolicy in cls.ipsecpolicies:
- cls.client.delete_ipsecpolicy(ipsecpolicy['id'])
- # Clean up firewall policies
- for fw_policy in cls.fw_policies:
- cls.client.delete_firewall_policy(fw_policy['id'])
- # Clean up firewall rules
- for fw_rule in cls.fw_rules:
- cls.client.delete_firewall_rule(fw_rule['id'])
- # Clean up ike policies
- for ikepolicy in cls.ikepolicies:
- cls.client.delete_ikepolicy(ikepolicy['id'])
- # Clean up vpn services
- for vpnservice in cls.vpnservices:
- cls.client.delete_vpnservice(vpnservice['id'])
- # Clean up floating IPs
- for floating_ip in cls.floating_ips:
- cls.client.delete_floatingip(floating_ip['id'])
- # Clean up routers
- for router in cls.routers:
- cls.delete_router(router)
+ if CONF.service_available.neutron:
+ # Clean up ipsec policies
+ for ipsecpolicy in cls.ipsecpolicies:
+ cls.client.delete_ipsecpolicy(ipsecpolicy['id'])
+ # Clean up firewall policies
+ for fw_policy in cls.fw_policies:
+ cls.client.delete_firewall_policy(fw_policy['id'])
+ # Clean up firewall rules
+ for fw_rule in cls.fw_rules:
+ cls.client.delete_firewall_rule(fw_rule['id'])
+ # Clean up ike policies
+ for ikepolicy in cls.ikepolicies:
+ cls.client.delete_ikepolicy(ikepolicy['id'])
+ # Clean up vpn services
+ for vpnservice in cls.vpnservices:
+ cls.client.delete_vpnservice(vpnservice['id'])
+ # Clean up floating IPs
+ for floating_ip in cls.floating_ips:
+ cls.client.delete_floatingip(floating_ip['id'])
+ # Clean up routers
+ for router in cls.routers:
+ cls.delete_router(router)
- # Clean up health monitors
- for health_monitor in cls.health_monitors:
- cls.client.delete_health_monitor(health_monitor['id'])
- # Clean up members
- for member in cls.members:
- cls.client.delete_member(member['id'])
- # Clean up vips
- for vip in cls.vips:
- cls.client.delete_vip(vip['id'])
- # Clean up pools
- for pool in cls.pools:
- cls.client.delete_pool(pool['id'])
- # Clean up metering label rules
- for metering_label_rule in cls.metering_label_rules:
- cls.admin_client.delete_metering_label_rule(
- metering_label_rule['id'])
- # Clean up metering labels
- for metering_label in cls.metering_labels:
- cls.admin_client.delete_metering_label(metering_label['id'])
- # Clean up ports
- for port in cls.ports:
- cls.client.delete_port(port['id'])
- # Clean up subnets
- for subnet in cls.subnets:
- cls.client.delete_subnet(subnet['id'])
- # Clean up networks
- for network in cls.networks:
- cls.client.delete_network(network['id'])
- cls.clear_isolated_creds()
+ # Clean up health monitors
+ for health_monitor in cls.health_monitors:
+ cls.client.delete_health_monitor(health_monitor['id'])
+ # Clean up members
+ for member in cls.members:
+ cls.client.delete_member(member['id'])
+ # Clean up vips
+ for vip in cls.vips:
+ cls.client.delete_vip(vip['id'])
+ # Clean up pools
+ for pool in cls.pools:
+ cls.client.delete_pool(pool['id'])
+ # Clean up metering label rules
+ for metering_label_rule in cls.metering_label_rules:
+ cls.admin_client.delete_metering_label_rule(
+ metering_label_rule['id'])
+ # Clean up metering labels
+ for metering_label in cls.metering_labels:
+ cls.admin_client.delete_metering_label(metering_label['id'])
+ # Clean up ports
+ for port in cls.ports:
+ cls.client.delete_port(port['id'])
+ # Clean up subnets
+ for subnet in cls.subnets:
+ cls.client.delete_subnet(subnet['id'])
+ # Clean up networks
+ for network in cls.networks:
+ cls.client.delete_network(network['id'])
+ cls.clear_isolated_creds()
super(BaseNetworkTest, cls).tearDownClass()
@classmethod
diff --git a/tempest/api/object_storage/test_object_services.py b/tempest/api/object_storage/test_object_services.py
index 1ef9aa1..b21aa44 100644
--- a/tempest/api/object_storage/test_object_services.py
+++ b/tempest/api/object_storage/test_object_services.py
@@ -17,7 +17,7 @@
import hashlib
import random
import re
-from six import moves
+import six
import time
import zlib
@@ -54,15 +54,36 @@
object_name = data_utils.rand_name(name='LObject')
data = data_utils.arbitrary_string()
segments = 10
- data_segments = [data + str(i) for i in moves.xrange(segments)]
+ data_segments = [data + str(i) for i in six.moves.xrange(segments)]
# uploading segments
- for i in moves.xrange(segments):
+ for i in six.moves.xrange(segments):
resp, _ = self.object_client.create_object_segments(
self.container_name, object_name, i, data_segments[i])
self.assertEqual(resp['status'], '201')
return object_name, data_segments
+ def _copy_object_2d(self, src_object_name, metadata=None):
+ dst_object_name = data_utils.rand_name(name='TestObject')
+ resp, _ = self.object_client.copy_object_2d_way(self.container_name,
+ src_object_name,
+ dst_object_name,
+ metadata=metadata)
+ return dst_object_name, resp
+
+ def _check_copied_obj(self, dst_object_name, src_body,
+ in_meta=None, not_in_meta=None):
+ resp, dest_body = self.object_client.get_object(self.container_name,
+ dst_object_name)
+
+ self.assertEqual(src_body, dest_body)
+ if in_meta:
+ for meta_key in in_meta:
+ self.assertIn('x-object-meta-' + meta_key, resp)
+ if not_in_meta:
+ for meta_key in not_in_meta:
+ self.assertNotIn('x-object-meta-' + meta_key, resp)
+
@test.attr(type='gate')
def test_create_object(self):
# create object
@@ -765,10 +786,7 @@
# change the content type of an existing object
# create object
- object_name = data_utils.rand_name(name='TestObject')
- data = data_utils.arbitrary_string()
- self.object_client.create_object(self.container_name,
- object_name, data)
+ object_name, data = self._create_object()
# get the old content type
resp_tmp, _ = self.object_client.list_object_metadata(
self.container_name, object_name)
@@ -805,20 +823,12 @@
dst_object_name)
self.assertEqual(resp['status'], '201')
self.assertHeaders(resp, 'Object', 'COPY')
-
- self.assertIn('last-modified', resp)
- self.assertIn('x-copied-from', resp)
- self.assertIn('x-copied-from-last-modified', resp)
- self.assertNotEqual(len(resp['last-modified']), 0)
self.assertEqual(
resp['x-copied-from'],
self.container_name + "/" + src_object_name)
- self.assertNotEqual(len(resp['x-copied-from-last-modified']), 0)
# check data
- resp, body = self.object_client.get_object(self.container_name,
- dst_object_name)
- self.assertEqual(body, src_data)
+ self._check_copied_obj(dst_object_name, src_data)
@test.attr(type='smoke')
def test_copy_object_across_containers(self):
@@ -862,15 +872,82 @@
self.assertIn(actual_meta_key, resp)
self.assertEqual(resp[actual_meta_key], meta_value)
+ @test.attr(type='smoke')
+ def test_copy_object_with_x_fresh_metadata(self):
+ # create source object
+ metadata = {'x-object-meta-src': 'src_value'}
+ src_object_name, data = self._create_object(metadata)
+
+ # copy source object with x_fresh_metadata header
+ metadata = {'X-Fresh-Metadata': 'true'}
+ dst_object_name, resp = self._copy_object_2d(src_object_name,
+ metadata)
+
+ self.assertEqual(resp['status'], '201')
+ self.assertHeaders(resp, 'Object', 'COPY')
+
+ self.assertNotIn('x-object-meta-src', resp)
+ self.assertEqual(resp['x-copied-from'],
+ self.container_name + "/" + src_object_name)
+
+ # check that destination object does NOT have any object-meta
+ self._check_copied_obj(dst_object_name, data, not_in_meta=["src"])
+
+ @test.attr(type='smoke')
+ def test_copy_object_with_x_object_metakey(self):
+ # create source object
+ metadata = {'x-object-meta-src': 'src_value'}
+ src_obj_name, data = self._create_object(metadata)
+
+ # copy source object to destination with x-object-meta-key
+ metadata = {'x-object-meta-test': ''}
+ dst_obj_name, resp = self._copy_object_2d(src_obj_name, metadata)
+
+ self.assertEqual(resp['status'], '201')
+ self.assertHeaders(resp, 'Object', 'COPY')
+
+ expected = {'x-object-meta-test': '',
+ 'x-object-meta-src': 'src_value',
+ 'x-copied-from': self.container_name + "/" + src_obj_name}
+ for key, value in six.iteritems(expected):
+ self.assertIn(key, resp)
+ self.assertEqual(value, resp[key])
+
+ # check destination object
+ self._check_copied_obj(dst_obj_name, data, in_meta=["test", "src"])
+
+ @test.attr(type='smoke')
+ def test_copy_object_with_x_object_meta(self):
+ # create source object
+ metadata = {'x-object-meta-src': 'src_value'}
+ src_obj_name, data = self._create_object(metadata)
+
+ # copy source object to destination with object metadata
+ metadata = {'x-object-meta-test': 'value'}
+ dst_obj_name, resp = self._copy_object_2d(src_obj_name, metadata)
+
+ self.assertEqual(resp['status'], '201')
+ self.assertHeaders(resp, 'Object', 'COPY')
+
+ expected = {'x-object-meta-test': 'value',
+ 'x-object-meta-src': 'src_value',
+ 'x-copied-from': self.container_name + "/" + src_obj_name}
+ for key, value in six.iteritems(expected):
+ self.assertIn(key, resp)
+ self.assertEqual(value, resp[key])
+
+ # check destination object
+ self._check_copied_obj(dst_obj_name, data, in_meta=["test", "src"])
+
@test.attr(type='gate')
def test_object_upload_in_segments(self):
# create object
object_name = data_utils.rand_name(name='LObject')
data = data_utils.arbitrary_string()
segments = 10
- data_segments = [data + str(i) for i in moves.xrange(segments)]
+ data_segments = [data + str(i) for i in six.moves.xrange(segments)]
# uploading segments
- for i in moves.xrange(segments):
+ for i in six.moves.xrange(segments):
resp, _ = self.object_client.create_object_segments(
self.container_name, object_name, i, data_segments[i])
self.assertEqual(resp['status'], '201')
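
A minimal standalone sketch of the metadata-header check that the new _check_copied_obj helper performs on a COPY response; the function name and the sample headers below are illustrative only:

def check_copied_headers(resp, in_meta=(), not_in_meta=()):
    # resp is a dict of lower-cased response headers
    for key in in_meta:
        assert 'x-object-meta-' + key in resp, key
    for key in not_in_meta:
        assert 'x-object-meta-' + key not in resp, key

# A copy made with 'X-Fresh-Metadata: true' should drop the source metadata.
check_copied_headers({'x-copied-from': 'cont/obj'}, not_in_meta=['src'])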
diff --git a/tempest/api/orchestration/base.py b/tempest/api/orchestration/base.py
index 446f4ab..531df2d 100644
--- a/tempest/api/orchestration/base.py
+++ b/tempest/api/orchestration/base.py
@@ -11,6 +11,7 @@
# under the License.
import os.path
+import yaml
from tempest import clients
from tempest.common.utils import data_utils
@@ -84,11 +85,8 @@
pass
for stack_identifier in cls.stacks:
- try:
- cls.client.wait_for_stack_status(
- stack_identifier, 'DELETE_COMPLETE')
- except exceptions.NotFound:
- pass
+ cls.client.wait_for_stack_status(
+ stack_identifier, 'DELETE_COMPLETE')
@classmethod
def _create_keypair(cls, name_start='keypair-heat-'):
@@ -125,7 +123,7 @@
pass
@classmethod
- def load_template(cls, name, ext='yaml'):
+ def read_template(cls, name, ext='yaml'):
loc = ["stacks", "templates", "%s.%s" % (name, ext)]
fullpath = os.path.join(os.path.dirname(__file__), *loc)
@@ -134,6 +132,14 @@
return content
@classmethod
+ def load_template(cls, name, ext='yaml'):
+ loc = ["stacks", "templates", "%s.%s" % (name, ext)]
+ fullpath = os.path.join(os.path.dirname(__file__), *loc)
+
+ with open(fullpath, "r") as f:
+ return yaml.safe_load(f)
+
+ @classmethod
def tearDownClass(cls):
cls._clear_stacks()
cls._clear_keypairs()
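
The split above leaves read_template() returning the raw template text (what stack creation consumes) and load_template() returning the parsed YAML (what assertions consume). A hedged, self-contained sketch with an inline template instead of a file on disk:

import yaml

TEMPLATE = """
parameters:
  random_length:
    type: number
    default: 10
"""

def read_template(text=TEMPLATE):
    return text                  # raw string, passed to stack create

def load_template(text=TEMPLATE):
    return yaml.safe_load(text)  # dict, used for expected-value lookups

assert load_template()['parameters']['random_length']['default'] == 10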
diff --git a/tempest/api/orchestration/stacks/test_environment.py b/tempest/api/orchestration/stacks/test_environment.py
index 3911e72..96e1c50 100644
--- a/tempest/api/orchestration/stacks/test_environment.py
+++ b/tempest/api/orchestration/stacks/test_environment.py
@@ -28,7 +28,7 @@
def test_environment_parameter(self):
"""Test passing a stack parameter via the environment."""
stack_name = data_utils.rand_name('heat')
- template = self.load_template('random_string')
+ template = self.read_template('random_string')
environment = {'parameters': {'random_length': 20}}
stack_identifier = self.create_stack(stack_name, template,
@@ -56,7 +56,7 @@
'''
environment = {'resource_registry':
{'My:Random::String': 'my_random.yaml'}}
- files = {'my_random.yaml': self.load_template('random_string')}
+ files = {'my_random.yaml': self.read_template('random_string')}
stack_identifier = self.create_stack(stack_name, template,
environment=environment,
@@ -65,7 +65,10 @@
# random_string.yaml specifies a length of 10
random_value = self.get_stack_output(stack_identifier, 'random_value')
- self.assertEqual(10, len(random_value))
+ random_string_template = self.load_template('random_string')
+ expected_length = random_string_template['parameters'][
+ 'random_length']['default']
+ self.assertEqual(expected_length, len(random_value))
@test.attr(type='gate')
def test_files_provider_resource(self):
@@ -82,7 +85,7 @@
random_value:
value: {get_attr: [random, random_value]}
'''
- files = {'my_random.yaml': self.load_template('random_string')}
+ files = {'my_random.yaml': self.read_template('random_string')}
stack_identifier = self.create_stack(stack_name, template,
files=files)
@@ -90,4 +93,7 @@
# random_string.yaml specifies a length of 10
random_value = self.get_stack_output(stack_identifier, 'random_value')
- self.assertEqual(10, len(random_value))
+ random_string_template = self.load_template('random_string')
+ expected_length = random_string_template['parameters'][
+ 'random_length']['default']
+ self.assertEqual(expected_length, len(random_value))
diff --git a/tempest/api/orchestration/stacks/test_neutron_resources.py b/tempest/api/orchestration/stacks/test_neutron_resources.py
index e92b945..27c6196 100644
--- a/tempest/api/orchestration/stacks/test_neutron_resources.py
+++ b/tempest/api/orchestration/stacks/test_neutron_resources.py
@@ -39,7 +39,7 @@
raise cls.skipException("Neutron support is required")
cls.network_client = os.network_client
cls.stack_name = data_utils.rand_name('heat')
- template = cls.load_template('neutron_basic')
+ template = cls.read_template('neutron_basic')
cls.keypair_name = (CONF.orchestration.keypair_name or
cls._create_keypair()['name'])
cls.external_network_id = CONF.network.public_network_id
diff --git a/tempest/api/orchestration/stacks/test_non_empty_stack.py b/tempest/api/orchestration/stacks/test_non_empty_stack.py
index 585c90b..a97c561 100644
--- a/tempest/api/orchestration/stacks/test_non_empty_stack.py
+++ b/tempest/api/orchestration/stacks/test_non_empty_stack.py
@@ -28,7 +28,7 @@
def setUpClass(cls):
super(StacksTestJSON, cls).setUpClass()
cls.stack_name = data_utils.rand_name('heat')
- template = cls.load_template('non_empty_stack')
+ template = cls.read_template('non_empty_stack')
image_id = (CONF.orchestration.image_ref or
cls._create_image()['id'])
# create the stack
diff --git a/tempest/api/orchestration/stacks/test_nova_keypair_resources.py b/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
index a81a540..e22a08b 100644
--- a/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
+++ b/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
@@ -23,12 +23,14 @@
class NovaKeyPairResourcesYAMLTest(base.BaseOrchestrationTest):
_tpl_type = 'yaml'
+ _resource = 'resources'
+ _type = 'type'
@classmethod
def setUpClass(cls):
super(NovaKeyPairResourcesYAMLTest, cls).setUpClass()
cls.stack_name = data_utils.rand_name('heat')
- template = cls.load_template('nova_keypair', ext=cls._tpl_type)
+ template = cls.read_template('nova_keypair', ext=cls._tpl_type)
# create the stack, avoid any duplicated key.
cls.stack_identifier = cls.create_stack(
@@ -49,8 +51,15 @@
@test.attr(type='slow')
def test_created_resources(self):
"""Verifies created keypair resource."""
- resources = [('KeyPairSavePrivate', 'OS::Nova::KeyPair'),
- ('KeyPairDontSavePrivate', 'OS::Nova::KeyPair')]
+
+ nova_keypair_template = self.load_template('nova_keypair',
+ ext=self._tpl_type)
+ resources = [('KeyPairSavePrivate',
+ nova_keypair_template[self._resource][
+ 'KeyPairSavePrivate'][self._type]),
+ ('KeyPairDontSavePrivate',
+ nova_keypair_template[self._resource][
+ 'KeyPairDontSavePrivate'][self._type])]
for resource_name, resource_type in resources:
resource = self.test_resources.get(resource_name, None)
@@ -85,3 +94,5 @@
class NovaKeyPairResourcesAWSTest(NovaKeyPairResourcesYAMLTest):
_tpl_type = 'json'
+ _resource = 'Resources'
+ _type = 'Type'
diff --git a/tempest/api/orchestration/stacks/test_stacks.py b/tempest/api/orchestration/stacks/test_stacks.py
index 5b45d82..d5e66e8 100644
--- a/tempest/api/orchestration/stacks/test_stacks.py
+++ b/tempest/api/orchestration/stacks/test_stacks.py
@@ -64,3 +64,4 @@
# delete the stack
resp = self.client.delete_stack(stack_identifier)
self.assertEqual('204', resp[0]['status'])
+ self.client.wait_for_stack_status(stack_identifier, 'DELETE_COMPLETE')
diff --git a/tempest/api/orchestration/stacks/test_swift_resources.py b/tempest/api/orchestration/stacks/test_swift_resources.py
index 6d53fb2..adab8c3 100644
--- a/tempest/api/orchestration/stacks/test_swift_resources.py
+++ b/tempest/api/orchestration/stacks/test_swift_resources.py
@@ -30,7 +30,7 @@
def setUpClass(cls):
super(SwiftResourcesTestJSON, cls).setUpClass()
cls.stack_name = data_utils.rand_name('heat')
- template = cls.load_template('swift_basic')
+ template = cls.read_template('swift_basic')
os = clients.Manager()
if not CONF.service_available.swift:
raise cls.skipException("Swift support is required")
@@ -49,8 +49,11 @@
def test_created_resources(self):
"""Created stack should be in the list of existing stacks."""
- resources = [('SwiftContainer', 'OS::Swift::Container'),
- ('SwiftContainerWebsite', 'OS::Swift::Container')]
+ swift_basic_template = self.load_template('swift_basic')
+ resources = [('SwiftContainer', swift_basic_template['resources'][
+ 'SwiftContainer']['type']),
+ ('SwiftContainerWebsite', swift_basic_template[
+ 'resources']['SwiftContainerWebsite']['type'])]
for resource_name, resource_type in resources:
resource = self.test_resources.get(resource_name)
self.assertIsInstance(resource, dict)
@@ -84,10 +87,9 @@
self.assertIn(h, headers)
def test_metadata(self):
- metadatas = {
- "web-index": "index.html",
- "web-error": "error.html"
- }
+ swift_basic_template = self.load_template('swift_basic')
+ metadatas = swift_basic_template['resources']['SwiftContainerWebsite'][
+ 'properties']['X-Container-Meta']
swcont_website = self.test_resources.get(
'SwiftContainerWebsite')['physical_resource_id']
headers, _ = self.container_client.list_container_metadata(
diff --git a/tempest/api/orchestration/stacks/test_volumes.py b/tempest/api/orchestration/stacks/test_volumes.py
index 5ac2a8d..d422752 100644
--- a/tempest/api/orchestration/stacks/test_volumes.py
+++ b/tempest/api/orchestration/stacks/test_volumes.py
@@ -31,43 +31,44 @@
if not CONF.service_available.cinder:
raise cls.skipException('Cinder support is required')
- def _cinder_verify(self, volume_id):
+ def _cinder_verify(self, volume_id, template):
self.assertIsNotNone(volume_id)
resp, volume = self.volumes_client.get_volume(volume_id)
self.assertEqual(200, resp.status)
self.assertEqual('available', volume.get('status'))
- self.assertEqual(1, volume.get('size'))
- self.assertEqual('a descriptive description',
- volume.get('display_description'))
- self.assertEqual('volume_name',
- volume.get('display_name'))
+ self.assertEqual(template['resources']['volume']['properties'][
+ 'size'], volume.get('size'))
+ self.assertEqual(template['resources']['volume']['properties'][
+ 'description'], volume.get('display_description'))
+ self.assertEqual(template['resources']['volume']['properties'][
+ 'name'], volume.get('display_name'))
- def _outputs_verify(self, stack_identifier):
+ def _outputs_verify(self, stack_identifier, template):
self.assertEqual('available',
self.get_stack_output(stack_identifier, 'status'))
- self.assertEqual('1',
- self.get_stack_output(stack_identifier, 'size'))
- self.assertEqual('a descriptive description',
- self.get_stack_output(stack_identifier,
- 'display_description'))
- self.assertEqual('volume_name',
- self.get_stack_output(stack_identifier,
- 'display_name'))
+ self.assertEqual(str(template['resources']['volume']['properties'][
+ 'size']), self.get_stack_output(stack_identifier, 'size'))
+ self.assertEqual(template['resources']['volume']['properties'][
+ 'description'], self.get_stack_output(stack_identifier,
+ 'display_description'))
+ self.assertEqual(template['resources']['volume']['properties'][
+ 'name'], self.get_stack_output(stack_identifier, 'display_name'))
@test.attr(type='gate')
def test_cinder_volume_create_delete(self):
"""Create and delete a volume via OS::Cinder::Volume."""
stack_name = data_utils.rand_name('heat')
- template = self.load_template('cinder_basic')
+ template = self.read_template('cinder_basic')
stack_identifier = self.create_stack(stack_name, template)
self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
# Verify with cinder that the volume exists, with matching details
volume_id = self.get_stack_output(stack_identifier, 'volume_id')
- self._cinder_verify(volume_id)
+ cinder_basic_template = self.load_template('cinder_basic')
+ self._cinder_verify(volume_id, cinder_basic_template)
# Verify the stack outputs are as expected
- self._outputs_verify(stack_identifier)
+ self._outputs_verify(stack_identifier, cinder_basic_template)
# Delete the stack and ensure the volume is gone
self.client.delete_stack(stack_identifier)
@@ -86,21 +87,22 @@
def test_cinder_volume_create_delete_retain(self):
"""Ensure the 'Retain' deletion policy is respected."""
stack_name = data_utils.rand_name('heat')
- template = self.load_template('cinder_basic_delete_retain')
+ template = self.read_template('cinder_basic_delete_retain')
stack_identifier = self.create_stack(stack_name, template)
self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
# Verify with cinder that the volume exists, with matching details
volume_id = self.get_stack_output(stack_identifier, 'volume_id')
self.addCleanup(self._cleanup_volume, volume_id)
- self._cinder_verify(volume_id)
+ retain_template = self.load_template('cinder_basic_delete_retain')
+ self._cinder_verify(volume_id, retain_template)
# Verify the stack outputs are as expected
- self._outputs_verify(stack_identifier)
+ self._outputs_verify(stack_identifier, retain_template)
# Delete the stack and ensure the volume is *not* gone
self.client.delete_stack(stack_identifier)
self.client.wait_for_stack_status(stack_identifier, 'DELETE_COMPLETE')
- self._cinder_verify(volume_id)
+ self._cinder_verify(volume_id, retain_template)
# Volume cleanup happens via addCleanup calling _cleanup_volume
diff --git a/tempest/api/telemetry/base.py b/tempest/api/telemetry/base.py
index 2b422fd..b5b2bb1 100644
--- a/tempest/api/telemetry/base.py
+++ b/tempest/api/telemetry/base.py
@@ -34,19 +34,27 @@
cls.telemetry_client = os.telemetry_client
cls.servers_client = os.servers_client
cls.flavors_client = os.flavors_client
+ cls.image_client = os.image_client
+ cls.image_client_v2 = os.image_client_v2
cls.nova_notifications = ['memory', 'vcpus', 'disk.root.size',
'disk.ephemeral.size']
+
+ cls.glance_notifications = ['image.update', 'image.upload',
+ 'image.delete']
+
+ cls.glance_v2_notifications = ['image.download', 'image.serve']
+
cls.server_ids = []
cls.alarm_ids = []
+ cls.image_ids = []
@classmethod
def create_alarm(cls, **kwargs):
resp, body = cls.telemetry_client.create_alarm(
name=data_utils.rand_name('telemetry_alarm'),
type='threshold', **kwargs)
- if resp['status'] == '201':
- cls.alarm_ids.append(body['alarm_id'])
+ cls.alarm_ids.append(body['alarm_id'])
return resp, body
@classmethod
@@ -55,8 +63,15 @@
data_utils.rand_name('ceilometer-instance'),
CONF.compute.image_ref, CONF.compute.flavor_ref,
wait_until='ACTIVE')
- if resp['status'] == '202':
- cls.server_ids.append(body['id'])
+ cls.server_ids.append(body['id'])
+ return resp, body
+
+ @classmethod
+ def create_image(cls, client):
+ resp, body = client.create_image(
+ data_utils.rand_name('image'), container_format='bare',
+ disk_format='raw', visibility='private')
+ cls.image_ids.append(body['id'])
return resp, body
@staticmethod
@@ -71,6 +86,7 @@
def tearDownClass(cls):
cls.cleanup_resources(cls.telemetry_client.delete_alarm, cls.alarm_ids)
cls.cleanup_resources(cls.servers_client.delete_server, cls.server_ids)
+ cls.cleanup_resources(cls.image_client.delete_image, cls.image_ids)
cls.clear_isolated_creds()
super(BaseTelemetryTest, cls).tearDownClass()
diff --git a/tempest/api/telemetry/test_telemetry_notification_api.py b/tempest/api/telemetry/test_telemetry_notification_api.py
index 148f5a3..2a170c7 100644
--- a/tempest/api/telemetry/test_telemetry_notification_api.py
+++ b/tempest/api/telemetry/test_telemetry_notification_api.py
@@ -32,6 +32,7 @@
@test.attr(type="gate")
@testtools.skipIf(not CONF.service_available.nova,
"Nova is not available.")
+ @test.skip_because(bug="1336755")
def test_check_nova_notification(self):
resp, body = self.create_server()
@@ -42,6 +43,36 @@
for metric in self.nova_notifications:
self.await_samples(metric, query)
+ @test.attr(type="smoke")
+ @test.services("image")
+ @testtools.skipIf(not CONF.image_feature_enabled.api_v1,
+ "Glance api v1 is disabled")
+ def test_check_glance_v1_notifications(self):
+ _, body = self.create_image(self.image_client)
+ self.image_client.update_image(body['id'], data='data')
+
+ query = 'resource', 'eq', body['id']
+
+ self.image_client.delete_image(body['id'])
+
+ for metric in self.glance_notifications:
+ self.await_samples(metric, query)
+
+ @test.attr(type="smoke")
+ @test.services("image")
+ @testtools.skipIf(not CONF.image_feature_enabled.api_v2,
+ "Glance api v2 is disabled")
+ def test_check_glance_v2_notifications(self):
+ _, body = self.create_image(self.image_client_v2)
+
+ self.image_client_v2.store_image(body['id'], "file")
+ self.image_client_v2.get_image_file(body['id'])
+
+ query = 'resource', 'eq', body['id']
+
+ for metric in self.glance_v2_notifications:
+ self.await_samples(metric, query)
+
class TelemetryNotificationAPITestXML(TelemetryNotificationAPITestJSON):
_interface = 'xml'
diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py
index e79d23c..d451517 100644
--- a/tempest/api/volume/admin/test_multi_backend.py
+++ b/tempest/api/volume/admin/test_multi_backend.py
@@ -36,42 +36,55 @@
cls.volume_client = cls.os_adm.volumes_client
cls.volume_type_id_list = []
- cls.volume_id_list = []
+ cls.volume_id_list_with_prefix = []
+ cls.volume_id_list_without_prefix = []
- # Volume/Type creation (uses backend1_name)
- type1_name = data_utils.rand_name('Type-')
- vol1_name = data_utils.rand_name('Volume-')
- extra_specs1 = {"volume_backend_name": cls.backend1_name}
- resp, cls.type1 = cls.client.create_volume_type(
- type1_name, extra_specs=extra_specs1)
- cls.volume_type_id_list.append(cls.type1['id'])
-
- resp, cls.volume1 = cls.volume_client.create_volume(
- size=1, display_name=vol1_name, volume_type=type1_name)
- cls.volume_id_list.append(cls.volume1['id'])
- cls.volume_client.wait_for_volume_status(cls.volume1['id'],
- 'available')
+ # Volume/Type creation (uses volume_backend_name)
+ cls._create_type_and_volume(cls.backend1_name, False)
+ # Volume/Type creation (uses capabilities:volume_backend_name)
+ cls._create_type_and_volume(cls.backend1_name, True)
if cls.backend1_name != cls.backend2_name:
# Volume/Type creation (uses backend2_name)
- type2_name = data_utils.rand_name('Type-')
- vol2_name = data_utils.rand_name('Volume-')
- extra_specs2 = {"volume_backend_name": cls.backend2_name}
- resp, cls.type2 = cls.client.create_volume_type(
- type2_name, extra_specs=extra_specs2)
- cls.volume_type_id_list.append(cls.type2['id'])
+ cls._create_type_and_volume(cls.backend2_name, False)
+ # Volume/Type creation (uses capabilities:volume_backend_name)
+ cls._create_type_and_volume(cls.backend2_name, True)
- resp, cls.volume2 = cls.volume_client.create_volume(
- size=1, display_name=vol2_name, volume_type=type2_name)
- cls.volume_id_list.append(cls.volume2['id'])
- cls.volume_client.wait_for_volume_status(cls.volume2['id'],
- 'available')
+ @classmethod
+ def _create_type_and_volume(self, backend_name_key, with_prefix):
+ # Volume/Type creation
+ type_name = data_utils.rand_name('Type')
+ vol_name = data_utils.rand_name('Volume')
+ spec_key_with_prefix = "capabilities:volume_backend_name"
+ spec_key_without_prefix = "volume_backend_name"
+ if with_prefix:
+ extra_specs = {spec_key_with_prefix: backend_name_key}
+ else:
+ extra_specs = {spec_key_without_prefix: backend_name_key}
+ resp, self.type = self.client.create_volume_type(
+ type_name, extra_specs=extra_specs)
+ self.volume_type_id_list.append(self.type['id'])
+
+ resp, self.volume = self.volume_client.create_volume(
+ size=1, display_name=vol_name, volume_type=type_name)
+ self.volume_client.wait_for_volume_status(
+ self.volume['id'], 'available')
+ if with_prefix:
+ self.volume_id_list_with_prefix.append(self.volume['id'])
+ else:
+ self.volume_id_list_without_prefix.append(
+ self.volume['id'])
@classmethod
def tearDownClass(cls):
# volumes deletion
- volume_id_list = getattr(cls, 'volume_id_list', [])
- for volume_id in volume_id_list:
+ vid_prefix = getattr(cls, 'volume_id_list_with_prefix', [])
+ for volume_id in vid_prefix:
+ cls.volume_client.delete_volume(volume_id)
+ cls.volume_client.wait_for_resource_deletion(volume_id)
+
+ vid_no_pre = getattr(cls, 'volume_id_list_without_prefix', [])
+ for volume_id in vid_no_pre:
cls.volume_client.delete_volume(volume_id)
cls.volume_client.wait_for_resource_deletion(volume_id)
@@ -84,32 +97,57 @@
@test.attr(type='smoke')
def test_backend_name_reporting(self):
+        # get a volume id created by the type without the prefix
+ volume_id = self.volume_id_list_without_prefix[0]
+ self._test_backend_name_reporting_by_volume_id(volume_id)
+
+ @test.attr(type='smoke')
+ def test_backend_name_reporting_with_prefix(self):
+        # get a volume id created by the type with the prefix
+ volume_id = self.volume_id_list_with_prefix[0]
+ self._test_backend_name_reporting_by_volume_id(volume_id)
+
+ @test.attr(type='gate')
+ def test_backend_name_distinction(self):
+ if self.backend1_name == self.backend2_name:
+ raise self.skipException("backends configured with same name")
+        # get volume ids created by the type without the prefix
+ volume1_id = self.volume_id_list_without_prefix[0]
+ volume2_id = self.volume_id_list_without_prefix[1]
+ self._test_backend_name_distinction(volume1_id, volume2_id)
+
+ @test.attr(type='gate')
+ def test_backend_name_distinction_with_prefix(self):
+ if self.backend1_name == self.backend2_name:
+ raise self.skipException("backends configured with same name")
+        # get volume ids created by the type with the prefix
+ volume1_id = self.volume_id_list_with_prefix[0]
+ volume2_id = self.volume_id_list_with_prefix[1]
+ self._test_backend_name_distinction(volume1_id, volume2_id)
+
+ def _test_backend_name_reporting_by_volume_id(self, volume_id):
# this test checks if os-vol-attr:host is populated correctly after
# the multi backend feature has been enabled
# if multi-backend is enabled: os-vol-attr:host should be like:
# host@backend_name
- resp, volume = self.volume_client.get_volume(self.volume1['id'])
+ resp, volume = self.volume_client.get_volume(volume_id)
self.assertEqual(200, resp.status)
volume1_host = volume['os-vol-host-attr:host']
msg = ("multi-backend reporting incorrect values for volume %s" %
- self.volume1['id'])
+ volume_id)
self.assertTrue(len(volume1_host.split("@")) > 1, msg)
- @test.attr(type='gate')
- def test_backend_name_distinction(self):
+ def _test_backend_name_distinction(self, volume1_id, volume2_id):
# this test checks that the two volumes created at setUp don't
        # belong to the same backend (if they are, then the
# volume backend distinction is not working properly)
- if self.backend1_name == self.backend2_name:
- raise self.skipException("backends configured with same name")
-
- resp, volume = self.volume_client.get_volume(self.volume1['id'])
+ resp, volume = self.volume_client.get_volume(volume1_id)
volume1_host = volume['os-vol-host-attr:host']
- resp, volume = self.volume_client.get_volume(self.volume2['id'])
+ resp, volume = self.volume_client.get_volume(volume2_id)
volume2_host = volume['os-vol-host-attr:host']
msg = ("volumes %s and %s were created in the same backend" %
- (self.volume1['id'], self.volume2['id']))
+ (volume1_id, volume2_id))
self.assertNotEqual(volume1_host, volume2_host, msg)
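
A small sketch of how the backend extra-spec key is chosen in the new _create_type_and_volume helper; 'capabilities:volume_backend_name' is the prefixed spelling being exercised, and the backend name below is illustrative:

def backend_extra_specs(backend_name, with_prefix):
    key = ('capabilities:volume_backend_name' if with_prefix
           else 'volume_backend_name')
    return {key: backend_name}

assert backend_extra_specs('lvm1', True) == {
    'capabilities:volume_backend_name': 'lvm1'}
assert backend_extra_specs('lvm1', False) == {
    'volume_backend_name': 'lvm1'}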
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index c5be1f3..abf3c6b 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -61,12 +61,20 @@
cls.volumes_extension_client = cls.os.volumes_extension_client
cls.availability_zone_client = (
cls.os.volume_availability_zone_client)
+ # Special fields and resp code for cinder v1
+ cls.special_fields = {'name_field': 'display_name',
+ 'descrip_field': 'display_description',
+ 'create_resp': 200}
elif cls._api_version == 2:
if not CONF.volume_feature_enabled.api_v2:
msg = "Volume API v2 is disabled"
raise cls.skipException(msg)
cls.volumes_client = cls.os.volumes_v2_client
+ # Special fields and resp code for cinder v2
+ cls.special_fields = {'name_field': 'name',
+ 'descrip_field': 'description',
+ 'create_resp': 202}
else:
msg = ("Invalid Cinder API version (%s)" % cls._api_version)
@@ -82,15 +90,15 @@
@classmethod
def create_volume(cls, size=1, **kwargs):
"""Wrapper utility that returns a test volume."""
- vol_name = data_utils.rand_name('Volume')
- if cls._api_version == 1:
- resp, volume = cls.volumes_client.create_volume(
- size, display_name=vol_name, **kwargs)
- assert 200 == resp.status
- elif cls._api_version == 2:
- resp, volume = cls.volumes_client.create_volume(
- size, name=vol_name, **kwargs)
- assert 202 == resp.status
+ name = data_utils.rand_name('Volume')
+
+ name_field = cls.special_fields['name_field']
+ expect_status = cls.special_fields['create_resp']
+
+ kwargs[name_field] = name
+ resp, volume = cls.volumes_client.create_volume(size, **kwargs)
+ assert expect_status == resp.status
+
cls.volumes.append(volume)
cls.volumes_client.wait_for_volume_status(volume['id'], 'available')
return volume
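
A sketch of how the special_fields mapping removes the per-version branching in create_volume(); the fake client and volume name below exist only to make the example runnable:

SPECIAL_FIELDS = {
    1: {'name_field': 'display_name', 'create_resp': 200},
    2: {'name_field': 'name', 'create_resp': 202},
}

class FakeResp(object):
    def __init__(self, status):
        self.status = status

class FakeVolumesClient(object):
    def create_volume(self, size, **kwargs):
        return FakeResp(202), dict(kwargs, size=size, id='vol-1')

def create_volume(client, api_version, size=1, **kwargs):
    fields = SPECIAL_FIELDS[api_version]
    kwargs[fields['name_field']] = 'Volume-test'
    resp, volume = client.create_volume(size, **kwargs)
    assert resp.status == fields['create_resp']
    return volume

assert create_volume(FakeVolumesClient(), 2)['name'] == 'Volume-test'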
diff --git a/tempest/api/volume/test_volume_metadata.py b/tempest/api/volume/test_volume_metadata.py
index 0d57d47..0505f19 100644
--- a/tempest/api/volume/test_volume_metadata.py
+++ b/tempest/api/volume/test_volume_metadata.py
@@ -19,13 +19,12 @@
from tempest import test
-class VolumeMetadataTest(base.BaseVolumeV1Test):
- _interface = "json"
+class VolumesV2MetadataTest(base.BaseVolumeTest):
@classmethod
@test.safe_setup
def setUpClass(cls):
- super(VolumeMetadataTest, cls).setUpClass()
+ super(VolumesV2MetadataTest, cls).setUpClass()
# Create a volume
cls.volume = cls.create_volume()
cls.volume_id = cls.volume['id']
@@ -33,7 +32,7 @@
def tearDown(self):
# Update the metadata to {}
self.volumes_client.update_volume_metadata(self.volume_id, {})
- super(VolumeMetadataTest, self).tearDown()
+ super(VolumesV2MetadataTest, self).tearDown()
@test.attr(type='gate')
def test_create_get_delete_volume_metadata(self):
@@ -117,5 +116,13 @@
self.assertThat(body.items(), matchers.ContainsAll(expect.items()))
-class VolumeMetadataTestXML(VolumeMetadataTest):
+class VolumesV2MetadataTestXML(VolumesV2MetadataTest):
+ _interface = "xml"
+
+
+class VolumesV1MetadataTest(VolumesV2MetadataTest):
+ _api_version = 1
+
+
+class VolumesV1MetadataTestXML(VolumesV1MetadataTest):
_interface = "xml"
diff --git a/tempest/api/volume/test_volume_transfers.py b/tempest/api/volume/test_volume_transfers.py
index 82d1364..bf61222 100644
--- a/tempest/api/volume/test_volume_transfers.py
+++ b/tempest/api/volume/test_volume_transfers.py
@@ -23,12 +23,11 @@
CONF = config.CONF
-class VolumesTransfersTest(base.BaseVolumeV1Test):
- _interface = "json"
+class VolumesV2TransfersTest(base.BaseVolumeTest):
@classmethod
def setUpClass(cls):
- super(VolumesTransfersTest, cls).setUpClass()
+ super(VolumesV2TransfersTest, cls).setUpClass()
# Add another tenant to test volume-transfer
if CONF.compute.allow_tenant_isolation:
@@ -110,5 +109,13 @@
self.client.wait_for_volume_status(volume['id'], 'available')
-class VolumesTransfersTestXML(VolumesTransfersTest):
+class VolumesV2TransfersTestXML(VolumesV2TransfersTest):
+ _interface = "xml"
+
+
+class VolumesV1TransfersTest(VolumesV2TransfersTest):
+ _api_version = 1
+
+
+class VolumesV1TransfersTestXML(VolumesV1TransfersTest):
_interface = "xml"
diff --git a/tempest/api/volume/test_volumes_actions.py b/tempest/api/volume/test_volumes_actions.py
index cfab0bd..6fef564 100644
--- a/tempest/api/volume/test_volumes_actions.py
+++ b/tempest/api/volume/test_volumes_actions.py
@@ -21,13 +21,12 @@
CONF = config.CONF
-class VolumesActionsTest(base.BaseVolumeV1Test):
- _interface = "json"
+class VolumesV2ActionsTest(base.BaseVolumeTest):
@classmethod
@test.safe_setup
def setUpClass(cls):
- super(VolumesActionsTest, cls).setUpClass()
+ super(VolumesV2ActionsTest, cls).setUpClass()
cls.client = cls.volumes_client
cls.image_client = cls.os.image_client
@@ -47,7 +46,7 @@
cls.servers_client.delete_server(cls.server['id'])
cls.servers_client.wait_for_server_termination(cls.server['id'])
- super(VolumesActionsTest, cls).tearDownClass()
+ super(VolumesV2ActionsTest, cls).tearDownClass()
@test.stresstest(class_setup_per='process')
@test.attr(type='smoke')
@@ -165,5 +164,13 @@
self.assertEqual(False, bool_flag)
-class VolumesActionsTestXML(VolumesActionsTest):
+class VolumesV2ActionsTestXML(VolumesV2ActionsTest):
+ _interface = "xml"
+
+
+class VolumesV1ActionsTest(VolumesV2ActionsTest):
+ _api_version = 1
+
+
+class VolumesV1ActionsTestXML(VolumesV1ActionsTest):
_interface = "xml"
diff --git a/tempest/api/volume/test_volumes_get.py b/tempest/api/volume/test_volumes_get.py
index 2745b95..82208aa 100644
--- a/tempest/api/volume/test_volumes_get.py
+++ b/tempest/api/volume/test_volumes_get.py
@@ -23,14 +23,17 @@
CONF = config.CONF
-class VolumesGetTest(base.BaseVolumeV1Test):
- _interface = "json"
+class VolumesV2GetTest(base.BaseVolumeTest):
@classmethod
def setUpClass(cls):
- super(VolumesGetTest, cls).setUpClass()
+ super(VolumesV2GetTest, cls).setUpClass()
cls.client = cls.volumes_client
+ cls.name_field = cls.special_fields['name_field']
+ cls.descrip_field = cls.special_fields['descrip_field']
+ cls.create_resp = cls.special_fields['create_resp']
+
def _delete_volume(self, volume_id):
resp, _ = self.client.delete_volume(volume_id)
self.assertEqual(202, resp.status)
@@ -51,24 +54,24 @@
v_name = data_utils.rand_name('Volume')
metadata = {'Type': 'Test'}
# Create a volume
- resp, volume = self.client.create_volume(display_name=v_name,
- metadata=metadata,
- **kwargs)
- self.assertEqual(200, resp.status)
+ kwargs[self.name_field] = v_name
+ kwargs['metadata'] = metadata
+ resp, volume = self.client.create_volume(**kwargs)
+ self.assertEqual(self.create_resp, resp.status)
self.assertIn('id', volume)
self.addCleanup(self._delete_volume, volume['id'])
- self.assertIn('display_name', volume)
- self.assertEqual(volume['display_name'], v_name,
+ self.client.wait_for_volume_status(volume['id'], 'available')
+ self.assertIn(self.name_field, volume)
+ self.assertEqual(volume[self.name_field], v_name,
"The created volume name is not equal "
"to the requested name")
self.assertTrue(volume['id'] is not None,
"Field volume id is empty or not found.")
- self.client.wait_for_volume_status(volume['id'], 'available')
# Get Volume information
resp, fetched_volume = self.client.get_volume(volume['id'])
self.assertEqual(200, resp.status)
self.assertEqual(v_name,
- fetched_volume['display_name'],
+ fetched_volume[self.name_field],
'The fetched Volume name is different '
'from the created Volume')
self.assertEqual(volume['id'],
@@ -90,27 +93,25 @@
# Update Volume
        # Test volume update when display_name is the same as the original value
- resp, update_volume = \
- self.client.update_volume(volume['id'],
- display_name=v_name)
+ params = {self.name_field: v_name}
+ resp, update_volume = self.client.update_volume(volume['id'], **params)
self.assertEqual(200, resp.status)
# Test volume update when display_name is new
new_v_name = data_utils.rand_name('new-Volume')
new_desc = 'This is the new description of volume'
- resp, update_volume = \
- self.client.update_volume(volume['id'],
- display_name=new_v_name,
- display_description=new_desc)
+ params = {self.name_field: new_v_name,
+ self.descrip_field: new_desc}
+ resp, update_volume = self.client.update_volume(volume['id'], **params)
# Assert response body for update_volume method
self.assertEqual(200, resp.status)
- self.assertEqual(new_v_name, update_volume['display_name'])
- self.assertEqual(new_desc, update_volume['display_description'])
+ self.assertEqual(new_v_name, update_volume[self.name_field])
+ self.assertEqual(new_desc, update_volume[self.descrip_field])
# Assert response body for get_volume method
resp, updated_volume = self.client.get_volume(volume['id'])
self.assertEqual(200, resp.status)
self.assertEqual(volume['id'], updated_volume['id'])
- self.assertEqual(new_v_name, updated_volume['display_name'])
- self.assertEqual(new_desc, updated_volume['display_description'])
+ self.assertEqual(new_v_name, updated_volume[self.name_field])
+ self.assertEqual(new_desc, updated_volume[self.descrip_field])
self.assertThat(updated_volume['metadata'].items(),
matchers.ContainsAll(metadata.items()),
'The fetched Volume metadata misses data '
@@ -120,20 +121,18 @@
# then test volume update if display_name is duplicated
new_volume = {}
new_v_desc = data_utils.rand_name('@#$%^* description')
- resp, new_volume = \
- self.client.create_volume(
- size=1,
- display_description=new_v_desc,
- availability_zone=volume['availability_zone'])
- self.assertEqual(200, resp.status)
+ params = {self.descrip_field: new_v_desc,
+ 'availability_zone': volume['availability_zone']}
+ resp, new_volume = self.client.create_volume(size=1, **params)
+ self.assertEqual(self.create_resp, resp.status)
self.assertIn('id', new_volume)
self.addCleanup(self._delete_volume, new_volume['id'])
self.client.wait_for_volume_status(new_volume['id'], 'available')
- resp, update_volume = \
- self.client.update_volume(
- new_volume['id'],
- display_name=volume['display_name'],
- display_description=volume['display_description'])
+
+ params = {self.name_field: volume[self.name_field],
+ self.descrip_field: volume[self.descrip_field]}
+ resp, update_volume = self.client.update_volume(new_volume['id'],
+ **params)
self.assertEqual(200, resp.status)
# NOTE(jdg): Revert back to strict true/false checking
@@ -159,5 +158,13 @@
self._volume_create_get_update_delete(source_volid=origin['id'])
-class VolumesGetTestXML(VolumesGetTest):
+class VolumesV2GetTestXML(VolumesV2GetTest):
+ _interface = "xml"
+
+
+class VolumesV1GetTest(VolumesV2GetTest):
+ _api_version = 1
+
+
+class VolumesV1GetTestXML(VolumesV1GetTest):
_interface = "xml"
diff --git a/tempest/api/volume/test_volumes_negative.py b/tempest/api/volume/test_volumes_negative.py
index bc5b1dc..8bd4c88 100644
--- a/tempest/api/volume/test_volumes_negative.py
+++ b/tempest/api/volume/test_volumes_negative.py
@@ -21,15 +21,16 @@
from tempest import test
-class VolumesNegativeTest(base.BaseVolumeV1Test):
- _interface = 'json'
+class VolumesV2NegativeTest(base.BaseVolumeTest):
@classmethod
@test.safe_setup
def setUpClass(cls):
- super(VolumesNegativeTest, cls).setUpClass()
+ super(VolumesV2NegativeTest, cls).setUpClass()
cls.client = cls.volumes_client
+ cls.name_field = cls.special_fields['name_field']
+
# Create a test shared instance and volume for attach/detach tests
cls.volume = cls.create_volume()
cls.mountpoint = "/dev/vdc"
@@ -237,7 +238,7 @@
@test.attr(type=['negative', 'gate'])
def test_list_volumes_with_nonexistent_name(self):
v_name = data_utils.rand_name('Volume-')
- params = {'display_name': v_name}
+ params = {self.name_field: v_name}
resp, fetched_volume = self.client.list_volumes(params)
self.assertEqual(200, resp.status)
self.assertEqual(0, len(fetched_volume))
@@ -245,7 +246,7 @@
@test.attr(type=['negative', 'gate'])
def test_list_volumes_detail_with_nonexistent_name(self):
v_name = data_utils.rand_name('Volume-')
- params = {'display_name': v_name}
+ params = {self.name_field: v_name}
resp, fetched_volume = self.client.list_volumes_with_detail(params)
self.assertEqual(200, resp.status)
self.assertEqual(0, len(fetched_volume))
@@ -265,5 +266,14 @@
self.assertEqual(0, len(fetched_volume))
-class VolumesNegativeTestXML(VolumesNegativeTest):
+class VolumesV2NegativeTestXML(VolumesV2NegativeTest):
+ _interface = 'xml'
+
+
+class VolumesV1NegativeTest(VolumesV2NegativeTest):
+ _api_version = 1
+ _name = 'display_name'
+
+
+class VolumesV1NegativeTestXML(VolumesV1NegativeTest):
_interface = 'xml'
diff --git a/tempest/api_schema/compute/v2/servers.py b/tempest/api_schema/compute/v2/servers.py
index e4d95f7..4abadf4 100644
--- a/tempest/api_schema/compute/v2/servers.py
+++ b/tempest/api_schema/compute/v2/servers.py
@@ -284,3 +284,14 @@
'properties'].update({'adminPass': {'type': 'string'}})
rebuild_server_with_admin_pass['response_body']['properties']['server'][
'required'].append('adminPass')
+
+rescue_server = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'adminPass': {'type': 'string'}
+ },
+ 'required': ['adminPass']
+ }
+}
diff --git a/tempest/api_schema/compute/v3/servers.py b/tempest/api_schema/compute/v3/servers.py
index 742252d..a84ac3c 100644
--- a/tempest/api_schema/compute/v3/servers.py
+++ b/tempest/api_schema/compute/v3/servers.py
@@ -195,3 +195,18 @@
'properties'].update({'admin_password': {'type': 'string'}})
rebuild_server_with_admin_pass['response_body']['properties']['server'][
'required'].append('admin_password')
+
+rescue_server_with_admin_pass = {
+ 'status_code': [202],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'admin_password': {'type': 'string'}
+ },
+ 'required': ['admin_password']
+ }
+}
+
+rescue_server = copy.deepcopy(rescue_server_with_admin_pass)
+del rescue_server['response_body']['properties']
+del rescue_server['response_body']['required']
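
The response_body portion of these schemas is plain JSON Schema; an illustrative check of the v3 rescue response with the jsonschema library (Tempest's own validation plumbing differs, this only shows what the schema constrains):

import jsonschema

response_body = {
    'type': 'object',
    'properties': {'admin_password': {'type': 'string'}},
    'required': ['admin_password'],
}

jsonschema.validate({'admin_password': 'secretpw'}, response_body)  # passes
try:
    jsonschema.validate({}, response_body)  # required key missing
except jsonschema.ValidationError:
    pass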
diff --git a/tempest/auth.py b/tempest/auth.py
index 830dca9..5df6224 100644
--- a/tempest/auth.py
+++ b/tempest/auth.py
@@ -13,10 +13,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+import abc
import copy
import datetime
import exceptions
import re
+import six
import urlparse
from tempest import config
@@ -31,6 +33,7 @@
LOG = logging.getLogger(__name__)
+@six.add_metaclass(abc.ABCMeta)
class AuthProvider(object):
"""
Provide authentication
@@ -70,18 +73,21 @@
interface=self.interface, cache=self.cache
)
+ @abc.abstractmethod
def _decorate_request(self, filters, method, url, headers=None, body=None,
auth_data=None):
"""
Decorate request with authentication data
"""
- raise NotImplementedError
+ return
+ @abc.abstractmethod
def _get_auth(self):
- raise NotImplementedError
+ return
+ @abc.abstractmethod
def _fill_credentials(self, auth_data_body):
- raise NotImplementedError
+ return
def fill_credentials(self):
"""
@@ -130,8 +136,9 @@
self.cache = None
self.credentials.reset()
+ @abc.abstractmethod
def is_expired(self, auth_data):
- raise NotImplementedError
+ return
def auth_request(self, method, url, headers=None, body=None, filters=None):
"""
@@ -188,11 +195,12 @@
self.alt_part = request_part
self.alt_auth_data = auth_data
+ @abc.abstractmethod
def base_url(self, filters, auth_data=None):
"""
Extracts the base_url based on provided filters
"""
- raise NotImplementedError
+ return
class KeystoneAuthProvider(AuthProvider):
@@ -225,11 +233,13 @@
# no change to method or body
return str(_url), _headers, body
+ @abc.abstractmethod
def _auth_client(self):
- raise NotImplementedError
+ return
+ @abc.abstractmethod
def _auth_params(self):
- raise NotImplementedError
+ return
def _get_auth(self):
# Bypasses the cache
@@ -321,7 +331,7 @@
if noversion_path != "":
path += "/" + noversion_path
_base_url = _base_url.replace(parts.path, path)
- if filters.get('skip_path', None) is not None:
+ if filters.get('skip_path', None) is not None and parts.path != '':
_base_url = _base_url.replace(parts.path, "/")
return _base_url
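
A minimal sketch of the pattern adopted here: six.add_metaclass(abc.ABCMeta) plus @abc.abstractmethod makes an incomplete provider fail at instantiation time instead of raising NotImplementedError at call time (class names below are illustrative):

import abc

import six

@six.add_metaclass(abc.ABCMeta)
class Provider(object):
    @abc.abstractmethod
    def _get_auth(self):
        return

class Complete(Provider):
    def _get_auth(self):
        return 'token'

Complete()         # fine, all abstract methods implemented
try:
    Provider()     # TypeError: can't instantiate abstract class
except TypeError:
    pass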
diff --git a/tempest/cli/README.rst b/tempest/cli/README.rst
index dcd940b..bc18084 100644
--- a/tempest/cli/README.rst
+++ b/tempest/cli/README.rst
@@ -1,3 +1,5 @@
+.. _cli_field_guide:
+
Tempest Field Guide to CLI tests
================================
diff --git a/tempest/cli/__init__.py b/tempest/cli/__init__.py
index ba94c82..02f8c05 100644
--- a/tempest/cli/__init__.py
+++ b/tempest/cli/__init__.py
@@ -19,6 +19,7 @@
import tempest.cli.output_parser
from tempest import config
+from tempest import exceptions
from tempest.openstack.common import log as logging
import tempest.test
@@ -130,10 +131,10 @@
cmd, stdout=stdout, stderr=stderr)
result, result_err = proc.communicate()
if not fail_ok and proc.returncode != 0:
- raise CommandFailed(proc.returncode,
- cmd,
- result,
- stderr=result_err)
+ raise exceptions.CommandFailed(proc.returncode,
+ cmd,
+ result,
+ result_err)
return result
def assertTableStruct(self, items, field_names):
@@ -146,11 +147,3 @@
self.assertTrue(lines[0].startswith(beginning),
msg=('Beginning of first line has invalid content: %s'
% lines[:3]))
-
-
-class CommandFailed(subprocess.CalledProcessError):
- # adds output attribute for python2.6
- def __init__(self, returncode, cmd, output, stderr=""):
- super(CommandFailed, self).__init__(returncode, cmd)
- self.output = output
- self.stderr = stderr
diff --git a/tempest/cli/simple_read_only/test_cinder.py b/tempest/cli/simple_read_only/test_cinder.py
index 946b89e..04971c1 100644
--- a/tempest/cli/simple_read_only/test_cinder.py
+++ b/tempest/cli/simple_read_only/test_cinder.py
@@ -15,17 +15,17 @@
import logging
import re
-import subprocess
import testtools
-import tempest.cli
+from tempest import cli
from tempest import config
+from tempest import exceptions
CONF = config.CONF
LOG = logging.getLogger(__name__)
-class SimpleReadOnlyCinderClientTest(tempest.cli.ClientTestBase):
+class SimpleReadOnlyCinderClientTest(cli.ClientTestBase):
"""Basic, read-only tests for Cinder CLI client.
Checks return values and output of read-only commands.
@@ -41,7 +41,7 @@
super(SimpleReadOnlyCinderClientTest, cls).setUpClass()
def test_cinder_fake_action(self):
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(exceptions.CommandFailed,
self.cinder,
'this-does-not-exist')
@@ -66,7 +66,7 @@
'Attached to'])
self.cinder('list', params='--all-tenants 1')
self.cinder('list', params='--all-tenants 0')
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(exceptions.CommandFailed,
self.cinder,
'list',
params='--all-tenants bad')
diff --git a/tempest/cli/simple_read_only/test_glance.py b/tempest/cli/simple_read_only/test_glance.py
index 9869483..90cdc55 100644
--- a/tempest/cli/simple_read_only/test_glance.py
+++ b/tempest/cli/simple_read_only/test_glance.py
@@ -14,10 +14,10 @@
# under the License.
import re
-import subprocess
-import tempest.cli
+from tempest import cli
from tempest import config
+from tempest import exceptions
from tempest.openstack.common import log as logging
CONF = config.CONF
@@ -25,7 +25,7 @@
LOG = logging.getLogger(__name__)
-class SimpleReadOnlyGlanceClientTest(tempest.cli.ClientTestBase):
+class SimpleReadOnlyGlanceClientTest(cli.ClientTestBase):
"""Basic, read-only tests for Glance CLI client.
Checks return values and output of read-only commands.
@@ -41,7 +41,7 @@
super(SimpleReadOnlyGlanceClientTest, cls).setUpClass()
def test_glance_fake_action(self):
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(exceptions.CommandFailed,
self.glance,
'this-does-not-exist')
@@ -76,7 +76,7 @@
commands = set(commands)
wanted_commands = set(('image-create', 'image-delete', 'help',
'image-download', 'image-show', 'image-update',
- 'member-add', 'member-create', 'member-delete',
+ 'member-create', 'member-delete',
'member-list'))
self.assertFalse(wanted_commands - commands)
diff --git a/tempest/cli/simple_read_only/test_keystone.py b/tempest/cli/simple_read_only/test_keystone.py
index dda65c1..9218fcd 100644
--- a/tempest/cli/simple_read_only/test_keystone.py
+++ b/tempest/cli/simple_read_only/test_keystone.py
@@ -14,10 +14,10 @@
# under the License.
import re
-import subprocess
-import tempest.cli
+from tempest import cli
from tempest import config
+from tempest import exceptions
from tempest.openstack.common import log as logging
CONF = config.CONF
@@ -26,7 +26,7 @@
LOG = logging.getLogger(__name__)
-class SimpleReadOnlyKeystoneClientTest(tempest.cli.ClientTestBase):
+class SimpleReadOnlyKeystoneClientTest(cli.ClientTestBase):
"""Basic, read-only tests for Keystone CLI client.
Checks return values and output of read-only commands.
@@ -35,7 +35,7 @@
"""
def test_admin_fake_action(self):
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(exceptions.CommandFailed,
self.keystone,
'this-does-not-exist')
diff --git a/tempest/cli/simple_read_only/test_neutron.py b/tempest/cli/simple_read_only/test_neutron.py
index 49d079e..87f6b67 100644
--- a/tempest/cli/simple_read_only/test_neutron.py
+++ b/tempest/cli/simple_read_only/test_neutron.py
@@ -14,10 +14,10 @@
# under the License.
import re
-import subprocess
from tempest import cli
from tempest import config
+from tempest import exceptions
from tempest.openstack.common import log as logging
from tempest import test
@@ -43,7 +43,7 @@
@test.attr(type='smoke')
def test_neutron_fake_action(self):
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(exceptions.CommandFailed,
self.neutron,
'this-does-not-exist')
@@ -89,7 +89,7 @@
def _test_neutron_lbaas_command(self, command):
try:
self.neutron(command)
- except cli.CommandFailed as e:
+ except exceptions.CommandFailed as e:
if '404 Not Found' not in e.stderr:
self.fail('%s: Unexpected failure.' % command)
diff --git a/tempest/cli/simple_read_only/test_nova.py b/tempest/cli/simple_read_only/test_nova.py
index 1c1ddf1..7085cc9 100644
--- a/tempest/cli/simple_read_only/test_nova.py
+++ b/tempest/cli/simple_read_only/test_nova.py
@@ -13,12 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-import subprocess
-
import testtools
-import tempest.cli
+from tempest import cli
from tempest import config
+from tempest import exceptions
from tempest.openstack.common import log as logging
import tempest.test
@@ -27,7 +26,7 @@
LOG = logging.getLogger(__name__)
-class SimpleReadOnlyNovaClientTest(tempest.cli.ClientTestBase):
+class SimpleReadOnlyNovaClientTest(cli.ClientTestBase):
"""
This is a first pass at a simple read only python-novaclient test. This
@@ -49,7 +48,7 @@
super(SimpleReadOnlyNovaClientTest, cls).setUpClass()
def test_admin_fake_action(self):
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(exceptions.CommandFailed,
self.nova,
'this-does-nova-exist')
@@ -86,11 +85,11 @@
self.nova('endpoints')
def test_admin_flavor_acces_list(self):
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(exceptions.CommandFailed,
self.nova,
'flavor-access-list')
# Failed to get access list for public flavor type
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(exceptions.CommandFailed,
self.nova,
'flavor-access-list',
params='--flavor m1.tiny')
@@ -127,7 +126,7 @@
self.nova('list')
self.nova('list', params='--all-tenants 1')
self.nova('list', params='--all-tenants 0')
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(exceptions.CommandFailed,
self.nova,
'list',
params='--all-tenants bad')
diff --git a/tempest/cli/simple_read_only/test_nova_manage.py b/tempest/cli/simple_read_only/test_nova_manage.py
index f1fee2e..dae0cf8 100644
--- a/tempest/cli/simple_read_only/test_nova_manage.py
+++ b/tempest/cli/simple_read_only/test_nova_manage.py
@@ -13,10 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import subprocess
-
-import tempest.cli
+from tempest import cli
from tempest import config
+from tempest import exceptions
from tempest.openstack.common import log as logging
@@ -24,7 +23,7 @@
LOG = logging.getLogger(__name__)
-class SimpleReadOnlyNovaManageTest(tempest.cli.ClientTestBase):
+class SimpleReadOnlyNovaManageTest(cli.ClientTestBase):
"""
This is a first pass at a simple read only nova-manage test. This
@@ -48,7 +47,7 @@
super(SimpleReadOnlyNovaManageTest, cls).setUpClass()
def test_admin_fake_action(self):
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(exceptions.CommandFailed,
self.nova_manage,
'this-does-nova-exist')
diff --git a/tempest/cli/simple_read_only/test_sahara.py b/tempest/cli/simple_read_only/test_sahara.py
index f00dcae..2c6e0e2 100644
--- a/tempest/cli/simple_read_only/test_sahara.py
+++ b/tempest/cli/simple_read_only/test_sahara.py
@@ -14,10 +14,10 @@
# limitations under the License.
import logging
import re
-import subprocess
from tempest import cli
from tempest import config
+from tempest import exceptions
from tempest import test
CONF = config.CONF
@@ -42,7 +42,7 @@
@test.attr(type='negative')
def test_sahara_fake_action(self):
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(exceptions.CommandFailed,
self.sahara,
'this-does-not-exist')
diff --git a/tempest/cli/simple_read_only/test_swift.py b/tempest/cli/simple_read_only/test_swift.py
index 6d6caa7..069a384 100644
--- a/tempest/cli/simple_read_only/test_swift.py
+++ b/tempest/cli/simple_read_only/test_swift.py
@@ -14,15 +14,15 @@
# under the License.
import re
-import subprocess
-import tempest.cli
+from tempest import cli
from tempest import config
+from tempest import exceptions
CONF = config.CONF
-class SimpleReadOnlySwiftClientTest(tempest.cli.ClientTestBase):
+class SimpleReadOnlySwiftClientTest(cli.ClientTestBase):
"""Basic, read-only tests for Swift CLI client.
Checks return values and output of read-only commands.
@@ -38,7 +38,7 @@
super(SimpleReadOnlySwiftClientTest, cls).setUpClass()
def test_swift_fake_action(self):
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(exceptions.CommandFailed,
self.swift,
'this-does-not-exist')
diff --git a/tempest/clients.py b/tempest/clients.py
index 4e2205e..519e686 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -52,6 +52,8 @@
MigrationsClientJSON
from tempest.services.compute.json.quotas_client import QuotaClassesClientJSON
from tempest.services.compute.json.quotas_client import QuotasClientJSON
+from tempest.services.compute.json.security_group_default_rules_client import \
+ SecurityGroupDefaultRulesClientJSON
from tempest.services.compute.json.security_groups_client import \
SecurityGroupsClientJSON
from tempest.services.compute.json.servers_client import ServersClientJSON
@@ -406,6 +408,8 @@
self.data_processing_client = DataProcessingClient(
self.auth_provider)
self.migrations_client = MigrationsClientJSON(self.auth_provider)
+ self.security_group_default_rules_client = (
+ SecurityGroupDefaultRulesClientJSON(self.auth_provider))
class AltManager(Manager):
diff --git a/tempest/cmd/javelin.py b/tempest/cmd/javelin.py
index 0b72b1c..67b92b0 100755
--- a/tempest/cmd/javelin.py
+++ b/tempest/cmd/javelin.py
@@ -28,6 +28,7 @@
import argparse
import tempest.auth
+from tempest import config
from tempest import exceptions
from tempest.services.compute.json import flavors_client
from tempest.services.compute.json import servers_client
@@ -218,6 +219,8 @@
def check_objects(self):
"""Check that the objects created are still there."""
+ if not self.res.get('objects'):
+ return
LOG.info("checking objects")
for obj in self.res['objects']:
client = client_for_user(obj['owner'])
@@ -228,6 +231,8 @@
def check_servers(self):
"""Check that the servers are still up and running."""
+ if not self.res.get('servers'):
+ return
LOG.info("checking servers")
for server in self.res['servers']:
client = client_for_user(server['owner'])
@@ -239,12 +244,18 @@
r, found = client.servers.get_server(found['id'])
# get the ipv4 address
addr = found['addresses']['private'][0]['addr']
- self.assertEqual(os.system("ping -c 1 " + addr), 0,
- "Server %s is not pingable at %s" % (
- server['name'], addr))
+ for count in range(60):
+ return_code = os.system("ping -c1 " + addr)
+                if return_code == 0:
+ break
+ self.assertNotEqual(count, 59,
+ "Server %s is not pingable at %s" % (
+ server['name'], addr))
def check_volumes(self):
"""Check that the volumes are still there and attached."""
+ if not self.res.get('volumes'):
+ return
LOG.info("checking volumes")
for volume in self.res['volumes']:
client = client_for_user(volume['owner'])
@@ -273,6 +284,8 @@
def create_objects(objects):
+ if not objects:
+ return
LOG.info("Creating objects")
for obj in objects:
LOG.debug("Object %s" % obj)
@@ -297,6 +310,9 @@
def create_images(images):
+ if not images:
+ return
+ LOG.info("Creating images")
for image in images:
client = client_for_user(image['owner'])
@@ -304,6 +320,7 @@
r, body = client.images.image_list()
names = [x['name'] for x in body]
if image['name'] in names:
+ LOG.info("Image '%s' already exists" % image['name'])
continue
# special handling for 3 part image
@@ -359,15 +376,39 @@
def create_servers(servers):
+ if not servers:
+ return
+ LOG.info("Creating servers")
for server in servers:
client = client_for_user(server['owner'])
if _get_server_by_name(client, server['name']):
+ LOG.info("Server '%s' already exists" % server['name'])
continue
image_id = _get_image_by_name(client, server['image'])['id']
flavor_id = _get_flavor_by_name(client, server['flavor'])['id']
- client.servers.create_server(server['name'], image_id, flavor_id)
+ resp, body = client.servers.create_server(server['name'], image_id,
+ flavor_id)
+ server_id = body['id']
+ client.servers.wait_for_server_status(server_id, 'ACTIVE')
+
+
+def destroy_servers(servers):
+ if not servers:
+ return
+ LOG.info("Destroying servers")
+ for server in servers:
+ client = client_for_user(server['owner'])
+
+ response = _get_server_by_name(client, server['name'])
+ if not response:
+ LOG.info("Server '%s' does not exist" % server['name'])
+ continue
+
+ client.servers.delete_server(response['id'])
+ client.servers.wait_for_server_termination(response['id'],
+ ignore_error=True)
#######################
@@ -428,6 +469,23 @@
# attach_volumes(RES['volumes'])
+def destroy_resources():
+ LOG.info("Destroying Resources")
+ # Destroy in inverse order of create
+
+ # Future
+ # detach_volumes
+ # destroy_volumes
+
+ destroy_servers(RES['servers'])
+ LOG.warn("Destroy mode incomplete")
+ # destroy_images
+ # destroy_objects
+
+ # destroy_users
+ # destroy_tenants
+
+
def get_options():
global OPTS
parser = argparse.ArgumentParser(
@@ -440,11 +498,17 @@
required=True,
metavar='resourcefile.yaml',
help='Resources definition yaml file')
+
parser.add_argument(
'-d', '--devstack-base',
required=True,
metavar='/opt/stack/old',
help='Devstack base directory for retrieving artifacts')
+ parser.add_argument(
+ '-c', '--config-file',
+ metavar='/etc/tempest.conf',
+ help='path to javelin2(tempest) config file')
+
# auth bits, letting us also just source the devstack openrc
parser.add_argument('--os-username',
metavar='<auth-user-name>',
@@ -464,6 +528,8 @@
print("ERROR: Unknown mode -m %s\n" % OPTS.mode)
parser.print_help()
sys.exit(1)
+ if OPTS.config_file:
+ config.CONF.set_config_path(OPTS.config_file)
def setup_logging(debug=True):
@@ -491,15 +557,20 @@
if OPTS.mode == 'create':
create_resources()
+ # Make sure the resources we just created actually work
+ checker = JavelinCheck(USERS, RES)
+ checker.check()
elif OPTS.mode == 'check':
collect_users(RES['users'])
checker = JavelinCheck(USERS, RES)
checker.check()
elif OPTS.mode == 'destroy':
- LOG.warn("Destroy mode not yet implemented")
+ collect_users(RES['users'])
+ destroy_resources()
else:
LOG.error('Unknown mode %s' % OPTS.mode)
return 1
+ LOG.info('javelin2 successfully finished')
return 0
if __name__ == "__main__":
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
index 0834cff..673da4f 100755
--- a/tempest/cmd/verify_tempest_config.py
+++ b/tempest/cmd/verify_tempest_config.py
@@ -29,13 +29,12 @@
CONF = config.CONF
RAW_HTTP = httplib2.Http()
-CONF_FILE = None
-OUTFILE = sys.stdout
+CONF_PARSER = None
def _get_config_file():
default_config_dir = os.path.join(os.path.abspath(
- os.path.dirname(os.path.dirname(__file__))), "etc")
+ os.path.dirname(os.path.dirname(os.path.dirname(__file__)))), "etc")
default_config_file = "tempest.conf"
conf_dir = os.environ.get('TEMPEST_CONFIG_DIR', default_config_dir)
@@ -46,14 +45,9 @@
def change_option(option, group, value):
- config_parse = moves.configparser.SafeConfigParser()
- config_parse.optionxform = str
- config_parse.readfp(CONF_FILE)
- if not config_parse.has_section(group):
- config_parse.add_section(group)
- config_parse.set(group, option, str(value))
- global OUTFILE
- config_parse.write(OUTFILE)
+ if not CONF_PARSER.has_section(group):
+ CONF_PARSER.add_section(group)
+ CONF_PARSER.set(group, option, str(value))
def print_and_or_update(option, group, value, update):
@@ -288,6 +282,9 @@
if update:
change_option(codename_match[cfgname],
'service_available', True)
+ # If we are going to enable this service we should also
+ # allow its extension checks.
+ avail_services.append(codename_match[cfgname])
else:
avail_services.append(codename_match[cfgname])
return avail_services
@@ -321,12 +318,16 @@
opts = parse_args()
update = opts.update
replace = opts.replace_ext
- global CONF_FILE
- global OUTFILE
+ global CONF_PARSER
+
+ outfile = sys.stdout
if update:
- CONF_FILE = _get_config_file()
+ conf_file = _get_config_file()
if opts.output:
- OUTFILE = open(opts.output, 'w+')
+ outfile = open(opts.output, 'w+')
+ CONF_PARSER = moves.configparser.SafeConfigParser()
+ CONF_PARSER.optionxform = str
+ CONF_PARSER.readfp(conf_file)
os = clients.ComputeAdminManager(interface='json')
services = check_service_availability(os, update)
results = {}
@@ -341,9 +342,10 @@
verify_nova_api_versions(os, update)
verify_cinder_api_versions(os, update)
display_results(results, update, replace)
- if CONF_FILE:
- CONF_FILE.close()
- OUTFILE.close()
+ if update:
+ conf_file.close()
+ CONF_PARSER.write(outfile)
+ outfile.close()
if __name__ == "__main__":
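The refactor above swaps the CONF_FILE/OUTFILE globals for a single in-memory SafeConfigParser that collects every change and is written out once. A minimal sketch of that parse-modify-write pattern using six.moves (the file names are placeholders):

    from six import moves

    parser = moves.configparser.SafeConfigParser()
    parser.optionxform = str              # keep option names case-sensitive
    with open('tempest.conf') as conf_file:
        parser.readfp(conf_file)          # read the existing config once

    # change_option() now only mutates the in-memory parser ...
    if not parser.has_section('service_available'):
        parser.add_section('service_available')
    parser.set('service_available', 'heat', str(True))

    # ... and all accumulated changes are flushed in a single write at the end.
    with open('tempest.conf.updated', 'w+') as outfile:
        parser.write(outfile)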
diff --git a/tempest/common/cred_provider.py b/tempest/common/cred_provider.py
new file mode 100644
index 0000000..dc4f049
--- /dev/null
+++ b/tempest/common/cred_provider.py
@@ -0,0 +1,44 @@
+# (c) 2014 Deutsche Telekom AG
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+import six
+
+from tempest import config
+from tempest.openstack.common import log as logging
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class CredentialProvider(object):
+ def __init__(self, name, tempest_client=True, interface='json',
+ password='pass', network_resources=None):
+ self.name = name
+
+ @abc.abstractmethod
+ def get_primary_creds(self):
+ return
+
+ @abc.abstractmethod
+ def get_admin_creds(self):
+ return
+
+ @abc.abstractmethod
+ def get_alt_creds(self):
+ return
+
+ @abc.abstractmethod
+ def clear_isolated_creds(self):
+ return
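The new CredentialProvider ABC is the interface that IsolatedCreds implements in the next hunk. A hypothetical, illustration-only subclass showing what a concrete provider must supply (the credential type names passed to auth.get_default_credentials are assumptions):

    from tempest import auth
    from tempest.common import cred_provider

    class StaticCredentialProvider(cred_provider.CredentialProvider):
        """Hand back the statically configured credentials unchanged."""

        def get_primary_creds(self):
            return auth.get_default_credentials('user')

        def get_alt_creds(self):
            return auth.get_default_credentials('alt_user')

        def get_admin_creds(self):
            return auth.get_default_credentials('identity_admin')

        def clear_isolated_creds(self):
            # Nothing to clean up for pre-provisioned credentials.
            pass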
diff --git a/tempest/common/custom_matchers.py b/tempest/common/custom_matchers.py
index 4a7921f..996c365 100644
--- a/tempest/common/custom_matchers.py
+++ b/tempest/common/custom_matchers.py
@@ -69,10 +69,24 @@
elif self.target == 'Object':
if 'etag' not in actual:
return NonExistentHeader('etag')
- elif self.method == 'PUT' or self.method == 'COPY':
+ if 'last-modified' not in actual:
+ return NonExistentHeader('last-modified')
+ elif self.method == 'PUT':
if self.target == 'Object':
if 'etag' not in actual:
return NonExistentHeader('etag')
+ if 'last-modified' not in actual:
+ return NonExistentHeader('last-modified')
+ elif self.method == 'COPY':
+ if self.target == 'Object':
+ if 'etag' not in actual:
+ return NonExistentHeader('etag')
+ if 'last-modified' not in actual:
+ return NonExistentHeader('last-modified')
+ if 'x-copied-from' not in actual:
+ return NonExistentHeader('x-copied-from')
+ if 'x-copied-from-last-modified' not in actual:
+ return NonExistentHeader('x-copied-from-last-modified')
return None
@@ -122,11 +136,17 @@
return InvalidFormat(key, value)
elif key == 'content-type' and not value:
return InvalidFormat(key, value)
+ elif key == 'x-copied-from' and not re.match("\S+/\S+", value):
+ return InvalidFormat(key, value)
+ elif key == 'x-copied-from-last-modified' and not value:
+ return InvalidFormat(key, value)
elif key == 'x-trans-id' and \
not re.match("^tx[0-9a-f]{21}-[0-9a-f]{10}.*", value):
return InvalidFormat(key, value)
elif key == 'date' and not value:
return InvalidFormat(key, value)
+ elif key == 'last-modified' and not value:
+ return InvalidFormat(key, value)
elif key == 'accept-ranges' and not value == 'bytes':
return InvalidFormat(key, value)
elif key == 'etag' and not value.isalnum():
diff --git a/tempest/common/isolated_creds.py b/tempest/common/isolated_creds.py
index 208f42f..98b0116 100644
--- a/tempest/common/isolated_creds.py
+++ b/tempest/common/isolated_creds.py
@@ -16,6 +16,7 @@
from tempest import auth
from tempest import clients
+from tempest.common import cred_provider
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
@@ -25,15 +26,16 @@
LOG = logging.getLogger(__name__)
-class IsolatedCreds(object):
+class IsolatedCreds(cred_provider.CredentialProvider):
def __init__(self, name, tempest_client=True, interface='json',
password='pass', network_resources=None):
+ super(IsolatedCreds, self).__init__(name, tempest_client, interface,
+ password, network_resources)
self.network_resources = network_resources
self.isolated_creds = {}
self.isolated_net_resources = {}
self.ports = []
- self.name = name
self.tempest_client = tempest_client
self.interface = interface
self.password = password
diff --git a/tempest/common/utils/data_utils.py b/tempest/common/utils/data_utils.py
index 174e557..5a29ea0 100644
--- a/tempest/common/utils/data_utils.py
+++ b/tempest/common/utils/data_utils.py
@@ -71,3 +71,11 @@
if not base_text:
base_text = 'test'
return ''.join(itertools.islice(itertools.cycle(base_text), size))
+
+
+def random_bytes(size=1024):
+ """
+ Return a string of `size` randomly selected bytes.
+ """
+ return ''.join([chr(random.randint(0, 255))
+ for i in range(size)])
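A short usage sketch for the new helper alongside the existing rand_name(); handy when a test needs an arbitrary binary-ish payload (the names below are placeholders):

    from tempest.common.utils import data_utils

    object_name = data_utils.rand_name('TestObject')
    payload = data_utils.random_bytes(4096)   # 4 KiB of random byte values as a str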
diff --git a/tempest/config.py b/tempest/config.py
index 0a0335d..01bc243 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -268,12 +268,14 @@
default=['all'],
help='A list of enabled compute extensions with a special '
'entry all which indicates every extension is enabled. '
- 'Each extension should be specified with alias name'),
+ 'Each extension should be specified with alias name. '
+ 'Empty list indicates all extensions are disabled'),
cfg.ListOpt('api_v3_extensions',
default=['all'],
help='A list of enabled v3 extensions with a special entry all'
' which indicates every extension is enabled. '
- 'Each extension should be specified with alias name'),
+ 'Each extension should be specified with alias name. '
+ 'Empty list indicates all extensions are disabled'),
cfg.BoolOpt('change_password',
default=False,
help="Does the test environment support changing the admin "
@@ -446,7 +448,8 @@
cfg.ListOpt('api_extensions',
default=['all'],
help='A list of enabled network extensions with a special '
- 'entry all which indicates every extension is enabled'),
+ 'entry all which indicates every extension is enabled. '
+ 'Empty list indicates all extensions are disabled'),
cfg.BoolOpt('ipv6_subnet_attributes',
default=False,
help="Allow the execution of IPv6 subnet tests that use "
@@ -551,7 +554,8 @@
cfg.ListOpt('api_extensions',
default=['all'],
help='A list of enabled volume extensions with a special '
- 'entry all which indicates every extension is enabled'),
+ 'entry all which indicates every extension is enabled. '
+ 'Empty list indicates all extensions are disabled'),
cfg.BoolOpt('api_v1',
default=True,
help="Is the v1 volume API enabled"),
@@ -1090,18 +1094,22 @@
cfg.CONF.set_default('domain_name', self.identity.admin_domain_name,
group='compute-admin')
- def __init__(self, parse_conf=True):
+ def __init__(self, parse_conf=True, config_path=None):
"""Initialize a configuration from a conf directory and conf file."""
super(TempestConfigPrivate, self).__init__()
config_files = []
failsafe_path = "/etc/tempest/" + self.DEFAULT_CONFIG_FILE
- # Environment variables override defaults...
- conf_dir = os.environ.get('TEMPEST_CONFIG_DIR',
- self.DEFAULT_CONFIG_DIR)
- conf_file = os.environ.get('TEMPEST_CONFIG', self.DEFAULT_CONFIG_FILE)
+ if config_path:
+ path = config_path
+ else:
+ # Environment variables override defaults...
+ conf_dir = os.environ.get('TEMPEST_CONFIG_DIR',
+ self.DEFAULT_CONFIG_DIR)
+ conf_file = os.environ.get('TEMPEST_CONFIG',
+ self.DEFAULT_CONFIG_FILE)
- path = os.path.join(conf_dir, conf_file)
+ path = os.path.join(conf_dir, conf_file)
if not os.path.isfile(path):
path = failsafe_path
@@ -1123,6 +1131,7 @@
class TempestConfigProxy(object):
_config = None
+ _path = None
_extra_log_defaults = [
'keystoneclient.session=INFO',
@@ -1139,9 +1148,12 @@
def __getattr__(self, attr):
if not self._config:
self._fix_log_levels()
- self._config = TempestConfigPrivate()
+ self._config = TempestConfigPrivate(config_path=self._path)
return getattr(self._config, attr)
+ def set_config_path(self, path):
+ self._path = path
+
CONF = TempestConfigProxy()
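Because TempestConfigProxy builds the private config lazily on first attribute access, set_config_path() only takes effect if it is called before anything reads CONF. A small usage sketch (the path is a placeholder):

    from tempest import config

    # Must run before the first CONF.<group>.<option> access; that first
    # access is what instantiates TempestConfigPrivate(config_path=...).
    config.CONF.set_config_path('/opt/stack/javelin-tempest.conf')

    timeout = config.CONF.compute.build_timeout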
diff --git a/tempest/exceptions.py b/tempest/exceptions.py
index 4eb1cea..9d443cc 100644
--- a/tempest/exceptions.py
+++ b/tempest/exceptions.py
@@ -211,3 +211,17 @@
class InvalidStructure(TempestException):
message = "Invalid structure of table with details"
+
+
+class CommandFailed(Exception):
+ def __init__(self, returncode, cmd, output, stderr):
+ super(CommandFailed, self).__init__()
+ self.returncode = returncode
+ self.cmd = cmd
+ self.stdout = output
+ self.stderr = stderr
+
+ def __str__(self):
+ return ("Command '%s' returned non-zero exit status %d.\n"
+ "stdout:\n%s\n"
+ "stderr:\n%s" % (self.cmd, self.returncode, self.stdout, self.stderr))
diff --git a/tempest/scenario/README.rst b/tempest/scenario/README.rst
index 835ba99..5a287d6 100644
--- a/tempest/scenario/README.rst
+++ b/tempest/scenario/README.rst
@@ -1,3 +1,5 @@
+.. _scenario_field_guide:
+
Tempest Field Guide to Scenario tests
=====================================
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index ca79325..3cfc698 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -53,6 +53,32 @@
LOG_cinder_client.addHandler(log.NullHandler())
+class ScenarioTest(tempest.test.BaseTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ super(ScenarioTest, cls).setUpClass()
+ cls.isolated_creds = isolated_creds.IsolatedCreds(
+ cls.__name__, tempest_client=True,
+ network_resources=cls.network_resources)
+ cls.manager = clients.Manager(
+ credentials=cls.credentials()
+ )
+
+ @classmethod
+ def _get_credentials(cls, get_creds, ctype):
+ if CONF.compute.allow_tenant_isolation:
+ creds = get_creds()
+ else:
+ creds = auth.get_default_credentials(ctype)
+ return creds
+
+ @classmethod
+ def credentials(cls):
+ return cls._get_credentials(cls.isolated_creds.get_primary_creds,
+ 'user')
+
+
class OfficialClientTest(tempest.test.BaseTestCase):
"""
Official Client test base class for scenario testing.
@@ -87,6 +113,11 @@
cls.ceilometer_client = cls.manager.ceilometer_client
@classmethod
+ def tearDownClass(cls):
+ cls.isolated_creds.clear_isolated_creds()
+ super(OfficialClientTest, cls).tearDownClass()
+
+ @classmethod
def _get_credentials(cls, get_creds, ctype):
if CONF.compute.allow_tenant_isolation:
creds = get_creds()
diff --git a/tempest/scenario/test_dashboard_basic_ops.py b/tempest/scenario/test_dashboard_basic_ops.py
index 6418a73..4fcc70a 100644
--- a/tempest/scenario/test_dashboard_basic_ops.py
+++ b/tempest/scenario/test_dashboard_basic_ops.py
@@ -24,7 +24,7 @@
CONF = config.CONF
-class TestDashboardBasicOps(manager.OfficialClientTest):
+class TestDashboardBasicOps(manager.ScenarioTest):
"""
This is a basic scenario test:
diff --git a/tempest/services/__init__.py b/tempest/services/__init__.py
index e7bec60..e69de29 100644
--- a/tempest/services/__init__.py
+++ b/tempest/services/__init__.py
@@ -1,37 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Base Service class, which acts as a descriptor for an OpenStack service
-in the test environment
-"""
-
-
-class Service(object):
-
- def __init__(self, config):
- """
- Initializes the service.
-
- :param config: `tempest.config.Config` object
- """
- self.config = config
-
- def get_client(self):
- """
- Returns a client object that may be used to query
- the service API.
- """
- raise NotImplementedError
diff --git a/tempest/services/compute/json/security_group_default_rules_client.py b/tempest/services/compute/json/security_group_default_rules_client.py
new file mode 100644
index 0000000..6d29837
--- /dev/null
+++ b/tempest/services/compute/json/security_group_default_rules_client.py
@@ -0,0 +1,74 @@
+# Copyright 2014 NEC Corporation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+
+from tempest.common import rest_client
+from tempest import config
+
+CONF = config.CONF
+
+
+class SecurityGroupDefaultRulesClientJSON(rest_client.RestClient):
+
+ def __init__(self, auth_provider):
+ super(SecurityGroupDefaultRulesClientJSON,
+ self).__init__(auth_provider)
+ self.service = CONF.compute.catalog_type
+
+ def create_security_default_group_rule(self, ip_protocol, from_port,
+ to_port, **kwargs):
+ """
+ Create a security group default rule.
+ ip_protocol : IP protocol (icmp, tcp or udp).
+ from_port : Port at start of range.
+ to_port : Port at end of range.
+ cidr : CIDR for the address range.
+ """
+ post_body = {
+ 'ip_protocol': ip_protocol,
+ 'from_port': from_port,
+ 'to_port': to_port,
+ 'cidr': kwargs.get('cidr'),
+ }
+ post_body = json.dumps({'security_group_default_rule': post_body})
+ url = 'os-security-group-default-rules'
+ resp, body = self.post(url, post_body)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return resp, body['security_group_default_rule']
+
+ def delete_security_group_default_rule(self,
+ security_group_default_rule_id):
+ """Deletes the provided Security Group default rule."""
+ resp, body = self.delete('os-security-group-default-rules/%s' % str(
+ security_group_default_rule_id))
+ self.expected_success(204, resp.status)
+ return resp, body
+
+ def list_security_group_default_rules(self):
+ """List all Security Group default rules."""
+ resp, body = self.get('os-security-group-default-rules')
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return resp, body['security_group_default_rules']
+
+ def get_security_group_default_rule(self, security_group_default_rule_id):
+ """Return the details of provided Security Group default rule."""
+ resp, body = self.get('os-security-group-default-rules/%s' % str(
+ security_group_default_rule_id))
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return resp, body['security_group_default_rule']
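A usage sketch for the new client; only the method names and payloads come from the file above, while the way the client instance is obtained is an assumption:

    # Assumes `client` is a SecurityGroupDefaultRulesClientJSON wired up by
    # the service client manager.
    resp, rule = client.create_security_default_group_rule(
        'tcp', 22, 22, cidr='10.10.0.0/24')

    resp, rules = client.list_security_group_default_rules()
    assert rule['id'] in [r['id'] for r in rules]

    resp, _ = client.delete_security_group_default_rule(rule['id'])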
diff --git a/tempest/services/compute/json/servers_client.py b/tempest/services/compute/json/servers_client.py
index aa81567..a4e3641 100644
--- a/tempest/services/compute/json/servers_client.py
+++ b/tempest/services/compute/json/servers_client.py
@@ -459,7 +459,8 @@
def rescue_server(self, server_id, **kwargs):
"""Rescue the provided server."""
- return self.action(server_id, 'rescue', 'adminPass', None, **kwargs)
+ return self.action(server_id, 'rescue', 'adminPass',
+ schema.rescue_server, **kwargs)
def unrescue_server(self, server_id):
"""Unrescue the provided server."""
diff --git a/tempest/services/compute/v3/json/servers_client.py b/tempest/services/compute/v3/json/servers_client.py
index dbc87f4..c3fd355 100644
--- a/tempest/services/compute/v3/json/servers_client.py
+++ b/tempest/services/compute/v3/json/servers_client.py
@@ -454,8 +454,16 @@
def rescue_server(self, server_id, **kwargs):
"""Rescue the provided server."""
- return self.action(server_id, 'rescue', 'admin_password',
- None, **kwargs)
+ post_body = json.dumps({'rescue': kwargs})
+ resp, body = self.post('servers/%s/action' % str(server_id),
+ post_body)
+ if CONF.compute_feature_enabled.enable_instance_password:
+ rescue_schema = schema.rescue_server_with_admin_pass
+ else:
+ rescue_schema = schema.rescue_server
+ body = json.loads(body)
+ self.validate_response(rescue_schema, resp, body)
+ return resp, body
def unrescue_server(self, server_id):
"""Unrescue the provided server."""
diff --git a/tempest/services/data_processing/v1_1/client.py b/tempest/services/data_processing/v1_1/client.py
index c2c7fd1..1fe0cf1 100644
--- a/tempest/services/data_processing/v1_1/client.py
+++ b/tempest/services/data_processing/v1_1/client.py
@@ -25,29 +25,42 @@
super(DataProcessingClient, self).__init__(auth_provider)
self.service = CONF.data_processing.catalog_type
- @classmethod
- def _request_and_parse(cls, req_fun, uri, res_name, *args, **kwargs):
- """Make a request using specified req_fun and parse response.
+ def _request_and_check_resp(self, request_func, uri, resp_status):
+ """Make a request using specified request_func and check response
+ status code.
+
+ It returns pair: resp and response body.
+ """
+ resp, body = request_func(uri)
+ self.expected_success(resp_status, resp.status)
+ return resp, body
+
+ def _request_check_and_parse_resp(self, request_func, uri, resp_status,
+ resource_name, *args, **kwargs):
+ """Make a request using specified request_func, check response status
+ code and parse response body.
It returns pair: resp and parsed resource(s) body.
"""
- resp, body = req_fun(uri, headers={
- 'Content-Type': 'application/json'
- }, *args, **kwargs)
+ headers = {'Content-Type': 'application/json'}
+ resp, body = request_func(uri, headers=headers, *args, **kwargs)
+ self.expected_success(resp_status, resp.status)
body = json.loads(body)
- return resp, body[res_name]
+ return resp, body[resource_name]
def list_node_group_templates(self):
"""List all node group templates for a user."""
uri = 'node-group-templates'
- return self._request_and_parse(self.get, uri, 'node_group_templates')
+ return self._request_check_and_parse_resp(self.get, uri,
+ 200, 'node_group_templates')
def get_node_group_template(self, tmpl_id):
"""Returns the details of a single node group template."""
uri = 'node-group-templates/%s' % tmpl_id
- return self._request_and_parse(self.get, uri, 'node_group_template')
+ return self._request_check_and_parse_resp(self.get, uri,
+ 200, 'node_group_template')
def create_node_group_template(self, name, plugin_name, hadoop_version,
node_processes, flavor_id,
@@ -67,20 +80,22 @@
'flavor_id': flavor_id,
'node_configs': node_configs or dict(),
})
- return self._request_and_parse(self.post, uri, 'node_group_template',
- body=json.dumps(body))
+ return self._request_check_and_parse_resp(self.post, uri, 202,
+ 'node_group_template',
+ body=json.dumps(body))
def delete_node_group_template(self, tmpl_id):
"""Deletes the specified node group template by id."""
uri = 'node-group-templates/%s' % tmpl_id
- return self.delete(uri)
+ return self._request_and_check_resp(self.delete, uri, 204)
def list_plugins(self):
"""List all enabled plugins."""
uri = 'plugins'
- return self._request_and_parse(self.get, uri, 'plugins')
+ return self._request_check_and_parse_resp(self.get,
+ uri, 200, 'plugins')
def get_plugin(self, plugin_name, plugin_version=None):
"""Returns the details of a single plugin."""
@@ -88,19 +103,21 @@
uri = 'plugins/%s' % plugin_name
if plugin_version:
uri += '/%s' % plugin_version
- return self._request_and_parse(self.get, uri, 'plugin')
+ return self._request_check_and_parse_resp(self.get, uri, 200, 'plugin')
def list_cluster_templates(self):
"""List all cluster templates for a user."""
uri = 'cluster-templates'
- return self._request_and_parse(self.get, uri, 'cluster_templates')
+ return self._request_check_and_parse_resp(self.get, uri,
+ 200, 'cluster_templates')
def get_cluster_template(self, tmpl_id):
"""Returns the details of a single cluster template."""
uri = 'cluster-templates/%s' % tmpl_id
- return self._request_and_parse(self.get, uri, 'cluster_template')
+ return self._request_check_and_parse_resp(self.get,
+ uri, 200, 'cluster_template')
def create_cluster_template(self, name, plugin_name, hadoop_version,
node_groups, cluster_configs=None,
@@ -119,26 +136,29 @@
'node_groups': node_groups,
'cluster_configs': cluster_configs or dict(),
})
- return self._request_and_parse(self.post, uri, 'cluster_template',
- body=json.dumps(body))
+ return self._request_check_and_parse_resp(self.post, uri, 202,
+ 'cluster_template',
+ body=json.dumps(body))
def delete_cluster_template(self, tmpl_id):
"""Deletes the specified cluster template by id."""
uri = 'cluster-templates/%s' % tmpl_id
- return self.delete(uri)
+ return self._request_and_check_resp(self.delete, uri, 204)
def list_data_sources(self):
"""List all data sources for a user."""
uri = 'data-sources'
- return self._request_and_parse(self.get, uri, 'data_sources')
+ return self._request_check_and_parse_resp(self.get,
+ uri, 200, 'data_sources')
def get_data_source(self, source_id):
"""Returns the details of a single data source."""
uri = 'data-sources/%s' % source_id
- return self._request_and_parse(self.get, uri, 'data_source')
+ return self._request_check_and_parse_resp(self.get,
+ uri, 200, 'data_source')
def create_data_source(self, name, data_source_type, url, **kwargs):
"""Creates data source with specified params.
@@ -153,57 +173,62 @@
'type': data_source_type,
'url': url
})
- return self._request_and_parse(self.post, uri, 'data_source',
- body=json.dumps(body))
+ return self._request_check_and_parse_resp(self.post, uri,
+ 202, 'data_source',
+ body=json.dumps(body))
def delete_data_source(self, source_id):
"""Deletes the specified data source by id."""
uri = 'data-sources/%s' % source_id
- return self.delete(uri)
+ return self._request_and_check_resp(self.delete, uri, 204)
def list_job_binary_internals(self):
"""List all job binary internals for a user."""
uri = 'job-binary-internals'
- return self._request_and_parse(self.get, uri, 'binaries')
+ return self._request_check_and_parse_resp(self.get,
+ uri, 200, 'binaries')
def get_job_binary_internal(self, job_binary_id):
"""Returns the details of a single job binary internal."""
uri = 'job-binary-internals/%s' % job_binary_id
- return self._request_and_parse(self.get, uri, 'job_binary_internal')
+ return self._request_check_and_parse_resp(self.get, uri,
+ 200, 'job_binary_internal')
def create_job_binary_internal(self, name, data):
"""Creates job binary internal with specified params."""
uri = 'job-binary-internals/%s' % name
- return self._request_and_parse(self.put, uri, 'job_binary_internal',
- data)
+ return self._request_check_and_parse_resp(self.put, uri, 202,
+ 'job_binary_internal', data)
def delete_job_binary_internal(self, job_binary_id):
"""Deletes the specified job binary internal by id."""
uri = 'job-binary-internals/%s' % job_binary_id
- return self.delete(uri)
+ return self._request_and_check_resp(self.delete, uri, 204)
def get_job_binary_internal_data(self, job_binary_id):
"""Returns data of a single job binary internal."""
uri = 'job-binary-internals/%s/data' % job_binary_id
- return self.get(uri)
+ return self._request_and_check_resp(self.get, uri, 200)
def list_job_binaries(self):
"""List all job binaries for a user."""
uri = 'job-binaries'
- return self._request_and_parse(self.get, uri, 'binaries')
+ return self._request_check_and_parse_resp(self.get,
+ uri, 200, 'binaries')
def get_job_binary(self, job_binary_id):
"""Returns the details of a single job binary."""
uri = 'job-binaries/%s' % job_binary_id
- return self._request_and_parse(self.get, uri, 'job_binary')
+ return self._request_check_and_parse_resp(self.get,
+ uri, 200, 'job_binary')
def create_job_binary(self, name, url, extra=None, **kwargs):
"""Creates job binary with specified params.
@@ -218,17 +243,18 @@
'url': url,
'extra': extra or dict(),
})
- return self._request_and_parse(self.post, uri, 'job_binary',
- body=json.dumps(body))
+ return self._request_check_and_parse_resp(self.post, uri,
+ 202, 'job_binary',
+ body=json.dumps(body))
def delete_job_binary(self, job_binary_id):
"""Deletes the specified job binary by id."""
uri = 'job-binaries/%s' % job_binary_id
- return self.delete(uri)
+ return self._request_and_check_resp(self.delete, uri, 204)
def get_job_binary_data(self, job_binary_id):
"""Returns data of a single job binary."""
uri = 'job-binaries/%s/data' % job_binary_id
- return self.get(uri)
+ return self._request_and_check_resp(self.get, uri, 200)
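For callers of the data processing client the calling convention is unchanged; the difference is that every method now fails fast if Sahara returns an unexpected status code. A brief usage sketch (the template id is a placeholder):

    resp, templates = client.list_node_group_templates()        # expects 200
    resp, body = client.delete_node_group_template('tmpl-id')   # expects 204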
diff --git a/tempest/services/identity/v3/json/identity_client.py b/tempest/services/identity/v3/json/identity_client.py
index 329f026..d57b931 100644
--- a/tempest/services/identity/v3/json/identity_client.py
+++ b/tempest/services/identity/v3/json/identity_client.py
@@ -77,6 +77,17 @@
body = json.loads(body)
return resp, body['user']
+ def update_user_password(self, user_id, password, original_password):
+ """Updates a user password."""
+ update_user = {
+ 'password': password,
+ 'original_password': original_password
+ }
+ update_user = json.dumps({'user': update_user})
+ resp, _ = self.post('users/%s/password' % user_id, update_user)
+ self.expected_success(204, resp.status)
+ return resp
+
def list_user_projects(self, user_id):
"""Lists the projects on which a user has roles assigned."""
resp, body = self.get('users/%s/projects' % user_id)
@@ -124,8 +135,11 @@
body = json.loads(body)
return resp, body['project']
- def list_projects(self):
- resp, body = self.get("projects")
+ def list_projects(self, params=None):
+ url = "projects"
+ if params:
+ url += '?%s' % urllib.urlencode(params)
+ resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body['projects']
@@ -502,10 +516,7 @@
"""HEAD Check if role is delegated by a trust."""
resp, body = self.head("OS-TRUST/trusts/%s/roles/%s"
% (trust_id, role_id))
- # This code needs to change to 200 when the keystone changes
- # for bug 1334368 merge and check_trust_roles test is
- # unskipped
- self.expected_success(204, resp.status)
+ self.expected_success(200, resp.status)
return resp, body
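A usage sketch for the two identity v3 additions; the ids, passwords and filter values are placeholders:

    # Self-service password change; keystone answers with a 204.
    resp = client.update_user_password('user-id',
                                       password='NewSecret123',
                                       original_password='OldSecret123')

    # list_projects() now accepts an optional query-filter dict.
    resp, projects = client.list_projects(params={'enabled': True})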
diff --git a/tempest/services/identity/v3/xml/identity_client.py b/tempest/services/identity/v3/xml/identity_client.py
index 3790f13..c2bd77e 100644
--- a/tempest/services/identity/v3/xml/identity_client.py
+++ b/tempest/services/identity/v3/xml/identity_client.py
@@ -139,6 +139,17 @@
body = self._parse_body(etree.fromstring(body))
return resp, body
+ def update_user_password(self, user_id, password, original_password):
+ """Updates a user password."""
+ update_user = common.Element("user",
+ xmlns=XMLNS,
+ password=password,
+ original_password=original_password)
+ resp, _ = self.post('users/%s/password' % user_id,
+ str(common.Document(update_user)))
+ self.expected_success(204, resp.status)
+ return resp
+
def list_user_projects(self, user_id):
"""Lists the projects on which a user has roles assigned."""
resp, body = self.get('users/%s/projects' % user_id)
@@ -186,9 +197,12 @@
body = self._parse_body(etree.fromstring(body))
return resp, body
- def list_projects(self):
+ def list_projects(self, params=None):
"""Get the list of projects."""
- resp, body = self.get("projects")
+ url = 'projects'
+ if params:
+ url += '?%s' % urllib.urlencode(params)
+ resp, body = self.get(url)
self.expected_success(200, resp.status)
body = self._parse_projects(etree.fromstring(body))
return resp, body
diff --git a/tempest/services/orchestration/json/orchestration_client.py b/tempest/services/orchestration/json/orchestration_client.py
index d325eb5..46b0ec4 100644
--- a/tempest/services/orchestration/json/orchestration_client.py
+++ b/tempest/services/orchestration/json/orchestration_client.py
@@ -181,7 +181,11 @@
fail_regexp = re.compile(failure_pattern)
while True:
- resp, body = self.get_stack(stack_identifier)
+ try:
+ resp, body = self.get_stack(stack_identifier)
+ except exceptions.NotFound:
+ if status == 'DELETE_COMPLETE':
+ return
stack_name = body['stack_name']
stack_status = body['stack_status']
if stack_status == status:
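The effect of the try/except above is that waiting for DELETE_COMPLETE succeeds even when the stack record disappears entirely instead of reporting that status. Typical caller flow (stack_identifier is whatever the create call returned):

    client.delete_stack(stack_identifier)
    # No longer fails if the stack 404s away before the waiter polls it.
    client.wait_for_stack_status(stack_identifier, 'DELETE_COMPLETE')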
diff --git a/tempest/stress/README.rst b/tempest/stress/README.rst
index 0a63679..4f1f56c 100644
--- a/tempest/stress/README.rst
+++ b/tempest/stress/README.rst
@@ -1,3 +1,5 @@
+.. _stress_field_guide:
+
Tempest Field Guide to Stress Tests
===================================
diff --git a/tempest/test.py b/tempest/test.py
index afe7a96..5b7330b 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -215,6 +215,8 @@
'network': CONF.network_feature_enabled.api_extensions,
'object': CONF.object_storage_feature_enabled.discoverable_apis,
}
+ if len(config_dict[service]) == 0:
+ return False
if config_dict[service][0] == 'all':
return True
if extension_name in config_dict[service]:
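With the new guard, an explicitly empty extension list now means "all extensions disabled", matching the updated option help text earlier in this change. A pure-logic sketch of the resulting lookup order (the helper below only mirrors tempest.test.is_extension_enabled; it is not its real signature):

    def extension_enabled(extension_name, configured_extensions):
        if len(configured_extensions) == 0:      # empty list: nothing enabled
            return False
        if configured_extensions[0] == 'all':    # special entry: everything enabled
            return True
        return extension_name in configured_extensions

    assert not extension_enabled('os-agents', [])
    assert extension_enabled('os-agents', ['all'])
    assert not extension_enabled('os-agents', ['os-keypairs'])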
diff --git a/tempest/tests/README.rst b/tempest/tests/README.rst
index 33d321f..e54d4c0 100644
--- a/tempest/tests/README.rst
+++ b/tempest/tests/README.rst
@@ -1,3 +1,5 @@
+.. _unit_tests_field_guide:
+
Tempest Field Guide to Unit tests
=================================
diff --git a/tempest/tests/cli/test_command_failed.py b/tempest/tests/cli/test_command_failed.py
new file mode 100644
index 0000000..36a4fc8
--- /dev/null
+++ b/tempest/tests/cli/test_command_failed.py
@@ -0,0 +1,30 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest import exceptions
+from tempest.tests import base
+
+
+class TestOutputParser(base.TestCase):
+
+ def test_command_failed_exception(self):
+ returncode = 1
+ cmd = "foo"
+ stdout = "output"
+ stderr = "error"
+ try:
+ raise exceptions.CommandFailed(returncode, cmd, stdout, stderr)
+ except exceptions.CommandFailed as e:
+ self.assertIn(str(returncode), str(e))
+ self.assertIn(cmd, str(e))
+ self.assertIn(stdout, str(e))
+ self.assertIn(stderr, str(e))
diff --git a/tempest/tests/fake_config.py b/tempest/tests/fake_config.py
index 4bed0c2..536cbcf 100644
--- a/tempest/tests/fake_config.py
+++ b/tempest/tests/fake_config.py
@@ -58,6 +58,6 @@
class FakePrivate(config.TempestConfigPrivate):
- def __init__(self):
+ def __init__(self, parse_conf=True, config_path=None):
cfg.CONF([], default_config_files=[])
self._set_attrs()
diff --git a/tempest/tests/test_auth.py b/tempest/tests/test_auth.py
index 1dcddad..6a2e335 100644
--- a/tempest/tests/test_auth.py
+++ b/tempest/tests/test_auth.py
@@ -59,12 +59,24 @@
obviously don't test not implemented method or the ones which strongly
depends on them.
"""
- _auth_provider_class = auth.AuthProvider
- def test_check_credentials_class(self):
- self.assertRaises(NotImplementedError,
- self.auth_provider.check_credentials,
- auth.Credentials())
+ class FakeAuthProviderImpl(auth.AuthProvider):
+ def _decorate_request(self):
+ pass
+
+ def _fill_credentials(self):
+ pass
+
+ def _get_auth(self):
+ pass
+
+ def base_url(self):
+ pass
+
+ def is_expired(self):
+ pass
+
+ _auth_provider_class = FakeAuthProviderImpl
def test_check_credentials_bad_type(self):
self.assertFalse(self.auth_provider.check_credentials([]))
@@ -74,16 +86,6 @@
auth_provider = self._auth(credentials={})
self.assertIsInstance(auth_provider.credentials, auth.Credentials)
- def test_instantiate_with_bad_credentials_type(self):
- """
- Assure that credentials with bad type fail with TypeError
- """
- self.assertRaises(TypeError, self._auth, [])
-
- def test_auth_data_property(self):
- self.assertRaises(NotImplementedError, getattr, self.auth_provider,
- 'auth_data')
-
def test_auth_data_property_when_cache_exists(self):
self.auth_provider.cache = 'foo'
self.useFixture(mockpatch.PatchObject(self.auth_provider,
@@ -110,9 +112,10 @@
self.assertIsNone(self.auth_provider.alt_part)
self.assertIsNone(self.auth_provider.alt_auth_data)
- def test_fill_credentials(self):
- self.assertRaises(NotImplementedError,
- self.auth_provider.fill_credentials)
+ def test_auth_class(self):
+ self.assertRaises(TypeError,
+ auth.AuthProvider,
+ fake_credentials.FakeCredentials)
class TestKeystoneV2AuthProvider(BaseAuthTestsSetUp):
diff --git a/tempest/tests/test_waiters.py b/tempest/tests/test_waiters.py
index a29cb46..1f9825e 100644
--- a/tempest/tests/test_waiters.py
+++ b/tempest/tests/test_waiters.py
@@ -15,7 +15,6 @@
import time
import mock
-import testtools
from tempest.common import waiters
from tempest import exceptions
@@ -48,221 +47,3 @@
self.assertRaises(exceptions.AddImageException,
waiters.wait_for_image_status,
self.client, 'fake_image_id', 'active')
-
-
-class TestServerWaiters(base.TestCase):
- def setUp(self):
- super(TestServerWaiters, self).setUp()
- self.client = mock.MagicMock()
- self.client.build_timeout = 1
- self.client.build_interval = 1
-
- def test_wait_for_server_status(self):
- self.client.get_server.return_value = (None, {'status':
- 'active'}
- )
- start_time = int(time.time())
- waiters.wait_for_server_status(self.client, 'fake_svr_id',
- 'active'
- )
- end_time = int(time.time())
- # Ensure waiter returns before build_timeout
- self.assertTrue((end_time - start_time) < 2)
-
- def test_wait_for_server_status_BUILD_from_not_UNKNOWN(self):
- self.client.get_server.return_value = (None, {'status': 'active'})
- start_time = int(time.time())
- waiters.wait_for_server_status(self.client, 'fake_svr_id',
- 'BUILD')
- end_time = int(time.time())
- # Ensure waiter returns before build_timeout
- self.assertTrue((end_time - start_time) < 2)
-
- def test_wait_for_server_status_ready_wait_with_BUILD(self):
- self.client.get_server.return_value = (None, {'status': 'BUILD'})
- start_time = int(time.time())
- waiters.wait_for_server_status(self.client, 'fake_svr_id',
- 'BUILD', True)
- end_time = int(time.time())
- # Ensure waiter returns before build_timeout
- self.assertTrue((end_time - start_time) < 2)
-
- def test_wait_for_server_status_ready_wait(self):
- self.client.get_server.return_value = (None, {'status':
- 'ERROR',
- 'OS-EXT-STS:task_state':
- 'n/a'
- }
- )
- self.client.get_console_output.return_value = (None,
- {'output': 'Server fake_svr_id failed to reach '
- 'active status and task state n/a within the '
- 'required time (1 s).\nCurrent status: SUSPENDED.'
- '\nCurrent task state: None.'}
- )
- self.assertRaises(exceptions.BuildErrorException,
- waiters.wait_for_server_status,
- self.client, 'fake_svr_id', 'active',
- ready_wait=True, extra_timeout=0,
- raise_on_error=True
- )
-
- def test_wait_for_server_status_no_ready_wait(self):
- self.client.get_server.return_value = (None, {'status':
- 'ERROR',
- 'OS-EXT-STS:task_state':
- 'n/a'
- }
- )
- start_time = int(time.time())
- waiters.wait_for_server_status(self.client, 'fake_svr_id',
- 'ERROR', ready_wait=False,
- extra_timeout=10, raise_on_error=True
- )
- end_time = int(time.time())
- # Ensure waiter returns before build_timeout + extra_timeout
- self.assertTrue((end_time - start_time) < 12)
-
- def test_wait_for_server_status_timeout(self):
- self.client.get_server.return_value = (None, {'status': 'SUSPENDED'})
- self.client.get_console_output.return_value = (None,
- {'output': 'Server fake_svr_id failed to reach '
- 'active status and task state n/a within the '
- 'required time (1 s).\nCurrent status: SUSPENDED.'
- '\nCurrent task state: None.'}
- )
- self.assertRaises(exceptions.TimeoutException,
- waiters.wait_for_server_status,
- self.client, 'fake_svr_id', 'active')
-
- def test_wait_for_server_status_extra_timeout(self):
- self.client.get_server.return_value = (None, {'status': 'SUSPENDED'})
- start_time = int(time.time())
- self.client.get_console_output.return_value = (None,
- {'output': 'Server fake_svr_id failed to reach '
- 'active status and task state n/a within the '
- 'required time (10 s). \nCurrent status: SUSPENDED.'
- '\nCurrent task state: None.'}
- )
- self.assertRaises(exceptions.TimeoutException,
- waiters.wait_for_server_status,
- self.client, 'fake_svr_id',
- 'active', ready_wait=True,
- extra_timeout=10, raise_on_error=True
- )
- end_time = int(time.time())
- # Ensure waiter returns after build_timeout but
- # before build_timeout+extra timeout
- self.assertTrue(10 < (end_time - start_time) < 12)
-
- def test_wait_for_server_status_error_on_server_create(self):
- self.client.get_server.return_value = (None, {'status': 'ERROR'})
- self.client.get_console_output.return_value = (None,
- {'output': 'Server fake_svr_id failed to reach '
- 'activestatus and task state n/a within the '
- 'required time (1 s).\nCurrent status: ERROR.'
- '\nCurrent task state: None.'}
- )
- self.assertRaises(exceptions.BuildErrorException,
- waiters.wait_for_server_status,
- self.client, 'fake_svr_id', 'active')
-
- def test_wait_for_server_status_no_raise_on_error(self):
- self.client.get_server.return_value = (None, {'status': 'ERROR'})
- self.client.get_console_output.return_value = (None,
- {'output': 'Server fake_svr_id failed to reach '
- 'activestatus and task state n/a within the '
- 'required time (1 s).\nCurrent status: ERROR.'
- '\nCurrent task state: None.'}
- )
- self.assertRaises(exceptions.TimeoutException,
- waiters.wait_for_server_status,
- self.client, 'fake_svr_id', 'active',
- ready_wait=True, extra_timeout=0,
- raise_on_error=False
- )
-
- def test_wait_for_server_status_no_ready_wait_timeout(self):
- self.client.get_server.return_value = (None, {'status': 'ERROR'})
- self.client.get_console_output.return_value = (None,
- {'output': 'Server fake_svr_id failed to reach '
- 'active status and task state n/a within the '
- 'required time (11 s).\nCurrent status: ERROR.'
- '\nCurrent task state: None.'}
- )
- expected_msg = '''Request timed out
-Details: (TestServerWaiters:test_wait_for_server_status_no_ready_wait_timeout)\
- Server fake_svr_id failed to reach active status and task state "n/a" within\
- the required time (11 s). Current status: ERROR. Current task state: None.\
-'''
- with testtools.ExpectedException(exceptions.TimeoutException,
- testtools.matchers.AfterPreprocessing(
- str,
- testtools.matchers.Equals(expected_msg)
- )
- ):
- waiters.wait_for_server_status(self.client, 'fake_svr_id',
- 'active', ready_wait=False,
- extra_timeout=10,
- raise_on_error=False
- )
-
- def test_wait_for_server_status_ready_wait_timeout(self):
- self.client.get_server.return_value = (None, {'status': 'ERROR'})
- self.client.get_console_output.return_value = (None,
- {'output': 'Server fake_svr_id failed to reach '
- 'activestatus and task state n/a within the '
- 'required time (11 s).\nCurrent status: ERROR.'
- '\nCurrent task state: None.'}
- )
- expected_msg = '''Request timed out
-Details: (TestServerWaiters:test_wait_for_server_status_ready_wait_timeout)\
- Server fake_svr_id failed to reach active status and task state "None" within\
- the required time (11 s). Current status: ERROR. Current task state: None.\
-'''
- with testtools.ExpectedException(exceptions.TimeoutException,
- testtools.matchers.AfterPreprocessing(
- str,
- testtools.matchers.Equals(expected_msg)
- )
- ):
- waiters.wait_for_server_status(self.client, 'fake_svr_id',
- 'active', ready_wait=True,
- extra_timeout=10,
- raise_on_error=False
- )
-
- def test_wait_for_changing_server_status(self):
- self.client.get_server.side_effect = [(None, {'status': 'BUILD'}),
- (None, {'status': 'active'})]
- start_time = int(time.time())
- waiters.wait_for_server_status(self.client, 'fake_svr_id',
- 'active', ready_wait=True,
- extra_timeout=10,
- raise_on_error=True
- )
- end_time = int(time.time())
- # Ensure waiter returns before build_timeout + extra_timeout
- self.assertTrue((end_time - start_time) < 12)
-
- def test_wait_for_changing_server_task_status(self):
- self.client.get_server.side_effect = [(None, {'status': 'BUILD',
- 'OS-EXT-STS:task_state':
- 'n/a'
- }
- ),
- (None, {'status': 'active',
- 'OS-EXT-STS:task_state':
- 'None'
- }
- )
- ]
- start_time = int(time.time())
- waiters.wait_for_server_status(self.client, 'fake_svr_id',
- 'active', ready_wait=True,
- extra_timeout=10,
- raise_on_error=True
- )
- end_time = int(time.time())
- # Ensure waiter returns before build_timeout + extra_timeout
- self.assertTrue((end_time - start_time) < 12)
diff --git a/tempest/thirdparty/README.rst b/tempest/thirdparty/README.rst
index 53cb54b..b0bfdf7 100644
--- a/tempest/thirdparty/README.rst
+++ b/tempest/thirdparty/README.rst
@@ -1,3 +1,5 @@
+.. _third_party_field_guide:
+
Tempest Field Guide to Third Party API tests
============================================