Merge "Adds scenario for IPv6 addresses"
diff --git a/HACKING.rst b/HACKING.rst
index e920634..607682b 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -11,7 +11,7 @@
- [T102] Cannot import OpenStack python clients in tempest/api &
tempest/scenario tests
- [T104] Scenario tests require a services decorator
-- [T105] Unit tests cannot use setUpClass
+- [T105] Tests cannot use setUpClass/tearDownClass
- [T106] vim configuration should not be kept in source files.
- [N322] Method's default argument shouldn't be mutable
@@ -108,12 +108,52 @@
in tempest.api.compute would require a service tag for those services, however
they do not need to be tagged as compute.
+Test fixtures and resources
+---------------------------
+Test level resources should be cleaned up after the test execution. Clean-up
+is best scheduled using `addCleanup`, which ensures that the resource cleanup
+code is always invoked, and in reverse order with respect to the creation
+order.
+
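+For example, a test that builds dependent resources can schedule a clean-up
+right after each creation (a minimal sketch; the client and method names are
+only illustrative)::
+
+    def test_snapshot_cleanup(self):
+        # created first, cleaned up last
+        _, volume = self.volumes_client.create_volume()
+        self.addCleanup(self.volumes_client.delete_volume, volume['id'])
+        # created second, cleaned up first
+        _, snapshot = self.snapshots_client.create_snapshot(volume['id'])
+        self.addCleanup(self.snapshots_client.delete_snapshot,
+                        snapshot['id'])
+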
+Test class level resources should be defined in the `resource_setup` method of
+the test class, except for any credentials obtained from the credentials
+provider, which should be set up in the `setup_credentials` method.
+
+The test base class `BaseTestCase` defines the Tempest framework for class
+level fixtures. `setUpClass` and `tearDownClass` are defined there and cannot
+be overridden by subclasses (enforced via hacking rule T105).
+
+Set-up is split into a series of steps (setup stages), which can be
+overridden by test classes. The setup stages are:
+- `skip_checks`
+- `setup_credentials`
+- `setup_clients`
+- `resource_setup`
+
+Tear-down is also split into a series of steps (teardown stages), which are
+stacked for execution only if the corresponding setup stage was reached
+during the setup phase (a sketch of overriding these stages follows the
+lists). The teardown stages are:
+- `clear_isolated_creds` (defined in the base test class)
+- `resource_cleanup`
+
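+A test class that overrides some of these stages might look like the
+following sketch (a minimal example; the class, configuration option and
+client attribute names are only illustrative, and each overridden stage
+calls its parent implementation)::
+
+    class VolumesSnapshotTest(base.BaseVolumeTest):
+
+        @classmethod
+        def skip_checks(cls):
+            super(VolumesSnapshotTest, cls).skip_checks()
+            if not CONF.volume_feature_enabled.snapshot:
+                raise cls.skipException("Cinder snapshots are disabled")
+
+        @classmethod
+        def setup_clients(cls):
+            super(VolumesSnapshotTest, cls).setup_clients()
+            cls.snapshots_client = cls.os.snapshots_client
+
+        @classmethod
+        def resource_setup(cls):
+            super(VolumesSnapshotTest, cls).resource_setup()
+            cls.volume = cls.create_volume()
+
+        @classmethod
+        def resource_cleanup(cls):
+            # resources created in resource_setup are released here
+            super(VolumesSnapshotTest, cls).resource_cleanup()
+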
+Skipping Tests
+--------------
+Skipping tests should be based on configuration only. If that is not possible,
+it is likely that either a configuration flag is missing, or the test should
+fail rather than be skipped.
+Using discovery for skipping tests is generally discouraged.
+
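+Configuration driven skips are usually expressed against a feature flag, for
+instance with `testtools.skipUnless` (a minimal sketch; the flag and test
+names are only examples)::
+
+    @testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
+                          "Snapshotting is not available.")
+    def test_create_image_from_server(self):
+        ...
+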
+When a test requires a certain "feature" in the target cloud and that feature
+is missing, the test should fail: either the test configuration is invalid,
+or the cloud is broken and the expected "feature" is not there even though
+the cloud was configured with it.
+
Negative Tests
--------------
Newly added negative tests should use the negative test framework. First step
-is to create an interface description in a json file under `etc/schemas`.
-These descriptions consists of two important sections for the test
-(one of those is mandatory):
+is to create an interface description in a python file under
+`tempest/api_schema/request/`. These descriptions consist of two important
+sections for the test (one of those is mandatory):
- A resource (part of the URL of the request): Resources needed for a test
must be created in `setUpClass` and registered with `set_resource` e.g.:
@@ -126,21 +166,17 @@
load_tests = test.NegativeAutoTest.load_tests
- class SampeTestNegativeTestJSON(<your base class>, test.NegativeAutoTest):
- _interface = 'json'
+ @test.SimpleNegativeAutoTest
+ class SampleTestNegativeTestJSON(<your base class>, test.NegativeAutoTest):
_service = 'compute'
- _schema_file = <your Schema file>
+ _schema = <your schema file>
-Negative tests must be marked with a negative attribute::
-
- @test.attr(type=['negative', 'gate'])
- def test_get_console_output(self):
- self.execute(self._schema_file)
+The class decorator `SimpleNegativeAutoTest` will automatically generate test
+cases from the schema given in the `_schema` attribute.
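+
+A description module under `tempest/api_schema/request/` bundles the HTTP
+method, the URL and a JSON schema for the invalid inputs to generate. The
+snippet below only illustrates the idea; check the existing descriptions
+(e.g. `flavors.py`) for the exact keys the framework expects::
+
+    flavor_list = {
+        "name": "list-flavors-with-detail",
+        "http-method": "GET",
+        "url": "flavors/detail",
+        "json-schema": {
+            "type": "object",
+            "properties": {
+                "minRam": {"type": "integer"},
+                "minDisk": {"type": "integer"}
+            }
+        }
+    }
+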
All negative tests should be added into a separate negative test file.
If such a file doesn't exist for the particular resource being tested a new
-test file should be added. Old XML based negative tests can be kept but should
-be renamed to `_xml.py`.
+test file should be added.
Test skips because of Known Bugs
--------------------------------
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index b70b446..86dda80 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -517,9 +517,6 @@
# From tempest.config
#
-# Enable diagnostic commands (boolean value)
-#enable = true
-
# A regex to determine which requests should be traced. This is a
# regex to match the caller for rest client requests to be able to
# selectively trace calls out of specific classes and methods. It
@@ -578,6 +575,10 @@
# (string value)
#auth_version = v2
+# Specify a CA bundle file to use in verifying a TLS (https) server
+# certificate. (string value)
+#ca_certificates_file = <None>
+
# Catalog type of the Identity service. (string value)
#catalog_type = identity
diff --git a/tempest/api/baremetal/admin/test_ports.py b/tempest/api/baremetal/admin/test_ports.py
index b3f9b7f..3392ab9 100644
--- a/tempest/api/baremetal/admin/test_ports.py
+++ b/tempest/api/baremetal/admin/test_ports.py
@@ -57,11 +57,13 @@
_, body = self.client.show_port(uuid)
self._assertExpected(port, body)
+ @test.skip_because(bug='1398350')
@test.attr(type='smoke')
def test_create_port_with_extra(self):
node_id = self.node['uuid']
address = data_utils.rand_mac_address()
- extra = {'key': 'value'}
+ extra = {'str': 'value', 'int': 123, 'float': 0.123,
+ 'bool': True, 'list': [1, 2, 3], 'dict': {'foo': 'bar'}}
_, port = self.create_port(node_id=node_id, address=address,
extra=extra)
@@ -224,6 +226,7 @@
_, body = self.client.show_port(port['uuid'])
self.assertEqual(extra, body['extra'])
+ @test.skip_because(bug='1398350')
@test.attr(type='smoke')
def test_update_port_mixed_ops(self):
node_id = self.node['uuid']
@@ -234,7 +237,7 @@
extra=extra)
new_address = data_utils.rand_mac_address()
- new_extra = {'key1': 'new-value1', 'key3': 'new-value3'}
+ new_extra = {'key1': 0.123, 'key3': {'cat': 'meow'}}
patch = [{'path': '/address',
'op': 'replace',
diff --git a/tempest/api/baremetal/admin/test_ports_negative.py b/tempest/api/baremetal/admin/test_ports_negative.py
index ead3799..8080eb6 100644
--- a/tempest/api/baremetal/admin/test_ports_negative.py
+++ b/tempest/api/baremetal/admin/test_ports_negative.py
@@ -34,15 +34,6 @@
self.create_port, node_id=node_id, address=address)
@test.attr(type=['negative', 'smoke'])
- def test_create_port_malformed_extra(self):
- node_id = self.node['uuid']
- address = data_utils.rand_mac_address()
- extra = {'key': 0.123}
- self.assertRaises(exc.BadRequest,
- self.create_port, node_id=node_id,
- address=address, extra=extra)
-
- @test.attr(type=['negative', 'smoke'])
def test_create_port_nonexsistent_node_id(self):
node_id = str(data_utils.rand_uuid())
address = data_utils.rand_mac_address()
@@ -160,31 +151,6 @@
'value': new_address}])
@test.attr(type=['negative', 'smoke'])
- def test_update_port_add_malformed_extra(self):
- node_id = self.node['uuid']
- address = data_utils.rand_mac_address()
-
- _, port = self.create_port(node_id=node_id, address=address)
- port_id = port['uuid']
-
- self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
- [{'path': '/extra/key', ' op': 'add',
- 'value': 0.123}])
-
- @test.attr(type=['negative', 'smoke'])
- def test_update_port_add_whole_malformed_extra(self):
- node_id = self.node['uuid']
- address = data_utils.rand_mac_address()
-
- _, port = self.create_port(node_id=node_id, address=address)
- port_id = port['uuid']
-
- self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
- [{'path': '/extra',
- 'op': 'add',
- 'value': [1, 2, 3, 4, 'a']}])
-
- @test.attr(type=['negative', 'smoke'])
def test_update_port_add_nonexistent_property(self):
node_id = self.node['uuid']
address = data_utils.rand_mac_address()
@@ -257,37 +223,6 @@
self.client.update_port, port_id, patch)
@test.attr(type=['negative', 'smoke'])
- def test_update_port_replace_extra_item_with_malformed(self):
- node_id = self.node['uuid']
- address = data_utils.rand_mac_address()
- extra = {'key': 'value'}
-
- _, port = self.create_port(node_id=node_id, address=address,
- extra=extra)
- port_id = port['uuid']
-
- patch = [{'path': '/extra/key',
- 'op': 'replace',
- 'value': 0.123}]
- self.assertRaises(exc.BadRequest,
- self.client.update_port, port_id, patch)
-
- @test.attr(type=['negative', 'smoke'])
- def test_update_port_replace_whole_extra_with_malformed(self):
- node_id = self.node['uuid']
- address = data_utils.rand_mac_address()
-
- _, port = self.create_port(node_id=node_id, address=address)
- port_id = port['uuid']
-
- patch = [{'path': '/extra',
- 'op': 'replace',
- 'value': [1, 2, 3, 4, 'a']}]
-
- self.assertRaises(exc.BadRequest,
- self.client.update_port, port_id, patch)
-
- @test.attr(type=['negative', 'smoke'])
def test_update_port_replace_nonexistent_property(self):
node_id = self.node['uuid']
address = data_utils.rand_mac_address()
diff --git a/tempest/api/compute/admin/test_flavors_negative.py b/tempest/api/compute/admin/test_flavors_negative.py
index 5bc3d10..fb27360 100644
--- a/tempest/api/compute/admin/test_flavors_negative.py
+++ b/tempest/api/compute/admin/test_flavors_negative.py
@@ -18,9 +18,13 @@
from tempest.api.compute import base
from tempest.api_schema.request.compute.v2 import flavors
from tempest.common.utils import data_utils
+from tempest import config
from tempest import exceptions
from tempest import test
+
+CONF = config.CONF
+
load_tests = test.NegativeAutoTest.load_tests
@@ -106,5 +110,5 @@
class FlavorCreateNegativeTestJSON(base.BaseV2ComputeAdminTest,
test.NegativeAutoTest):
_interface = 'json'
- _service = 'compute'
+ _service = CONF.compute.catalog_type
_schema = flavors.flavor_create
diff --git a/tempest/api/compute/flavors/test_flavors_negative.py b/tempest/api/compute/flavors/test_flavors_negative.py
index cae1ac4..83f8e19 100644
--- a/tempest/api/compute/flavors/test_flavors_negative.py
+++ b/tempest/api/compute/flavors/test_flavors_negative.py
@@ -15,23 +15,26 @@
from tempest.api.compute import base
from tempest.api_schema.request.compute.v2 import flavors
+from tempest import config
from tempest import test
+CONF = config.CONF
+
load_tests = test.NegativeAutoTest.load_tests
@test.SimpleNegativeAutoTest
class FlavorsListWithDetailsNegativeTestJSON(base.BaseV2ComputeTest,
test.NegativeAutoTest):
- _service = 'compute'
+ _service = CONF.compute.catalog_type
_schema = flavors.flavor_list
@test.SimpleNegativeAutoTest
class FlavorDetailsNegativeTestJSON(base.BaseV2ComputeTest,
test.NegativeAutoTest):
- _service = 'compute'
+ _service = CONF.compute.catalog_type
_schema = flavors.flavors_details
@classmethod
diff --git a/tempest/api/compute/servers/test_server_metadata_negative.py b/tempest/api/compute/servers/test_server_metadata_negative.py
index ad1ec70..8b074fd 100644
--- a/tempest/api/compute/servers/test_server_metadata_negative.py
+++ b/tempest/api/compute/servers/test_server_metadata_negative.py
@@ -40,7 +40,7 @@
for sz in [256, 257, 511, 1023]:
key = "k" * sz
meta = {key: 'data1'}
- self.assertRaises(exceptions.OverLimit,
+ self.assertRaises((exceptions.BadRequest, exceptions.OverLimit),
self.create_test_server,
meta=meta)
diff --git a/tempest/api/compute/servers/test_servers_negative.py b/tempest/api/compute/servers/test_servers_negative.py
index 1a338bd..4e6dcda 100644
--- a/tempest/api/compute/servers/test_servers_negative.py
+++ b/tempest/api/compute/servers/test_servers_negative.py
@@ -219,7 +219,7 @@
# Pass really long metadata while creating a server
metadata = {'a': 'b' * 260}
- self.assertRaises(exceptions.OverLimit,
+ self.assertRaises((exceptions.BadRequest, exceptions.OverLimit),
self.create_test_server,
meta=metadata)
diff --git a/tempest/api/compute/test_extensions.py b/tempest/api/compute/test_extensions.py
index 25e14a8..46e7251 100644
--- a/tempest/api/compute/test_extensions.py
+++ b/tempest/api/compute/test_extensions.py
@@ -38,11 +38,11 @@
if ext == 'all':
self.assertIn('Hosts', map(lambda x: x['name'], extensions))
elif ext:
- self.assertIn(ext, map(lambda x: x['name'], extensions))
+ self.assertIn(ext, map(lambda x: x['alias'], extensions))
else:
raise self.skipException('There are not any extensions configured')
# Log extensions list
- extension_list = map(lambda x: x['name'], extensions)
+ extension_list = map(lambda x: x['alias'], extensions)
LOG.debug("Nova extensions: %s" % ','.join(extension_list))
@test.requires_ext(extension='os-consoles', service='compute')
diff --git a/tempest/api/identity/admin/v3/test_default_project_id.py b/tempest/api/identity/admin/v3/test_default_project_id.py
new file mode 100644
index 0000000..8ffd1ed
--- /dev/null
+++ b/tempest/api/identity/admin/v3/test_default_project_id.py
@@ -0,0 +1,84 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.identity import base
+from tempest import auth
+from tempest import clients
+from tempest.common.utils import data_utils
+from tempest import test
+
+
+class TestDefaultProjectId(base.BaseIdentityV3AdminTest):
+ _interface = 'json'
+
+ @classmethod
+ def resource_setup(cls):
+ cls.set_network_resources()
+ super(TestDefaultProjectId, cls).resource_setup()
+
+ def _delete_domain(self, domain_id):
+ # It is necessary to disable the domain before deleting,
+ # or else it would result in unauthorized error
+ self.client.update_domain(domain_id, enabled=False)
+ self.client.delete_domain(domain_id)
+
+ @test.attr(type='smoke')
+ def test_default_project_id(self):
+ # create a domain
+ dom_name = data_utils.rand_name('dom')
+ _, domain_body = self.client.create_domain(dom_name)
+ dom_id = domain_body['id']
+ self.addCleanup(self._delete_domain, dom_id)
+
+ # create a project in the domain
+ proj_name = data_utils.rand_name('proj')
+ _, proj_body = self.client.create_project(proj_name, domain_id=dom_id)
+ proj_id = proj_body['id']
+ self.addCleanup(self.client.delete_project, proj_id)
+ self.assertEqual(proj_body['domain_id'], dom_id,
+ "project " + proj_name +
+                         " doesn't have domain id " + dom_id)
+
+ # create a user in the domain, with the previous project as his
+ # default project
+ user_name = data_utils.rand_name('user')
+ _, user_body = self.client.create_user(user_name, password=user_name,
+ domain_id=dom_id,
+ default_project_id=proj_id)
+ user_id = user_body['id']
+ self.addCleanup(self.client.delete_user, user_id)
+ self.assertEqual(user_body['domain_id'], dom_id,
+ "user " + user_name +
+                         " doesn't have domain id " + dom_id)
+
+ # get roles and find the admin role
+ admin_role = self.get_role_by_name("admin")
+ admin_role_id = admin_role['id']
+
+ # grant the admin role to the user on his project
+ self.client.assign_user_role_on_project(proj_id, user_id,
+ admin_role_id)
+
+ # create a new client with user's credentials (NOTE: unscoped token!)
+ creds = auth.KeystoneV3Credentials(username=user_name,
+ password=user_name,
+ domain_name=dom_name)
+ auth_provider = auth.KeystoneV3AuthProvider(creds)
+ creds = auth_provider.fill_credentials()
+ admin_client = clients.Manager(interface=self._interface,
+ credentials=creds)
+
+ # verify the user's token and see that it is scoped to the project
+ token, auth_data = admin_client.auth_provider.get_auth()
+ _, result = admin_client.identity_v3_client.get_token(token)
+ self.assertEqual(result['project']['domain']['id'], dom_id)
+ self.assertEqual(result['project']['id'], proj_id)
diff --git a/tempest/api/identity/admin/v3/test_policies.py b/tempest/api/identity/admin/v3/test_policies.py
index 9ea61df..23df13d 100644
--- a/tempest/api/identity/admin/v3/test_policies.py
+++ b/tempest/api/identity/admin/v3/test_policies.py
@@ -32,13 +32,13 @@
for _ in range(3):
blob = data_utils.rand_name('BlobName-')
policy_type = data_utils.rand_name('PolicyType-')
- resp, policy = self.policy_client.create_policy(blob,
- policy_type)
+ policy = self.policy_client.create_policy(blob,
+ policy_type)
# Delete the Policy at the end of this method
self.addCleanup(self._delete_policy, policy['id'])
policy_ids.append(policy['id'])
# List and Verify Policies
- _, body = self.policy_client.list_policies()
+ body = self.policy_client.list_policies()
for p in body:
fetched_ids.append(p['id'])
missing_pols = [p for p in policy_ids if p not in fetched_ids]
@@ -49,7 +49,7 @@
# Test to update policy
blob = data_utils.rand_name('BlobName-')
policy_type = data_utils.rand_name('PolicyType-')
- _, policy = self.policy_client.create_policy(blob, policy_type)
+ policy = self.policy_client.create_policy(blob, policy_type)
self.addCleanup(self._delete_policy, policy['id'])
self.assertIn('id', policy)
self.assertIn('type', policy)
@@ -59,11 +59,11 @@
self.assertEqual(policy_type, policy['type'])
# Update policy
update_type = data_utils.rand_name('UpdatedPolicyType-')
- _, data = self.policy_client.update_policy(
+ data = self.policy_client.update_policy(
policy['id'], type=update_type)
self.assertIn('type', data)
# Assertion for updated value with fetched value
- _, fetched_policy = self.policy_client.get_policy(policy['id'])
+ fetched_policy = self.policy_client.get_policy(policy['id'])
self.assertIn('id', fetched_policy)
self.assertIn('blob', fetched_policy)
self.assertIn('type', fetched_policy)
diff --git a/tempest/api/image/base.py b/tempest/api/image/base.py
index 74baba6..76b6f17 100644
--- a/tempest/api/image/base.py
+++ b/tempest/api/image/base.py
@@ -15,7 +15,7 @@
import cStringIO as StringIO
from tempest import clients
-from tempest.common import isolated_creds
+from tempest.common import credentials
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
@@ -36,7 +36,7 @@
super(BaseImageTest, cls).resource_setup()
cls.created_images = []
cls._interface = 'json'
- cls.isolated_creds = isolated_creds.IsolatedCreds(
+ cls.isolated_creds = credentials.get_isolated_credentials(
cls.__name__, network_resources=cls.network_resources)
if not CONF.service_available.glance:
skip_msg = ("%s skipped as glance is not available" % cls.__name__)
diff --git a/tempest/api/network/test_fwaas_extensions.py b/tempest/api/network/test_fwaas_extensions.py
index 12b8887..0c36820 100644
--- a/tempest/api/network/test_fwaas_extensions.py
+++ b/tempest/api/network/test_fwaas_extensions.py
@@ -38,6 +38,8 @@
Update firewall policy
Insert firewall rule to policy
Remove firewall rule from policy
+ Insert firewall rule after/before rule in policy
+ Update firewall policy audited attribute
Delete firewall policy
Show firewall policy
List firewall
@@ -222,14 +224,14 @@
self.client.delete_firewall(firewall_id)
@test.attr(type='smoke')
- def test_insert_remove_firewall_rule_from_policy(self):
+ def test_firewall_rule_insertion_position_removal_rule_from_policy(self):
# Create firewall rule
resp, body = self.client.create_firewall_rule(
name=data_utils.rand_name("fw-rule"),
action="allow",
protocol="tcp")
- fw_rule_id = body['firewall_rule']['id']
- self.addCleanup(self._try_delete_rule, fw_rule_id)
+ fw_rule_id1 = body['firewall_rule']['id']
+ self.addCleanup(self._try_delete_rule, fw_rule_id1)
# Create firewall policy
_, body = self.client.create_firewall_policy(
name=data_utils.rand_name("fw-policy"))
@@ -238,19 +240,76 @@
# Insert rule to firewall policy
self.client.insert_firewall_rule_in_policy(
- fw_policy_id, fw_rule_id, '', '')
+ fw_policy_id, fw_rule_id1, '', '')
# Verify insertion of rule in policy
- self.assertIn(fw_rule_id, self._get_list_fw_rule_ids(fw_policy_id))
+ self.assertIn(fw_rule_id1, self._get_list_fw_rule_ids(fw_policy_id))
+ # Create another firewall rule
+ _, body = self.client.create_firewall_rule(
+ name=data_utils.rand_name("fw-rule"),
+ action="allow",
+ protocol="icmp")
+ fw_rule_id2 = body['firewall_rule']['id']
+ self.addCleanup(self._try_delete_rule, fw_rule_id2)
+
+ # Insert rule to firewall policy after the first rule
+ self.client.insert_firewall_rule_in_policy(
+ fw_policy_id, fw_rule_id2, fw_rule_id1, '')
+
+        # Verify the position of the rule after insertion
+ _, fw_rule = self.client.show_firewall_rule(
+ fw_rule_id2)
+
+ self.assertEqual(int(fw_rule['firewall_rule']['position']), 2)
# Remove rule from the firewall policy
self.client.remove_firewall_rule_from_policy(
- fw_policy_id, fw_rule_id)
+ fw_policy_id, fw_rule_id2)
+ # Insert rule to firewall policy before the first rule
+ self.client.insert_firewall_rule_in_policy(
+ fw_policy_id, fw_rule_id2, '', fw_rule_id1)
+        # Verify the position of the rule after insertion
+ _, fw_rule = self.client.show_firewall_rule(
+ fw_rule_id2)
+ self.assertEqual(int(fw_rule['firewall_rule']['position']), 1)
+ # Remove rule from the firewall policy
+ self.client.remove_firewall_rule_from_policy(
+ fw_policy_id, fw_rule_id2)
+ # Verify removal of rule from firewall policy
+ self.assertNotIn(fw_rule_id2, self._get_list_fw_rule_ids(fw_policy_id))
+
+ # Remove rule from the firewall policy
+ self.client.remove_firewall_rule_from_policy(
+ fw_policy_id, fw_rule_id1)
# Verify removal of rule from firewall policy
- self.assertNotIn(fw_rule_id, self._get_list_fw_rule_ids(fw_policy_id))
+ self.assertNotIn(fw_rule_id1, self._get_list_fw_rule_ids(fw_policy_id))
def _get_list_fw_rule_ids(self, fw_policy_id):
_, fw_policy = self.client.show_firewall_policy(
fw_policy_id)
return [ruleid for ruleid in fw_policy['firewall_policy']
['firewall_rules']]
+
+ def test_update_firewall_policy_audited_attribute(self):
+ # Create firewall rule
+ _, body = self.client.create_firewall_rule(
+ name=data_utils.rand_name("fw-rule"),
+ action="allow",
+ protocol="icmp")
+ fw_rule_id = body['firewall_rule']['id']
+ self.addCleanup(self._try_delete_rule, fw_rule_id)
+ # Create firewall policy
+ _, body = self.client.create_firewall_policy(
+ name=data_utils.rand_name('fw-policy'))
+ fw_policy_id = body['firewall_policy']['id']
+ self.addCleanup(self._try_delete_policy, fw_policy_id)
+ self.assertFalse(body['firewall_policy']['audited'])
+        # Update firewall policy audited attribute to True
+ self.client.update_firewall_policy(fw_policy_id,
+ audited=True)
+ # Insert Firewall rule to firewall policy
+ self.client.insert_firewall_rule_in_policy(
+ fw_policy_id, fw_rule_id, '', '')
+ _, body = self.client.show_firewall_policy(
+ fw_policy_id)
+ self.assertFalse(body['firewall_policy']['audited'])
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index d30c7dc..9cd5e2e 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -66,6 +66,23 @@
self.assertEqual(updated_port['name'], new_name)
self.assertFalse(updated_port['admin_state_up'])
+ def test_create_bulk_port(self):
+ network1 = self.network
+ name = data_utils.rand_name('network-')
+ network2 = self.create_network(network_name=name)
+ network_list = [network1['id'], network2['id']]
+ port_list = [{'network_id': net_id} for net_id in network_list]
+ _, body = self.client.create_bulk_port(port_list)
+ created_ports = body['ports']
+ port1 = created_ports[0]
+ port2 = created_ports[1]
+ self.addCleanup(self._delete_port, port1['id'])
+ self.addCleanup(self._delete_port, port2['id'])
+ self.assertEqual(port1['network_id'], network1['id'])
+ self.assertEqual(port2['network_id'], network2['id'])
+ self.assertTrue(port1['admin_state_up'])
+ self.assertTrue(port2['admin_state_up'])
+
@test.attr(type='smoke')
def test_show_port(self):
# Verify the details of port
@@ -186,6 +203,23 @@
[data_utils.rand_name('secgroup'),
data_utils.rand_name('secgroup')])
+ @test.attr(type='smoke')
+ def test_create_show_delete_port_user_defined_mac(self):
+ # Create a port for a legal mac
+ _, body = self.client.create_port(network_id=self.network['id'])
+ old_port = body['port']
+ free_mac_address = old_port['mac_address']
+ self.client.delete_port(old_port['id'])
+ # Create a new port with user defined mac
+ _, body = self.client.create_port(network_id=self.network['id'],
+ mac_address=free_mac_address)
+ self.addCleanup(self.client.delete_port, body['port']['id'])
+ port = body['port']
+ _, body = self.client.show_port(port['id'])
+ show_port = body['port']
+ self.assertEqual(free_mac_address,
+ show_port['mac_address'])
+
class PortsAdminExtendedAttrsTestJSON(base.BaseAdminNetworkTest):
_interface = 'json'
diff --git a/tempest/api/network/test_routers.py b/tempest/api/network/test_routers.py
index 2b4e60a..34650c5 100644
--- a/tempest/api/network/test_routers.py
+++ b/tempest/api/network/test_routers.py
@@ -190,11 +190,12 @@
self.assertEqual(len(list_body['ports']), 1)
gw_port = list_body['ports'][0]
fixed_ips = gw_port['fixed_ips']
- self.assertEqual(len(fixed_ips), 1)
+ self.assertGreaterEqual(len(fixed_ips), 1)
resp, public_net_body = self.admin_client.show_network(
CONF.network.public_network_id)
public_subnet_id = public_net_body['network']['subnets'][0]
- self.assertEqual(fixed_ips[0]['subnet_id'], public_subnet_id)
+ self.assertIn(public_subnet_id,
+ map(lambda x: x['subnet_id'], fixed_ips))
@test.attr(type='smoke')
def test_update_router_set_gateway(self):
diff --git a/tempest/api/network/test_security_groups_negative.py b/tempest/api/network/test_security_groups_negative.py
index 4626aae..b9e8666 100644
--- a/tempest/api/network/test_security_groups_negative.py
+++ b/tempest/api/network/test_security_groups_negative.py
@@ -138,6 +138,7 @@
# Create rule for icmp protocol with invalid ports
states = [(1, 256, 'Invalid value for ICMP code'),
+ (None, 6, 'ICMP type (port-range-min) is missing'),
(300, 1, 'Invalid value for ICMP type')]
for pmin, pmax, msg in states:
ex = self.assertRaises(
diff --git a/tempest/api/network/test_service_type_management.py b/tempest/api/network/test_service_type_management.py
index 6695f47..7f8b479 100644
--- a/tempest/api/network/test_service_type_management.py
+++ b/tempest/api/network/test_service_type_management.py
@@ -24,6 +24,7 @@
msg = "Neutron Service Type Management not enabled."
raise cls.skipException(msg)
+ @test.skip_because(bug="1400370")
@test.attr(type='smoke')
def test_service_provider_list(self):
_, body = self.client.list_service_providers()
diff --git a/tempest/api/object_storage/base.py b/tempest/api/object_storage/base.py
index 2e39cf9..7fabb7d 100644
--- a/tempest/api/object_storage/base.py
+++ b/tempest/api/object_storage/base.py
@@ -16,8 +16,8 @@
from tempest.api.identity import base
from tempest import clients
+from tempest.common import credentials
from tempest.common import custom_matchers
-from tempest.common import isolated_creds
from tempest import config
from tempest import exceptions
import tempest.test
@@ -34,7 +34,7 @@
if not CONF.service_available.swift:
skip_msg = ("%s skipped as swift is not available" % cls.__name__)
raise cls.skipException(skip_msg)
- cls.isolated_creds = isolated_creds.IsolatedCreds(
+ cls.isolated_creds = credentials.get_isolated_credentials(
cls.__name__, network_resources=cls.network_resources)
# Get isolated creds for normal user
cls.os = clients.Manager(cls.isolated_creds.get_primary_creds())
diff --git a/tempest/api/orchestration/stacks/templates/non_empty_stack.yaml b/tempest/api/orchestration/stacks/templates/non_empty_stack.yaml
index 8690941..4f9df91 100644
--- a/tempest/api/orchestration/stacks/templates/non_empty_stack.yaml
+++ b/tempest/api/orchestration/stacks/templates/non_empty_stack.yaml
@@ -7,6 +7,8 @@
Default: not_yet
image:
Type: String
+ flavor:
+ Type: String
Resources:
fluffy:
Type: AWS::AutoScaling::LaunchConfiguration
@@ -16,7 +18,7 @@
- Stinky
Properties:
ImageId: {Ref: image}
- InstanceType: not_used
+ InstanceType: {Ref: flavor}
UserData:
Fn::Replace:
- variable_a: {Ref: trigger}
diff --git a/tempest/api/orchestration/stacks/test_non_empty_stack.py b/tempest/api/orchestration/stacks/test_non_empty_stack.py
index 759cbbe..bf6c79c 100644
--- a/tempest/api/orchestration/stacks/test_non_empty_stack.py
+++ b/tempest/api/orchestration/stacks/test_non_empty_stack.py
@@ -31,13 +31,15 @@
template = cls.read_template('non_empty_stack')
image_id = (CONF.orchestration.image_ref or
cls._create_image()['id'])
+ flavor = CONF.orchestration.instance_type
# create the stack
cls.stack_identifier = cls.create_stack(
cls.stack_name,
template,
parameters={
'trigger': 'start',
- 'image': image_id
+ 'image': image_id,
+ 'flavor': flavor
})
cls.stack_id = cls.stack_identifier.split('/')[1]
cls.resource_name = 'fluffy'
diff --git a/tempest/api/volume/admin/test_volume_services.py b/tempest/api/volume/admin/test_volume_services.py
index 7820148..fffc5cb 100644
--- a/tempest/api/volume/admin/test_volume_services.py
+++ b/tempest/api/volume/admin/test_volume_services.py
@@ -17,7 +17,7 @@
from tempest import test
-class VolumesServicesTestJSON(base.BaseVolumeV1AdminTest):
+class VolumesServicesV2TestJSON(base.BaseVolumeAdminTest):
"""
Tests Volume Services API.
volume service list requires admin privileges.
@@ -26,21 +26,20 @@
@classmethod
def resource_setup(cls):
- super(VolumesServicesTestJSON, cls).resource_setup()
- cls.client = cls.os_adm.volume_services_client
- _, cls.services = cls.client.list_services()
+ super(VolumesServicesV2TestJSON, cls).resource_setup()
+ _, cls.services = cls.admin_volume_services_client.list_services()
cls.host_name = cls.services[0]['host']
cls.binary_name = cls.services[0]['binary']
@test.attr(type='gate')
def test_list_services(self):
- _, services = self.client.list_services()
+ _, services = self.admin_volume_services_client.list_services()
self.assertNotEqual(0, len(services))
@test.attr(type='gate')
def test_get_service_by_service_binary_name(self):
params = {'binary': self.binary_name}
- _, services = self.client.list_services(params)
+ _, services = self.admin_volume_services_client.list_services(params)
self.assertNotEqual(0, len(services))
for service in services:
self.assertEqual(self.binary_name, service['binary'])
@@ -51,7 +50,7 @@
service['host'] == self.host_name]
params = {'host': self.host_name}
- _, services = self.client.list_services(params)
+ _, services = self.admin_volume_services_client.list_services(params)
# we could have a periodic job checkin between the 2 service
# lookups, so only compare binary lists.
@@ -65,7 +64,11 @@
def test_get_service_by_service_and_host_name(self):
params = {'host': self.host_name, 'binary': self.binary_name}
- _, services = self.client.list_services(params)
+ _, services = self.admin_volume_services_client.list_services(params)
self.assertEqual(1, len(services))
self.assertEqual(self.host_name, services[0]['host'])
self.assertEqual(self.binary_name, services[0]['binary'])
+
+
+class VolumesServicesV1TestJSON(VolumesServicesV2TestJSON):
+ _api_version = 1
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 2a52e55..52e48f3 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -145,10 +145,6 @@
pass
-class BaseVolumeV1Test(BaseVolumeTest):
- _api_version = 1
-
-
class BaseVolumeAdminTest(BaseVolumeTest):
"""Base test case class for all Volume Admin API tests."""
@classmethod
@@ -170,6 +166,8 @@
msg = "Volume API v1 is disabled"
raise cls.skipException(msg)
cls.volume_qos_client = cls.os_adm.volume_qos_client
+ cls.admin_volume_services_client = \
+ cls.os_adm.volume_services_client
cls.volume_types_client = cls.os_adm.volume_types_client
cls.admin_volume_client = cls.os_adm.volumes_client
cls.hosts_client = cls.os_adm.volume_hosts_client
@@ -181,6 +179,8 @@
msg = "Volume API v2 is disabled"
raise cls.skipException(msg)
cls.volume_qos_client = cls.os_adm.volume_qos_v2_client
+ cls.admin_volume_services_client = \
+ cls.os_adm.volume_services_v2_client
cls.volume_types_client = cls.os_adm.volume_types_v2_client
cls.admin_volume_client = cls.os_adm.volumes_v2_client
cls.hosts_client = cls.os_adm.volume_hosts_v2_client
@@ -218,7 +218,3 @@
except exceptions.NotFound:
# The qos_specs may have already been deleted which is OK.
pass
-
-
-class BaseVolumeV1AdminTest(BaseVolumeAdminTest):
- _api_version = 1
diff --git a/tempest/api/volume/test_extensions.py b/tempest/api/volume/test_extensions.py
index 66ea9b7..0f6c2d6 100644
--- a/tempest/api/volume/test_extensions.py
+++ b/tempest/api/volume/test_extensions.py
@@ -39,7 +39,7 @@
if ext == 'all':
self.assertIn('Hosts', map(lambda x: x['name'], extensions))
elif ext:
- self.assertIn(ext, map(lambda x: x['name'], extensions))
+ self.assertIn(ext, map(lambda x: x['alias'], extensions))
else:
raise self.skipException('There are not any extensions configured')
diff --git a/tempest/api_schema/response/compute/services.py b/tempest/api_schema/response/compute/services.py
index fc42b89..6f361ef 100644
--- a/tempest/api_schema/response/compute/services.py
+++ b/tempest/api_schema/response/compute/services.py
@@ -22,7 +22,8 @@
'items': {
'type': 'object',
'properties': {
- 'id': {'type': 'integer'},
+ 'id': {'type': ['integer', 'string'],
+ 'pattern': '^[a-zA-Z!]*@[0-9]+$'},
'zone': {'type': 'string'},
'host': {'type': 'string'},
'state': {'type': 'string'},
diff --git a/tempest/cli/simple_read_only/network/test_neutron.py b/tempest/cli/simple_read_only/network/test_neutron.py
index 6090882..6cf0640 100644
--- a/tempest/cli/simple_read_only/network/test_neutron.py
+++ b/tempest/cli/simple_read_only/network/test_neutron.py
@@ -198,6 +198,31 @@
'auth_mode', 'status'])
@test.attr(type='smoke')
+ @test.requires_ext(extension='fwaas', service='network')
+ def test_neutron_firewall_list(self):
+ firewall_list = self.parser.listing(self.neutron
+ ('firewall-list'))
+ self.assertTableStruct(firewall_list, ['id', 'name',
+ 'firewall_policy_id'])
+
+ @test.attr(type='smoke')
+ @test.requires_ext(extension='fwaas', service='network')
+ def test_neutron_firewall_policy_list(self):
+ firewall_policy = self.parser.listing(self.neutron
+ ('firewall-policy-list'))
+ self.assertTableStruct(firewall_policy, ['id', 'name',
+ 'firewall_rules'])
+
+ @test.attr(type='smoke')
+ @test.requires_ext(extension='fwaas', service='network')
+ def test_neutron_firewall_rule_list(self):
+ firewall_rule = self.parser.listing(self.neutron
+ ('firewall-rule-list'))
+ self.assertTableStruct(firewall_rule, ['id', 'name',
+ 'firewall_policy_id',
+ 'summary', 'enabled'])
+
+ @test.attr(type='smoke')
def test_neutron_help(self):
help_text = self.neutron('help')
lines = help_text.split('\n')
diff --git a/tempest/clients.py b/tempest/clients.py
index 5873a85..91dc5f7 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -140,6 +140,8 @@
VolumeHostsV2ClientJSON
from tempest.services.volume.v2.json.admin.volume_quotas_client import \
VolumeQuotasV2Client
+from tempest.services.volume.v2.json.admin.volume_services_client import \
+ VolumesServicesV2ClientJSON
from tempest.services.volume.v2.json.admin.volume_types_client import \
VolumeTypesV2ClientJSON
from tempest.services.volume.v2.json.availability_zone_client import \
@@ -289,6 +291,8 @@
self.volume_qos_client = QosSpecsClientJSON(self.auth_provider)
self.volume_qos_v2_client = QosSpecsV2ClientJSON(
self.auth_provider)
+ self.volume_services_v2_client = VolumesServicesV2ClientJSON(
+ self.auth_provider)
def _set_volume_json_clients(self):
self.backups_client = BackupsClientJSON(self.auth_provider)
diff --git a/tempest/cmd/cleanup.py b/tempest/cmd/cleanup.py
index a305e42..f36ef56 100755
--- a/tempest/cmd/cleanup.py
+++ b/tempest/cmd/cleanup.py
@@ -9,7 +9,7 @@
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
@@ -36,14 +36,14 @@
**NOTE**: The _tenants_to_clean array in dry-run.json lists the
tenants that cleanup will loop through and delete child objects, not
delete the tenant itself. This may differ from the tenants array as you
-can clean the tempest and alternate tempest tenants but not delete the
-tenants themselves. This is actually the default behavior.
+can clean the tempest and alternate tempest tenants without deleting the
+tenants themselves: by default, cleanup deletes the objects in those tenants
+but does not delete the tenants unless the --delete-tempest-conf-objects
+flag is used to force their deletion.
**Normal mode**: running with no arguments, will query your deployment and
-build a list of objects to delete after filtering out out the objects
-found in saved_state.json and based on the
---preserve-tempest-conf-objects and
---delete-tempest-conf-objects flags.
+build a list of objects to delete after filtering out the objects found in
+saved_state.json and based on the --delete-tempest-conf-objects flag.
By default the tempest and alternate tempest users and tenants are not
deleted and the admin user specified in tempest.conf is never deleted.
@@ -84,7 +84,6 @@
# available services
self.tenant_services = cleanup_service.get_tenant_cleanup_services()
self.global_services = cleanup_service.get_global_cleanup_services()
- cleanup_service.init_conf()
def run(self):
opts = self.options
@@ -98,7 +97,7 @@
def _cleanup(self):
LOG.debug("Begin cleanup")
is_dry_run = self.options.dry_run
- is_preserve = self.options.preserve_tempest_conf_objects
+ is_preserve = not self.options.delete_tempest_conf_objects
is_save_state = False
if is_dry_run:
@@ -149,7 +148,7 @@
LOG.debug("Cleaning tenant: %s " % tenant['name'])
is_dry_run = self.options.dry_run
dry_run_data = self.dry_run_data
- is_preserve = self.options.preserve_tempest_conf_objects
+ is_preserve = not self.options.delete_tempest_conf_objects
tenant_id = tenant['id']
tenant_name = tenant['name']
tenant_data = None
@@ -194,23 +193,16 @@
dest='init_saved_state', default=False,
help="Creates JSON file: " + SAVED_STATE_JSON +
", representing the current state of your "
- "deployment, specifically objects types "
- "Tempest creates and destroys during a run. "
+ "deployment, specifically object types "
+ "tempest creates and destroys during a run. "
"You must run with this flag prior to "
- "executing cleanup.")
- parser.add_argument('--preserve-tempest-conf-objects',
- action="store_true",
- dest='preserve_tempest_conf_objects',
- default=True, help="Do not delete the "
- "tempest and alternate tempest users and "
- "tenants, so they may be used for future "
- "tempest runs. By default this is argument "
- "is true.")
+ "executing cleanup in normal mode, which is with "
+ "no arguments.")
parser.add_argument('--delete-tempest-conf-objects',
- action="store_false",
- dest='preserve_tempest_conf_objects',
+ action="store_true",
+ dest='delete_tempest_conf_objects',
default=False,
- help="Delete the tempest and "
+ help="Force deletion of the tempest and "
"alternate tempest users and tenants.")
parser.add_argument('--dry-run', action="store_true",
dest='dry_run', default=False,
@@ -291,6 +283,7 @@
def main():
+ cleanup_service.init_conf()
cleanup = Cleanup()
cleanup.run()
LOG.info('Cleanup finished!')
diff --git a/tempest/cmd/cleanup_service.py b/tempest/cmd/cleanup_service.py
index 8adfbef..67843e6 100644
--- a/tempest/cmd/cleanup_service.py
+++ b/tempest/cmd/cleanup_service.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python
+
# Copyright 2014 Dell Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -12,6 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from tempest import clients
from tempest import config
from tempest.openstack.common import log as logging
from tempest import test
@@ -19,13 +22,14 @@
LOG = logging.getLogger(__name__)
CONF = config.CONF
-CONF_USERS = None
-CONF_TENANTS = None
-CONF_PUB_NETWORK = None
-CONF_PRIV_NETWORK_NAME = None
-CONF_PUB_ROUTER = None
CONF_FLAVORS = None
CONF_IMAGES = None
+CONF_NETWORKS = []
+CONF_PRIV_NETWORK_NAME = None
+CONF_PUB_NETWORK = None
+CONF_PUB_ROUTER = None
+CONF_TENANTS = None
+CONF_USERS = None
IS_CEILOMETER = None
IS_CINDER = None
@@ -36,14 +40,15 @@
def init_conf():
- global CONF_USERS
- global CONF_TENANTS
- global CONF_PUB_NETWORK
- global CONF_PRIV_NETWORK_NAME
- global CONF_PUB_ROUTER
global CONF_FLAVORS
global CONF_IMAGES
-
+ global CONF_NETWORKS
+ global CONF_PRIV_NETWORK
+ global CONF_PRIV_NETWORK_NAME
+ global CONF_PUB_NETWORK
+ global CONF_PUB_ROUTER
+ global CONF_TENANTS
+ global CONF_USERS
global IS_CEILOMETER
global IS_CINDER
global IS_GLANCE
@@ -51,17 +56,6 @@
global IS_NEUTRON
global IS_NOVA
- CONF_USERS = [CONF.identity.admin_username, CONF.identity.username,
- CONF.identity.alt_username]
- CONF_TENANTS = [CONF.identity.admin_tenant_name,
- CONF.identity.tenant_name,
- CONF.identity.alt_tenant_name]
- CONF_PUB_NETWORK = CONF.network.public_network_id
- CONF_PRIV_NETWORK_NAME = CONF.compute.fixed_network_name
- CONF_PUB_ROUTER = CONF.network.public_router_id
- CONF_FLAVORS = [CONF.compute.flavor_ref, CONF.compute.flavor_ref_alt]
- CONF_IMAGES = [CONF.compute.image_ref, CONF.compute.image_ref_alt]
-
IS_CEILOMETER = CONF.service_available.ceilometer
IS_CINDER = CONF.service_available.cinder
IS_GLANCE = CONF.service_available.glance
@@ -69,6 +63,38 @@
IS_NEUTRON = CONF.service_available.neutron
IS_NOVA = CONF.service_available.nova
+ CONF_FLAVORS = [CONF.compute.flavor_ref, CONF.compute.flavor_ref_alt]
+ CONF_IMAGES = [CONF.compute.image_ref, CONF.compute.image_ref_alt]
+ CONF_PRIV_NETWORK_NAME = CONF.compute.fixed_network_name
+ CONF_PUB_NETWORK = CONF.network.public_network_id
+ CONF_PUB_ROUTER = CONF.network.public_router_id
+ CONF_TENANTS = [CONF.identity.admin_tenant_name,
+ CONF.identity.tenant_name,
+ CONF.identity.alt_tenant_name]
+ CONF_USERS = [CONF.identity.admin_username, CONF.identity.username,
+ CONF.identity.alt_username]
+
+ if IS_NEUTRON:
+ CONF_PRIV_NETWORK = _get_priv_net_id(CONF.compute.fixed_network_name,
+ CONF.identity.tenant_name)
+ CONF_NETWORKS = [CONF_PUB_NETWORK, CONF_PRIV_NETWORK]
+
+
+def _get_priv_net_id(prv_net_name, tenant_name):
+ am = clients.AdminManager()
+ net_cl = am.network_client
+ id_cl = am.identity_client
+
+ _, networks = net_cl.list_networks()
+ tenant = id_cl.get_tenant_by_name(tenant_name)
+ t_id = tenant['id']
+ n_id = None
+ for net in networks['networks']:
+ if (net['tenant_id'] == t_id and net['name'] == prv_net_name):
+ n_id = net['id']
+ break
+ return n_id
+
class BaseService(object):
def __init__(self, kwargs):
@@ -84,11 +110,8 @@
or 'tenant_id' not in item_list[0]):
return item_list
- _filtered_list = []
- for item in item_list:
- if item['tenant_id'] == self.tenant_id:
- _filtered_list.append(item)
- return _filtered_list
+ return [item for item in item_list
+ if item['tenant_id'] == self.tenant_id]
def list(self):
pass
@@ -325,6 +348,13 @@
super(NetworkService, self).__init__(kwargs)
self.client = manager.network_client
+ def _filter_by_conf_networks(self, item_list):
+ if not item_list or not all(('network_id' in i for i in item_list)):
+ return item_list
+
+ return [item for item in item_list if item['network_id']
+ not in CONF_NETWORKS]
+
def list(self):
client = self.client
_, networks = client.list_networks()
@@ -332,8 +362,7 @@
# filter out networks declared in tempest.conf
if self.is_preserve:
networks = [network for network in networks
- if (network['name'] != CONF_PRIV_NETWORK_NAME
- and network['id'] != CONF_PUB_NETWORK)]
+ if network['id'] not in CONF_NETWORKS]
LOG.debug("List count, %s Networks" % networks)
return networks
@@ -527,7 +556,7 @@
for port in ports:
subid = port['fixed_ips'][0]['subnet_id']
client.remove_router_interface_with_subnet_id(rid, subid)
- client.delete_router(rid)
+ client.delete_router(rid)
except Exception as e:
LOG.exception("Delete Router exception: %s" % e)
pass
@@ -694,6 +723,8 @@
_, ports = client.list_ports()
ports = ports['ports']
ports = self._filter_by_tenant_id(ports)
+ if self.is_preserve:
+ ports = self._filter_by_conf_networks(ports)
LOG.debug("List count, %s Ports" % len(ports))
return ports
@@ -719,6 +750,8 @@
_, subnets = client.list_subnets()
subnets = subnets['subnets']
subnets = self._filter_by_tenant_id(subnets)
+ if self.is_preserve:
+ subnets = self._filter_by_conf_networks(subnets)
LOG.debug("List count, %s Subnets" % len(subnets))
return subnets
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
index f426e4d..6f74c3e 100755
--- a/tempest/cmd/verify_tempest_config.py
+++ b/tempest/cmd/verify_tempest_config.py
@@ -165,21 +165,19 @@
def verify_extensions(os, service, results):
extensions_client = get_extension_client(os, service)
__, resp = extensions_client.list_extensions()
+ # For Nova, Cinder and Neutron we use the alias name rather than the
+ # 'name' field because the alias is considered to be the canonical
+ # name.
if isinstance(resp, dict):
- # For both Nova and Neutron we use the alias name rather than the
- # 'name' field because the alias is considered to be the canonical
- # name.
- if service in ['nova', 'nova_v3', 'neutron']:
- extensions = map(lambda x: x['alias'], resp['extensions'])
- elif service == 'swift':
+ if service == 'swift':
# Remove Swift general information from extensions list
resp.pop('swift')
extensions = resp.keys()
else:
- extensions = map(lambda x: x['name'], resp['extensions'])
+ extensions = map(lambda x: x['alias'], resp['extensions'])
else:
- extensions = map(lambda x: x['name'], resp)
+ extensions = map(lambda x: x['alias'], resp)
if not results.get(service):
results[service] = {}
extensions_opt = get_enabled_extensions(service)
diff --git a/tempest/common/commands.py b/tempest/common/commands.py
index 6583475..e68c20e 100644
--- a/tempest/common/commands.py
+++ b/tempest/common/commands.py
@@ -19,63 +19,6 @@
LOG = logging.getLogger(__name__)
-# NOTE(afazekas):
-# These commands assumes the tempest node is the same as
-# the only one service node. all-in-one installation.
-
-
-def sudo_cmd_call(cmd):
- args = shlex.split(cmd.encode('utf-8'))
- subprocess_args = {'stdout': subprocess.PIPE,
- 'stderr': subprocess.STDOUT}
- proc = subprocess.Popen(['/usr/bin/sudo', '-n'] + args,
- **subprocess_args)
- stdout = proc.communicate()[0]
- if proc.returncode != 0:
- LOG.error(("Command {0} returned with exit status {1},"
- "output {2}").format(cmd, proc.returncode, stdout))
- return stdout
-
-
-def ip_addr_raw():
- return sudo_cmd_call("ip a")
-
-
-def ip_route_raw():
- return sudo_cmd_call("ip r")
-
-
-def ip_ns_raw():
- return sudo_cmd_call("ip netns list")
-
-
-def iptables_raw(table):
- return sudo_cmd_call("iptables --line-numbers -L -nv -t " + table)
-
-
-def ip_ns_list():
- return ip_ns_raw().split()
-
-
-def ip_ns_exec(ns, cmd):
- return sudo_cmd_call(" ".join(("ip netns exec", ns, cmd)))
-
-
-def ip_ns_addr(ns):
- return ip_ns_exec(ns, "ip a")
-
-
-def ip_ns_route(ns):
- return ip_ns_exec(ns, "ip r")
-
-
-def iptables_ns(ns, table):
- return ip_ns_exec(ns, "iptables -v -S -t " + table)
-
-
-def ovs_db_dump():
- return sudo_cmd_call("ovsdb-client dump")
-
def copy_file_to_host(file_from, dest, host, username, pkey):
dest = "%s@%s:%s" % (username, host, dest)
diff --git a/tempest/common/debug.py b/tempest/common/debug.py
deleted file mode 100644
index 16e5ffe..0000000
--- a/tempest/common/debug.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.common import commands
-from tempest import config
-from tempest.openstack.common import log as logging
-
-CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
-TABLES = ['filter', 'nat', 'mangle']
-
-
-def log_ip_ns():
- if not CONF.debug.enable:
- return
- LOG.info("Host Addr:\n" + commands.ip_addr_raw())
- LOG.info("Host Route:\n" + commands.ip_route_raw())
- for table in TABLES:
- LOG.info('Host %s table:\n%s', table, commands.iptables_raw(table))
- ns_list = commands.ip_ns_list()
- LOG.info("Host ns list" + str(ns_list))
- for ns in ns_list:
- LOG.info("ns(%s) Addr:\n%s", ns, commands.ip_ns_addr(ns))
- LOG.info("ns(%s) Route:\n%s", ns, commands.ip_ns_route(ns))
- for table in TABLES:
- LOG.info('ns(%s) table(%s):\n%s', ns, table,
- commands.iptables_ns(ns, table))
-
-
-def log_ovs_db():
- if not CONF.debug.enable or not CONF.service_available.neutron:
- return
- db_dump = commands.ovs_db_dump()
- LOG.info("OVS DB:\n" + db_dump)
-
-
-def log_net_debug():
- log_ip_ns()
- log_ovs_db()
diff --git a/tempest/common/isolated_creds.py b/tempest/common/isolated_creds.py
index 228e47c..1ce1e39 100644
--- a/tempest/common/isolated_creds.py
+++ b/tempest/common/isolated_creds.py
@@ -203,9 +203,8 @@
if 'overlaps with another subnet' not in str(e):
raise
else:
- e = exceptions.BuildErrorException()
- e.message = 'Available CIDR for subnet creation could not be found'
- raise e
+ message = 'Available CIDR for subnet creation could not be found'
+ raise Exception(message)
return resp_body['subnet']
def _create_router(self, router_name, tenant_id):
diff --git a/tempest/common/rest_client.py b/tempest/common/rest_client.py
index c9448a7..1df8896 100644
--- a/tempest/common/rest_client.py
+++ b/tempest/common/rest_client.py
@@ -33,23 +33,43 @@
# redrive rate limited calls at most twice
MAX_RECURSION_DEPTH = 2
-TOKEN_CHARS_RE = re.compile('^[-A-Za-z0-9+/=]*$')
# All the successful HTTP status codes from RFC 7231 & 4918
HTTP_SUCCESS = (200, 201, 202, 203, 204, 205, 206, 207)
-# convert a structure into a string safely
-def safe_body(body, maxlen=4096):
- try:
- text = six.text_type(body)
- except UnicodeDecodeError:
- # if this isn't actually text, return marker that
- return "<BinaryData: removed>"
- if len(text) > maxlen:
- return text[:maxlen]
- else:
- return text
+class ResponseBody(dict):
+ """Class that wraps an http response and dict body into a single value.
+
+ Callers that receive this object will normally use it as a dict but
+ can extract the response if needed.
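+
+    A minimal illustration (the names below are only examples)::
+
+        body = ResponseBody(resp, {'id': '42'})
+        body['id']       # behaves like a regular dict
+        body.response    # the original response object is still available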
+ """
+
+ def __init__(self, response, body=None):
+ body_data = body or {}
+ self.update(body_data)
+ self.response = response
+
+ def __str__(self):
+        body = super(ResponseBody, self).__str__()
+ return "response: %s\nBody: %s" % (self.response, body)
+
+
+class ResponseBodyList(list):
+ """Class that wraps an http response and list body into a single value.
+
+ Callers that receive this object will normally use it as a list but
+ can extract the response if needed.
+ """
+
+ def __init__(self, response, body=None):
+ body_data = body or []
+ self.extend(body_data)
+ self.response = response
+
+ def __str__(self):
+        body = super(ResponseBodyList, self).__str__()
+ return "response: %s\nBody: %s" % (self.response, body)
class RestClient(object):
@@ -89,8 +109,9 @@
'retry-after', 'server',
'vary', 'www-authenticate'))
dscv = CONF.identity.disable_ssl_certificate_validation
+ ca_certs = CONF.identity.ca_certificates_file
self.http_obj = http.ClosingHttp(
- disable_ssl_certificate_validation=dscv)
+ disable_ssl_certificate_validation=dscv, ca_certs=ca_certs)
def _get_type(self):
return self.TYPE
@@ -261,6 +282,18 @@
return resp[i]
return ""
+ def _safe_body(self, body, maxlen=4096):
+ # convert a structure into a string safely
+ try:
+ text = six.text_type(body)
+ except UnicodeDecodeError:
+            # if this isn't actually text, return a marker that the
+            # body was binary data
+ return "<BinaryData: removed>"
+ if len(text) > maxlen:
+ return text[:maxlen]
+ else:
+ return text
+
def _log_request_start(self, method, req_url, req_headers=None,
req_body=None):
if req_headers is None:
@@ -291,9 +324,9 @@
req_url,
secs,
str(req_headers),
- safe_body(req_body),
+ self._safe_body(req_body),
str(resp),
- safe_body(resp_body)),
+ self._safe_body(resp_body)),
extra=extra)
def _log_request(self, method, req_url, resp,
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 52568cb..93f02c9 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -111,22 +111,24 @@
while image['status'] != status:
time.sleep(client.build_interval)
resp, image = client.get_image(image_id)
- if image['status'] == 'ERROR':
+ status_curr = image['status']
+ if status_curr == 'ERROR':
raise exceptions.AddImageException(image_id=image_id)
# check the status again to avoid a false negative where we hit
# the timeout at the same time that the image reached the expected
# status
- if image['status'] == status:
+ if status_curr == status:
return
if int(time.time()) - start >= client.build_timeout:
- message = ('Image %(image_id)s failed to reach %(status)s '
- 'status within the required time (%(timeout)s s).' %
+ message = ('Image %(image_id)s failed to reach %(status)s state'
+                       ' (current state %(status_curr)s) '
+ 'within the required time (%(timeout)s s).' %
{'image_id': image_id,
'status': status,
+ 'status_curr': status_curr,
'timeout': client.build_timeout})
- message += ' Current status: %s.' % image['status']
caller = misc_utils.find_test_caller()
if caller:
message = '(%s) %s' % (caller, message)
@@ -144,7 +146,8 @@
while node[attr] != status:
time.sleep(client.build_interval)
_, node = client.show_node(node_id)
- if node[attr] == status:
+ status_curr = node[attr]
+ if status_curr == status:
return
if int(time.time()) - start >= client.build_timeout:
@@ -154,7 +157,7 @@
'attr': attr,
'status': status,
'timeout': client.build_timeout})
- message += ' Current state of %s: %s.' % (attr, node[attr])
+ message += ' Current state of %s: %s.' % (attr, status_curr)
caller = misc_utils.find_test_caller()
if caller:
message = '(%s) %s' % (caller, message)
diff --git a/tempest/config.py b/tempest/config.py
index b467f83..cc6d626 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -71,6 +71,10 @@
cfg.BoolOpt('disable_ssl_certificate_validation',
default=False,
help="Set to True if using self-signed SSL certificates."),
+ cfg.StrOpt('ca_certificates_file',
+ default=None,
+ help='Specify a CA bundle file to use in verifying a '
+ 'TLS (https) server certificate.'),
cfg.StrOpt('uri',
help="Full URI of the OpenStack Identity API (Keystone), v2"),
cfg.StrOpt('uri_v3',
@@ -922,9 +926,6 @@
title="Debug System")
DebugGroup = [
- cfg.BoolOpt('enable',
- default=True,
- help="Enable diagnostic commands"),
cfg.StrOpt('trace_requests',
default='',
help="""A regex to determine which requests should be traced.
diff --git a/tempest/exceptions.py b/tempest/exceptions.py
index cc31fad..213d5de 100644
--- a/tempest/exceptions.py
+++ b/tempest/exceptions.py
@@ -75,7 +75,7 @@
message = 'Unauthorized'
-class InvalidServiceTag(RestClientException):
+class InvalidServiceTag(TempestException):
message = "Invalid service tag"
@@ -140,15 +140,15 @@
message = "Endpoint not found"
-class RateLimitExceeded(TempestException):
+class RateLimitExceeded(RestClientException):
message = "Rate limit exceeded"
-class OverLimit(TempestException):
+class OverLimit(RestClientException):
message = "Quota exceeded"
-class ServerFault(TempestException):
+class ServerFault(RestClientException):
message = "Got server fault"
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 8911ff0..410d90a 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -24,7 +24,6 @@
from tempest import auth
from tempest import clients
from tempest.common import credentials
-from tempest.common import debug
from tempest.common.utils import data_utils
from tempest.common.utils.linux import remote_client
from tempest import config
@@ -297,7 +296,19 @@
return secgroup
- def get_remote_client(self, server_or_ip, username=None, private_key=None):
+ def get_remote_client(self, server_or_ip, username=None, private_key=None,
+ log_console_of_servers=None):
+        """Get an SSH client to a remote server
+
+ @param server_or_ip a server object as returned by Tempest compute
+ client or an IP address to connect to
+ @param username name of the Linux account on the remote server
+ @param private_key the SSH private key to use
+ @param log_console_of_servers a list of server objects. Each server
+ in the list will have its console printed in the logs in case the
+ SSH connection failed to be established
+ @return a RemoteClient object
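+
+        Illustrative usage (the variable names are only examples)::
+
+            ssh_client = self.get_remote_client(
+                floating_ip['ip'], private_key=keypair['private_key'],
+                log_console_of_servers=[server])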
+ """
if isinstance(server_or_ip, six.string_types):
ip = server_or_ip
else:
@@ -312,9 +323,12 @@
pkey=private_key)
try:
linux_client.validate_authentication()
- except exceptions.SSHTimeout:
- LOG.exception('ssh connection to %s failed' % ip)
- debug.log_net_debug()
+ except Exception:
+ LOG.exception('Initializing SSH connection to %s failed' % ip)
+            # If we don't explicitly set which servers' console output we
+            # want to log, then all the servers will be logged.
+ # See the definition of _log_console_output()
+ self._log_console_output(log_console_of_servers)
raise
return linux_client
@@ -384,7 +398,6 @@
# network debug is called as part of ssh init
if not isinstance(exc, exceptions.SSHTimeout):
LOG.debug('Network information on a devstack host')
- debug.log_net_debug()
def create_server_snapshot(self, server, name=None):
# Glance client
@@ -494,15 +507,12 @@
username,
private_key,
should_connect=should_connect)
- except Exception as e:
+ except Exception:
ex_msg = 'Public network connectivity check failed'
if msg:
ex_msg += ": " + msg
LOG.exception(ex_msg)
self._log_console_output(servers)
- # network debug is called as part of ssh init
- if not isinstance(e, exceptions.SSHTimeout):
- debug.log_net_debug()
raise
def create_floating_ip(self, thing, pool_name=None):
@@ -1006,6 +1016,10 @@
self.addCleanup(self.delete_wrapper, router.delete)
return router
+ def _update_router_admin_state(self, router, admin_state_up):
+ router.update(admin_state_up=admin_state_up)
+ self.assertEqual(admin_state_up, router.admin_state_up)
+
def create_networks(self, client=None, tenant_id=None):
"""Create a network with a subnet connected to a router.
diff --git a/tempest/scenario/orchestration/test_server_cfn_init.py b/tempest/scenario/orchestration/test_server_cfn_init.py
index ddfabe4..f09f00c 100644
--- a/tempest/scenario/orchestration/test_server_cfn_init.py
+++ b/tempest/scenario/orchestration/test_server_cfn_init.py
@@ -119,14 +119,8 @@
if self.keypair:
# Check that the user can authenticate with the generated
# keypair
- try:
- linux_client = self.get_remote_client(
- server_ip, username='ec2-user')
- linux_client.validate_authentication()
- except (exceptions.ServerUnreachable,
- exceptions.SSHTimeout) as e:
- self._log_console_output(servers=[server])
- raise e
+ self.get_remote_client(server_ip, username='ec2-user',
+ log_console_of_servers=[server])
@test.attr(type='slow')
@test.skip_because(bug='1374175')
diff --git a/tempest/scenario/test_dashboard_basic_ops.py b/tempest/scenario/test_dashboard_basic_ops.py
index 1a10b79..2014293 100644
--- a/tempest/scenario/test_dashboard_basic_ops.py
+++ b/tempest/scenario/test_dashboard_basic_ops.py
@@ -65,7 +65,7 @@
def check_login_page(self):
response = urllib2.urlopen(CONF.dashboard.dashboard_url)
- self.assertIn("<h3>Log In</h3>", response.read())
+ self.assertIn("Log In", response.read())
def user_login(self):
self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor())
diff --git a/tempest/scenario/test_large_ops.py b/tempest/scenario/test_large_ops.py
index 91b95a8..60fd2bd 100644
--- a/tempest/scenario/test_large_ops.py
+++ b/tempest/scenario/test_large_ops.py
@@ -12,6 +12,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+from tempest_lib import exceptions
from tempest.common.utils import data_utils
from tempest import config
@@ -44,6 +45,22 @@
"instances")
cls.set_network_resources()
super(TestLargeOpsScenario, cls).resource_setup()
+ # list of cleanup calls to be executed in reverse order
+ cls._cleanup_resources = []
+
+ @classmethod
+ def resource_cleanup(cls):
+ while cls._cleanup_resources:
+ function, args, kwargs = cls._cleanup_resources.pop(-1)
+ try:
+ function(*args, **kwargs)
+ except exceptions.NotFound:
+ pass
+ super(TestLargeOpsScenario, cls).resource_cleanup()
+
+ @classmethod
+ def addCleanupClass(cls, function, *arguments, **keywordArguments):
+ cls._cleanup_resources.append((function, arguments, keywordArguments))
def _wait_for_server_status(self, status):
for server in self.servers:
@@ -54,13 +71,20 @@
def nova_boot(self):
name = data_utils.rand_name('scenario-server-')
flavor_id = CONF.compute.flavor_ref
- secgroup = self._create_security_group()
+        # Create the secgroup directly via the client so that it is not
+        # cleaned up at the end of each test case. Since no traffic is
+        # tested, we don't need to actually add any rules to the secgroup.
+ _, secgroup = self.security_groups_client.create_security_group(
+ 'secgroup-%s' % name, 'secgroup-desc-%s' % name)
+ self.addCleanupClass(self.security_groups_client.delete_security_group,
+ secgroup['id'])
+
self.servers_client.create_server(
name,
self.image,
flavor_id,
min_count=CONF.scenario.large_ops_number,
- security_groups=[secgroup])
+ security_groups=[{'name': secgroup['name']}])
# needed because of bug 1199788
params = {'name': name}
_, server_list = self.servers_client.list_servers(params)
@@ -68,15 +92,12 @@
for server in self.servers:
# after deleting all servers - wait for all servers to clear
# before cleanup continues
- self.addCleanup(self.servers_client.wait_for_server_termination,
- server['id'])
+ self.addCleanupClass(self.servers_client.
+ wait_for_server_termination,
+ server['id'])
for server in self.servers:
- self.addCleanup_with_wait(
- waiter_callable=(self.servers_client.
- wait_for_server_termination),
- thing_id=server['id'], thing_id_param='server_id',
- cleanup_callable=self.delete_wrapper,
- cleanup_args=[self.servers_client.delete_server, server['id']])
+ self.addCleanupClass(self.servers_client.delete_server,
+ server['id'])
self._wait_for_server_status('ACTIVE')
def _large_ops_scenario(self):
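
The class-level cleanup stack introduced above registers callables that
`resource_cleanup` pops in reverse (LIFO) order, silently ignoring resources
that are already gone (`NotFound`). A condensed sketch of the pattern::

    # registered first: wait for the termination to complete ...
    self.addCleanupClass(self.servers_client.wait_for_server_termination,
                         server['id'])
    # ... registered last, hence executed first: issue the delete
    self.addCleanupClass(self.servers_client.delete_server, server['id'])
    # resource_cleanup() pops the stack in LIFO order, so the server is
    # deleted first and its termination is awaited afterwards.
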
diff --git a/tempest/scenario/test_load_balancer_basic.py b/tempest/scenario/test_load_balancer_basic.py
index d061406..4b2dacd 100644
--- a/tempest/scenario/test_load_balancer_basic.py
+++ b/tempest/scenario/test_load_balancer_basic.py
@@ -123,10 +123,10 @@
def _create_server(self, name):
keypair = self.create_keypair()
- security_groups = [self.security_group]
+ security_groups = [{'name': self.security_group['name']}]
create_kwargs = {
- 'nics': [
- {'net-id': self.network['id']},
+ 'networks': [
+ {'uuid': self.network['id']},
],
'key_name': keypair['name'],
'security_groups': security_groups,
@@ -170,9 +170,9 @@
private_key=private_key)
# Write a backend's response into a file
- resp = """echo -ne "HTTP/1.1 200 OK\r\nContent-Length: 7\r\n""" \
- """Connection: close\r\nContent-Type: text/html; """ \
- """charset=UTF-8\r\n\r\n%s"; cat >/dev/null"""
+ resp = ('echo -ne "HTTP/1.1 200 OK\r\nContent-Length: 7\r\n'
+ 'Connection: close\r\nContent-Type: text/html; '
+ 'charset=UTF-8\r\n\r\n%s"; cat >/dev/null')
with tempfile.NamedTemporaryFile() as script:
script.write(resp % server_name)
@@ -186,8 +186,9 @@
username, key.name)
# Start netcat
- start_server = """sudo nc -ll -p %(port)s -e sh """ \
- """/tmp/%(script)s &"""
+ start_server = ('while true; do '
+ 'sudo nc -l -p %(port)s -e sh /tmp/%(script)s; '
+ 'done &')
cmd = start_server % {'port': self.port1,
'script': 'script1'}
ssh_client.exec_command(cmd)
@@ -215,6 +216,8 @@
return False
except IOError:
return False
+ except urllib2.HTTPError:
+ return False
timeout = config.compute.ping_timeout
start = time.time()
while not try_connect(check_ip, port):
@@ -297,8 +300,13 @@
def _send_requests(self, vip_ip, servers):
counters = dict.fromkeys(servers, 0)
for i in range(self.num):
- server = urllib2.urlopen("http://{0}/".format(vip_ip)).read()
- counters[server] += 1
+ try:
+ server = urllib2.urlopen("http://{0}/".format(vip_ip)).read()
+ counters[server] += 1
+            # An HTTP error means this request failed, so don't increase the
+            # success counter and keep trying the remaining requests
+ except urllib2.HTTPError:
+ continue
# Assert that each member of the pool gets balanced at least once
for member, counter in counters.iteritems():
self.assertGreater(counter, 0, 'Member %s never balanced' % member)
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 59af6b3..16a65c9 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -14,7 +14,6 @@
# under the License.
from tempest.common import custom_matchers
-from tempest.common import debug
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
@@ -89,17 +88,6 @@
self.servers_client.reboot(self.server['id'], 'SOFT')
self._wait_for_server_status('ACTIVE')
- def ssh_to_server(self):
- try:
- self.linux_client = self.get_remote_client(self.floating_ip['ip'])
- except Exception as e:
- LOG.exception('ssh to server failed')
- self._log_console_output()
- # network debug is called as part of ssh init
- if not isinstance(e, test.exceptions.SSHTimeout):
- debug.log_net_debug()
- raise
-
def check_partitions(self):
# NOTE(andreaf) The device name may be different on different guest OS
partitions = self.linux_client.get_partitions()
@@ -147,7 +135,9 @@
self.floating_ip = self.create_floating_ip(self.server)
self.create_and_add_security_group()
- self.ssh_to_server()
+
+ self.linux_client = self.get_remote_client(self.floating_ip['ip'])
self.nova_reboot()
- self.ssh_to_server()
+
+ self.linux_client = self.get_remote_client(self.floating_ip['ip'])
self.check_partitions()
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index ad7f18c..61c710e 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -65,7 +65,7 @@
{'uuid': network.id},
],
'key_name': self.keypair['name'],
- 'security_groups': [security_group],
+ 'security_groups': [{'name': security_group['name']}],
}
server_name = data_utils.rand_name('server-smoke')
self.server = self.create_server(name=server_name,
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index bac955d..98e3fda 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -18,7 +18,6 @@
import testtools
-from tempest.common import debug
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
@@ -146,7 +145,7 @@
def _create_server(self, name, network):
keypair = self.create_keypair()
self.keypairs[keypair['name']] = keypair
- security_groups = [self.security_group]
+ security_groups = [{'name': self.security_group['name']}]
create_kwargs = {
'networks': [
{'uuid': network.id},
@@ -170,8 +169,9 @@
server, ssh_login, self._get_server_key(server),
servers_for_debug=self.servers)
- def check_public_network_connectivity(self, should_connect=True,
- msg=None):
+ def check_public_network_connectivity(
+ self, should_connect=True, msg=None,
+ should_check_floating_ip_status=True):
"""Verifies connectivty to a VM via public network and floating IP,
and verifies floating IP has resource status is correct.
@@ -180,6 +180,8 @@
:param msg: Failure message to add to Error message. Should describe
the place in the test scenario where the method was called,
to indicate the context of the failure
+        :param should_check_floating_ip_status: bool. Whether the status of
+            the floating IP should be verified or not
"""
ssh_login = CONF.compute.image_ssh_user
floating_ip, server = self.floating_ip_tuple
@@ -193,7 +195,8 @@
super(TestNetworkBasicOps, self).check_public_network_connectivity(
ip_address, ssh_login, private_key, should_connect, msg,
self.servers)
- self.check_floating_ip_status(floating_ip, floatingip_status)
+ if should_check_floating_ip_status:
+ self.check_floating_ip_status(floating_ip, floatingip_status)
def _disassociate_floating_ips(self):
floating_ip, server = self.floating_ip_tuple
@@ -321,7 +324,6 @@
LOG.exception("Unable to access {dest} via ssh to "
"floating-ip {src}".format(dest=remote_ip,
src=floating_ip))
- debug.log_ip_ns()
raise
@test.attr(type='smoke')
@@ -393,3 +395,34 @@
self._create_new_network()
self._hotplug_server()
self._check_network_internal_connectivity(network=self.new_net)
+
+ @testtools.skipIf(CONF.baremetal.driver_enabled,
+ 'Router state cannot be altered on a shared baremetal '
+ 'network')
+ @test.attr(type='smoke')
+ @test.services('compute', 'network')
+ def test_update_router_admin_state(self):
+ """
+ 1. Check public connectivity before updating
+ admin_state_up attribute of router to False
+ 2. Check public connectivity after updating
+ admin_state_up attribute of router to False
+ 3. Check public connectivity after updating
+ admin_state_up attribute of router to True
+ """
+ self._setup_network_and_servers()
+ self.check_public_network_connectivity(
+ should_connect=True, msg="before updating "
+ "admin_state_up of router to False")
+ self._update_router_admin_state(self.router, False)
+ # TODO(alokmaurya): Remove should_check_floating_ip_status=False check
+ # once bug 1396310 is fixed
+
+ self.check_public_network_connectivity(
+ should_connect=False, msg="after updating "
+ "admin_state_up of router to False",
+ should_check_floating_ip_status=False)
+ self._update_router_admin_state(self.router, True)
+ self.check_public_network_connectivity(
+ should_connect=True, msg="after updating "
+ "admin_state_up of router to True")
diff --git a/tempest/scenario/test_security_groups_basic_ops.py b/tempest/scenario/test_security_groups_basic_ops.py
index 747850b..d8f7a26 100644
--- a/tempest/scenario/test_security_groups_basic_ops.py
+++ b/tempest/scenario/test_security_groups_basic_ops.py
@@ -14,7 +14,6 @@
# under the License.
from tempest import clients
-from tempest.common import debug
from tempest.common.utils import data_utils
from tempest import config
from tempest.openstack.common import log as logging
@@ -230,12 +229,13 @@
self._set_compute_context(tenant)
if security_groups is None:
security_groups = [tenant.security_groups['default']]
+ security_groups_names = [{'name': s['name']} for s in security_groups]
create_kwargs = {
'networks': [
{'uuid': tenant.network.id},
],
'key_name': tenant.keypair['name'],
- 'security_groups': security_groups,
+ 'security_groups': security_groups_names,
'tenant_id': tenant.creds.tenant_id
}
server = self.create_server(name=name, create_kwargs=create_kwargs)
@@ -333,15 +333,8 @@
msg = "Timed out waiting for %s to become reachable" % ip
else:
msg = "%s is reachable" % ip
- try:
- self.assertTrue(self._check_remote_connectivity(access_point, ip,
- should_succeed),
- msg)
- except test.exceptions.SSHTimeout:
- raise
- except Exception:
- debug.log_net_debug()
- raise
+ self.assertTrue(self._check_remote_connectivity(access_point, ip,
+ should_succeed), msg)
def _test_in_tenant_block(self, tenant):
access_point_ssh = self._connect_to_access_point(tenant)
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index eb636f7..23743c5 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -68,7 +68,7 @@
def boot_instance(self):
# Create server with image and flavor from input scenario
- security_groups = [self.security_group]
+ security_groups = [{'name': self.security_group['name']}]
create_kwargs = {
'key_name': self.keypair['name'],
'security_groups': security_groups
@@ -88,15 +88,10 @@
self.floating_ips_client.associate_floating_ip_to_server(
floating_ip['ip'], self.instance['id'])
# Check ssh
- try:
- self.get_remote_client(
- server_or_ip=floating_ip['ip'],
- username=self.image_utils.ssh_user(self.image_ref),
- private_key=self.keypair['private_key'])
- except Exception:
- LOG.exception('ssh to server failed')
- self._log_console_output()
- raise
+ self.get_remote_client(
+ server_or_ip=floating_ip['ip'],
+ username=self.image_utils.ssh_user(self.image_ref),
+ private_key=self.keypair['private_key'])
@test.services('compute', 'network')
def test_server_basicops(self):
diff --git a/tempest/scenario/test_shelve_instance.py b/tempest/scenario/test_shelve_instance.py
index 3ee71dd..8882177 100644
--- a/tempest/scenario/test_shelve_instance.py
+++ b/tempest/scenario/test_shelve_instance.py
@@ -67,10 +67,11 @@
self.keypair = self.create_keypair()
self.security_group = self._create_security_group()
+ security_groups = [{'name': self.security_group['name']}]
create_kwargs = {
'key_name': self.keypair['name'],
- 'security_groups': [self.security_group]
+ 'security_groups': security_groups
}
server = self.create_server(image=CONF.compute.image_ref,
create_kwargs=create_kwargs)
diff --git a/tempest/scenario/test_snapshot_pattern.py b/tempest/scenario/test_snapshot_pattern.py
index 9a99da4..5cb7c99 100644
--- a/tempest/scenario/test_snapshot_pattern.py
+++ b/tempest/scenario/test_snapshot_pattern.py
@@ -37,7 +37,7 @@
"""
def _boot_image(self, image_id):
- security_groups = [self.security_group]
+ security_groups = [{'name': self.security_group['name']}]
create_kwargs = {
'key_name': self.keypair['name'],
'security_groups': security_groups
@@ -47,21 +47,13 @@
def _add_keypair(self):
self.keypair = self.create_keypair()
- def _ssh_to_server(self, server_or_ip):
- try:
- return self.get_remote_client(server_or_ip)
- except Exception:
- LOG.exception('Initializing SSH connection failed')
- self._log_console_output()
- raise
-
def _write_timestamp(self, server_or_ip):
- ssh_client = self._ssh_to_server(server_or_ip)
+ ssh_client = self.get_remote_client(server_or_ip)
ssh_client.exec_command('date > /tmp/timestamp; sync')
self.timestamp = ssh_client.exec_command('cat /tmp/timestamp')
def _check_timestamp(self, server_or_ip):
- ssh_client = self._ssh_to_server(server_or_ip)
+ ssh_client = self.get_remote_client(server_or_ip)
got_timestamp = ssh_client.exec_command('cat /tmp/timestamp')
self.assertEqual(self.timestamp, got_timestamp)
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index ee2c737..cfc1d37 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -61,7 +61,7 @@
status)
def _boot_image(self, image_id):
- security_groups = [self.security_group]
+ security_groups = [{'name': self.security_group['name']}]
create_kwargs = {
'key_name': self.keypair['name'],
'security_groups': security_groups
diff --git a/tempest/scenario/test_swift_basic_ops.py b/tempest/scenario/test_swift_basic_ops.py
index fcb9505..312fbc6 100644
--- a/tempest/scenario/test_swift_basic_ops.py
+++ b/tempest/scenario/test_swift_basic_ops.py
@@ -65,7 +65,10 @@
obj_name, _ = self.upload_object_to_container(container_name)
obj_url = '%s/%s/%s' % (self.object_client.base_url,
container_name, obj_name)
- http_client = http.ClosingHttp()
+ dscv = CONF.identity.disable_ssl_certificate_validation
+ ca_certs = CONF.identity.ca_certificates_file
+ http_client = http.ClosingHttp(
+ disable_ssl_certificate_validation=dscv, ca_certs=ca_certs)
resp, _ = http_client.request(obj_url, 'GET')
self.assertEqual(resp.status, 401)
self.change_container_acl(container_name, '.r:*')
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index dd115e7..c584a6e 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -110,14 +110,8 @@
network_name_for_ssh = CONF.compute.network_for_ssh
ip = server.networks[network_name_for_ssh][0]
- try:
- return self.get_remote_client(
- ip,
- private_key=keypair['private_key'])
- except Exception:
- LOG.exception('ssh to server failed')
- self._log_console_output(servers=[server])
- raise
+ return self.get_remote_client(ip, private_key=keypair['private_key'],
+ log_console_of_servers=[server])
def _get_content(self, ssh_client):
return ssh_client.exec_command('cat /tmp/text')
diff --git a/tempest/services/botoclients.py b/tempest/services/botoclients.py
index 7af904b..f581e89 100644
--- a/tempest/services/botoclients.py
+++ b/tempest/services/botoclients.py
@@ -38,6 +38,7 @@
# FIXME(andreaf) replace credentials and auth_url with auth_provider
insecure_ssl = CONF.identity.disable_ssl_certificate_validation
+ ca_cert = CONF.identity.ca_certificates_file
self.connection_timeout = str(CONF.boto.http_socket_timeout)
self.num_retries = str(CONF.boto.num_retries)
@@ -46,7 +47,8 @@
"password": password,
"auth_url": auth_url,
"tenant_name": tenant_name,
- "insecure": insecure_ssl}
+ "insecure": insecure_ssl,
+ "cacert": ca_cert}
def _keystone_aws_get(self):
# FIXME(andreaf) Move EC2 credentials to AuthProvider
diff --git a/tempest/services/compute/json/interfaces_client.py b/tempest/services/compute/json/interfaces_client.py
index 83c253a..620ed68 100644
--- a/tempest/services/compute/json/interfaces_client.py
+++ b/tempest/services/compute/json/interfaces_client.py
@@ -79,9 +79,10 @@
timed_out = int(time.time()) - start >= self.build_timeout
if interface_status != status and timed_out:
- message = ('Interface %s failed to reach %s status within '
- 'the required time (%s s).' %
- (port_id, status, self.build_timeout))
+ message = ('Interface %s failed to reach %s status '
+ '(current %s) within the required time (%s s).' %
+ (port_id, status, interface_status,
+ self.build_timeout))
raise exceptions.TimeoutException(message)
return resp, body
diff --git a/tempest/services/compute/json/volumes_extensions_client.py b/tempest/services/compute/json/volumes_extensions_client.py
index 309dc5b..afa6937 100644
--- a/tempest/services/compute/json/volumes_extensions_client.py
+++ b/tempest/services/compute/json/volumes_extensions_client.py
@@ -73,10 +73,9 @@
metadata: A dictionary of values to be used as metadata.
"""
post_body = {
- 'size': size,
- 'display_name': kwargs.get('display_name'),
- 'metadata': kwargs.get('metadata'),
+ 'size': size
}
+ post_body.update(kwargs)
post_body = json.dumps({'volume': post_body})
resp, body = self.post('os-volumes', post_body)
@@ -93,7 +92,6 @@
def wait_for_volume_status(self, volume_id, status):
"""Waits for a Volume to reach a given status."""
resp, body = self.get_volume(volume_id)
- volume_name = body['displayName']
volume_status = body['status']
start = int(time.time())
@@ -105,9 +103,10 @@
raise exceptions.VolumeBuildErrorException(volume_id=volume_id)
if int(time.time()) - start >= self.build_timeout:
- message = ('Volume %s failed to reach %s status within '
- 'the required time (%s s).' %
- (volume_name, status, self.build_timeout))
+ message = ('Volume %s failed to reach %s status (current %s) '
+ 'within the required time (%s s).' %
+ (volume_id, status, volume_status,
+ self.build_timeout))
raise exceptions.TimeoutException(message)
def is_resource_deleted(self, id):
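
Since `create_volume` now forwards arbitrary keyword arguments into the
request body, callers can pass any supported volume attribute directly. A
hypothetical call (client fixture name and return convention assumed)::

    resp, volume = self.volumes_extensions_client.create_volume(
        size=1,
        display_name=data_utils.rand_name('volume'),
        metadata={'purpose': 'tempest'})
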
diff --git a/tempest/services/compute/v3/json/interfaces_client.py b/tempest/services/compute/v3/json/interfaces_client.py
index e99c124..ccc20c8 100644
--- a/tempest/services/compute/v3/json/interfaces_client.py
+++ b/tempest/services/compute/v3/json/interfaces_client.py
@@ -80,9 +80,10 @@
timed_out = int(time.time()) - start >= self.build_timeout
if interface_status != status and timed_out:
- message = ('Interface %s failed to reach %s status within '
- 'the required time (%s s).' %
- (port_id, status, self.build_timeout))
+ message = ('Interface %s failed to reach %s status '
+ '(current %s) within the required time (%s s).' %
+ (port_id, status, interface_status,
+ self.build_timeout))
raise exceptions.TimeoutException(message)
return resp, body
diff --git a/tempest/services/identity/v3/json/identity_client.py b/tempest/services/identity/v3/json/identity_client.py
index 5ad416c..6ac4901 100644
--- a/tempest/services/identity/v3/json/identity_client.py
+++ b/tempest/services/identity/v3/json/identity_client.py
@@ -36,8 +36,10 @@
"""Creates a user."""
en = kwargs.get('enabled', True)
description = kwargs.get('description', None)
+ default_project_id = kwargs.get('default_project_id')
post_body = {
'project_id': project_id,
+ 'default_project_id': default_project_id,
'description': description,
'domain_id': domain_id,
'email': email,
@@ -57,6 +59,11 @@
email = kwargs.get('email', body['email'])
en = kwargs.get('enabled', body['enabled'])
project_id = kwargs.get('project_id', body['project_id'])
+ if 'default_project_id' in body.keys():
+ default_project_id = kwargs.get('default_project_id',
+ body['default_project_id'])
+ else:
+ default_project_id = kwargs.get('default_project_id')
description = kwargs.get('description', body['description'])
domain_id = kwargs.get('domain_id', body['domain_id'])
post_body = {
@@ -64,6 +71,7 @@
'email': email,
'enabled': en,
'project_id': project_id,
+ 'default_project_id': default_project_id,
'id': user_id,
'domain_id': domain_id,
'description': description
diff --git a/tempest/services/identity/v3/json/policy_client.py b/tempest/services/identity/v3/json/policy_client.py
index e093260..579243c 100644
--- a/tempest/services/identity/v3/json/policy_client.py
+++ b/tempest/services/identity/v3/json/policy_client.py
@@ -39,14 +39,14 @@
resp, body = self.post('policies', post_body)
self.expected_success(201, resp.status)
body = json.loads(body)
- return resp, body['policy']
+ return rest_client.ResponseBody(resp, body['policy'])
def list_policies(self):
"""Lists the policies."""
resp, body = self.get('policies')
self.expected_success(200, resp.status)
body = json.loads(body)
- return resp, body['policies']
+ return rest_client.ResponseBodyList(resp, body['policies'])
def get_policy(self, policy_id):
"""Lists out the given policy."""
@@ -54,7 +54,7 @@
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
- return resp, body['policy']
+ return rest_client.ResponseBody(resp, body['policy'])
def update_policy(self, policy_id, **kwargs):
"""Updates a policy."""
@@ -67,11 +67,11 @@
resp, body = self.patch(url, post_body)
self.expected_success(200, resp.status)
body = json.loads(body)
- return resp, body['policy']
+ return rest_client.ResponseBody(resp, body['policy'])
def delete_policy(self, policy_id):
"""Deletes the policy."""
url = "policies/%s" % policy_id
resp, body = self.delete(url)
self.expected_success(204, resp.status)
- return resp, body
+ return rest_client.ResponseBody(resp, body)
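
The policy client now returns `ResponseBody`/`ResponseBodyList` wrappers
instead of `(resp, body)` tuples. Assuming these wrappers behave as a
dict/list that also carries the original response object, callers look like::

    policy = self.policy_client.get_policy(policy_id)
    self.assertEqual(policy_id, policy['id'])  # still usable as a dict
    resp = policy.response                     # raw response kept on the body
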
diff --git a/tempest/services/image/v1/json/image_client.py b/tempest/services/image/v1/json/image_client.py
index d0d32e5..d60c9d9 100644
--- a/tempest/services/image/v1/json/image_client.py
+++ b/tempest/services/image/v1/json/image_client.py
@@ -106,9 +106,10 @@
def _get_http(self):
dscv = CONF.identity.disable_ssl_certificate_validation
+ ca_certs = CONF.identity.ca_certificates_file
return glance_http.HTTPClient(auth_provider=self.auth_provider,
filters=self.filters,
- insecure=dscv)
+ insecure=dscv, ca_certs=ca_certs)
def _create_with_data(self, headers, data):
resp, body_iter = self.http.raw_request('POST', '/v1/images',
diff --git a/tempest/services/image/v2/json/image_client.py b/tempest/services/image/v2/json/image_client.py
index 4865073..7421508 100644
--- a/tempest/services/image/v2/json/image_client.py
+++ b/tempest/services/image/v2/json/image_client.py
@@ -35,9 +35,10 @@
def _get_http(self):
dscv = CONF.identity.disable_ssl_certificate_validation
+ ca_certs = CONF.identity.ca_certificates_file
return glance_http.HTTPClient(auth_provider=self.auth_provider,
filters=self.filters,
- insecure=dscv)
+ insecure=dscv, ca_certs=ca_certs)
def _validate_schema(self, body, type='image'):
if type in ['image', 'images']:
diff --git a/tempest/services/network/json/network_client.py b/tempest/services/network/json/network_client.py
index 46475f0..809c98b 100644
--- a/tempest/services/network/json/network_client.py
+++ b/tempest/services/network/json/network_client.py
@@ -40,8 +40,13 @@
def deserialize_list(self, body):
res = json.loads(body)
# expecting response in form
- # {'resources': [ res1, res2] }
- return res[res.keys()[0]]
+ # {'resources': [ res1, res2] } => when pagination disabled
+ # {'resources': [..], 'resources_links': {}} => if pagination enabled
+ pagination_suffix = "_links"
+ for k in res.keys():
+ if k[-len(pagination_suffix):] == pagination_suffix:
+ continue
+ return res[k]
def serialize(self, data):
return json.dumps(data)
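
The new `deserialize_list` simply skips any pagination `<resource>_links` key
and returns the first real resource list. A small standalone illustration of
the same logic (not part of the change)::

    import json

    def pick_resource_list(body):
        # mirror of deserialize_list: ignore '<name>_links' keys added when
        # pagination is enabled and return the actual resource list
        res = json.loads(body)
        for k in res.keys():
            if k.endswith("_links"):
                continue
            return res[k]

    body = json.dumps({"networks": [{"id": "net-1"}, {"id": "net-2"}],
                       "networks_links": [{"rel": "next",
                                           "href": "http://example/next"}]})
    print(pick_resource_list(body))  # -> [{'id': 'net-1'}, {'id': 'net-2'}]
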
diff --git a/tempest/services/network/network_client_base.py b/tempest/services/network/network_client_base.py
index 5ad5f37..2c767d9 100644
--- a/tempest/services/network/network_client_base.py
+++ b/tempest/services/network/network_client_base.py
@@ -259,6 +259,7 @@
# At this point, the wait has timed out
message = 'Resource %s' % (str(resource))
message += ' failed to reach status %s' % status
+ message += ' (current: %s)' % resource['status']
message += ' within the required time %s' % timeout
caller = misc.find_test_caller()
if caller:
diff --git a/tempest/services/object_storage/account_client.py b/tempest/services/object_storage/account_client.py
index 4417e3b..a2044ef 100644
--- a/tempest/services/object_storage/account_client.py
+++ b/tempest/services/object_storage/account_client.py
@@ -181,7 +181,11 @@
def request(self, method, url, extra_headers=False, headers=None,
body=None):
"""A simple HTTP request interface."""
- self.http_obj = http.ClosingHttp()
+ dscv = CONF.identity.disable_ssl_certificate_validation
+ ca_certs = CONF.identity.ca_certificates_file
+ self.http_obj = http.ClosingHttp(
+ disable_ssl_certificate_validation=dscv,
+ ca_certs=ca_certs)
if headers is None:
headers = {}
elif extra_headers:
diff --git a/tempest/services/object_storage/object_client.py b/tempest/services/object_storage/object_client.py
index 2231407..7a69fa8 100644
--- a/tempest/services/object_storage/object_client.py
+++ b/tempest/services/object_storage/object_client.py
@@ -197,8 +197,10 @@
body=None):
"""A simple HTTP request interface."""
dscv = CONF.identity.disable_ssl_certificate_validation
+ ca_certs = CONF.identity.ca_certificates_file
self.http_obj = http.ClosingHttp(
- disable_ssl_certificate_validation=dscv)
+ disable_ssl_certificate_validation=dscv,
+ ca_certs=ca_certs)
if headers is None:
headers = {}
elif extra_headers:
diff --git a/tempest/services/orchestration/json/orchestration_client.py b/tempest/services/orchestration/json/orchestration_client.py
index 15306a0..9b4700a 100644
--- a/tempest/services/orchestration/json/orchestration_client.py
+++ b/tempest/services/orchestration/json/orchestration_client.py
@@ -185,9 +185,12 @@
resource_status_reason=body['resource_status_reason'])
if int(time.time()) - start >= self.build_timeout:
- message = ('Resource %s failed to reach %s status within '
- 'the required time (%s s).' %
- (resource_name, status, self.build_timeout))
+ message = ('Resource %s failed to reach %s status '
+ '(current %s) within the required time (%s s).' %
+ (resource_name,
+ status,
+ resource_status,
+ self.build_timeout))
raise exceptions.TimeoutException(message)
time.sleep(self.build_interval)
@@ -214,9 +217,10 @@
stack_status_reason=body['stack_status_reason'])
if int(time.time()) - start >= self.build_timeout:
- message = ('Stack %s failed to reach %s status within '
- 'the required time (%s s).' %
- (stack_name, status, self.build_timeout))
+ message = ('Stack %s failed to reach %s status (current: %s) '
+ 'within the required time (%s s).' %
+ (stack_name, status, stack_status,
+ self.build_timeout))
raise exceptions.TimeoutException(message)
time.sleep(self.build_interval)
diff --git a/tempest/services/volume/json/admin/volume_services_client.py b/tempest/services/volume/json/admin/volume_services_client.py
index c9b8bcc..88c6db0 100644
--- a/tempest/services/volume/json/admin/volume_services_client.py
+++ b/tempest/services/volume/json/admin/volume_services_client.py
@@ -22,10 +22,10 @@
CONF = config.CONF
-class VolumesServicesClientJSON(rest_client.RestClient):
+class BaseVolumesServicesClientJSON(rest_client.RestClient):
def __init__(self, auth_provider):
- super(VolumesServicesClientJSON, self).__init__(auth_provider)
+ super(BaseVolumesServicesClientJSON, self).__init__(auth_provider)
self.service = CONF.volume.catalog_type
def list_services(self, params=None):
@@ -37,3 +37,7 @@
body = json.loads(body)
self.expected_success(200, resp.status)
return resp, body['services']
+
+
+class VolumesServicesClientJSON(BaseVolumesServicesClientJSON):
+ """Volume V1 volume services client"""
diff --git a/tempest/services/volume/json/backups_client.py b/tempest/services/volume/json/backups_client.py
index da47639..51a017e 100644
--- a/tempest/services/volume/json/backups_client.py
+++ b/tempest/services/volume/json/backups_client.py
@@ -95,9 +95,10 @@
raise exceptions.VolumeBackupException(backup_id=backup_id)
if int(time.time()) - start >= self.build_timeout:
- message = ('Volume backup %s failed to reach %s status within '
- 'the required time (%s s).' %
- (backup_id, status, self.build_timeout))
+ message = ('Volume backup %s failed to reach %s status '
+ '(current %s) within the required time (%s s).' %
+ (backup_id, status, backup_status,
+ self.build_timeout))
raise exceptions.TimeoutException(message)
diff --git a/tempest/services/volume/json/volumes_client.py b/tempest/services/volume/json/volumes_client.py
index cf2837b..1e49e5a 100644
--- a/tempest/services/volume/json/volumes_client.py
+++ b/tempest/services/volume/json/volumes_client.py
@@ -174,10 +174,12 @@
raise exceptions.VolumeBuildErrorException(volume_id=volume_id)
if int(time.time()) - start >= self.build_timeout:
- message = 'Volume %s failed to reach %s status within '\
- 'the required time (%s s).' % (volume_id,
- status,
- self.build_timeout)
+ message = ('Volume %s failed to reach %s status (current: %s) '
+ 'within the required time '
+ '(%s s).' % (volume_id,
+ status,
+ volume_status,
+ self.build_timeout))
raise exceptions.TimeoutException(message)
def is_resource_deleted(self, id):
diff --git a/tempest/services/volume/v2/json/admin/volume_services_client.py b/tempest/services/volume/v2/json/admin/volume_services_client.py
new file mode 100644
index 0000000..dc3c8ea
--- /dev/null
+++ b/tempest/services/volume/v2/json/admin/volume_services_client.py
@@ -0,0 +1,26 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.services.volume.json.admin import volume_services_client as vs_cli
+
+
+class VolumesServicesV2ClientJSON(vs_cli.BaseVolumesServicesClientJSON):
+ """
+ Client class to send CRUD Volume V2 API requests to a Cinder endpoint
+ """
+
+ def __init__(self, auth_provider):
+ super(VolumesServicesV2ClientJSON, self).__init__(auth_provider)
+ self.api_version = "v2"
diff --git a/tempest/test.py b/tempest/test.py
index 14cf3bb..6deb42b 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -224,6 +224,23 @@
class BaseTestCase(testtools.testcase.WithAttributes,
testtools.TestCase):
+    """The test base class defines the Tempest framework for class level
+    fixtures. `setUpClass` and `tearDownClass` are defined here and cannot
+    be overwritten by subclasses (enforced via hacking rule T105).
+
+ Set-up is split in a series of steps (setup stages), which can be
+ overwritten by test classes. Set-up stages are:
+ - skip_checks
+ - setup_credentials
+ - setup_clients
+ - resource_setup
+
+ Tear-down is also split in a series of steps (teardown stages), which are
+ stacked for execution only if the corresponding setup stage had been
+ reached during the setup phase. Tear-down stages are:
+ - clear_isolated_creds (defined in the base test class)
+ - resource_cleanup
+ """
setUpClassCalled = False
_service = None
@@ -242,31 +259,28 @@
if hasattr(super(BaseTestCase, cls), 'setUpClass'):
super(BaseTestCase, cls).setUpClass()
cls.setUpClassCalled = True
- # No test resource is allocated until here
+ # Stack of (name, callable) to be invoked in reverse order at teardown
+ cls.teardowns = []
+ # All the configuration checks that may generate a skip
+ cls.skip_checks()
try:
- # TODO(andreaf) Split-up resource_setup in stages:
- # skip checks, pre-hook, credentials, clients, resources, post-hook
+ # Allocation of all required credentials and client managers
+ cls.teardowns.append(('credentials', cls.clear_isolated_creds))
+ cls.setup_credentials()
+ # Shortcuts to clients
+ cls.setup_clients()
+ # Additional class-wide test resources
+ cls.teardowns.append(('resources', cls.resource_cleanup))
cls.resource_setup()
except Exception:
etype, value, trace = sys.exc_info()
- LOG.info("%s in resource setup. Invoking tearDownClass." % etype)
- # Catch any exception in tearDown so we can re-raise the original
- # exception at the end
+ LOG.info("%s in %s.setUpClass. Invoking tearDownClass." % (
+                     etype, cls.__name__))
+ cls.tearDownClass()
try:
- cls.tearDownClass()
- except Exception as te:
- tetype, _, _ = sys.exc_info()
- # TODO(gmann): Till we split-up resource_setup &
- # resource_cleanup in more structural way, log
- # AttributeError as info instead of exception.
- if tetype is AttributeError:
- LOG.info("tearDownClass failed: %s" % te)
- else:
- LOG.exception("tearDownClass failed: %s" % te)
- try:
- raise etype(value), None, trace
+ raise etype, value, trace
finally:
- del trace # for avoiding circular refs
+ del trace # to avoid circular refs
@classmethod
def tearDownClass(cls):
@@ -274,21 +288,78 @@
# It should never be overridden by descendants
if hasattr(super(BaseTestCase, cls), 'tearDownClass'):
super(BaseTestCase, cls).tearDownClass()
- try:
- cls.resource_cleanup()
- finally:
- cls.clear_isolated_creds()
+        # Save any existing exception; we always want to re-raise the original
+ # exception only
+ etype, value, trace = sys.exc_info()
+ # If there was no exception during setup we shall re-raise the first
+ # exception in teardown
+ re_raise = (etype is None)
+ while cls.teardowns:
+ name, teardown = cls.teardowns.pop()
+ # Catch any exception in tearDown so we can re-raise the original
+ # exception at the end
+ try:
+ teardown()
+ except Exception as te:
+ sys_exec_info = sys.exc_info()
+ tetype = sys_exec_info[0]
+                # TODO(andreaf): Until resource_cleanup can clean up only the
+                # resources that were successfully set up, log AttributeError
+                # as info instead of exception.
+ if tetype is AttributeError and name == 'resources':
+ LOG.info("tearDownClass of %s failed: %s" % (name, te))
+ else:
+ LOG.exception("teardown of %s failed: %s" % (name, te))
+ if not etype:
+ etype, value, trace = sys_exec_info
+        # If exceptions were raised during teardown, and not before, re-raise
+ # the first one
+ if re_raise and etype is not None:
+ try:
+ raise etype, value, trace
+ finally:
+ del trace # to avoid circular refs
@classmethod
def resource_setup(cls):
- """Class level setup steps for test cases.
- Recommended order: skip checks, credentials, clients, resources.
+ """Class level resource setup for test cases.
"""
pass
@classmethod
def resource_cleanup(cls):
- """Class level resource cleanup for test cases. """
+ """Class level resource cleanup for test cases.
+        Resource cleanup must be able to handle partially set up resources,
+        in case a failure happens during `resource_setup`.
+ """
+ pass
+
+ @classmethod
+ def skip_checks(cls):
+        """Class level skip checks. Subclasses verify here all the
+        conditions that might prevent the execution of the entire test class.
+        Checks implemented here may not make use of API calls, and should
+        rely on configuration alone.
+        In general, skip checks that require an API call are discouraged.
+        If one is really needed, it may be implemented either in
+        resource_setup or at test level.
+ """
+ pass
+
+ @classmethod
+ def setup_credentials(cls):
+ """Allocate credentials and the client managers from them."""
+        # TODO(andreaf) There is a fair amount of code that could be moved
+        # from base / test classes in here. Ideally tests should be able to
+        # only specify a list of (additional) credentials they need to use.
+ pass
+
+ @classmethod
+ def setup_clients(cls):
+ """Create links to the clients into the test object."""
+        # TODO(andreaf) There is a fair amount of code that could be moved
+        # from base / test classes in here. Ideally tests should be able to
+        # only specify which client is `client` and nothing else.
pass
def setUp(self):
@@ -414,12 +485,8 @@
else:
standard_tests, module, loader = args
for test in testtools.iterate_tests(standard_tests):
- schema_file = getattr(test, '_schema_file', None)
schema = getattr(test, '_schema', None)
- if schema_file is not None:
- setattr(test, 'scenarios',
- NegativeAutoTest.generate_scenario(schema_file))
- elif schema is not None:
+ if schema is not None:
setattr(test, 'scenarios',
NegativeAutoTest.generate_scenario(schema))
return testscenarios.load_tests_apply_scenarios(*args)
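
With `setUpClass` now frozen in `BaseTestCase`, test classes are expected to
hook into the individual setup stages instead. A sketch of a hypothetical
test class using the new stages (client manager and option names are
assumptions)::

    from tempest import config
    from tempest import test

    CONF = config.CONF


    class ExampleScenario(test.BaseTestCase):

        @classmethod
        def skip_checks(cls):
            super(ExampleScenario, cls).skip_checks()
            # configuration-only checks, no API calls
            if not CONF.service_available.nova:
                raise cls.skipException("Nova is required for this test")

        @classmethod
        def setup_credentials(cls):
            super(ExampleScenario, cls).setup_credentials()
            cls.os = cls.get_client_manager()

        @classmethod
        def setup_clients(cls):
            super(ExampleScenario, cls).setup_clients()
            cls.servers_client = cls.os.servers_client

        @classmethod
        def resource_setup(cls):
            super(ExampleScenario, cls).resource_setup()
            # class-wide resources created here are released by
            # resource_cleanup, which must tolerate partial setup
            cls.shared_servers = []
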
diff --git a/tempest/tests/cmd/test_verify_tempest_config.py b/tempest/tests/cmd/test_verify_tempest_config.py
index 6679c79..b672b86 100644
--- a/tempest/tests/cmd/test_verify_tempest_config.py
+++ b/tempest/tests/cmd/test_verify_tempest_config.py
@@ -261,9 +261,9 @@
def test_verify_extensions_cinder(self):
def fake_list_extensions():
- return (None, {'extensions': [{'name': 'fake1'},
- {'name': 'fake2'},
- {'name': 'not_fake'}]})
+ return (None, {'extensions': [{'alias': 'fake1'},
+ {'alias': 'fake2'},
+ {'alias': 'not_fake'}]})
fake_os = mock.MagicMock()
fake_os.volumes_extension_client.list_extensions = fake_list_extensions
self.useFixture(mockpatch.PatchObject(
@@ -283,9 +283,9 @@
def test_verify_extensions_cinder_all(self):
def fake_list_extensions():
- return (None, {'extensions': [{'name': 'fake1'},
- {'name': 'fake2'},
- {'name': 'not_fake'}]})
+ return (None, {'extensions': [{'alias': 'fake1'},
+ {'alias': 'fake2'},
+ {'alias': 'not_fake'}]})
fake_os = mock.MagicMock()
fake_os.volumes_extension_client.list_extensions = fake_list_extensions
self.useFixture(mockpatch.PatchObject(
@@ -300,9 +300,8 @@
def test_verify_extensions_nova(self):
def fake_list_extensions():
- return (None, {'extensions': [{'alias': 'fake1'},
- {'alias': 'fake2'},
- {'alias': 'not_fake'}]})
+ return (None, [{'alias': 'fake1'}, {'alias': 'fake2'},
+ {'alias': 'not_fake'}])
fake_os = mock.MagicMock()
fake_os.extensions_client.list_extensions = fake_list_extensions
self.useFixture(mockpatch.PatchObject(
@@ -339,9 +338,9 @@
def test_verify_extensions_nova_v3(self):
def fake_list_extensions():
- return (None, {'extensions': [{'alias': 'fake1'},
- {'alias': 'fake2'},
- {'alias': 'not_fake'}]})
+ return (None, [{'alias': 'fake1'},
+ {'alias': 'fake2'},
+ {'alias': 'not_fake'}])
fake_os = mock.MagicMock()
fake_os.extensions_v3_client.list_extensions = fake_list_extensions
self.useFixture(mockpatch.PatchObject(
diff --git a/tempest/tests/common/test_debug.py b/tempest/tests/common/test_debug.py
deleted file mode 100644
index 8a880f2..0000000
--- a/tempest/tests/common/test_debug.py
+++ /dev/null
@@ -1,122 +0,0 @@
-# Copyright 2014 NEC Corporation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from tempest.common import debug
-from tempest import config
-from tempest.openstack.common.fixture import mockpatch
-from tempest import test
-from tempest.tests import base
-from tempest.tests import fake_config
-
-
-class TestDebug(base.TestCase):
-
- def setUp(self):
- super(TestDebug, self).setUp()
- self.useFixture(fake_config.ConfigFixture())
- self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
-
- common_pre = 'tempest.common.commands'
- self.ip_addr_raw_mock = self.patch(common_pre + '.ip_addr_raw')
- self.ip_route_raw_mock = self.patch(common_pre + '.ip_route_raw')
- self.iptables_raw_mock = self.patch(common_pre + '.iptables_raw')
- self.ip_ns_list_mock = self.patch(common_pre + '.ip_ns_list')
- self.ip_ns_addr_mock = self.patch(common_pre + '.ip_ns_addr')
- self.ip_ns_route_mock = self.patch(common_pre + '.ip_ns_route')
- self.iptables_ns_mock = self.patch(common_pre + '.iptables_ns')
- self.ovs_db_dump_mock = self.patch(common_pre + '.ovs_db_dump')
-
- self.log_mock = self.patch('tempest.common.debug.LOG')
-
- def test_log_ip_ns_debug_disabled(self):
- self.useFixture(mockpatch.PatchObject(test.CONF.debug,
- 'enable', False))
- debug.log_ip_ns()
- self.assertFalse(self.ip_addr_raw_mock.called)
- self.assertFalse(self.log_mock.info.called)
-
- def test_log_ip_ns_debug_enabled(self):
- self.useFixture(mockpatch.PatchObject(test.CONF.debug,
- 'enable', True))
-
- self.ip_ns_list_mock.return_value = [1, 2]
-
- debug.log_ip_ns()
- self.ip_addr_raw_mock.assert_called_with()
- self.assertTrue(self.log_mock.info.called)
- self.ip_route_raw_mock.assert_called_with()
- self.assertEqual(len(debug.TABLES), self.iptables_raw_mock.call_count)
- for table in debug.TABLES:
- self.assertIn(mock.call(table),
- self.iptables_raw_mock.call_args_list)
-
- self.ip_ns_list_mock.assert_called_with()
- self.assertEqual(len(self.ip_ns_list_mock.return_value),
- self.ip_ns_addr_mock.call_count)
- self.assertEqual(len(self.ip_ns_list_mock.return_value),
- self.ip_ns_route_mock.call_count)
- for ns in self.ip_ns_list_mock.return_value:
- self.assertIn(mock.call(ns),
- self.ip_ns_addr_mock.call_args_list)
- self.assertIn(mock.call(ns),
- self.ip_ns_route_mock.call_args_list)
-
- self.assertEqual(len(debug.TABLES) *
- len(self.ip_ns_list_mock.return_value),
- self.iptables_ns_mock.call_count)
- for ns in self.ip_ns_list_mock.return_value:
- for table in debug.TABLES:
- self.assertIn(mock.call(ns, table),
- self.iptables_ns_mock.call_args_list)
-
- def test_log_ovs_db_debug_disabled(self):
- self.useFixture(mockpatch.PatchObject(test.CONF.debug,
- 'enable', False))
- self.useFixture(mockpatch.PatchObject(test.CONF.service_available,
- 'neutron', False))
- debug.log_ovs_db()
- self.assertFalse(self.ovs_db_dump_mock.called)
-
- self.useFixture(mockpatch.PatchObject(test.CONF.debug,
- 'enable', True))
- self.useFixture(mockpatch.PatchObject(test.CONF.service_available,
- 'neutron', False))
- debug.log_ovs_db()
- self.assertFalse(self.ovs_db_dump_mock.called)
-
- self.useFixture(mockpatch.PatchObject(test.CONF.debug,
- 'enable', False))
- self.useFixture(mockpatch.PatchObject(test.CONF.service_available,
- 'neutron', True))
- debug.log_ovs_db()
- self.assertFalse(self.ovs_db_dump_mock.called)
-
- def test_log_ovs_db_debug_enabled(self):
- self.useFixture(mockpatch.PatchObject(test.CONF.debug,
- 'enable', True))
- self.useFixture(mockpatch.PatchObject(test.CONF.service_available,
- 'neutron', True))
- debug.log_ovs_db()
- self.ovs_db_dump_mock.assert_called_with()
-
- def test_log_net_debug(self):
- self.log_ip_ns_mock = self.patch('tempest.common.debug.log_ip_ns')
- self.log_ovs_db_mock = self.patch('tempest.common.debug.log_ovs_db')
-
- debug.log_net_debug()
- self.log_ip_ns_mock.assert_called_with()
- self.log_ovs_db_mock.assert_called_with()
diff --git a/tempest/tests/test_commands.py b/tempest/tests/test_commands.py
deleted file mode 100644
index 2379741..0000000
--- a/tempest/tests/test_commands.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# Copyright 2014 NEC Corporation. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import subprocess
-
-import mock
-
-from tempest.common import commands
-from tempest.tests import base
-
-
-class TestCommands(base.TestCase):
-
- def setUp(self):
- super(TestCommands, self).setUp()
- self.subprocess_args = {'stdout': subprocess.PIPE,
- 'stderr': subprocess.STDOUT}
-
- @mock.patch('subprocess.Popen')
- def test_ip_addr_raw(self, mock):
- expected = ['/usr/bin/sudo', '-n', 'ip', 'a']
- commands.ip_addr_raw()
- mock.assert_called_once_with(expected, **self.subprocess_args)
-
- @mock.patch('subprocess.Popen')
- def test_ip_route_raw(self, mock):
- expected = ['/usr/bin/sudo', '-n', 'ip', 'r']
- commands.ip_route_raw()
- mock.assert_called_once_with(expected, **self.subprocess_args)
-
- @mock.patch('subprocess.Popen')
- def test_ip_ns_raw(self, mock):
- expected = ['/usr/bin/sudo', '-n', 'ip', 'netns', 'list']
- commands.ip_ns_raw()
- mock.assert_called_once_with(expected, **self.subprocess_args)
-
- @mock.patch('subprocess.Popen')
- def test_iptables_raw(self, mock):
- table = 'filter'
- expected = ['/usr/bin/sudo', '-n', 'iptables', '--line-numbers',
- '-L', '-nv', '-t',
- '%s' % table]
- commands.iptables_raw(table)
- mock.assert_called_once_with(expected, **self.subprocess_args)
-
- @mock.patch('subprocess.Popen')
- def test_ip_ns_list(self, mock):
- expected = ['/usr/bin/sudo', '-n', 'ip', 'netns', 'list']
- commands.ip_ns_list()
- mock.assert_called_once_with(expected, **self.subprocess_args)
-
- @mock.patch('subprocess.Popen')
- def test_ip_ns_addr(self, mock):
- ns_list = commands.ip_ns_list()
- for ns in ns_list:
- expected = ['/usr/bin/sudo', '-n', 'ip', 'netns', 'exec', ns,
- 'ip', 'a']
- commands.ip_ns_addr(ns)
- mock.assert_called_once_with(expected, **self.subprocess_args)
-
- @mock.patch('subprocess.Popen')
- def test_ip_ns_route(self, mock):
- ns_list = commands.ip_ns_list()
- for ns in ns_list:
- expected = ['/usr/bin/sudo', '-n', 'ip', 'netns', 'exec', ns,
- 'ip', 'r']
- commands.ip_ns_route(ns)
- mock.assert_called_once_with(expected, **self.subprocess_args)
-
- @mock.patch('subprocess.Popen')
- def test_iptables_ns(self, mock):
- table = 'filter'
- ns_list = commands.ip_ns_list()
- for ns in ns_list:
- expected = ['/usr/bin/sudo', '-n', 'ip', 'netns', 'exec', ns,
- 'iptables', '-v', '-S', '-t', table]
- commands.iptables_ns(ns, table)
- mock.assert_called_once_with(expected, **self.subprocess_args)