Merge "Add Credentials Provider factory"
diff --git a/tempest/api/compute/admin/test_fixed_ips_negative.py b/tempest/api/compute/admin/test_fixed_ips_negative.py
index 90be820..8d6a7fc 100644
--- a/tempest/api/compute/admin/test_fixed_ips_negative.py
+++ b/tempest/api/compute/admin/test_fixed_ips_negative.py
@@ -68,7 +68,10 @@
# NOTE(maurosr): since this exercises the same code snippet, we do it
# only for reserve action
body = {"reserve": "None"}
- self.assertRaises(exceptions.NotFound,
+        # NOTE(eliqiao): in Juno the exception is NotFound, but on master the
+        # error code was changed to BadRequest; tempest should accept both
+        # exceptions
+ self.assertRaises((exceptions.NotFound, exceptions.BadRequest),
self.client.reserve_fixed_ip,
"my.invalid.ip", body)
diff --git a/tempest/api/compute/images/test_images_oneserver.py b/tempest/api/compute/images/test_images_oneserver.py
index c0b6730..459d78b 100644
--- a/tempest/api/compute/images/test_images_oneserver.py
+++ b/tempest/api/compute/images/test_images_oneserver.py
@@ -105,7 +105,11 @@
raise self.skipException("Not testable in XML")
# prefix character is:
# http://www.fileformat.info/info/unicode/char/1F4A9/index.htm
- utf8_name = data_utils.rand_name(u'\xF0\x9F\x92\xA9')
+
+            # We use a string with a 3-byte utf-8 character due to glance
+            # bug #1370954, which returns a 500 if mysql is used as the
+            # backend and it attempts to store a 4-byte utf-8 character
+ utf8_name = data_utils.rand_name('\xe2\x82\xa1')
resp, body = self.client.create_image(self.server_id, utf8_name)
image_id = data_utils.parse_image_id(resp['location'])
self.addCleanup(self.client.delete_image, image_id)
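For context on the comment above, the new name uses a character that UTF-8 encodes in 3 bytes, while the old one needed 4; a small standalone check (not part of tempest):

three_byte = u'\u20a1'      # COLON SIGN; '\xe2\x82\xa1' above is its utf-8 byte sequence
four_byte = u'\U0001f4a9'   # the 4-byte character referenced by the removed line

print(len(three_byte.encode('utf-8')))  # 3
print(len(four_byte.encode('utf-8')))   # 4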
diff --git a/tempest/api/compute/security_groups/test_security_group_rules.py b/tempest/api/compute/security_groups/test_security_group_rules.py
index 901c377..45b913a 100644
--- a/tempest/api/compute/security_groups/test_security_group_rules.py
+++ b/tempest/api/compute/security_groups/test_security_group_rules.py
@@ -27,13 +27,9 @@
super(SecurityGroupRulesTestJSON, cls).resource_setup()
cls.client = cls.security_groups_client
cls.neutron_available = CONF.service_available.neutron
-
- @classmethod
- def setUpClass(self):
- super(SecurityGroupRulesTestJSON, self).setUpClass()
- self.ip_protocol = 'tcp'
- self.from_port = 22
- self.to_port = 22
+ cls.ip_protocol = 'tcp'
+ cls.from_port = 22
+ cls.to_port = 22
@test.attr(type='smoke')
@test.services('network')
@@ -54,31 +50,46 @@
@test.attr(type='smoke')
@test.services('network')
- def test_security_group_rules_create_with_optional_arguments(self):
+ def test_security_group_rules_create_with_optional_cidr(self):
# Positive test: Creation of Security Group rule
- # with optional arguments
+ # with optional argument cidr
# should be successful
- secgroup1 = None
- secgroup2 = None
+ # Creating a Security Group to add rules to it
+ resp, security_group = self.create_security_group()
+ parent_group_id = security_group['id']
+
+ # Adding rules to the created Security Group with optional cidr
+ cidr = '10.2.3.124/24'
+ self.client.create_security_group_rule(parent_group_id,
+ self.ip_protocol,
+ self.from_port,
+ self.to_port,
+ cidr=cidr)
+
+ @test.attr(type='smoke')
+ @test.services('network')
+ def test_security_group_rules_create_with_optional_group_id(self):
+ # Positive test: Creation of Security Group rule
+ # with optional argument group_id
+ # should be successful
+
# Creating a Security Group to add rules to it
resp, security_group = self.create_security_group()
secgroup1 = security_group['id']
+
# Creating a Security Group so as to assign group_id to the rule
resp, security_group = self.create_security_group()
secgroup2 = security_group['id']
- # Adding rules to the created Security Group with optional arguments
+
+ # Adding rules to the created Security Group with optional group_id
parent_group_id = secgroup1
- cidr = '10.2.3.124/24'
group_id = secgroup2
- resp, rule = \
- self.client.create_security_group_rule(parent_group_id,
- self.ip_protocol,
- self.from_port,
- self.to_port,
- cidr=cidr,
- group_id=group_id)
- self.assertEqual(200, resp.status)
+ self.client.create_security_group_rule(parent_group_id,
+ self.ip_protocol,
+ self.from_port,
+ self.to_port,
+ group_id=group_id)
@test.attr(type='smoke')
@test.services('network')
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index 5df8d82..25dc87d 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -42,6 +42,7 @@
personality = [{'path': '/test.txt',
'contents': base64.b64encode(file_contents)}]
cls.client = cls.servers_client
+ cls.network_client = cls.os.network_client
cli_resp = cls.create_test_server(name=cls.name,
meta=cls.meta,
accessIPv4=cls.accessIPv4,
@@ -124,6 +125,40 @@
self.assertEqual(200, resp.status)
self.assertIn(server['id'], server_group['members'])
+ @testtools.skipUnless(CONF.service_available.neutron,
+ 'Neutron service must be available.')
+ def test_verify_multiple_nics_order(self):
+ # Verify that the networks order given at the server creation is
+ # preserved within the server.
+ name_net1 = data_utils.rand_name(self.__class__.__name__)
+ _, net1 = self.network_client.create_network(name=name_net1)
+ name_net2 = data_utils.rand_name(self.__class__.__name__)
+ _, net2 = self.network_client.create_network(name=name_net2)
+
+ _, subnet1 = self.network_client.create_subnet(
+ network_id=net1['network']['id'],
+ cidr='19.80.0.0/24',
+ ip_version=4)
+ _, subnet2 = self.network_client.create_subnet(
+ network_id=net2['network']['id'],
+ cidr='19.86.0.0/24',
+ ip_version=4)
+
+ networks = [{'uuid': net1['network']['id']},
+ {'uuid': net2['network']['id']}]
+
+ _, server_multi_nics = self.create_test_server(
+ networks=networks, wait_until='ACTIVE')
+
+ _, addresses = self.client.list_addresses(server_multi_nics['id'])
+
+ expected_addr = ['19.80.0.2', '19.86.0.2']
+
+ addr = [addresses[name_net1][0]['addr'],
+ addresses[name_net2][0]['addr']]
+
+ self.assertEqual(expected_addr, addr)
+
class ServersWithSpecificFlavorTestJSON(base.BaseV2ComputeAdminTest):
disk_config = 'AUTO'
diff --git a/tempest/api/identity/base.py b/tempest/api/identity/base.py
index a225f12..1e4973b 100644
--- a/tempest/api/identity/base.py
+++ b/tempest/api/identity/base.py
@@ -202,7 +202,7 @@
def _try_wrapper(func, item, **kwargs):
try:
if kwargs:
- func(item['id'], kwargs)
+ func(item['id'], **kwargs)
else:
func(item['id'])
except exceptions.NotFound:
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index ec6973e..91e3e14 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -83,6 +83,7 @@
cls.fw_rules = []
cls.fw_policies = []
cls.ipsecpolicies = []
+ cls.ethertype = "IPv" + str(cls._ip_version)
@classmethod
def resource_cleanup(cls):
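The new ethertype attribute is derived from the class-level _ip_version, so IPv6 test classes only need to override that one attribute; a minimal standalone sketch of the pattern (class names here are illustrative):

class BaseNetTest(object):
    _ip_version = 4

    @classmethod
    def resource_setup(cls):
        # mirrors the line added to tempest/api/network/base.py
        cls.ethertype = "IPv" + str(cls._ip_version)


class IPv6NetTest(BaseNetTest):
    _ip_version = 6


BaseNetTest.resource_setup()
IPv6NetTest.resource_setup()
print(BaseNetTest.ethertype)  # IPv4
print(IPv6NetTest.ethertype)  # IPv6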
diff --git a/tempest/api/network/test_fwaas_extensions.py b/tempest/api/network/test_fwaas_extensions.py
index 11588d6..8e2b7f5 100644
--- a/tempest/api/network/test_fwaas_extensions.py
+++ b/tempest/api/network/test_fwaas_extensions.py
@@ -36,6 +36,8 @@
List firewall policies
Create firewall policy
Update firewall policy
+ Insert firewall rule to policy
+ Remove firewall rule from policy
Delete firewall policy
Show firewall policy
List firewall
@@ -62,6 +64,14 @@
except exceptions.NotFound:
pass
+ def _try_delete_rule(self, rule_id):
+ # delete rule, if it exists
+ try:
+ self.client.delete_firewall_rule(rule_id)
+ # if rule is not found, this means it was deleted in the test
+ except exceptions.NotFound:
+ pass
+
def _try_delete_firewall(self, fw_id):
# delete firewall, if it exists
try:
@@ -211,6 +221,40 @@
# Delete firewall
self.client.delete_firewall(firewall_id)
+ @test.attr(type='smoke')
+ def test_insert_remove_firewall_rule_from_policy(self):
+ # Create firewall rule
+ resp, body = self.client.create_firewall_rule(
+ name=data_utils.rand_name("fw-rule"),
+ action="allow",
+ protocol="tcp")
+ fw_rule_id = body['firewall_rule']['id']
+ self.addCleanup(self._try_delete_rule, fw_rule_id)
+ # Create firewall policy
+ _, body = self.client.create_firewall_policy(
+ name=data_utils.rand_name("fw-policy"))
+ fw_policy_id = body['firewall_policy']['id']
+ self.addCleanup(self._try_delete_policy, fw_policy_id)
+
+ # Insert rule to firewall policy
+ self.client.insert_firewall_rule_in_policy(
+ fw_policy_id, fw_rule_id, '', '')
+
+ # Verify insertion of rule in policy
+ self.assertIn(fw_rule_id, self._get_list_fw_rule_ids(fw_policy_id))
+ # Remove rule from the firewall policy
+ self.client.remove_firewall_rule_from_policy(
+ fw_policy_id, fw_rule_id)
+
+ # Verify removal of rule from firewall policy
+ self.assertNotIn(fw_rule_id, self._get_list_fw_rule_ids(fw_policy_id))
+
+ def _get_list_fw_rule_ids(self, fw_policy_id):
+ _, fw_policy = self.client.show_firewall_policy(
+ fw_policy_id)
+ return [ruleid for ruleid in fw_policy['firewall_policy']
+ ['firewall_rules']]
+
class FWaaSExtensionTestXML(FWaaSExtensionTestJSON):
_interface = 'xml'
diff --git a/tempest/api/network/test_security_groups.py b/tempest/api/network/test_security_groups.py
index 9764b4d..e20b58e 100644
--- a/tempest/api/network/test_security_groups.py
+++ b/tempest/api/network/test_security_groups.py
@@ -17,11 +17,15 @@
from tempest.api.network import base_security_groups as base
from tempest.common.utils import data_utils
+from tempest import config
from tempest import test
+CONF = config.CONF
+
class SecGroupTest(base.BaseSecGroupTest):
_interface = 'json'
+ _tenant_network_cidr = CONF.network.tenant_network_cidr
@classmethod
def resource_setup(cls):
@@ -30,6 +34,40 @@
msg = "security-group extension not enabled."
raise cls.skipException(msg)
+ def _create_verify_security_group_rule(self, sg_id, direction,
+ ethertype, protocol,
+ port_range_min,
+ port_range_max,
+ remote_group_id=None,
+ remote_ip_prefix=None):
+ # Create Security Group rule with the input params and validate
+ # that SG rule is created with the same parameters.
+ resp, rule_create_body = self.client.create_security_group_rule(
+ security_group_id=sg_id,
+ direction=direction,
+ ethertype=ethertype,
+ protocol=protocol,
+ port_range_min=port_range_min,
+ port_range_max=port_range_max,
+ remote_group_id=remote_group_id,
+ remote_ip_prefix=remote_ip_prefix
+ )
+
+ sec_group_rule = rule_create_body['security_group_rule']
+ self.addCleanup(self._delete_security_group_rule,
+ sec_group_rule['id'])
+
+ expected = {'direction': direction, 'protocol': protocol,
+ 'ethertype': ethertype, 'port_range_min': port_range_min,
+ 'port_range_max': port_range_max,
+ 'remote_group_id': remote_group_id,
+ 'remote_ip_prefix': remote_ip_prefix}
+ for key, value in six.iteritems(expected):
+ self.assertEqual(value, sec_group_rule[key],
+ "Field %s of the created security group "
+ "rule does not match with %s." %
+ (key, value))
+
@test.attr(type='smoke')
def test_list_security_groups(self):
# Verify the that security group belonging to tenant exist in list
@@ -80,7 +118,8 @@
_, rule_create_body = self.client.create_security_group_rule(
security_group_id=group_create_body['security_group']['id'],
protocol=protocol,
- direction='ingress'
+ direction='ingress',
+ ethertype=self.ethertype
)
# Show details of the created security rule
@@ -102,30 +141,93 @@
@test.attr(type='smoke')
def test_create_security_group_rule_with_additional_args(self):
- # Verify creating security group rule with the following
- # arguments works: "protocol": "tcp", "port_range_max": 77,
- # "port_range_min": 77, "direction":"ingress".
- group_create_body, _ = self._create_security_group()
+ """Verify security group rule with additional arguments works.
+ direction:ingress, ethertype:[IPv4/IPv6],
+ protocol:tcp, port_range_min:77, port_range_max:77
+ """
+ group_create_body, _ = self._create_security_group()
+ sg_id = group_create_body['security_group']['id']
direction = 'ingress'
protocol = 'tcp'
port_range_min = 77
port_range_max = 77
- _, rule_create_body = self.client.create_security_group_rule(
- security_group_id=group_create_body['security_group']['id'],
- direction=direction,
- protocol=protocol,
- port_range_min=port_range_min,
- port_range_max=port_range_max
- )
+ self._create_verify_security_group_rule(sg_id, direction,
+ self.ethertype, protocol,
+ port_range_min,
+ port_range_max)
- sec_group_rule = rule_create_body['security_group_rule']
+ @test.attr(type='smoke')
+ def test_create_security_group_rule_with_icmp_type_code(self):
+ """Verify security group rule for icmp protocol works.
- self.assertEqual(sec_group_rule['direction'], direction)
- self.assertEqual(sec_group_rule['protocol'], protocol)
- self.assertEqual(int(sec_group_rule['port_range_min']), port_range_min)
- self.assertEqual(int(sec_group_rule['port_range_max']), port_range_max)
+ Specify icmp type (port_range_min) and icmp code
+        (port_range_max) with different values. A separate testcase
+ is added for icmp protocol as icmp validation would be
+ different from tcp/udp.
+ """
+ group_create_body, _ = self._create_security_group()
+
+ sg_id = group_create_body['security_group']['id']
+ direction = 'ingress'
+ protocol = 'icmp'
+ icmp_type_codes = [(3, 2), (2, 3), (3, 0), (2, None)]
+ for icmp_type, icmp_code in icmp_type_codes:
+ self._create_verify_security_group_rule(sg_id, direction,
+ self.ethertype, protocol,
+ icmp_type, icmp_code)
+
+ @test.attr(type='smoke')
+ def test_create_security_group_rule_with_remote_group_id(self):
+ # Verify creating security group rule with remote_group_id works
+ sg1_body, _ = self._create_security_group()
+ sg2_body, _ = self._create_security_group()
+
+ sg_id = sg1_body['security_group']['id']
+ direction = 'ingress'
+ protocol = 'udp'
+ port_range_min = 50
+ port_range_max = 55
+ remote_id = sg2_body['security_group']['id']
+ self._create_verify_security_group_rule(sg_id, direction,
+ self.ethertype, protocol,
+ port_range_min,
+ port_range_max,
+ remote_group_id=remote_id)
+
+ @test.attr(type='smoke')
+ def test_create_security_group_rule_with_remote_ip_prefix(self):
+ # Verify creating security group rule with remote_ip_prefix works
+ sg1_body, _ = self._create_security_group()
+
+ sg_id = sg1_body['security_group']['id']
+ direction = 'ingress'
+ protocol = 'tcp'
+ port_range_min = 76
+ port_range_max = 77
+ ip_prefix = self._tenant_network_cidr
+ self._create_verify_security_group_rule(sg_id, direction,
+ self.ethertype, protocol,
+ port_range_min,
+ port_range_max,
+ remote_ip_prefix=ip_prefix)
class SecGroupTestXML(SecGroupTest):
_interface = 'xml'
+
+
+class SecGroupIPv6Test(SecGroupTest):
+ _ip_version = 6
+ _tenant_network_cidr = CONF.network.tenant_network_v6_cidr
+
+ @classmethod
+ def resource_setup(cls):
+ if not CONF.network_feature_enabled.ipv6:
+ skip_msg = "IPv6 Tests are disabled."
+ raise cls.skipException(skip_msg)
+ super(SecGroupIPv6Test, cls).resource_setup()
+
+
+class SecGroupIPv6TestXML(SecGroupIPv6Test):
+ _interface = 'xml'
diff --git a/tempest/api/network/test_security_groups_negative.py b/tempest/api/network/test_security_groups_negative.py
index 9c6c267..97e4cb7 100644
--- a/tempest/api/network/test_security_groups_negative.py
+++ b/tempest/api/network/test_security_groups_negative.py
@@ -16,12 +16,16 @@
import uuid
from tempest.api.network import base_security_groups as base
+from tempest import config
from tempest import exceptions
from tempest import test
+CONF = config.CONF
+
class NegativeSecGroupTest(base.BaseSecGroupTest):
_interface = 'json'
+ _tenant_network_cidr = CONF.network.tenant_network_cidr
@classmethod
def resource_setup(cls):
@@ -60,23 +64,87 @@
self.assertRaises(
exceptions.BadRequest, self.client.create_security_group_rule,
security_group_id=group_create_body['security_group']['id'],
- protocol=pname, direction='ingress')
+ protocol=pname, direction='ingress', ethertype=self.ethertype)
+
+ @test.attr(type=['negative', 'gate'])
+ def test_create_security_group_rule_with_bad_remote_ip_prefix(self):
+ group_create_body, _ = self._create_security_group()
+
+ # Create rule with bad remote_ip_prefix
+ prefix = ['192.168.1./24', '192.168.1.1/33', 'bad_prefix', '256']
+ for remote_ip_prefix in prefix:
+ self.assertRaises(
+ exceptions.BadRequest, self.client.create_security_group_rule,
+ security_group_id=group_create_body['security_group']['id'],
+ protocol='tcp', direction='ingress', ethertype=self.ethertype,
+ remote_ip_prefix=remote_ip_prefix)
+
+ @test.attr(type=['negative', 'gate'])
+ def test_create_security_group_rule_with_non_existent_remote_groupid(self):
+ group_create_body, _ = self._create_security_group()
+ non_exist_id = str(uuid.uuid4())
+
+ # Create rule with non existent remote_group_id
+ group_ids = ['bad_group_id', non_exist_id]
+ for remote_group_id in group_ids:
+ self.assertRaises(
+ exceptions.NotFound, self.client.create_security_group_rule,
+ security_group_id=group_create_body['security_group']['id'],
+ protocol='tcp', direction='ingress', ethertype=self.ethertype,
+ remote_group_id=remote_group_id)
+
+ @test.attr(type=['negative', 'gate'])
+ def test_create_security_group_rule_with_remote_ip_and_group(self):
+ sg1_body, _ = self._create_security_group()
+ sg2_body, _ = self._create_security_group()
+
+ # Create rule specifying both remote_ip_prefix and remote_group_id
+ prefix = self._tenant_network_cidr
+ self.assertRaises(
+ exceptions.BadRequest, self.client.create_security_group_rule,
+ security_group_id=sg1_body['security_group']['id'],
+ protocol='tcp', direction='ingress',
+ ethertype=self.ethertype, remote_ip_prefix=prefix,
+ remote_group_id=sg2_body['security_group']['id'])
+
+ @test.attr(type=['negative', 'gate'])
+ def test_create_security_group_rule_with_bad_ethertype(self):
+ group_create_body, _ = self._create_security_group()
+
+ # Create rule with bad ethertype
+ ethertype = 'bad_ethertype'
+ self.assertRaises(
+ exceptions.BadRequest, self.client.create_security_group_rule,
+ security_group_id=group_create_body['security_group']['id'],
+ protocol='udp', direction='ingress', ethertype=ethertype)
@test.attr(type=['negative', 'gate'])
def test_create_security_group_rule_with_invalid_ports(self):
group_create_body, _ = self._create_security_group()
- # Create rule with invalid ports
+ # Create rule for tcp protocol with invalid ports
states = [(-16, 80, 'Invalid value for port -16'),
(80, 79, 'port_range_min must be <= port_range_max'),
(80, 65536, 'Invalid value for port 65536'),
+ (None, 6, 'port_range_min must be <= port_range_max'),
(-16, 65536, 'Invalid value for port')]
for pmin, pmax, msg in states:
ex = self.assertRaises(
exceptions.BadRequest, self.client.create_security_group_rule,
security_group_id=group_create_body['security_group']['id'],
protocol='tcp', port_range_min=pmin, port_range_max=pmax,
- direction='ingress')
+ direction='ingress', ethertype=self.ethertype)
+ self.assertIn(msg, str(ex))
+
+ # Create rule for icmp protocol with invalid ports
+ states = [(1, 256, 'Invalid value for ICMP code'),
+ (300, 1, 'Invalid value for ICMP type')]
+ for pmin, pmax, msg in states:
+ ex = self.assertRaises(
+ exceptions.BadRequest, self.client.create_security_group_rule,
+ security_group_id=group_create_body['security_group']['id'],
+ protocol='icmp', port_range_min=pmin, port_range_max=pmax,
+ direction='ingress', ethertype=self.ethertype)
self.assertIn(msg, str(ex))
@test.attr(type=['negative', 'smoke'])
@@ -88,14 +156,54 @@
name=name)
@test.attr(type=['negative', 'smoke'])
+ def test_create_duplicate_security_group_rule_fails(self):
+ # Create duplicate security group rule, it should fail.
+ body, _ = self._create_security_group()
+
+ min_port = 66
+ max_port = 67
+ # Create a rule with valid params
+ resp, _ = self.client.create_security_group_rule(
+ security_group_id=body['security_group']['id'],
+ direction='ingress',
+ ethertype=self.ethertype,
+ protocol='tcp',
+ port_range_min=min_port,
+ port_range_max=max_port
+ )
+
+ # Try creating the same security group rule, it should fail
+ self.assertRaises(
+ exceptions.Conflict, self.client.create_security_group_rule,
+ security_group_id=body['security_group']['id'],
+ protocol='tcp', direction='ingress', ethertype=self.ethertype,
+ port_range_min=min_port, port_range_max=max_port)
+
+ @test.attr(type=['negative', 'smoke'])
def test_create_security_group_rule_with_non_existent_security_group(self):
# Create security group rules with not existing security group.
non_existent_sg = str(uuid.uuid4())
self.assertRaises(exceptions.NotFound,
self.client.create_security_group_rule,
security_group_id=non_existent_sg,
- direction='ingress')
+ direction='ingress', ethertype=self.ethertype)
class NegativeSecGroupTestXML(NegativeSecGroupTest):
_interface = 'xml'
+
+
+class NegativeSecGroupIPv6Test(NegativeSecGroupTest):
+ _ip_version = 6
+ _tenant_network_cidr = CONF.network.tenant_network_v6_cidr
+
+ @classmethod
+ def resource_setup(cls):
+ if not CONF.network_feature_enabled.ipv6:
+ skip_msg = "IPv6 Tests are disabled."
+ raise cls.skipException(skip_msg)
+ super(NegativeSecGroupIPv6Test, cls).resource_setup()
+
+
+class NegativeSecGroupIPv6TestXML(NegativeSecGroupIPv6Test):
+ _interface = 'xml'
diff --git a/tempest/api/telemetry/test_telemetry_notification_api.py b/tempest/api/telemetry/test_telemetry_notification_api.py
index 3782b70..42e2a2d 100644
--- a/tempest/api/telemetry/test_telemetry_notification_api.py
+++ b/tempest/api/telemetry/test_telemetry_notification_api.py
@@ -32,7 +32,6 @@
@test.attr(type="gate")
@testtools.skipIf(not CONF.service_available.nova,
"Nova is not available.")
- @test.skip_because(bug="1336755")
def test_check_nova_notification(self):
resp, body = self.create_server()
diff --git a/tempest/api_schema/request/compute/flavors.py b/tempest/api_schema/request/compute/flavors.py
index 8fe9e3a..adaaf27 100644
--- a/tempest/api_schema/request/compute/flavors.py
+++ b/tempest/api_schema/request/compute/flavors.py
@@ -40,14 +40,19 @@
"json-schema": {
"type": "object",
"properties": {
- "name": {"type": "string"},
- "ram": {"type": "integer", "minimum": 1},
- "vcpus": {"type": "integer", "minimum": 1},
- "disk": {"type": "integer"},
- "id": {"type": "integer"},
- "swap": {"type": "integer"},
- "rxtx_factor": {"type": "integer"},
- "OS-FLV-EXT-DATA:ephemeral": {"type": "integer"}
+ "flavor": {
+ "type": "object",
+ "properties": {
+ "name": {"type": "string",
+ "exclude_tests": ["gen_str_min_length"]},
+ "ram": {"type": "integer", "minimum": 1},
+ "vcpus": {"type": "integer", "minimum": 1},
+ "disk": {"type": "integer"},
+ "id": {"type": "integer",
+ "exclude_tests": ["gen_none", "gen_string"]
+ },
+ }
+ }
}
}
}
diff --git a/tempest/cmd/cleanup.py b/tempest/cmd/cleanup.py
index 9ae3dfb..a305e42 100644
--- a/tempest/cmd/cleanup.py
+++ b/tempest/cmd/cleanup.py
@@ -12,7 +12,6 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-# @author: David Paterson
"""
Utility for cleaning up environment after Tempest run
diff --git a/tempest/cmd/cleanup_service.py b/tempest/cmd/cleanup_service.py
index f5f0db3..0d3c6c6 100644
--- a/tempest/cmd/cleanup_service.py
+++ b/tempest/cmd/cleanup_service.py
@@ -13,11 +13,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-'''
-Created on Sep 3, 2014
-@author: David_Paterson
-'''
from tempest import config
from tempest.openstack.common import log as logging
from tempest import test
diff --git a/tempest/cmd/javelin.py b/tempest/cmd/javelin.py
index 3c41dd9..0adc7e0 100755
--- a/tempest/cmd/javelin.py
+++ b/tempest/cmd/javelin.py
@@ -20,6 +20,7 @@
"""
import argparse
+import collections
import datetime
import os
import sys
@@ -43,7 +44,7 @@
OPTS = {}
USERS = {}
-RES = {}
+RES = collections.defaultdict(list)
LOG = None
@@ -282,6 +283,8 @@
If in check mode confirm that the oldest sample available is from
before the upgrade.
"""
+ if not self.res.get('telemetry'):
+ return
LOG.info("checking telemetry")
for server in self.res['servers']:
client = client_for_user(server['owner'])
@@ -508,6 +511,9 @@
def create_volumes(volumes):
+ if not volumes:
+ return
+ LOG.info("Creating volumes")
for volume in volumes:
client = client_for_user(volume['owner'])
@@ -630,7 +636,7 @@
global RES
get_options()
setup_logging()
- RES = load_resources(OPTS.resources)
+ RES.update(load_resources(OPTS.resources))
if OPTS.mode == 'create':
create_resources()
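The switch of RES to collections.defaultdict(list) means resource types that are absent from the YAML file iterate as empty lists instead of raising KeyError; a minimal illustration (standalone, not tempest code):

import collections

res = collections.defaultdict(list)
res.update({'servers': [{'name': 'srv1'}]})

for server in res['servers']:
    print(server['name'])        # srv1

# 'volumes' was never loaded from the resources file: with a plain dict
# this lookup would raise KeyError, here it simply yields an empty list.
for volume in res['volumes']:
    print(volume)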
diff --git a/tempest/cmd/resources.yaml b/tempest/cmd/resources.yaml
index 19ee6d5..2d5e686 100644
--- a/tempest/cmd/resources.yaml
+++ b/tempest/cmd/resources.yaml
@@ -57,3 +57,4 @@
name: javelin1
owner: javelin
file: /etc/hosts
+telemetry: true
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
index 5046bff..f426e4d 100755
--- a/tempest/cmd/verify_tempest_config.py
+++ b/tempest/cmd/verify_tempest_config.py
@@ -122,6 +122,18 @@
not CONF.volume_feature_enabled.api_v2, update)
+def verify_api_versions(os, service, update):
+ verify = {
+ 'cinder': verify_cinder_api_versions,
+ 'glance': verify_glance_api_versions,
+ 'keystone': verify_keystone_api_versions,
+ 'nova': verify_nova_api_versions,
+ }
+ if service not in verify:
+ return
+ verify[service](os, update)
+
+
def get_extension_client(os, service):
extensions_client = {
'nova': os.extensions_client,
@@ -337,10 +349,13 @@
elif service not in services:
continue
results = verify_extensions(os, service, results)
- verify_keystone_api_versions(os, update)
- verify_glance_api_versions(os, update)
- verify_nova_api_versions(os, update)
- verify_cinder_api_versions(os, update)
+
+    # Verify API versions of all services in the keystone catalog and keystone
+ # itself.
+ services.append('keystone')
+ for service in services:
+ verify_api_versions(os, service, update)
+
display_results(results, update, replace)
if update:
conf_file.close()
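verify_api_versions above uses a simple dispatch table keyed by service name and silently skips services it cannot verify; a generic sketch of that pattern (function and service names below are placeholders, not tempest APIs):

def verify_alpha_api_versions(os, update):
    print("verifying alpha against %s (update=%s)" % (os, update))


def verify_beta_api_versions(os, update):
    print("verifying beta against %s (update=%s)" % (os, update))


def verify_api_versions(os, service, update):
    # dispatch table: unknown services are a no-op, so the caller can loop
    # over every service in the catalog without special-casing anything
    verify = {
        'alpha': verify_alpha_api_versions,
        'beta': verify_beta_api_versions,
    }
    if service not in verify:
        return
    verify[service](os, update)


verify_api_versions('env1', 'alpha', True)     # dispatches to the alpha verifier
verify_api_versions('env1', 'unknown', True)   # unknown service: no-op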
diff --git a/tempest/common/generator/base_generator.py b/tempest/common/generator/base_generator.py
index 0398af1..3f405b1 100644
--- a/tempest/common/generator/base_generator.py
+++ b/tempest/common/generator/base_generator.py
@@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import copy
import functools
import jsonschema
@@ -30,9 +31,11 @@
return expected_result
-def generator_type(*args):
+def generator_type(*args, **kwargs):
def wrapper(func):
func.types = args
+ for key in kwargs:
+ setattr(func, key, kwargs[key])
return func
return wrapper
@@ -106,37 +109,74 @@
jsonschema.Draft4Validator.check_schema(schema['json-schema'])
jsonschema.validate(schema, self.schema)
- def generate(self, schema):
+ def generate_scenarios(self, schema, path=None):
"""
- Generate an json dictionary based on a schema.
- Only one value is mis-generated for each dictionary created.
+        Generates the scenarios (all possible test cases) out of the given
+ schema.
- Any generator must return a list of tuples or a single tuple.
- The values of this tuple are:
- result[0]: Name of the test
- result[1]: json schema for the test
- result[2]: expected result of the test (can be None)
+ :param schema: a dict style schema (see ``BasicGeneratorSet.schema``)
+ :param path: the schema path if the given schema is a subschema
"""
- LOG.debug("generate_invalid: %s" % schema)
- schema_type = schema["type"]
- if isinstance(schema_type, list):
+ schema_type = schema['type']
+ scenarios = []
+
+ if schema_type == 'object':
+ properties = schema["properties"]
+ for attribute, definition in properties.iteritems():
+ current_path = copy.copy(path)
+ if path is not None:
+ current_path.append(attribute)
+ else:
+ current_path = [attribute]
+ scenarios.extend(
+ self.generate_scenarios(definition, current_path))
+ elif isinstance(schema_type, list):
if "integer" in schema_type:
schema_type = "integer"
else:
raise Exception("non-integer list types not supported")
- result = []
- if schema_type not in self.types_dict:
- raise TypeError("generator (%s) doesn't support type: %s"
- % (self.__class__.__name__, schema_type))
for generator in self.types_dict[schema_type]:
- ret = generator(schema)
- if ret is not None:
- if isinstance(ret, list):
- result.extend(ret)
- elif isinstance(ret, tuple):
- result.append(ret)
- else:
- raise Exception("generator (%s) returns invalid result: %s"
- % (generator, ret))
- LOG.debug("result: %s" % result)
- return result
+ if hasattr(generator, "needed_property"):
+ prop = generator.needed_property
+ if (prop not in schema or
+ schema[prop] is None or
+ schema[prop] is False):
+ continue
+
+ name = generator.__name__
+ if ("exclude_tests" in schema and
+ name in schema["exclude_tests"]):
+ continue
+ if path is not None:
+ name = "%s_%s" % ("_".join(path), name)
+ scenarios.append({
+ "_negtest_name": name,
+ "_negtest_generator": generator,
+ "_negtest_schema": schema,
+ "_negtest_path": path})
+ return scenarios
+
+ def generate_payload(self, test, schema):
+ """
+        Generates one jsonschema out of the given test. generate_scenarios
+        must be called beforehand to register all needed variables on the test.
+
+ :param test: A test object (scenario) with all _negtest variables on it
+ :param schema: schema for the test
+ """
+ generator = test._negtest_generator
+ ret = generator(test._negtest_schema)
+ path = copy.copy(test._negtest_path)
+ expected_result = None
+
+ if ret is not None:
+ generator_result = generator(test._negtest_schema)
+ invalid_snippet = generator_result[1]
+ expected_result = generator_result[2]
+ element = path.pop()
+ if len(path) > 0:
+ schema_snip = reduce(dict.get, path, schema)
+ schema_snip[element] = invalid_snippet
+ else:
+ schema[element] = invalid_snippet
+ return expected_result
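A hedged sketch of how the new two-step generator API could be exercised; the class path below is assumed to be tempest's default negative generator, and the schema is purely illustrative:

from tempest.common.generator import negative_generator

gen = negative_generator.NegativeTestGenerator()

schema = {
    "type": "object",
    "properties": {
        "name": {"type": "string", "minLength": 2},
        "ram": {"type": "integer", "minimum": 1},
    },
}

# generate_scenarios() enumerates one scenario per (attribute, generator)
# pair; each dict carries _negtest_name, _negtest_generator, _negtest_schema
# and _negtest_path, which tempest later exposes as test attributes so that
# generate_payload() can build a single invalid body per scenario.
for scenario in gen.generate_scenarios(schema):
    print(scenario["_negtest_name"])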
diff --git a/tempest/common/generator/negative_generator.py b/tempest/common/generator/negative_generator.py
index 4f3d2cd..1d5ed43 100644
--- a/tempest/common/generator/negative_generator.py
+++ b/tempest/common/generator/negative_generator.py
@@ -47,65 +47,32 @@
if min_length > 0:
return "x" * (min_length - 1)
- @base.generator_type("string")
+ @base.generator_type("string", needed_property="maxLength")
@base.simple_generator
def gen_str_max_length(self, schema):
max_length = schema.get("maxLength", -1)
- if max_length > -1:
- return "x" * (max_length + 1)
+ return "x" * (max_length + 1)
- @base.generator_type("integer")
+ @base.generator_type("integer", needed_property="minimum")
@base.simple_generator
def gen_int_min(self, schema):
- if "minimum" in schema:
- minimum = schema["minimum"]
- if "exclusiveMinimum" not in schema:
- minimum -= 1
- return minimum
+ minimum = schema["minimum"]
+ if "exclusiveMinimum" not in schema:
+ minimum -= 1
+ return minimum
- @base.generator_type("integer")
+ @base.generator_type("integer", needed_property="maximum")
@base.simple_generator
def gen_int_max(self, schema):
- if "maximum" in schema:
- maximum = schema["maximum"]
- if "exclusiveMaximum" not in schema:
- maximum += 1
- return maximum
+ maximum = schema["maximum"]
+ if "exclusiveMaximum" not in schema:
+ maximum += 1
+ return maximum
- @base.generator_type("object")
- def gen_obj_remove_attr(self, schema):
- invalids = []
- valid_schema = valid.ValidTestGenerator().generate_valid(schema)
- required = schema.get("required", [])
- for r in required:
- new_valid = copy.deepcopy(valid_schema)
- del new_valid[r]
- invalids.append(("gen_obj_remove_attr", new_valid, None))
- return invalids
-
- @base.generator_type("object")
+ @base.generator_type("object", needed_property="additionalProperties")
@base.simple_generator
def gen_obj_add_attr(self, schema):
valid_schema = valid.ValidTestGenerator().generate_valid(schema)
- if not schema.get("additionalProperties", True):
- new_valid = copy.deepcopy(valid_schema)
- new_valid["$$$$$$$$$$"] = "xxx"
- return new_valid
-
- @base.generator_type("object")
- def gen_inv_prop_obj(self, schema):
- LOG.debug("generate_invalid_object: %s" % schema)
- valid_schema = valid.ValidTestGenerator().generate_valid(schema)
- invalids = []
- properties = schema["properties"]
-
- for k, v in properties.iteritems():
- for invalid in self.generate(v):
- LOG.debug(v)
- new_valid = copy.deepcopy(valid_schema)
- new_valid[k] = invalid[1]
- name = "prop_%s_%s" % (k, invalid[0])
- invalids.append((name, new_valid, invalid[2]))
-
- LOG.debug("generate_invalid_object return: %s" % invalids)
- return invalids
+ new_valid = copy.deepcopy(valid_schema)
+ new_valid["$$$$$$$$$$"] = "xxx"
+ return new_valid
diff --git a/tempest/common/generator/valid_generator.py b/tempest/common/generator/valid_generator.py
index 0d7b398..7b80afc 100644
--- a/tempest/common/generator/valid_generator.py
+++ b/tempest/common/generator/valid_generator.py
@@ -54,5 +54,28 @@
obj[k] = self.generate_valid(v)
return obj
+ def generate(self, schema):
+ schema_type = schema["type"]
+ if isinstance(schema_type, list):
+ if "integer" in schema_type:
+ schema_type = "integer"
+ else:
+ raise Exception("non-integer list types not supported")
+ result = []
+ if schema_type not in self.types_dict:
+ raise TypeError("generator (%s) doesn't support type: %s"
+ % (self.__class__.__name__, schema_type))
+ for generator in self.types_dict[schema_type]:
+ ret = generator(schema)
+ if ret is not None:
+ if isinstance(ret, list):
+ result.extend(ret)
+ elif isinstance(ret, tuple):
+ result.append(ret)
+ else:
+ raise Exception("generator (%s) returns invalid result: %s"
+ % (generator, ret))
+ return result
+
def generate_valid(self, schema):
return self.generate(schema)[0][1]
diff --git a/tempest/hacking/checks.py b/tempest/hacking/checks.py
index 55cc89b..6014cff 100644
--- a/tempest/hacking/checks.py
+++ b/tempest/hacking/checks.py
@@ -24,7 +24,7 @@
PYTHON_CLIENT_RE = re.compile('import (%s)client' % '|'.join(PYTHON_CLIENTS))
TEST_DEFINITION = re.compile(r'^\s*def test.*')
-SETUPCLASS_DEFINITION = re.compile(r'^\s*def setUpClass')
+SETUP_TEARDOWN_CLASS_DEFINITION = re.compile(r'^\s+def (setUp|tearDown)Class')
SCENARIO_DECORATOR = re.compile(r'\s*@.*services\((.*)\)')
VI_HEADER_RE = re.compile(r"^#\s+vim?:.+")
mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
@@ -58,15 +58,15 @@
"T104: Scenario tests require a service decorator")
-def no_setupclass_for_unit_tests(physical_line, filename):
+def no_setup_teardown_class_for_tests(physical_line, filename):
if pep8.noqa(physical_line):
return
- if 'tempest/tests' in filename:
- if SETUPCLASS_DEFINITION.match(physical_line):
+ if 'tempest/test.py' not in filename:
+ if SETUP_TEARDOWN_CLASS_DEFINITION.match(physical_line):
return (physical_line.find('def'),
- "T105: setUpClass can not be used with unit tests")
+ "T105: (setUp|tearDown)Class can not be used in tests")
def no_vi_headers(physical_line, line_number, lines):
@@ -119,7 +119,7 @@
def factory(register):
register(import_no_clients_in_api)
register(scenario_tests_need_service_tags)
- register(no_setupclass_for_unit_tests)
+ register(no_setup_teardown_class_for_tests)
register(no_vi_headers)
register(service_tags_not_in_module_path)
register(no_mutable_default_args)
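The tightened T105 check now matches only indented setUpClass/tearDownClass definitions (the leading \s+ excludes module-level functions); a quick standalone check of the regex:

import re

SETUP_TEARDOWN_CLASS_DEFINITION = re.compile(r'^\s+def (setUp|tearDown)Class')

print(bool(SETUP_TEARDOWN_CLASS_DEFINITION.match('    def setUpClass(cls):')))     # True
print(bool(SETUP_TEARDOWN_CLASS_DEFINITION.match('    def tearDownClass(cls):')))  # True
print(bool(SETUP_TEARDOWN_CLASS_DEFINITION.match('def setup_module():')))          # False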
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 7946e06..ec68230 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -379,6 +379,12 @@
LOG.debug(self.servers_client.get_console_output(server['id'],
length=None))
+ def _log_net_info(self, exc):
+ # network debug is called as part of ssh init
+ if not isinstance(exc, exceptions.SSHTimeout):
+ LOG.debug('Network information on a devstack host')
+ debug.log_net_debug()
+
def create_server_snapshot(self, server, name=None):
# Glance client
_image_client = self.image_client
@@ -659,9 +665,7 @@
ex_msg += ": " + msg
LOG.exception(ex_msg)
self._log_console_output(servers)
- # network debug is called as part of ssh init
- if not isinstance(e, exceptions.SSHTimeout):
- debug.log_net_debug()
+ self._log_net_info(e)
raise
def _check_tenant_network_connectivity(self, server,
@@ -685,9 +689,7 @@
except Exception as e:
LOG.exception('Tenant network connectivity check failed')
self._log_console_output(servers_for_debug)
- # network debug is called as part of ssh init
- if not isinstance(e, exceptions.SSHTimeout):
- debug.log_net_debug()
+ self._log_net_info(e)
raise
def _check_remote_connectivity(self, source, dest, should_succeed=True):
diff --git a/tempest/scenario/test_security_groups_basic_ops.py b/tempest/scenario/test_security_groups_basic_ops.py
index 188dea8..6c36034 100644
--- a/tempest/scenario/test_security_groups_basic_ops.py
+++ b/tempest/scenario/test_security_groups_basic_ops.py
@@ -241,7 +241,11 @@
'security_groups': security_groups,
'tenant_id': tenant.creds.tenant_id
}
- return self.create_server(name=name, create_kwargs=create_kwargs)
+ server = self.create_server(name=name, create_kwargs=create_kwargs)
+ self.assertEqual(
+ sorted([s['name'] for s in security_groups]),
+ sorted([s['name'] for s in server['security_groups']]))
+ return server
def _create_tenant_servers(self, tenant, num=1):
for i in range(num):
diff --git a/tempest/services/compute/json/images_client.py b/tempest/services/compute/json/images_client.py
index 9877391..4af8331 100644
--- a/tempest/services/compute/json/images_client.py
+++ b/tempest/services/compute/json/images_client.py
@@ -76,7 +76,7 @@
def get_image(self, image_id):
"""Returns the details of a single image."""
resp, body = self.get("images/%s" % str(image_id))
- self.expected_success(200, resp)
+ self.expected_success(200, resp.status)
body = json.loads(body)
self.validate_response(schema.get_image, resp, body)
return resp, body['image']
diff --git a/tempest/services/compute/xml/images_client.py b/tempest/services/compute/xml/images_client.py
index 6b15404..94acf36 100644
--- a/tempest/services/compute/xml/images_client.py
+++ b/tempest/services/compute/xml/images_client.py
@@ -127,7 +127,7 @@
def get_image(self, image_id):
"""Returns the details of a single image."""
resp, body = self.get("images/%s" % str(image_id))
- self.expected_success(200, resp)
+ self.expected_success(200, resp.status)
body = self._parse_image(etree.fromstring(body))
return resp, body
diff --git a/tempest/services/compute/xml/servers_client.py b/tempest/services/compute/xml/servers_client.py
index 156d889..06f1b83 100644
--- a/tempest/services/compute/xml/servers_client.py
+++ b/tempest/services/compute/xml/servers_client.py
@@ -349,8 +349,11 @@
networks = xml_utils.Element("networks")
server.append(networks)
for network in kwargs['networks']:
- s = xml_utils.Element("network", uuid=network['uuid'],
- fixed_ip=network['fixed_ip'])
+ if 'fixed_ip' in network:
+ s = xml_utils.Element("network", uuid=network['uuid'],
+ fixed_ip=network['fixed_ip'])
+ else:
+ s = xml_utils.Element("network", uuid=network['uuid'])
networks.append(s)
if 'meta' in kwargs:
diff --git a/tempest/services/identity/v3/json/identity_client.py b/tempest/services/identity/v3/json/identity_client.py
index df424ca..5ad416c 100644
--- a/tempest/services/identity/v3/json/identity_client.py
+++ b/tempest/services/identity/v3/json/identity_client.py
@@ -31,14 +31,11 @@
self.endpoint_url = 'adminURL'
self.api_version = "v3"
- def create_user(self, user_name, **kwargs):
+ def create_user(self, user_name, password=None, project_id=None,
+ email=None, domain_id='default', **kwargs):
"""Creates a user."""
- password = kwargs.get('password', None)
- email = kwargs.get('email', None)
en = kwargs.get('enabled', True)
- project_id = kwargs.get('project_id', None)
description = kwargs.get('description', None)
- domain_id = kwargs.get('domain_id', 'default')
post_body = {
'project_id': project_id,
'description': description,
diff --git a/tempest/services/identity/v3/xml/identity_client.py b/tempest/services/identity/v3/xml/identity_client.py
index 5c43692..fdc0a0a 100644
--- a/tempest/services/identity/v3/xml/identity_client.py
+++ b/tempest/services/identity/v3/xml/identity_client.py
@@ -95,14 +95,11 @@
_json = common.xml_to_json(body)
return _json
- def create_user(self, user_name, **kwargs):
+ def create_user(self, user_name, password=None, project_id=None,
+ email=None, domain_id='default', **kwargs):
"""Creates a user."""
- password = kwargs.get('password', None)
- email = kwargs.get('email', None)
en = kwargs.get('enabled', 'true')
- project_id = kwargs.get('project_id', None)
description = kwargs.get('description', None)
- domain_id = kwargs.get('domain_id', 'default')
post_body = common.Element("user",
xmlns=XMLNS,
name=user_name,
diff --git a/tempest/services/network/json/network_client.py b/tempest/services/network/json/network_client.py
index 16a4f5c..78ed56f 100644
--- a/tempest/services/network/json/network_client.py
+++ b/tempest/services/network/json/network_client.py
@@ -320,3 +320,30 @@
self.rest_client.expected_success(201, resp.status)
body = json.loads(body)
return resp, body
+
+ def insert_firewall_rule_in_policy(self, firewall_policy_id,
+ firewall_rule_id, insert_after="",
+ insert_before=""):
+ uri = '%s/fw/firewall_policies/%s/insert_rule' % (self.uri_prefix,
+ firewall_policy_id)
+ body = {
+ "firewall_rule_id": firewall_rule_id,
+ "insert_after": insert_after,
+ "insert_before": insert_before
+ }
+ body = json.dumps(body)
+ resp, body = self.put(uri, body)
+ self.rest_client.expected_success(200, resp.status)
+ body = json.loads(body)
+ return resp, body
+
+ def remove_firewall_rule_from_policy(self, firewall_policy_id,
+ firewall_rule_id):
+ uri = '%s/fw/firewall_policies/%s/remove_rule' % (self.uri_prefix,
+ firewall_policy_id)
+ update_body = {"firewall_rule_id": firewall_rule_id}
+ update_body = json.dumps(update_body)
+ resp, body = self.put(uri, update_body)
+ self.rest_client.expected_success(200, resp.status)
+ body = json.loads(body)
+ return resp, body
diff --git a/tempest/services/network/xml/network_client.py b/tempest/services/network/xml/network_client.py
index 17b1f8e..c65390e 100644
--- a/tempest/services/network/xml/network_client.py
+++ b/tempest/services/network/xml/network_client.py
@@ -25,7 +25,8 @@
# list of plurals used for xml serialization
PLURALS = ['dns_nameservers', 'host_routes', 'allocation_pools',
'fixed_ips', 'extensions', 'extra_dhcp_opts', 'pools',
- 'health_monitors', 'vips', 'members', 'allowed_address_pairs']
+ 'health_monitors', 'vips', 'members', 'allowed_address_pairs',
+ 'firewall_rules']
def get_rest_client(self, auth_provider):
rc = rest_client.RestClient(auth_provider)
@@ -281,6 +282,27 @@
body = _root_tag_fetcher_and_xml_to_json_parse(body)
return resp, body
+ def insert_firewall_rule_in_policy(self, firewall_policy_id,
+ firewall_rule_id, insert_after="",
+ insert_before=""):
+ uri = '%s/fw/firewall_policies/%s/insert_rule' % (self.uri_prefix,
+ firewall_policy_id)
+ rule = common.Element("firewall_rule_id", firewall_rule_id)
+ resp, body = self.put(uri, str(common.Document(rule)))
+ self.rest_client.expected_success(200, resp.status)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def remove_firewall_rule_from_policy(self, firewall_policy_id,
+ firewall_rule_id):
+ uri = '%s/fw/firewall_policies/%s/remove_rule' % (self.uri_prefix,
+ firewall_policy_id)
+ rule = common.Element("firewall_rule_id", firewall_rule_id)
+ resp, body = self.put(uri, str(common.Document(rule)))
+ self.rest_client.expected_success(200, resp.status)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
def _root_tag_fetcher_and_xml_to_json_parse(xml_returned_body):
body = ET.fromstring(xml_returned_body)
diff --git a/tempest/stress/actions/server_create_destroy.py b/tempest/stress/actions/server_create_destroy.py
index 4a9f0d5..34e299d 100644
--- a/tempest/stress/actions/server_create_destroy.py
+++ b/tempest/stress/actions/server_create_destroy.py
@@ -28,15 +28,13 @@
def run(self):
name = data_utils.rand_name("instance")
self.logger.info("creating %s" % name)
- resp, server = self.manager.servers_client.create_server(
+ _, server = self.manager.servers_client.create_server(
name, self.image, self.flavor)
server_id = server['id']
- assert(resp.status == 202)
self.manager.servers_client.wait_for_server_status(server_id,
'ACTIVE')
self.logger.info("created %s" % server_id)
self.logger.info("deleting %s" % name)
- resp, _ = self.manager.servers_client.delete_server(server_id)
- assert(resp.status == 204)
+ self.manager.servers_client.delete_server(server_id)
self.manager.servers_client.wait_for_server_termination(server_id)
self.logger.info("deleted %s" % server_id)
diff --git a/tempest/stress/actions/ssh_floating.py b/tempest/stress/actions/ssh_floating.py
index d78112c..5bc8cac 100644
--- a/tempest/stress/actions/ssh_floating.py
+++ b/tempest/stress/actions/ssh_floating.py
@@ -74,19 +74,17 @@
self.logger.info("creating %s" % name)
vm_args = self.vm_extra_args.copy()
vm_args['security_groups'] = [self.sec_grp]
- resp, server = servers_client.create_server(name, self.image,
- self.flavor,
- **vm_args)
+ _, server = servers_client.create_server(name, self.image,
+ self.flavor,
+ **vm_args)
self.server_id = server['id']
- assert(resp.status == 202)
if self.wait_after_vm_create:
self.manager.servers_client.wait_for_server_status(self.server_id,
'ACTIVE')
def _destroy_vm(self):
self.logger.info("deleting %s" % self.server_id)
- resp, _ = self.manager.servers_client.delete_server(self.server_id)
- assert(resp.status == 204) # It cannot be 204 if I had to wait..
+ self.manager.servers_client.delete_server(self.server_id)
self.manager.servers_client.wait_for_server_termination(self.server_id)
self.logger.info("deleted %s" % self.server_id)
diff --git a/tempest/stress/actions/volume_attach_delete.py b/tempest/stress/actions/volume_attach_delete.py
index e0238d3..9c4070f 100644
--- a/tempest/stress/actions/volume_attach_delete.py
+++ b/tempest/stress/actions/volume_attach_delete.py
@@ -28,10 +28,9 @@
# Step 1: create volume
name = data_utils.rand_name("volume")
self.logger.info("creating volume: %s" % name)
- resp, volume = self.manager.volumes_client.create_volume(
+ _, volume = self.manager.volumes_client.create_volume(
size=1,
display_name=name)
- assert(resp.status == 200)
self.manager.volumes_client.wait_for_volume_status(volume['id'],
'available')
self.logger.info("created volume: %s" % volume['id'])
@@ -39,20 +38,18 @@
# Step 2: create vm instance
vm_name = data_utils.rand_name("instance")
self.logger.info("creating vm: %s" % vm_name)
- resp, server = self.manager.servers_client.create_server(
+ _, server = self.manager.servers_client.create_server(
vm_name, self.image, self.flavor)
server_id = server['id']
- assert(resp.status == 202)
self.manager.servers_client.wait_for_server_status(server_id, 'ACTIVE')
self.logger.info("created vm %s" % server_id)
# Step 3: attach volume to vm
self.logger.info("attach volume (%s) to vm %s" %
(volume['id'], server_id))
- resp, body = self.manager.servers_client.attach_volume(server_id,
- volume['id'],
- '/dev/vdc')
- assert(resp.status == 200)
+ self.manager.servers_client.attach_volume(server_id,
+ volume['id'],
+ '/dev/vdc')
self.manager.volumes_client.wait_for_volume_status(volume['id'],
'in-use')
self.logger.info("volume (%s) attached to vm %s" %
@@ -60,14 +57,12 @@
# Step 4: delete vm
self.logger.info("deleting vm: %s" % vm_name)
- resp, _ = self.manager.servers_client.delete_server(server_id)
- assert(resp.status == 204)
+ self.manager.servers_client.delete_server(server_id)
self.manager.servers_client.wait_for_server_termination(server_id)
self.logger.info("deleted vm: %s" % server_id)
# Step 5: delete volume
self.logger.info("deleting volume: %s" % volume['id'])
- resp, _ = self.manager.volumes_client.delete_volume(volume['id'])
- assert(resp.status == 202)
+ self.manager.volumes_client.delete_volume(volume['id'])
self.manager.volumes_client.wait_for_resource_deletion(volume['id'])
self.logger.info("deleted volume: %s" % volume['id'])
diff --git a/tempest/stress/actions/volume_attach_verify.py b/tempest/stress/actions/volume_attach_verify.py
index 0d3cb23..a13d890 100644
--- a/tempest/stress/actions/volume_attach_verify.py
+++ b/tempest/stress/actions/volume_attach_verify.py
@@ -24,12 +24,10 @@
def _create_keypair(self):
keyname = data_utils.rand_name("key")
- resp, self.key = self.manager.keypairs_client.create_keypair(keyname)
- assert(resp.status == 200)
+ _, self.key = self.manager.keypairs_client.create_keypair(keyname)
def _delete_keypair(self):
- resp, _ = self.manager.keypairs_client.delete_keypair(self.key['name'])
- assert(resp.status == 202)
+ self.manager.keypairs_client.delete_keypair(self.key['name'])
def _create_vm(self):
self.name = name = data_utils.rand_name("instance")
@@ -38,18 +36,16 @@
vm_args = self.vm_extra_args.copy()
vm_args['security_groups'] = [self.sec_grp]
vm_args['key_name'] = self.key['name']
- resp, server = servers_client.create_server(name, self.image,
- self.flavor,
- **vm_args)
+ _, server = servers_client.create_server(name, self.image,
+ self.flavor,
+ **vm_args)
self.server_id = server['id']
- assert(resp.status == 202)
self.manager.servers_client.wait_for_server_status(self.server_id,
'ACTIVE')
def _destroy_vm(self):
self.logger.info("deleting server: %s" % self.server_id)
- resp, _ = self.manager.servers_client.delete_server(self.server_id)
- assert(resp.status == 204) # It cannot be 204 if I had to wait..
+ self.manager.servers_client.delete_server(self.server_id)
self.manager.servers_client.wait_for_server_termination(self.server_id)
self.logger.info("deleted server: %s" % self.server_id)
@@ -81,10 +77,9 @@
name = data_utils.rand_name("volume")
self.logger.info("creating volume: %s" % name)
volumes_client = self.manager.volumes_client
- resp, self.volume = volumes_client.create_volume(
+ _, self.volume = volumes_client.create_volume(
size=1,
display_name=name)
- assert(resp.status == 200)
volumes_client.wait_for_volume_status(self.volume['id'],
'available')
self.logger.info("created volume: %s" % self.volume['id'])
@@ -92,8 +87,7 @@
def _delete_volume(self):
self.logger.info("deleting volume: %s" % self.volume['id'])
volumes_client = self.manager.volumes_client
- resp, _ = volumes_client.delete_volume(self.volume['id'])
- assert(resp.status == 202)
+ volumes_client.delete_volume(self.volume['id'])
volumes_client.wait_for_resource_deletion(self.volume['id'])
self.logger.info("deleted volume: %s" % self.volume['id'])
@@ -193,10 +187,9 @@
servers_client = self.manager.servers_client
self.logger.info("attach volume (%s) to vm %s" %
(self.volume['id'], self.server_id))
- resp, body = servers_client.attach_volume(self.server_id,
- self.volume['id'],
- self.part_name)
- assert(resp.status == 200)
+ servers_client.attach_volume(self.server_id,
+ self.volume['id'],
+ self.part_name)
self.manager.volumes_client.wait_for_volume_status(self.volume['id'],
'in-use')
if self.enable_ssh_verify:
@@ -204,9 +197,8 @@
% self.server_id)
self.part_wait(self.attach_match_count)
- resp, body = servers_client.detach_volume(self.server_id,
- self.volume['id'])
- assert(resp.status == 202)
+ servers_client.detach_volume(self.server_id,
+ self.volume['id'])
self.manager.volumes_client.wait_for_volume_status(self.volume['id'],
'available')
if self.enable_ssh_verify:
diff --git a/tempest/stress/actions/volume_create_delete.py b/tempest/stress/actions/volume_create_delete.py
index 4e75be0..b1c5bb7 100644
--- a/tempest/stress/actions/volume_create_delete.py
+++ b/tempest/stress/actions/volume_create_delete.py
@@ -20,14 +20,12 @@
name = data_utils.rand_name("volume")
self.logger.info("creating %s" % name)
volumes_client = self.manager.volumes_client
- resp, volume = volumes_client.create_volume(size=1,
- display_name=name)
- assert(resp.status == 200)
+ _, volume = volumes_client.create_volume(size=1,
+ display_name=name)
vol_id = volume['id']
volumes_client.wait_for_volume_status(vol_id, 'available')
self.logger.info("created %s" % volume['id'])
self.logger.info("deleting %s" % name)
- resp, _ = volumes_client.delete_volume(vol_id)
- assert(resp.status == 202)
+ volumes_client.delete_volume(vol_id)
volumes_client.wait_for_resource_deletion(vol_id)
self.logger.info("deleted %s" % vol_id)
diff --git a/tempest/test.py b/tempest/test.py
index 1c94ed4..1c6265d 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -66,35 +66,6 @@
return decorator
-def safe_setup(f):
- """A decorator used to wrap the setUpClass for cleaning up resources
- when setUpClass failed.
-
- Deprecated, see:
- http://specs.openstack.org/openstack/qa-specs/specs/resource-cleanup.html
- """
- @functools.wraps(f)
- def decorator(cls):
- try:
- f(cls)
- except Exception as se:
- etype, value, trace = sys.exc_info()
- if etype is cls.skipException:
- LOG.info("setUpClass skipped: %s:" % se)
- else:
- LOG.exception("setUpClass failed: %s" % se)
- try:
- cls.tearDownClass()
- except Exception as te:
- LOG.exception("tearDownClass failed: %s" % te)
- try:
- raise etype(value), None, trace
- finally:
- del trace # for avoiding circular refs
-
- return decorator
-
-
def get_service_list():
service_list = {
'compute': CONF.service_available.nova,
@@ -123,7 +94,7 @@
def decorator(f):
services = ['compute', 'image', 'baremetal', 'volume', 'orchestration',
'network', 'identity', 'object_storage', 'dashboard',
- 'ceilometer', 'data_processing']
+ 'telemetry', 'data_processing']
for service in args:
if service not in services:
raise exceptions.InvalidServiceTag('%s is not a valid '
@@ -299,7 +270,14 @@
try:
cls.tearDownClass()
except Exception as te:
- LOG.exception("tearDownClass failed: %s" % te)
+ tetype, _, _ = sys.exc_info()
+            # TODO(gmann): Until we split up resource_setup &
+            # resource_cleanup in a more structured way, log
+ # AttributeError as info instead of exception.
+ if tetype is AttributeError:
+ LOG.info("tearDownClass failed: %s" % te)
+ else:
+ LOG.exception("tearDownClass failed: %s" % te)
try:
raise etype(value), None, trace
finally:
@@ -499,13 +477,9 @@
"expected_result": expected_result
}))
if schema is not None:
- for name, schema, expected_result in generator.generate(schema):
- if (expected_result is None and
- "default_result_code" in description):
- expected_result = description["default_result_code"]
- scenario_list.append((name,
- {"schema": schema,
- "expected_result": expected_result}))
+ for scenario in generator.generate_scenarios(schema):
+ scenario_list.append((scenario['_negtest_name'],
+ scenario))
LOG.debug(scenario_list)
return scenario_list
@@ -535,8 +509,14 @@
"""
LOG.info("Executing %s" % description["name"])
LOG.debug(description)
+ generator = importutils.import_class(
+ CONF.negative.test_generator)()
+ schema = description.get("json-schema", None)
method = description["http-method"]
url = description["url"]
+ expected_result = None
+ if "default_result_code" in description:
+ expected_result = description["default_result_code"]
resources = [self.get_resource(r) for
r in description.get("resources", [])]
@@ -546,13 +526,19 @@
# entry (see get_resource).
# We just send a valid json-schema with it
valid_schema = None
- schema = description.get("json-schema", None)
if schema:
valid_schema = \
valid.ValidTestGenerator().generate_valid(schema)
new_url, body = self._http_arguments(valid_schema, url, method)
- elif hasattr(self, "schema"):
- new_url, body = self._http_arguments(self.schema, url, method)
+ elif hasattr(self, "_negtest_name"):
+ schema_under_test = \
+ valid.ValidTestGenerator().generate_valid(schema)
+ local_expected_result = \
+ generator.generate_payload(self, schema_under_test)
+ if local_expected_result is not None:
+ expected_result = local_expected_result
+ new_url, body = \
+ self._http_arguments(schema_under_test, url, method)
else:
raise Exception("testscenarios are not active. Please make sure "
"that your test runner supports the load_tests "
@@ -564,7 +550,7 @@
client = self.client
resp, resp_body = client.send_request(method, new_url,
resources, body=body)
- self._check_negative_response(resp.status, resp_body)
+ self._check_negative_response(expected_result, resp.status, resp_body)
def _http_arguments(self, json_dict, url, method):
LOG.debug("dict: %s url: %s method: %s" % (json_dict, url, method))
@@ -575,8 +561,7 @@
else:
return url, json.dumps(json_dict)
- def _check_negative_response(self, result, body):
- expected_result = getattr(self, "expected_result", None)
+ def _check_negative_response(self, expected_result, result, body):
self.assertTrue(result >= 400 and result < 500 and result != 413,
"Expected client error, got %s:%s" %
(result, body))
diff --git a/tempest/tests/cmd/test_verify_tempest_config.py b/tempest/tests/cmd/test_verify_tempest_config.py
index a28684e..6679c79 100644
--- a/tempest/tests/cmd/test_verify_tempest_config.py
+++ b/tempest/tests/cmd/test_verify_tempest_config.py
@@ -86,6 +86,24 @@
self.assertIn('v2.0', versions)
self.assertIn('v3.0', versions)
+ def test_verify_api_versions(self):
+ api_services = ['cinder', 'glance', 'keystone', 'nova']
+ fake_os = mock.MagicMock()
+ for svc in api_services:
+ m = 'verify_%s_api_versions' % svc
+ with mock.patch.object(verify_tempest_config, m) as verify_mock:
+ verify_tempest_config.verify_api_versions(fake_os, svc, True)
+ verify_mock.assert_called_once_with(fake_os, True)
+
+ def test_verify_api_versions_not_implemented(self):
+ api_services = ['cinder', 'glance', 'keystone', 'nova']
+ fake_os = mock.MagicMock()
+ for svc in api_services:
+ m = 'verify_%s_api_versions' % svc
+ with mock.patch.object(verify_tempest_config, m) as verify_mock:
+ verify_tempest_config.verify_api_versions(fake_os, 'foo', True)
+ self.assertFalse(verify_mock.called)
+
def test_verify_keystone_api_versions_no_v3(self):
self.useFixture(mockpatch.PatchObject(
verify_tempest_config, '_get_unversioned_endpoint',
diff --git a/tempest/tests/common/utils/test_misc.py b/tempest/tests/common/utils/test_misc.py
index aee9805..554027f 100644
--- a/tempest/tests/common/utils/test_misc.py
+++ b/tempest/tests/common/utils/test_misc.py
@@ -82,7 +82,7 @@
self.assertEqual(':tearDown', tearDown())
def test_find_test_caller_teardown_class(self):
- def tearDownClass(cls):
+ def tearDownClass(cls): # noqa
return misc.find_test_caller()
self.assertEqual('TestMisc:tearDownClass',
tearDownClass(self.__class__))
diff --git a/tempest/tests/negative/test_negative_auto_test.py b/tempest/tests/negative/test_negative_auto_test.py
index dddd083..fb1da43 100644
--- a/tempest/tests/negative/test_negative_auto_test.py
+++ b/tempest/tests/negative/test_negative_auto_test.py
@@ -43,9 +43,9 @@
def _check_prop_entries(self, result, entry):
entries = [a for a in result if entry in a[0]]
self.assertIsNotNone(entries)
- self.assertIs(len(entries), 2)
+ self.assertGreater(len(entries), 1)
for entry in entries:
- self.assertIsNotNone(entry[1]['schema'])
+ self.assertIsNotNone(entry[1]['_negtest_name'])
def _check_resource_entries(self, result, entry):
entries = [a for a in result if entry in a[0]]
@@ -57,12 +57,11 @@
def test_generate_scenario(self):
scenarios = test.NegativeAutoTest.\
generate_scenario(self.fake_input_desc)
-
self.assertIsInstance(scenarios, list)
for scenario in scenarios:
self.assertIsInstance(scenario, tuple)
self.assertIsInstance(scenario[0], str)
self.assertIsInstance(scenario[1], dict)
- self._check_prop_entries(scenarios, "prop_minRam")
- self._check_prop_entries(scenarios, "prop_minDisk")
+ self._check_prop_entries(scenarios, "minRam")
+ self._check_prop_entries(scenarios, "minDisk")
self._check_resource_entries(scenarios, "inv_res")
diff --git a/tempest/tests/negative/test_negative_generators.py b/tempest/tests/negative/test_negative_generators.py
index a7af619..2fa6933 100644
--- a/tempest/tests/negative/test_negative_generators.py
+++ b/tempest/tests/negative/test_negative_generators.py
@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import copy
+
import jsonschema
import mock
@@ -86,15 +88,6 @@
class BaseNegativeGenerator(object):
types = ['string', 'integer', 'object']
- fake_input_str = {"type": "string",
- "minLength": 2,
- "maxLength": 8,
- 'results': {'gen_int': 404}}
-
- fake_input_int = {"type": "integer",
- "maximum": 255,
- "minimum": 1}
-
fake_input_obj = {"type": "object",
"properties": {"minRam": {"type": "integer"},
"diskName": {"type": "string"},
@@ -106,31 +99,21 @@
"type": "not_defined"
}
- def _validate_result(self, data):
- self.assertTrue(isinstance(data, list))
- for t in data:
- self.assertIsInstance(t, tuple)
- self.assertEqual(3, len(t))
- self.assertIsInstance(t[0], str)
+ class fake_test_class(object):
+ def __init__(self, scenario):
+ for k, v in scenario.iteritems():
+ setattr(self, k, v)
- def test_generate_string(self):
- result = self.generator.generate(self.fake_input_str)
- self._validate_result(result)
-
- def test_generate_integer(self):
- result = self.generator.generate(self.fake_input_int)
- self._validate_result(result)
-
- def test_generate_obj(self):
- result = self.generator.generate(self.fake_input_obj)
- self._validate_result(result)
+ def _validate_result(self, valid_schema, invalid_schema):
+ for k, v in valid_schema.iteritems():
+ self.assertTrue(k in invalid_schema)
def test_generator_mandatory_functions(self):
for data_type in self.types:
self.assertIn(data_type, self.generator.types_dict)
def test_generate_with_unknown_type(self):
- self.assertRaises(TypeError, self.generator.generate,
+ self.assertRaises(TypeError, self.generator.generate_payload,
self.unknown_type_schema)
@@ -151,3 +134,16 @@
def setUp(self):
super(TestNegativeNegativeGenerator, self).setUp()
self.generator = negative_generator.NegativeTestGenerator()
+
+ def test_generate_obj(self):
+ schema = self.fake_input_obj
+ scenarios = self.generator.generate_scenarios(schema)
+ for scenario in scenarios:
+ test = self.fake_test_class(scenario)
+ valid_schema = \
+ valid_generator.ValidTestGenerator().generate_valid(schema)
+ schema_under_test = copy.copy(valid_schema)
+ expected_result = \
+ self.generator.generate_payload(test, schema_under_test)
+ self.assertEqual(expected_result, None)
+ self._validate_result(valid_schema, schema_under_test)
diff --git a/tempest/tests/test_decorators.py b/tempest/tests/test_decorators.py
index 12104ec..32cefd0 100644
--- a/tempest/tests/test_decorators.py
+++ b/tempest/tests/test_decorators.py
@@ -97,6 +97,28 @@
self._test_services_helper, 'compute',
'volume')
+ def test_services_list(self):
+ service_list = test.get_service_list()
+ for service in service_list:
+ try:
+ self._test_services_helper(service)
+ except exceptions.InvalidServiceTag:
+ self.fail('%s is not listed in the valid service tag list'
+ % service)
+ except KeyError:
+ # NOTE(mtreinish): This condition is to test for an entry in
+ # the outer decorator list but not in the service_list dict.
+ # However, because we're looping over the service_list dict
+ # it's unlikely we'll trigger this. So manual review is still
+ # needed for the list in the outer decorator.
+ self.fail('%s is in the list of valid service tags but there '
+ 'is no corresponding entry in the dict returned from'
+ ' get_service_list()' % service)
+ except testtools.TestCase.skipException:
+ # Test didn't raise an exception because of an incorrect list
+ # entry so move on to the next entry
+ continue
+
class TestStressDecorator(BaseDecoratorsTest):
def _test_stresstest_helper(self, expected_frequency='process',
diff --git a/tempest/tests/test_hacking.py b/tempest/tests/test_hacking.py
index 37ad18e..6857461 100644
--- a/tempest/tests/test_hacking.py
+++ b/tempest/tests/test_hacking.py
@@ -47,13 +47,27 @@
just assertTrue if the check is expected to fail and assertFalse if it
should pass.
"""
- def test_no_setupclass_for_unit_tests(self):
- self.assertTrue(checks.no_setupclass_for_unit_tests(
+ def test_no_setup_teardown_class_for_tests(self):
+ self.assertTrue(checks.no_setup_teardown_class_for_tests(
" def setUpClass(cls):", './tempest/tests/fake_test.py'))
- self.assertIsNone(checks.no_setupclass_for_unit_tests(
+ self.assertIsNone(checks.no_setup_teardown_class_for_tests(
" def setUpClass(cls): # noqa", './tempest/tests/fake_test.py'))
- self.assertFalse(checks.no_setupclass_for_unit_tests(
+ self.assertTrue(checks.no_setup_teardown_class_for_tests(
" def setUpClass(cls):", './tempest/api/fake_test.py'))
+ self.assertTrue(checks.no_setup_teardown_class_for_tests(
+ " def setUpClass(cls):", './tempest/scenario/fake_test.py'))
+ self.assertFalse(checks.no_setup_teardown_class_for_tests(
+ " def setUpClass(cls):", './tempest/test.py'))
+ self.assertTrue(checks.no_setup_teardown_class_for_tests(
+ " def tearDownClass(cls):", './tempest/tests/fake_test.py'))
+ self.assertIsNone(checks.no_setup_teardown_class_for_tests(
+ " def tearDownClass(cls): # noqa", './tempest/tests/fake_test.py'))
+ self.assertTrue(checks.no_setup_teardown_class_for_tests(
+ " def tearDownClass(cls):", './tempest/api/fake_test.py'))
+ self.assertTrue(checks.no_setup_teardown_class_for_tests(
+ " def tearDownClass(cls):", './tempest/scenario/fake_test.py'))
+ self.assertFalse(checks.no_setup_teardown_class_for_tests(
+ " def tearDownClass(cls):", './tempest/test.py'))
def test_import_no_clients_in_api(self):
for client in checks.PYTHON_CLIENTS: