Merge "Remove unnecessary back slash"
diff --git a/doc/source/library/credential_providers.rst b/doc/source/library/credential_providers.rst
index f4eb37d..d96c97a 100644
--- a/doc/source/library/credential_providers.rst
+++ b/doc/source/library/credential_providers.rst
@@ -130,19 +130,18 @@
   # role
   provider.clear_creds()
 
-API Reference
-=============
 
-------------------------------
+API Reference
+-------------
+
 The dynamic credentials module
-------------------------------
+''''''''''''''''''''''''''''''
 
 .. automodule:: tempest.lib.common.dynamic_creds
    :members:
 
---------------------------------------
 The pre-provisioned credentials module
---------------------------------------
+''''''''''''''''''''''''''''''''''''''
 
 .. automodule:: tempest.lib.common.preprov_creds
    :members:
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index 6bec0d7..8308e34 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -96,6 +96,12 @@
         cls.metering_labels = []
         cls.metering_label_rules = []
         cls.ethertype = "IPv" + str(cls._ip_version)
+        if cls._ip_version == 4:
+            cls.cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
+            cls.mask_bits = CONF.network.project_network_mask_bits
+        elif cls._ip_version == 6:
+            cls.cidr = netaddr.IPNetwork(CONF.network.project_network_v6_cidr)
+            cls.mask_bits = CONF.network.project_network_v6_mask_bits
 
     @classmethod
     def resource_cleanup(cls):
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index 88340c1..1c59556 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -34,8 +34,7 @@
     def resource_setup(cls):
         super(BaseNetworkTestResources, cls).resource_setup()
         cls.network = cls.create_network()
-        cls.subnet = cls._create_subnet_with_last_subnet_block(cls.network,
-                                                               cls._ip_version)
+        cls.subnet = cls._create_subnet_with_last_subnet_block(cls.network)
         cls._subnet_data = {6: {'gateway':
                                 str(cls._get_gateway_from_tempest_conf(6)),
                                 'allocation_pools':
@@ -64,20 +63,13 @@
                                 'new_dns_nameservers': ['7.8.8.8', '7.8.4.4']}}
 
     @classmethod
-    def _create_subnet_with_last_subnet_block(cls, network, ip_version):
+    def _create_subnet_with_last_subnet_block(cls, network):
         # Derive last subnet CIDR block from project CIDR and
         # create the subnet with that derived CIDR
-        if ip_version == 4:
-            cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
-            mask_bits = CONF.network.project_network_mask_bits
-        elif ip_version == 6:
-            cidr = netaddr.IPNetwork(CONF.network.project_network_v6_cidr)
-            mask_bits = CONF.network.project_network_v6_mask_bits
-
-        subnet_cidr = list(cidr.subnet(mask_bits))[-1]
+        subnet_cidr = list(cls.cidr.subnet(cls.mask_bits))[-1]
         gateway_ip = str(netaddr.IPAddress(subnet_cidr) + 1)
         return cls.create_subnet(network, gateway=gateway_ip,
-                                 cidr=subnet_cidr, mask_bits=mask_bits)
+                                 cidr=subnet_cidr, mask_bits=cls.mask_bits)
 
     @classmethod
     def _get_gateway_from_tempest_conf(cls, ip_version):
@@ -487,14 +479,8 @@
     def test_bulk_create_delete_subnet(self):
         networks = [self.create_network(), self.create_network()]
         # Creates 2 subnets in one request
-        if self._ip_version == 4:
-            cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
-            mask_bits = CONF.network.project_network_mask_bits
-        else:
-            cidr = netaddr.IPNetwork(CONF.network.project_network_v6_cidr)
-            mask_bits = CONF.network.project_network_v6_mask_bits
-
-        cidrs = [subnet_cidr for subnet_cidr in cidr.subnet(mask_bits)]
+        cidrs = [subnet_cidr
+                 for subnet_cidr in self.cidr.subnet(self.mask_bits)]
 
         names = [data_utils.rand_name('subnet-') for i in range(len(networks))]
         subnets_list = []
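For illustration, the slimmed-down helper now reduces to the following (the CIDR value is assumed, not taken from the configuration):

    import netaddr

    cidr = netaddr.IPNetwork('10.100.0.0/16')   # plays the role of cls.cidr
    mask_bits = 28                              # plays the role of cls.mask_bits

    # Last /28 block inside the project range ...
    subnet_cidr = list(cidr.subnet(mask_bits))[-1]        # 10.100.255.240/28
    # ... whose first address past the network address becomes the gateway
    gateway_ip = str(netaddr.IPAddress(subnet_cidr) + 1)  # '10.100.255.241'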
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index 5c36747..eb53fbb 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -84,25 +84,13 @@
         self.assertTrue(port1['admin_state_up'])
         self.assertTrue(port2['admin_state_up'])
 
-    @classmethod
-    def _get_ipaddress_from_tempest_conf(cls):
-        """Return subnet with mask bits for configured CIDR """
-        if cls._ip_version == 4:
-            cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
-            cidr.prefixlen = CONF.network.project_network_mask_bits
-
-        elif cls._ip_version == 6:
-            cidr = netaddr.IPNetwork(CONF.network.project_network_v6_cidr)
-            cidr.prefixlen = CONF.network.project_network_v6_mask_bits
-
-        return cidr
-
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('0435f278-40ae-48cb-a404-b8a087bc09b1')
     def test_create_port_in_allowed_allocation_pools(self):
         network = self.create_network()
         net_id = network['id']
-        address = self._get_ipaddress_from_tempest_conf()
+        address = self.cidr
+        address.prefixlen = self.mask_bits
         if ((address.version == 4 and address.prefixlen >= 30) or
            (address.version == 6 and address.prefixlen >= 126)):
             msg = ("Subnet %s isn't large enough for the test" % address.cidr)
diff --git a/tempest/api/network/test_routers.py b/tempest/api/network/test_routers.py
index 3883cc2..981f29c 100644
--- a/tempest/api/network/test_routers.py
+++ b/tempest/api/network/test_routers.py
@@ -59,13 +59,6 @@
             msg = "router extension not enabled."
             raise cls.skipException(msg)
 
-    @classmethod
-    def resource_setup(cls):
-        super(RoutersTest, cls).resource_setup()
-        cls.tenant_cidr = (CONF.network.project_network_cidr
-                           if cls._ip_version == 4 else
-                           CONF.network.project_network_v6_cidr)
-
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('f64403e2-8483-4b34-8ccd-b09a87bcc68c')
     @testtools.skipUnless(CONF.network.public_network_id,
@@ -202,7 +195,7 @@
     def test_update_delete_extra_route(self):
         # Create different cidr for each subnet to avoid cidr duplicate
         # The cidr starts from project_cidr
-        next_cidr = netaddr.IPNetwork(self.tenant_cidr)
+        next_cidr = netaddr.IPNetwork(self.cidr)
         # Prepare to build several routes
         test_routes = []
         routes_num = 4
@@ -278,7 +271,7 @@
         network02 = self.create_network(
             network_name=data_utils.rand_name('router-network02-'))
         subnet01 = self.create_subnet(network01)
-        sub02_cidr = netaddr.IPNetwork(self.tenant_cidr).next()
+        sub02_cidr = netaddr.IPNetwork(self.cidr).next()
         subnet02 = self.create_subnet(network02, cidr=sub02_cidr)
         router = self._create_router()
         interface01 = self._add_router_interface_with_subnet_id(router['id'],
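As an aside, with an assumed CIDR, IPNetwork.next() is what keeps the extra subnets from overlapping the project network or each other:

    import netaddr

    cidr = netaddr.IPNetwork('10.100.0.0/24')   # plays the role of self.cidr

    # .next() returns the adjacent network of the same prefix length
    second = cidr.next()     # IPNetwork('10.100.1.0/24')
    third = second.next()    # IPNetwork('10.100.2.0/24')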
diff --git a/tempest/api/network/test_routers_negative.py b/tempest/api/network/test_routers_negative.py
index 60b9de7..27bda3a 100644
--- a/tempest/api/network/test_routers_negative.py
+++ b/tempest/api/network/test_routers_negative.py
@@ -40,9 +40,6 @@
         cls.router = cls.create_router()
         cls.network = cls.create_network()
         cls.subnet = cls.create_subnet(cls.network)
-        cls.tenant_cidr = (CONF.network.project_network_cidr
-                           if cls._ip_version == 4 else
-                           CONF.network.project_network_v6_cidr)
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('37a94fc0-a834-45b9-bd23-9a81d2fd1e22')
@@ -57,7 +54,7 @@
     @decorators.idempotent_id('11836a18-0b15-4327-a50b-f0d9dc66bddd')
     def test_router_add_gateway_net_not_external_returns_400(self):
         alt_network = self.create_network()
-        sub_cidr = netaddr.IPNetwork(self.tenant_cidr).next()
+        sub_cidr = netaddr.IPNetwork(self.cidr).next()
         self.create_subnet(alt_network, cidr=sub_cidr)
         self.assertRaises(lib_exc.BadRequest,
                           self.routers_client.update_router,
diff --git a/tempest/common/validation_resources.py b/tempest/common/validation_resources.py
index ae9d584..3a04b9a 100644
--- a/tempest/common/validation_resources.py
+++ b/tempest/common/validation_resources.py
@@ -1,4 +1,5 @@
 # Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
+# Copyright (c) 2017 IBM Corp.
 #    Licensed under the Apache License, Version 2.0 (the "License");
 #    you may not use this file except in compliance with the License.
 #    You may obtain a copy of the License at
@@ -12,6 +13,7 @@
 #    limitations under the License.
 
 from oslo_log import log as logging
+from oslo_utils import excutils
 
 from tempest.lib.common.utils import data_utils
 from tempest.lib import exceptions as lib_exc
@@ -72,26 +74,48 @@
     # Security Group Rules clients require different parameters depending on
     # the network service in use
     if add_rule:
-        if use_neutron:
-            security_group_rules_client.create_security_group_rule(
-                security_group_id=security_group['id'],
-                protocol='tcp',
-                ethertype=ethertype,
-                port_range_min=22,
-                port_range_max=22,
-                direction='ingress')
-            security_group_rules_client.create_security_group_rule(
-                security_group_id=security_group['id'],
-                protocol='icmp',
-                ethertype=ethertype,
-                direction='ingress')
-        else:
-            security_group_rules_client.create_security_group_rule(
-                parent_group_id=security_group['id'], ip_protocol='tcp',
-                from_port=22, to_port=22)
-            security_group_rules_client.create_security_group_rule(
-                parent_group_id=security_group['id'], ip_protocol='icmp',
-                from_port=-1, to_port=-1)
+        try:
+            if use_neutron:
+                security_group_rules_client.create_security_group_rule(
+                    security_group_id=security_group['id'],
+                    protocol='tcp',
+                    ethertype=ethertype,
+                    port_range_min=22,
+                    port_range_max=22,
+                    direction='ingress')
+                security_group_rules_client.create_security_group_rule(
+                    security_group_id=security_group['id'],
+                    protocol='icmp',
+                    ethertype=ethertype,
+                    direction='ingress')
+            else:
+                security_group_rules_client.create_security_group_rule(
+                    parent_group_id=security_group['id'], ip_protocol='tcp',
+                    from_port=22, to_port=22)
+                security_group_rules_client.create_security_group_rule(
+                    parent_group_id=security_group['id'], ip_protocol='icmp',
+                    from_port=-1, to_port=-1)
+        except Exception as sgc_exc:
+            # If adding security group rules fails, we cleanup the SG before
+            # re-raising the failure up
+            with excutils.save_and_reraise_exception():
+                try:
+                    msg = ('Error while provisioning security group rules in '
+                           'security group %s. Trying to cleanup.')
+                    # The exception's logging is already handled, so using
+                    # debug here just to provide more context
+                    LOG.debug(msg, sgc_exc)
+                    clear_validation_resources(
+                        clients, keypair=None, floating_ip=None,
+                        security_group=security_group,
+                        use_neutron=use_neutron)
+                except Exception as cleanup_exc:
+                    msg = ('Error during cleanup of a security group. '
+                           'The cleanup was triggered by an exception during '
+                           'the provisioning of security group rules.\n'
+                           'Provisioning exception: %s\n'
+                           'First cleanup exception: %s')
+                    LOG.exception(msg, sgc_exc, cleanup_exc)
     LOG.debug("SSH Validation resource security group with tcp and icmp "
               "rules %s created", sg_name)
     return security_group
@@ -130,9 +154,9 @@
     :param floating_network_name: The name of the floating IP pool used to
         provision the floating IP. Only used if a floating IP is requested and
         with nova-net.
-    :returns: A dictionary with the same keys as the input
-        `validation_resources` and the resources for values in the format
-         they are returned by the API.
+    :returns: A dictionary with the resources in the format they are returned
+        by the API. Valid keys are 'keypair', 'floating_ip' and
+        'security_group'.
 
     Examples::
 
@@ -157,35 +181,64 @@
     """
     # Create and Return the validation resources required to validate a VM
     validation_data = {}
-    if keypair:
-        keypair_name = data_utils.rand_name('keypair')
-        validation_data.update(
-            clients.compute.KeyPairsClient().create_keypair(
-                name=keypair_name))
-        LOG.debug("Validation resource key %s created", keypair_name)
-    if security_group:
-        validation_data['security_group'] = create_ssh_security_group(
-            clients, add_rule=security_group_rules,
-            use_neutron=use_neutron, ethertype=ethertype)
-    if floating_ip:
-        floating_ip_client = _network_service(
-            clients, use_neutron).FloatingIPsClient()
-        if use_neutron:
-            floatingip = floating_ip_client.create_floatingip(
-                floating_network_id=floating_network_id)
-            # validation_resources['floating_ip'] has historically looked
-            # like a compute API POST /os-floating-ips response, so we need
-            # to mangle it a bit for a Neutron response with different
-            # fields.
-            validation_data['floating_ip'] = floatingip['floatingip']
-            validation_data['floating_ip']['ip'] = (
-                floatingip['floatingip']['floating_ip_address'])
-        else:
-            # NOTE(mriedem): The os-floating-ips compute API was deprecated
-            # in the 2.36 microversion. Any tests for CRUD operations on
-            # floating IPs using the compute API should be capped at 2.35.
-            validation_data.update(floating_ip_client.create_floating_ip(
-                pool=floating_network_name))
+    try:
+        if keypair:
+            keypair_name = data_utils.rand_name('keypair')
+            validation_data.update(
+                clients.compute.KeyPairsClient().create_keypair(
+                    name=keypair_name))
+            LOG.debug("Validation resource key %s created", keypair_name)
+        if security_group:
+            validation_data['security_group'] = create_ssh_security_group(
+                clients, add_rule=security_group_rules,
+                use_neutron=use_neutron, ethertype=ethertype)
+        if floating_ip:
+            floating_ip_client = _network_service(
+                clients, use_neutron).FloatingIPsClient()
+            if use_neutron:
+                floatingip = floating_ip_client.create_floatingip(
+                    floating_network_id=floating_network_id)
+                # validation_resources['floating_ip'] has historically looked
+                # like a compute API POST /os-floating-ips response, so we need
+                # to mangle it a bit for a Neutron response with different
+                # fields.
+                validation_data['floating_ip'] = floatingip['floatingip']
+                validation_data['floating_ip']['ip'] = (
+                    floatingip['floatingip']['floating_ip_address'])
+            else:
+                # NOTE(mriedem): The os-floating-ips compute API was deprecated
+                # in the 2.36 microversion. Any tests for CRUD operations on
+                # floating IPs using the compute API should be capped at 2.35.
+                validation_data.update(floating_ip_client.create_floating_ip(
+                    pool=floating_network_name))
+            LOG.debug("Validation resource floating IP %s created",
+                      validation_data['floating_ip'])
+    except Exception as prov_exc:
+        # If something goes wrong, cleanup as much as possible before we
+        # re-raise the exception
+        with excutils.save_and_reraise_exception():
+            if validation_data:
+                # Cleanup may fail as well
+                try:
+                    msg = ('Error while provisioning validation resources %s. '
+                           'Trying to cleanup what we provisioned so far: %s')
+                    # The exception's logging is already handled, so using
+                    # debug here just to provide more context
+                    LOG.debug(msg, prov_exc, str(validation_data))
+                    clear_validation_resources(
+                        clients,
+                        keypair=validation_data.get('keypair', None),
+                        floating_ip=validation_data.get('floating_ip', None),
+                        security_group=validation_data.get('security_group',
+                                                           None),
+                        use_neutron=use_neutron)
+                except Exception as cleanup_exc:
+                    msg = ('Error during cleanup of validation resources. '
+                           'The cleanup was triggered by an exception during '
+                           'the provisioning step.\n'
+                           'Provisioning exception: %s\n'
+                           'First cleanup exception: %s')
+                    LOG.exception(msg, prov_exc, cleanup_exc)
     return validation_data
 
 
@@ -209,9 +262,6 @@
         Defaults to None.
     :param use_neutron: When True resources are provisioned via neutron, when
         False resources are provisioned via nova.
-    :returns: A dictionary with the same keys as the input
-        `validation_resources` and the resources for values in the format
-         they are returned by the API.
 
     Examples::
 
diff --git a/tempest/lib/common/dynamic_creds.py b/tempest/lib/common/dynamic_creds.py
index 9a6c8f5..4f1a883 100644
--- a/tempest/lib/common/dynamic_creds.py
+++ b/tempest/lib/common/dynamic_creds.py
@@ -28,6 +28,43 @@
 
 
 class DynamicCredentialProvider(cred_provider.CredentialProvider):
+    """Creates credentials dynamically for tests
+
+    A credential provider that, based on an initial set of
+    admin credentials, creates new credentials on the fly for
+    tests to use and then discard.
+
+    :param str identity_version: identity API version to use, `v2` or `v3`
+    :param str admin_role: name of the admin role added to admin users
+    :param str name: when specified, this is included in the names of
+                     the dynamically created resources
+    :param str credentials_domain: name of the domain where the users
+                                   are created. If not defined, the project
+                                   domain from admin_credentials is used
+    :param dict network_resources: network resources to be created for
+                                   the created credentials
+    :param Credentials admin_creds: initial admin credentials
+    :param bool identity_admin_domain_scope: Set to true if admin should be
+                                             scoped to the domain. By
+                                             default this is False and the
+                                             admin role is scoped to the
+                                             project.
+    :param str identity_admin_role: The role name to use for admin
+    :param list extra_roles: A list of strings for extra roles that should
+                             be assigned to all created users
+    :param bool neutron_available: Whether we are running in an environment
+                                   with neutron
+    :param bool create_networks: Whether dynamic project networks should be
+                                 created or not
+    :param project_network_cidr: The CIDR to use for created project
+                                 networks
+    :param project_network_mask_bits: The network mask bits to use for
+                                      created project networks
+    :param public_network_id: The id for the public network to use
+    :param identity_admin_endpoint_type: The endpoint type for identity
+                                         admin clients. Defaults to public.
+    :param identity_uri: Identity URI of the target cloud
+    """
 
     def __init__(self, identity_version, name=None, network_resources=None,
                  credentials_domain=None, admin_role=None, admin_creds=None,
@@ -37,43 +74,6 @@
                  project_network_cidr=None, project_network_mask_bits=None,
                  public_network_id=None, resource_prefix=None,
                  identity_admin_endpoint_type='public', identity_uri=None):
-        """Creates credentials dynamically for tests
-
-        A credential provider that, based on an initial set of
-        admin credentials, creates new credentials on the fly for
-        tests to use and then discard.
-
-        :param str identity_version: identity API version to use `v2` or `v3`
-        :param str admin_role: name of the admin role added to admin users
-        :param str name: names of dynamic resources include this parameter
-                         when specified
-        :param str credentials_domain: name of the domain where the users
-                                       are created. If not defined, the project
-                                       domain from admin_credentials is used
-        :param dict network_resources: network resources to be created for
-                                       the created credentials
-        :param Credentials admin_creds: initial admin credentials
-        :param bool identity_admin_domain_scope: Set to true if admin should be
-                                                 scoped to the domain. By
-                                                 default this is False and the
-                                                 admin role is scoped to the
-                                                 project.
-        :param str identity_admin_role: The role name to use for admin
-        :param list extra_roles: A list of strings for extra roles that should
-                                 be assigned to all created users
-        :param bool neutron_available: Whether we are running in an environemnt
-                                       with neutron
-        :param bool create_networks: Whether dynamic project networks should be
-                                     created or not
-        :param project_network_cidr: The CIDR to use for created project
-                                     networks
-        :param project_network_mask_bits: The network mask bits to use for
-                                          created project networks
-        :param public_network_id: The id for the public network to use
-        :param identity_admin_endpoint_type: The endpoint type for identity
-                                             admin clients. Defaults to public.
-        :param identity_uri: Identity URI of the target cloud
-        """
         super(DynamicCredentialProvider, self).__init__(
             identity_version=identity_version, identity_uri=identity_uri,
             admin_role=admin_role, name=name,
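A short usage sketch for the provider whose docstring is being promoted; the URI, user and password values are placeholders (the canonical example lives in doc/source/library/credential_providers.rst):

    from tempest.lib import auth
    from tempest.lib.common import dynamic_creds

    # Placeholder admin credentials; any existing admin account works
    admin_creds = auth.get_credentials(
        auth_url='https://keystone.example.com/v3',
        fill_in=False, identity_version='v3',
        username='admin', password='secret', project_name='admin',
        user_domain_name='Default', project_domain_name='Default')

    provider = dynamic_creds.DynamicCredentialProvider(
        identity_version='v3',
        admin_role='admin',
        admin_creds=admin_creds,
        identity_uri='https://keystone.example.com/v3')

    primary = provider.get_primary_creds()   # fresh project/user for the test
    # ... run the test, then discard everything the provider created
    provider.clear_creds()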
diff --git a/tempest/lib/common/preprov_creds.py b/tempest/lib/common/preprov_creds.py
index cd3a10e..83db513 100644
--- a/tempest/lib/common/preprov_creds.py
+++ b/tempest/lib/common/preprov_creds.py
@@ -41,6 +41,35 @@
 
 
 class PreProvisionedCredentialProvider(cred_provider.CredentialProvider):
+    """Credentials provider using pre-provisioned accounts
+
+    This credentials provider loads the details of pre-provisioned
+    accounts from a YAML file, in the format specified by
+    ``etc/accounts.yaml.sample``. It locks accounts while in use, using the
+    external locking mechanism, allowing multiple python processes to
+    share a single account file and thus run tests in parallel.
+
+    The accounts_lock_dir must be generated using `lockutils.get_lock_path`
+    from the oslo.concurrency library. For instance::
+
+        accounts_lock_dir = os.path.join(lockutils.get_lock_path(CONF),
+                                         'test_accounts')
+
+    Role names for object storage are optional as long as the
+    `operator` and `reseller_admin` credential types are not used in the
+    accounts file.
+
+    :param identity_version: identity version of the credentials
+    :param admin_role: name of the admin role
+    :param test_accounts_file: path to the accounts YAML file
+    :param accounts_lock_dir: the directory for external locking
+    :param name: name of the hash file (optional)
+    :param credentials_domain: name of the domain credentials belong to
+                               (if no domain is configured)
+    :param object_storage_operator_role: name of the role
+    :param object_storage_reseller_admin_role: name of the role
+    :param identity_uri: Identity URI of the target cloud
+    """
 
     # Exclude from the hash fields specific to v2 or v3 identity API
     # i.e. only include user*, project*, tenant* and password
@@ -51,35 +80,6 @@
                  accounts_lock_dir, name=None, credentials_domain=None,
                  admin_role=None, object_storage_operator_role=None,
                  object_storage_reseller_admin_role=None, identity_uri=None):
-        """Credentials provider using pre-provisioned accounts
-
-        This credentials provider loads the details of pre-provisioned
-        accounts from a YAML file, in the format specified by
-        `etc/accounts.yaml.sample`. It locks accounts while in use, using the
-        external locking mechanism, allowing for multiple python processes
-        to share a single account file, and thus running tests in parallel.
-
-        The accounts_lock_dir must be generated using `lockutils.get_lock_path`
-        from the oslo.concurrency library. For instance:
-
-            accounts_lock_dir = os.path.join(lockutils.get_lock_path(CONF),
-                                             'test_accounts')
-
-        Role names for object storage are optional as long as the
-        `operator` and `reseller_admin` credential types are not used in the
-        accounts file.
-
-        :param identity_version: identity version of the credentials
-        :param admin_role: name of the admin role
-        :param test_accounts_file: path to the accounts YAML file
-        :param accounts_lock_dir: the directory for external locking
-        :param name: name of the hash file (optional)
-        :param credentials_domain: name of the domain credentials belong to
-                                   (if no domain is configured)
-        :param object_storage_operator_role: name of the role
-        :param object_storage_reseller_admin_role: name of the role
-        :param identity_uri: Identity URI of the target cloud
-        """
         super(PreProvisionedCredentialProvider, self).__init__(
             identity_version=identity_version, name=name,
             admin_role=admin_role, credentials_domain=credentials_domain,
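And a matching sketch for the pre-provisioned provider; the file path, role names and the CONF object are placeholders, while the lock-dir line mirrors the docstring above:

    import os

    from oslo_concurrency import lockutils

    from tempest.lib.common import preprov_creds

    # CONF is assumed to be the already-loaded oslo.config object, as in
    # the docstring example
    accounts_lock_dir = os.path.join(lockutils.get_lock_path(CONF),
                                     'test_accounts')

    provider = preprov_creds.PreProvisionedCredentialProvider(
        identity_version='v3',
        admin_role='admin',
        test_accounts_file='etc/accounts.yaml',
        accounts_lock_dir=accounts_lock_dir,
        object_storage_operator_role='Member',
        object_storage_reseller_admin_role='ResellerAdmin',
        identity_uri='https://keystone.example.com/v3')

    creds = provider.get_primary_creds()   # locks one account from the file
    # ... use the account, then release the lock
    provider.clear_creds()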