Merge "Build pdf doc"
diff --git a/ironic_tempest_plugin/common/waiters.py b/ironic_tempest_plugin/common/waiters.py
index 08e0941..554e5f8 100644
--- a/ironic_tempest_plugin/common/waiters.py
+++ b/ironic_tempest_plugin/common/waiters.py
@@ -45,7 +45,7 @@
 
 
 def wait_for_bm_node_status(client, node_id, attr, status, timeout=None,
-                            interval=None):
+                            interval=None, abort_on_error_state=False):
     """Waits for a baremetal node attribute to reach given status.
 
     :param client: an instance of tempest plugin BaremetalClient.
@@ -56,6 +56,8 @@
         Defaults to client.build_timeout.
     :param interval: an interval between show_node calls for status check.
         Defaults to client.build_interval.
+    :param abort_on_error_state: whether to abort waiting and raise if the
+        node reaches a failure state (provision_state ending in ' failed').
 
     The client should have a show_node(node_id) method to get the node.
     """
@@ -69,6 +71,14 @@
         node = utils.get_node(client, node_id=node_id)
         if node[attr] in status:
             return True
+        elif (abort_on_error_state
+              and node['provision_state'].endswith(' failed')):
+            raise lib_exc.TempestException(
+                'Node %(node)s reached failure state %(state)s while waiting '
+                'for %(attr)s=%(expected)s. Error: %(error)s' %
+                {'node': node_id, 'state': node['provision_state'],
+                 'attr': attr, 'expected': status,
+                 'error': node.get('last_error')})
         return False
 
     if not test_utils.call_until_true(is_attr_in_status, timeout,
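A minimal usage sketch of the new flag (hedged: `client` is assumed to be a
configured BaremetalClient and `node_uuid` a known node UUID): with
abort_on_error_state=True the wait raises a TempestException as soon as the
node's provision_state ends in ' failed', instead of polling until the
timeout expires.

    from ironic_tempest_plugin.common import waiters

    # Fail fast: a node stuck in e.g. 'deploy failed' aborts the wait
    # immediately, surfacing the node's last_error in the exception.
    waiters.wait_for_bm_node_status(
        client, node_id=node_uuid, attr='provision_state',
        status='active', timeout=1800, interval=10,
        abort_on_error_state=True)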
diff --git a/ironic_tempest_plugin/config.py b/ironic_tempest_plugin/config.py
index 79b6d18..d5cdcfa 100644
--- a/ironic_tempest_plugin/config.py
+++ b/ironic_tempest_plugin/config.py
@@ -132,6 +132,9 @@
     cfg.ListOpt('enabled_boot_interfaces',
                 default=['fake', 'pxe'],
                 help="List of Ironic enabled boot interfaces."),
+    cfg.ListOpt('enabled_raid_interfaces',
+                default=['no-raid', 'agent'],
+                help="List of Ironic enabled RAID interfaces."),
     cfg.StrOpt('default_rescue_interface',
                help="Ironic default rescue interface."),
     cfg.IntOpt('adjusted_root_disk_size_gb',
@@ -147,6 +150,16 @@
     cfg.BoolOpt('ipxe_enabled',
                 default=True,
                 help="Defines if IPXE is enabled"),
+    cfg.BoolOpt('adoption',
+                # Defaults to False since it's a destructive operation AND it
+                # requires the plugin to be able to read ipmi_password.
+                default=False,
+                help="Defines if adoption is enabled"),
+    cfg.BoolOpt('software_raid',
+                default=False,
+                help="Defines if software RAID is enabled (available "
+                     "starting with Train). Requires at least two disks "
+                     "on testing nodes."),
 ]
 
 BaremetalIntrospectionGroup = [
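For reference, a tempest.conf fragment exercising the new options might look
like the sketch below (group names follow how the tests in this change read
them; values are illustrative):

    [baremetal]
    enabled_raid_interfaces = no-raid,agent

    [baremetal_feature_enabled]
    # Both default to False; adoption also needs a readable ipmi_password.
    adoption = True
    software_raid = True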
diff --git a/ironic_tempest_plugin/services/baremetal/v1/json/baremetal_client.py b/ironic_tempest_plugin/services/baremetal/v1/json/baremetal_client.py
index b326e0d..9f001b8 100644
--- a/ironic_tempest_plugin/services/baremetal/v1/json/baremetal_client.py
+++ b/ironic_tempest_plugin/services/baremetal/v1/json/baremetal_client.py
@@ -277,6 +277,11 @@
         return self._create_request('nodes', node)
 
     @base.handle_errors
+    def create_node_raw(self, **kwargs):
+        """Create a baremetal node from the given body."""
+        return self._create_request('nodes', kwargs)
+
+    @base.handle_errors
     def create_chassis(self, **kwargs):
         """Create a chassis with the specified parameters.
 
@@ -307,13 +312,12 @@
         :return: A tuple with the server response and the created port.
 
         """
-        port = {'extra': kwargs.get('extra', {'foo': 'bar'}),
-                'uuid': kwargs['uuid']}
+        port = {'extra': kwargs.get('extra', {'foo': 'bar'})}
 
         if node_id is not None:
             port['node_uuid'] = node_id
 
-        for key in ('address', 'physical_network', 'portgroup_uuid'):
+        for key in ('uuid', 'address', 'physical_network', 'portgroup_uuid'):
             if kwargs.get(key) is not None:
                 port[key] = kwargs[key]
 
@@ -505,6 +509,7 @@
                            'driver',
                            'bios_interface',
                            'deploy_interface',
+                           'raid_interface',
                            'rescue_interface',
                            'instance_uuid',
                            'resource_class',
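A sketch of the difference between the two creation paths (assuming `client`
is a BaremetalClient; field values illustrative): create_node builds the
request body from a fixed set of parameters, while create_node_raw posts the
given kwargs verbatim, which is what the adoption test below relies on to
clone an existing node's full record, interface fields included.

    # Interface overrides and pre-populated driver_info survive as-is.
    _, node = client.create_node_raw(
        driver='ipmi',
        driver_info={'ipmi_address': '10.0.0.5'},
        properties={'cpu_arch': 'x86_64'},
        deploy_interface='iscsi',
        raid_interface='agent')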
diff --git a/ironic_tempest_plugin/tests/api/admin/test_allocations.py b/ironic_tempest_plugin/tests/api/admin/test_allocations.py
index 972d864..b46c8af 100644
--- a/ironic_tempest_plugin/tests/api/admin/test_allocations.py
+++ b/ironic_tempest_plugin/tests/api/admin/test_allocations.py
@@ -124,15 +124,16 @@
 
     @decorators.idempotent_id('5e30452d-ee92-4342-82c1-5eea5e55c937')
     def test_delete_allocation_by_name(self):
-        _, body = self.create_allocation(self.resource_class, name='banana')
-        self.client.delete_allocation('banana')
-        self.assertRaises(lib_exc.NotFound, self.client.show_allocation,
-                          'banana')
+        name = 'alloc-%s' % uuidutils.generate_uuid()
+        _, body = self.create_allocation(self.resource_class, name=name)
+        self.client.delete_allocation(name)
+        self.assertRaises(lib_exc.NotFound, self.client.show_allocation, name)
 
     @decorators.idempotent_id('fbbc13bc-86da-438b-af01-d1bc1bab57d6')
     def test_show_by_name(self):
-        _, body = self.create_allocation(self.resource_class, name='banana')
-        _, loaded_body = self.client.show_allocation('banana')
+        name = 'alloc-%s' % uuidutils.generate_uuid()
+        _, body = self.create_allocation(self.resource_class, name=name)
+        _, loaded_body = self.client.show_allocation(name)
         # The allocation will likely have been processed by this time, so do
         # not compare the whole body.
         for field in ('name', 'uuid', 'resource_class'):
diff --git a/ironic_tempest_plugin/tests/scenario/baremetal_manager.py b/ironic_tempest_plugin/tests/scenario/baremetal_manager.py
index 6d7d55b..1788463 100644
--- a/ironic_tempest_plugin/tests/scenario/baremetal_manager.py
+++ b/ironic_tempest_plugin/tests/scenario/baremetal_manager.py
@@ -104,10 +104,12 @@
         cls.baremetal_client.list_nodes()
 
     @classmethod
-    def wait_provisioning_state(cls, node_id, state, timeout=10, interval=1):
+    def wait_provisioning_state(cls, node_id, state, timeout=10, interval=1,
+                                abort_on_error_state=True):
         ironic_waiters.wait_for_bm_node_status(
             cls.baremetal_client, node_id=node_id, attr='provision_state',
-            status=state, timeout=timeout, interval=interval)
+            status=state, timeout=timeout, interval=interval,
+            abort_on_error_state=abort_on_error_state)
 
     @classmethod
     def wait_power_state(cls, node_id, state):
diff --git a/ironic_tempest_plugin/tests/scenario/baremetal_standalone_manager.py b/ironic_tempest_plugin/tests/scenario/baremetal_standalone_manager.py
index 94780a8..bbd7782 100644
--- a/ironic_tempest_plugin/tests/scenario/baremetal_standalone_manager.py
+++ b/ironic_tempest_plugin/tests/scenario/baremetal_standalone_manager.py
@@ -101,7 +101,7 @@
 
         For a full list of available parameters, please refer to the official
         API reference:
-        http://developer.openstack.org/api-ref/networking/v2/index.html#create-port
+        https://docs.openstack.org/api-ref/network/v2/index.html#create-port
 
         :returns: server response body.
         """
@@ -424,6 +424,12 @@
     # set via a different test).
     boot_interface = None
 
+    # The RAID interface to use with the HW type. The raid_interface of the
+    # node used in the test will be set to this value. If set to None, the
+    # node will retain its existing raid_interface value (which may have been
+    # set via a different test).
+    raid_interface = None
+
     # Boolean value specify if image is wholedisk or not.
     wholedisk_image = None
 
@@ -476,6 +482,13 @@
                 "in the list of enabled boot interfaces %(enabled)s" % {
                     'iface': cls.boot_interface,
                     'enabled': CONF.baremetal.enabled_boot_interfaces})
+        if (cls.raid_interface and cls.raid_interface not in
+                CONF.baremetal.enabled_raid_interfaces):
+            raise cls.skipException(
+                "RAID interface %(iface)s required by test is not "
+                "in the list of enabled RAID interfaces %(enabled)s" % {
+                    'iface': cls.raid_interface,
+                    'enabled': CONF.baremetal.enabled_raid_interfaces})
         if not cls.wholedisk_image and CONF.baremetal.use_provision_network:
             raise cls.skipException(
                 'Partitioned images are not supported with multitenancy.')
@@ -512,6 +525,8 @@
             boot_kwargs['rescue_interface'] = cls.rescue_interface
         if cls.boot_interface:
             boot_kwargs['boot_interface'] = cls.boot_interface
+        if cls.raid_interface:
+            boot_kwargs['raid_interface'] = cls.raid_interface
 
         # just get an available node
         cls.node = cls.get_and_reserve_node()
@@ -540,3 +555,34 @@
         self.set_node_to_active(image_ref, image_checksum)
         self.assertTrue(self.ping_ip_address(self.node_ip,
                                              should_succeed=should_succeed))
+
+    def build_raid_and_verify_node(self, config=None, clean_steps=None):
+        config = config or self.raid_config
+        clean_steps = clean_steps or [
+            {
+                "interface": "raid",
+                "step": "delete_configuration"
+            },
+            # NOTE(dtantsur): software RAID building fails if any
+            # partitions exist on holder devices.
+            {
+                "interface": "deploy",
+                "step": "erase_devices_metadata"
+            },
+            {
+                "interface": "raid",
+                "step": "create_configuration"
+            }
+        ]
+
+        self.baremetal_client.set_node_raid_config(self.node['uuid'], config)
+        self.manual_cleaning(self.node, clean_steps=clean_steps)
+
+        # NOTE(dtantsur): this is not required, but it allows us to check that
+        # the RAID device was in fact created and is used for deployment.
+        patch = [{'path': '/properties/root_device',
+                  'op': 'add', 'value': {'name': '/dev/md0'}}]
+        self.update_node(self.node['uuid'], patch=patch)
+        # NOTE(dtantsur): apparently cirros cannot boot from md devices :(
+        # So we only move the node to active (verifying deployment).
+        self.set_node_to_active()
diff --git a/ironic_tempest_plugin/tests/scenario/ironic_standalone/test_adoption.py b/ironic_tempest_plugin/tests/scenario/ironic_standalone/test_adoption.py
new file mode 100644
index 0000000..63e5f5a
--- /dev/null
+++ b/ironic_tempest_plugin/tests/scenario/ironic_standalone/test_adoption.py
@@ -0,0 +1,107 @@
+#
+# Copyright 2017 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_log import log as logging
+from tempest.common import utils
+from tempest import config
+from tempest.lib import decorators
+
+from ironic_tempest_plugin.tests.scenario import \
+    baremetal_standalone_manager as bsm
+
+LOG = logging.getLogger(__name__)
+CONF = config.CONF
+
+
+class BaremetalAdoptionIpmiWholedisk(
+        bsm.BaremetalStandaloneScenarioTest):
+
+    driver = 'ipmi'
+    image_ref = CONF.baremetal.whole_disk_image_ref
+    wholedisk_image = True
+    deploy_interface = 'iscsi'
+    # 1.37 is required to be able to copy traits
+    api_microversion = '1.37'
+
+    @classmethod
+    def skip_checks(cls):
+        super(BaremetalAdoptionIpmiWholedisk, cls).skip_checks()
+        if not CONF.baremetal_feature_enabled.adoption:
+            skip_msg = ("Adoption feature is not enabled")
+            raise cls.skipException(skip_msg)
+
+    @classmethod
+    def recreate_node(cls):
+        # Now record all up-to-date node information for creation
+        cls.node = cls.get_node(cls.node['uuid'])
+        body = {'driver_info': cls.node['driver_info'],
+                'instance_info': cls.node['instance_info'],
+                'driver': cls.node['driver'],
+                'properties': cls.node['properties']}
+        if set(body['driver_info'].get('ipmi_password') or '') == {'*'}:
+            # A hack to enable devstack testing without exposing secrets:
+            # the API masks the password, so use the hardcoded devstack value.
+            body['driver_info']['ipmi_password'] = 'password'
+        # configdrive is hidden and must be supplied again on rebuild
+        body['instance_info'].pop('configdrive', None)
+        for key, value in cls.node.items():
+            if key.endswith('_interface') and value:
+                body[key] = value
+        traits = cls.node['traits']
+        _, vifs = cls.baremetal_client.vif_list(cls.node['uuid'])
+        _, ports = cls.baremetal_client.list_ports(node=cls.node['uuid'])
+
+        # Maintenance mode allows deleting the node while it is active
+        cls.update_node(cls.node['uuid'], [{'op': 'replace',
+                                            'path': '/maintenance',
+                                            'value': True}])
+        cls.baremetal_client.delete_node(cls.node['uuid'])
+
+        # Now create an identical node and attach VIFs
+        _, cls.node = cls.baremetal_client.create_node_raw(**body)
+        if traits:
+            cls.baremetal_client.set_node_traits(cls.node['uuid'], traits)
+        for port in ports['ports']:
+            cls.baremetal_client.create_port(cls.node['uuid'],
+                                             address=port['address'])
+
+        cls.set_node_provision_state(cls.node['uuid'], 'manage')
+        cls.wait_provisioning_state(cls.node['uuid'], 'manageable',
+                                    timeout=300, interval=5)
+
+        for vif in vifs['vifs']:
+            cls.vif_attach(cls.node['uuid'], vif['id'])
+
+        return cls.node
+
+    @decorators.idempotent_id('2f51890e-20d9-43ef-af39-41b335ec066b')
+    @utils.services('image', 'network')
+    def test_adoption(self):
+        # First, prepare a deployed node.
+        self.boot_node()
+
+        # Then re-create it with the same parameters.
+        self.recreate_node()
+
+        # Now adoption!
+        self.set_node_provision_state(self.node['uuid'], 'adopt')
+        self.wait_provisioning_state(self.node['uuid'], 'active',
+                                     timeout=300, interval=5)
+
+        # Try to rebuild the server to make sure we can manage it now.
+        self.set_node_provision_state(self.node['uuid'], 'rebuild')
+        self.wait_provisioning_state(self.node['uuid'], 'active',
+                                     timeout=CONF.baremetal.active_timeout,
+                                     interval=30)
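On the masking check in recreate_node above: the bare metal API masks secret
fields when showing a node, so the comparison reads roughly as follows
(illustrative REPL sketch):

    >>> node['driver_info']['ipmi_password']
    '******'
    >>> set('******') == {'*'}   # every character is the mask character
    True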
diff --git a/ironic_tempest_plugin/tests/scenario/ironic_standalone/test_cleaning.py b/ironic_tempest_plugin/tests/scenario/ironic_standalone/test_cleaning.py
index e30b9ef..7df98ce 100644
--- a/ironic_tempest_plugin/tests/scenario/ironic_standalone/test_cleaning.py
+++ b/ironic_tempest_plugin/tests/scenario/ironic_standalone/test_cleaning.py
@@ -69,3 +69,66 @@
     @utils.services('image', 'network')
     def test_manual_cleaning(self):
         self.check_manual_partition_cleaning(self.node)
+
+
+class SoftwareRaidIscsi(bsm.BaremetalStandaloneScenarioTest):
+
+    driver = 'ipmi'
+    image_ref = CONF.baremetal.whole_disk_image_ref
+    wholedisk_image = True
+    deploy_interface = 'iscsi'
+    raid_interface = 'agent'
+    api_microversion = '1.31'
+
+    raid_config = {
+        "logical_disks": [
+            {
+                "size_gb": "MAX",
+                "raid_level": "1",
+                "controller": "software"
+            },
+        ]
+    }
+
+    @classmethod
+    def skip_checks(cls):
+        super(SoftwareRaidIscsi, cls).skip_checks()
+        if not CONF.baremetal_feature_enabled.software_raid:
+            raise cls.skipException("Software RAID feature is not enabled")
+
+    @decorators.idempotent_id('7ecba4f7-98b8-4ea1-b95e-3ec399f46798')
+    @utils.services('image', 'network')
+    def test_software_raid(self):
+        self.build_raid_and_verify_node()
+
+
+class SoftwareRaidDirect(bsm.BaremetalStandaloneScenarioTest):
+
+    driver = 'ipmi'
+    image_ref = CONF.baremetal.whole_disk_image_ref
+    wholedisk_image = True
+    deploy_interface = 'direct'
+    raid_interface = 'agent'
+    api_microversion = '1.31'
+
+    # TODO(dtantsur): more complex layout in this job
+    raid_config = {
+        "logical_disks": [
+            {
+                "size_gb": "MAX",
+                "raid_level": "1",
+                "controller": "software"
+            },
+        ]
+    }
+
+    @classmethod
+    def skip_checks(cls):
+        super(SoftwareRaidDirect, cls).skip_checks()
+        if not CONF.baremetal_feature_enabled.software_raid:
+            raise cls.skipException("Software RAID feature is not enabled")
+
+    @decorators.idempotent_id('125361ac-0eb3-4d79-8be2-a91936aa3f46')
+    @utils.services('image', 'network')
+    def test_software_raid(self):
+        self.build_raid_and_verify_node()
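Regarding the TODO above: a more complex layout for the direct-deploy job
might look like this sketch (illustrative only; software RAID as of Train
supports at least RAID levels 0 and 1):

    raid_config = {
        "logical_disks": [
            # Fixed-size RAID-1 volume for the root filesystem.
            {"size_gb": 10, "raid_level": "1", "controller": "software"},
            # Remaining space as a RAID-0 volume.
            {"size_gb": "MAX", "raid_level": "0", "controller": "software"},
        ]
    }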
diff --git a/ironic_tempest_plugin/tests/scenario/test_baremetal_single_tenant.py b/ironic_tempest_plugin/tests/scenario/test_baremetal_single_tenant.py
new file mode 100644
index 0000000..3715608
--- /dev/null
+++ b/ironic_tempest_plugin/tests/scenario/test_baremetal_single_tenant.py
@@ -0,0 +1,173 @@
+#
+# Copyright 2019 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.common import utils
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+
+from ironic_tempest_plugin import manager
+from ironic_tempest_plugin.tests.scenario import baremetal_manager
+
+CONF = config.CONF
+
+
+class BaremetalSingleTenant(baremetal_manager.BaremetalScenarioTest,
+                            manager.NetworkScenarioTest):
+    """Check "No L2 isolation" of baremetal and VM instances of same tenant:
+
+    * Create a keypair, network, subnet and router for the primary tenant
+    * Boot 2 instances in the same tenant's network using the keypair
+    * Associate floating IPs with both instances
+    * Verify there is L3 connectivity between instances of the same tenant
+    * Verify connectivity between the instances' floating IPs
+    * Delete both instances
+    """
+
+    credentials = ['primary', 'alt', 'admin']
+
+    @classmethod
+    def skip_checks(cls):
+        super(BaremetalSingleTenant, cls).skip_checks()
+        if not CONF.baremetal.use_provision_network:
+            msg = 'Ironic/Neutron tenant isolation is not configured.'
+            raise cls.skipException(msg)
+        if (CONF.baremetal.available_nodes is not None and
+                CONF.baremetal.available_nodes < 2):
+            msg = ('Not enough baremetal nodes, %d configured, test requires '
+                   'a minimum of 2') % CONF.baremetal.available_nodes
+            raise cls.skipException(msg)
+
+    def create_tenant_network(self, clients, tenant_cidr):
+        network = self._create_network(
+            networks_client=clients.networks_client,
+            tenant_id=clients.credentials.tenant_id)
+        router = self._get_router(
+            client=clients.routers_client,
+            tenant_id=clients.credentials.tenant_id)
+
+        result = clients.subnets_client.create_subnet(
+            name=data_utils.rand_name('subnet'),
+            network_id=network['id'],
+            tenant_id=clients.credentials.tenant_id,
+            ip_version=CONF.validation.ip_version_for_ssh,
+            cidr=tenant_cidr)
+        subnet = result['subnet']
+        clients.routers_client.add_router_interface(router['id'],
+                                                    subnet_id=subnet['id'])
+        self.addCleanup(clients.subnets_client.delete_subnet, subnet['id'])
+        self.addCleanup(clients.routers_client.remove_router_interface,
+                        router['id'], subnet_id=subnet['id'])
+
+        return network, subnet, router
+
+    def verify_l3_connectivity(self, source_ip, private_key,
+                               destination_ip, conn_expected=True):
+        remote = self.get_remote_client(source_ip, private_key=private_key)
+        remote.validate_authentication()
+
+        cmd = 'ping %s -c4 -w4 || exit 0' % destination_ip
+        success_substring = "64 bytes from %s" % destination_ip
+        output = remote.exec_command(cmd)
+        if conn_expected:
+            self.assertIn(success_substring, output)
+        else:
+            self.assertNotIn(success_substring, output)
+
+    def tenancy_check(self, use_vm=False):
+
+        tenant_cidr = '10.0.100.0/24'
+
+        keypair = self.create_keypair()
+        network, subnet, router = self.create_tenant_network(
+            self.os_primary, tenant_cidr)
+
+        instance1, node1 = self.boot_instance(
+            clients=self.os_primary,
+            keypair=keypair,
+            net_id=network['id'],
+        )
+
+        fixed_ip1 = instance1['addresses'][network['name']][0]['addr']
+        floating_ip1 = self.create_floating_ip(
+            instance1,
+        )['floating_ip_address']
+        self.check_vm_connectivity(ip_address=floating_ip1,
+                                   private_key=keypair['private_key'])
+
+        if use_vm:
+            # Create VM on compute node
+            instance2 = self.create_server(
+                clients=self.os_primary,
+                key_name=keypair['name'],
+                flavor=CONF.compute.flavor_ref,
+                networks=[{'uuid': network['id']}]
+            )
+        else:
+            # Create BM
+            instance2, node2 = self.boot_instance(
+                keypair=keypair,
+                clients=self.os_primary,
+                net_id=network['id'],
+            )
+        fixed_ip2 = instance2['addresses'][network['name']][0]['addr']
+        floating_ip2 = self.create_floating_ip(
+            instance2,
+            client=self.os_primary.floating_ips_client
+        )['floating_ip_address']
+        self.check_vm_connectivity(
+            ip_address=floating_ip2,
+            private_key=keypair['private_key'])
+
+        self.verify_l3_connectivity(
+            floating_ip2,
+            keypair['private_key'],
+            fixed_ip1,
+            conn_expected=True
+        )
+        self.verify_l3_connectivity(
+            floating_ip1,
+            keypair['private_key'],
+            fixed_ip2,
+            conn_expected=True
+        )
+        self.verify_l3_connectivity(
+            floating_ip1,
+            keypair['private_key'],
+            floating_ip2,
+            conn_expected=True
+        )
+        self.terminate_instance(
+            instance=instance2,
+            servers_client=self.os_primary.servers_client)
+        self.terminate_instance(instance=instance1)
+
+    @decorators.idempotent_id('8fe15552-3788-11e9-b599-74e5f9e2a801')
+    @utils.services('compute', 'image', 'network')
+    def test_baremetal_single_tenant(self):
+        if CONF.service_available.nova:
+            self.skipTest('Compute service Nova is running; skipping the '
+                          'BM-to-BM test to save execution time')
+        self.tenancy_check()
+
+    @decorators.idempotent_id('90b3b6be-3788-11e9-b599-74e5f9e2a801')
+    @utils.services('compute', 'image', 'network')
+    def test_baremetal_vm_single_tenant(self):
+        if not CONF.service_available.nova:
+            self.skipTest('Compute service Nova is disabled; a VM is '
+                          'required to run this test')
+        self.tenancy_check(use_vm=True)
diff --git a/tox.ini b/tox.ini
index c81582c..048d70b 100644
--- a/tox.ini
+++ b/tox.ini
@@ -5,11 +5,12 @@
 
 [testenv]
 usedevelop = True
-install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/master/upper-constraints.txt} {opts} {packages}
+install_command = pip install {opts} {packages}
 setenv =
    VIRTUAL_ENV={envdir}
    PYTHONWARNINGS=default::DeprecationWarning
-deps = -r{toxinidir}/test-requirements.txt
+deps = -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
+       -r{toxinidir}/test-requirements.txt
 commands = python setup.py test --slowest --testr-args='{posargs}'
 
 [testenv:pep8]
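Since the constraints file is now applied via deps rather than
install_command, it can still be overridden the usual way when invoking tox,
e.g.:

    UPPER_CONSTRAINTS_FILE=/path/to/upper-constraints.txt tox -e pep8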