Merge "Add tests for compute v2.10 microversion"
diff --git a/HACKING.rst b/HACKING.rst
index 0962f80..44519d4 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -18,6 +18,8 @@
 - [T109] Cannot use testtools.skip decorator; instead use
          decorators.skip_because from tempest-lib
 - [T110] Check that service client names of GET should be consistent
+- [T111] Check that service client names of DELETE should be consistent
+- [T112] Check that tempest.lib should not import local tempest code
 - [N322] Method's default argument shouldn't be mutable
 
 Test Data/Configuration
diff --git a/README.rst b/README.rst
index c859ddd..7da83cd 100644
--- a/README.rst
+++ b/README.rst
@@ -1,6 +1,14 @@
 Tempest - The OpenStack Integration Test Suite
 ==============================================
 
+.. image:: https://img.shields.io/pypi/v/tempest.svg
+    :target: https://pypi.python.org/pypi/tempest/
+    :alt: Latest Version
+
+.. image:: https://img.shields.io/pypi/dm/tempest.svg
+    :target: https://pypi.python.org/pypi/tempest/
+    :alt: Downloads
+
 This is a set of integration tests to be run against a live OpenStack
 cluster. Tempest has batteries of tests for OpenStack API validation,
 Scenarios, and other specific tests useful in validating an OpenStack
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 524c0fa..367be41 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -246,6 +246,21 @@
 run. This section covers the different methods of configuring Tempest to provide
 a network when creating servers.
 
+The ``validation`` group gathers all the connection options to remotely access the
+created servers.
+
+To enable remote access to servers, at least the following three options need to
+be set (see the example below):
+
+* The ``run_validation`` option needs to be set to ``true``.
+
+* The ``connect_method`` option. Two connect methods are available: ``fixed`` and
+  ``floating``, the latter being the default.
+
+* The ``auth_method`` option. Currently, only authentication by keypair is
+  available.
+
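+A minimal configuration enabling remote server validation could then look like
+this (the values shown are only illustrative)::
+
+    [validation]
+    run_validation = true
+    connect_method = floating
+    auth_method = keypair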
+
 Fixed Network Name
 """"""""""""""""""
 This is the simplest method of specifying how networks should be used. You can
@@ -296,7 +311,7 @@
 With Dynamic Credentials
 """"""""""""""""""""""""
 With dynamic credentials enabled and using nova-network, your only option for
-configuration is to either set a fixed network name or not.  However, in most
+configuration is to either set a fixed network name or not. However, in most
 cases it shouldn't matter because nova-network should have no problem booting a
 server with multiple networks. If this is not the case for your cloud then using
 an accounts file is recommended because it provides the necessary flexibility to
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 17def1c..5f357b2 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -12,6 +12,7 @@
    REVIEWING
    plugin
    library
+   microversion_testing
 
 ------------
 Field Guides
diff --git a/doc/source/microversion_testing.rst b/doc/source/microversion_testing.rst
new file mode 100644
index 0000000..63ec04b
--- /dev/null
+++ b/doc/source/microversion_testing.rst
@@ -0,0 +1,207 @@
+===================================
+How To Implement Microversion Tests
+===================================
+
+Tempest provides stable interfaces for testing API Microversions.
+For details, see: `API Microversion testing Framework`_.
+This document explains how to implement Microversion tests using those
+interfaces.
+
+.. _API Microversion testing Framework: http://docs.openstack.org/developer/tempest/library/api_microversion_testing.html
+
+
+Configuration options for Microversion
+""""""""""""""""""""""""""""""""""""""
+
+* Add configuration options for specifying the test target Microversions.
+  We need to specify the test target Microversions because the supported
+  Microversions may differ between OpenStack clouds. To run tests against
+  multiple Microversions in a single Tempest run, the configuration options
+  should represent the range of test target Microversions.
+  The new configuration options are:
+
+  * min_microversion
+  * max_microversion
+
+  These should be defined under the respective section of each service.
+  For example::
+
+      [compute]
+      min_microversion = None
+      max_microversion = latest
+
+
+How To Implement Microversion Tests
+"""""""""""""""""""""""""""""""""""
+
+Step1: Add skip logic based on configured Microversion range
+''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
+
+Add logic to skip the tests based on the test class's Microversion range and
+the configured Microversion range.
+The api_version_utils.check_skip_with_microversion function can be used
+to automatically skip the tests which do not fall within the configured
+Microversion range.
+For example::
+
+    class BaseTestCase1(api_version_utils.BaseMicroversionTest):
+
+        [..]
+        @classmethod
+        def skip_checks(cls):
+            super(BaseTestCase1, cls).skip_checks()
+            api_version_utils.check_skip_with_microversion(
+                cls.min_microversion,
+                cls.max_microversion,
+                CONF.compute.min_microversion,
+                CONF.compute.max_microversion)
+
+Skip logic can be added in the tests' base class or in any specific test class,
+depending on the test class structure.
+
+Step2: Select API request Microversion
+''''''''''''''''''''''''''''''''''''''''
+
+Select the appropriate Microversion to be sent with API requests.
+The api_version_utils.select_request_microversion function can be used
+to select the appropriate Microversion which will be used for API requests.
+For example::
+
+    @classmethod
+    def resource_setup(cls):
+        super(BaseTestCase1, cls).resource_setup()
+        cls.request_microversion = (
+            api_version_utils.select_request_microversion(
+                cls.min_microversion,
+                CONF.compute.min_microversion))
+
+
+Step3: Set Microversion on Service Clients
+''''''''''''''''''''''''''''''''''''''''''
+
+The Microversion selected by the test class in the previous step needs to be set
+on the service clients so that APIs can be requested with the selected
+Microversion.
+
+The Microversion can be defined as a global variable on the service clients,
+which can be set using a fixture.
+Also, the Microversion header name needs to be defined on the service clients;
+it should be constant because it is not supposed to be changed by the project,
+as per the API contract.
+For example::
+
+      COMPUTE_MICROVERSION = None
+
+      class BaseClient1(rest_client.RestClient):
+          api_microversion_header_name = 'X-OpenStack-Nova-API-Version'
+
+Now the test class can set the selected Microversion on the required service
+clients using a fixture, which takes care of resetting it once the test is
+completed.
+For example::
+
+    def setUp(self):
+        super(BaseTestCase1, self).setUp()
+        self.useFixture(api_microversion_fixture.APIMicroversionFixture(
+            self.request_microversion))
+
+Service clients need to set the Microversion in the API request header, which
+can be done by overriding the get_headers() method of rest_client.
+For example::
+
+      COMPUTE_MICROVERSION = None
+
+      class BaseClient1(rest_client.RestClient):
+          api_microversion_header_name = 'X-OpenStack-Nova-API-Version'
+
+          def get_headers(self):
+              headers = super(BaseClient1, self).get_headers()
+              if COMPUTE_MICROVERSION:
+                  headers[self.api_microversion_header_name] = COMPUTE_MICROVERSION
+              return headers
+
+
+Step4: Separate Test classes for each Microversion
+''''''''''''''''''''''''''''''''''''''''''''''''''
+
+This is the last step in implementing a Microversion test class.
+
+For any Microversion tests, we basically need to implement a
+separate test class. In addition, each test class defines its
+Microversion range with class variables like min_microversion
+and max_microversion. Tests will be valid for that defined range.
+If that range is outside the configured Microversion range, the tests
+will be skipped.
+
+*NOTE: Microversion testing is supported at the test class level, not at the
+individual test case level.*
+For example:
+
+The below test is applicable for Microversions from 2.2 to 2.9::
+
+    class BaseTestCase1(api_version_utils.BaseMicroversionTest,
+                        tempest.test.BaseTestCase):
+
+        [..]
+
+
+    class Test1(BaseTestCase1):
+        min_microversion = '2.2'
+        max_microversion = '2.9'
+
+        [..]
+
+The below test is applicable for Microversions from 2.10 to latest::
+
+    class Test2(BaseTestCase1):
+        min_microversion = '2.10'
+        max_microversion = 'latest'
+
+        [..]
+
+
+Notes about Compute Microversion Tests
+""""""""""""""""""""""""""""""""""""""
+
+Some of the compute Microversion tests have already been implemented
+with the Microversion testing framework, so for further tests only
+step 4 is needed.
+
+Along with that, the JSON response schema might need versioning.
+
+Compute service clients strictly validate the response against the defined JSON
+schema and do not allow additional elements in the response.
+So if a Microversion changed the API response, then the schema needs to be
+versioned. A new JSON schema file needs to be defined with the new response
+attributes, and the service client methods will select the schema based on the
+requested Microversion.
+
+If Microversion tests are implemented out of sequence (for example, v2.20 tests
+are added while the previous Microversion tests are not yet added), the schema
+might still need to be versioned for older Microversions if they changed
+the response.
+This is because a Nova Microversion includes the behavior of all previous
+Microversions.
+
+For example:
+    When implementing the v2.20 Microversion tests before v2.9 and v2.19, a
+    v2.20 API request will respond with the latest behavior of Nova up to v2.20.
+    Since the server response changed in v2.9 and v2.19, the response schema
+    needs to be versioned accordingly.
+
+That can be done by using the get_schema method in the below module:
+
+The base_compute_client module
+''''''''''''''''''''''''''''''
+
+.. automodule:: tempest.lib.services.compute.base_compute_client
+   :members:
+
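+As a rough sketch of that pattern (the schema module paths, the client class and
+the exact shape of the version mapping shown here are illustrative, not a
+definitive implementation), a compute service client could map Microversion
+ranges to schema modules and let get_schema pick the right one::
+
+    from oslo_serialization import jsonutils as json
+
+    from tempest.lib.api_schema.response.compute.v2_1 import servers as schemav21
+    from tempest.lib.api_schema.response.compute.v2_9 import servers as schemav29
+    from tempest.lib.common import rest_client
+    from tempest.lib.services.compute import base_compute_client
+
+
+    class ServersClient(base_compute_client.BaseComputeClient):
+        # Ordered mapping of Microversion ranges to their schema modules.
+        schema_versions_info = [
+            {'min': None, 'max': '2.8', 'schema': schemav21},
+            {'min': '2.9', 'max': None, 'schema': schemav29}]
+
+        def show_server(self, server_id):
+            resp, body = self.get("servers/%s" % server_id)
+            body = json.loads(body)
+            # get_schema() returns the schema module matching the Microversion
+            # requested by the test, so validation tracks the negotiated version.
+            schema = self.get_schema(self.schema_versions_info)
+            self.validate_response(schema.get_server, resp, body)
+            return rest_client.ResponseBody(resp, body)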
+
+Microversion tests implemented in Tempest
+"""""""""""""""""""""""""""""""""""""""""
+
+* Compute
+
+  * `2.1`_
+
+  .. _2.1: http://docs.openstack.org/developer/nova/api_microversion_history.html#id1
+
+  * `2.2`_
+
+  .. _2.2: http://docs.openstack.org/developer/nova/api_microversion_history.html#id2
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index 584540b..5bd9176 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -5,6 +5,7 @@
  .. toctree::
     :maxdepth: 1
 
+    v10.0.0
     unreleased
 
 Indices and tables
diff --git a/releasenotes/source/v10.0.0.rst b/releasenotes/source/v10.0.0.rst
new file mode 100644
index 0000000..38ed2ef
--- /dev/null
+++ b/releasenotes/source/v10.0.0.rst
@@ -0,0 +1,6 @@
+=====================
+v10.0.0 Release Notes
+=====================
+
+.. release-notes:: 10.0.0 Release Notes
+   :version: 10.0.0
diff --git a/tempest/api/compute/admin/test_live_migration.py b/tempest/api/compute/admin/test_live_migration.py
index ead6db3..94635ff 100644
--- a/tempest/api/compute/admin/test_live_migration.py
+++ b/tempest/api/compute/admin/test_live_migration.py
@@ -81,7 +81,8 @@
         body = self.volumes_client.show_volume(volume_id)['volume']
         if body['status'] == 'in-use':
             self.servers_client.detach_volume(server_id, volume_id)
-            self.volumes_client.wait_for_volume_status(volume_id, 'available')
+            waiters.wait_for_volume_status(self.volumes_client,
+                                           volume_id, 'available')
         self.volumes_client.delete_volume(volume_id)
 
     def _test_live_migration(self, state='ACTIVE', volume_backed=False):
@@ -152,14 +153,15 @@
         volume = self.volumes_client.create_volume(
             display_name='test')['volume']
 
-        self.volumes_client.wait_for_volume_status(volume['id'],
-                                                   'available')
+        waiters.wait_for_volume_status(self.volumes_client,
+                                       volume['id'], 'available')
         self.addCleanup(self._volume_clean_up, server_id, volume['id'])
 
         # Attach the volume to the server
         self.servers_client.attach_volume(server_id, volumeId=volume['id'],
                                           device='/dev/xvdb')
-        self.volumes_client.wait_for_volume_status(volume['id'], 'in-use')
+        waiters.wait_for_volume_status(self.volumes_client,
+                                       volume['id'], 'in-use')
 
         self._migrate_server_to(server_id, target_host)
         waiters.wait_for_server_status(self.servers_client,
diff --git a/tempest/api/compute/floating_ips/test_floating_ips_actions.py b/tempest/api/compute/floating_ips/test_floating_ips_actions.py
index 85fd4ab..3d7f4f8 100644
--- a/tempest/api/compute/floating_ips/test_floating_ips_actions.py
+++ b/tempest/api/compute/floating_ips/test_floating_ips_actions.py
@@ -16,9 +16,12 @@
 from tempest.api.compute.floating_ips import base
 from tempest.common.utils import data_utils
 from tempest.common import waiters
+from tempest import config
 from tempest.lib import exceptions as lib_exc
 from tempest import test
 
+CONF = config.CONF
+
 
 class FloatingIPsTestJSON(base.BaseFloatingIPsTest):
     server_id = None
@@ -38,7 +41,8 @@
         server = cls.create_test_server(wait_until='ACTIVE')
         cls.server_id = server['id']
         # Floating IP creation
-        body = cls.client.create_floating_ip()['floating_ip']
+        body = cls.client.create_floating_ip(
+            pool=CONF.network.floating_network_name)['floating_ip']
         cls.floating_ip_id = body['id']
         cls.floating_ip = body['ip']
 
@@ -62,7 +66,8 @@
     def test_allocate_floating_ip(self):
         # Positive test:Allocation of a new floating IP to a project
         # should be successful
-        body = self.client.create_floating_ip()['floating_ip']
+        body = self.client.create_floating_ip(
+            pool=CONF.network.floating_network_name)['floating_ip']
         floating_ip_id_allocated = body['id']
         self.addCleanup(self.client.delete_floating_ip,
                         floating_ip_id_allocated)
@@ -78,7 +83,8 @@
         # Positive test:Deletion of valid floating IP from project
         # should be successful
         # Creating the floating IP that is to be deleted in this method
-        floating_ip_body = self.client.create_floating_ip()['floating_ip']
+        floating_ip_body = self.client.create_floating_ip(
+            pool=CONF.network.floating_network_name)['floating_ip']
         self.addCleanup(self._try_delete_floating_ip, floating_ip_body['id'])
         # Deleting the floating IP from the project
         self.client.delete_floating_ip(floating_ip_body['id'])
diff --git a/tempest/api/compute/floating_ips/test_list_floating_ips.py b/tempest/api/compute/floating_ips/test_list_floating_ips.py
index d003967..5738293 100644
--- a/tempest/api/compute/floating_ips/test_list_floating_ips.py
+++ b/tempest/api/compute/floating_ips/test_list_floating_ips.py
@@ -14,8 +14,11 @@
 #    under the License.
 
 from tempest.api.compute import base
+from tempest import config
 from tempest import test
 
+CONF = config.CONF
+
 
 class FloatingIPDetailsTestJSON(base.BaseV2ComputeTest):
 
@@ -31,7 +34,8 @@
         cls.floating_ip = []
         cls.floating_ip_id = []
         for i in range(3):
-            body = cls.client.create_floating_ip()['floating_ip']
+            body = cls.client.create_floating_ip(
+                pool=CONF.network.floating_network_name)['floating_ip']
             cls.floating_ip.append(body)
             cls.floating_ip_id.append(body['id'])
 
@@ -57,7 +61,8 @@
     def test_get_floating_ip_details(self):
         # Positive test:Should be able to GET the details of floatingIP
         # Creating a floating IP for which details are to be checked
-        body = self.client.create_floating_ip()['floating_ip']
+        body = self.client.create_floating_ip(
+            pool=CONF.network.floating_network_name)['floating_ip']
         floating_ip_id = body['id']
         self.addCleanup(self.client.delete_floating_ip,
                         floating_ip_id)
diff --git a/tempest/api/compute/servers/test_server_rescue.py b/tempest/api/compute/servers/test_server_rescue.py
index 12b824f..32faf7d 100644
--- a/tempest/api/compute/servers/test_server_rescue.py
+++ b/tempest/api/compute/servers/test_server_rescue.py
@@ -41,7 +41,8 @@
         super(ServerRescueTestJSON, cls).resource_setup()
 
         # Floating IP creation
-        body = cls.floating_ips_client.create_floating_ip()['floating_ip']
+        body = cls.floating_ips_client.create_floating_ip(
+            pool=CONF.network.floating_network_name)['floating_ip']
         cls.floating_ip_id = str(body['id']).strip()
         cls.floating_ip = str(body['ip']).strip()
 
diff --git a/tempest/api/compute/servers/test_servers.py b/tempest/api/compute/servers/test_servers.py
index 2f79d47..e91857a 100644
--- a/tempest/api/compute/servers/test_servers.py
+++ b/tempest/api/compute/servers/test_servers.py
@@ -80,7 +80,7 @@
         self.assertEqual(key_name, server['key_name'])
 
     def _update_server_name(self, server_id, status, prefix_name='server'):
-        # The server name should be changed to the the provided value
+        # The server name should be changed to the provided value
         new_name = data_utils.rand_name(prefix_name)
 
         # Update the server with a new name
@@ -95,7 +95,7 @@
 
     @test.idempotent_id('5e6ccff8-349d-4852-a8b3-055df7988dd2')
     def test_update_server_name(self):
-        # The server name should be changed to the the provided value
+        # The server name should be changed to the provided value
         server = self.create_test_server(wait_until='ACTIVE')
         # Update instance name with non-ASCII characters
         prefix_name = u'\u00CD\u00F1st\u00E1\u00F1c\u00E9'
@@ -103,7 +103,7 @@
 
     @test.idempotent_id('6ac19cb1-27a3-40ec-b350-810bdc04c08e')
     def test_update_server_name_in_stop_state(self):
-        # The server name should be changed to the the provided value
+        # The server name should be changed to the provided value
         server = self.create_test_server(wait_until='ACTIVE')
         self.client.stop_server(server['id'])
         waiters.wait_for_server_status(self.client, server['id'], 'SHUTOFF')
diff --git a/tempest/api/compute/test_quotas.py b/tempest/api/compute/test_quotas.py
index 43f4c97..122e7cc 100644
--- a/tempest/api/compute/test_quotas.py
+++ b/tempest/api/compute/test_quotas.py
@@ -79,8 +79,8 @@
     @test.idempotent_id('cd65d997-f7e4-4966-a7e9-d5001b674fdc')
     def test_compare_tenant_quotas_with_default_quotas(self):
         # Tenants are created with the default quota values
-        defualt_quota_set = \
+        default_quota_set = \
             self.client.show_default_quota_set(self.tenant_id)['quota_set']
         tenant_quota_set = (self.client.show_quota_set(self.tenant_id)
                             ['quota_set'])
-        self.assertEqual(defualt_quota_set, tenant_quota_set)
+        self.assertEqual(default_quota_set, tenant_quota_set)
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index 01a8e58..e9c8e30 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -52,7 +52,8 @@
     def _detach(self, server_id, volume_id):
         if self.attachment:
             self.servers_client.detach_volume(server_id, volume_id)
-            self.volumes_client.wait_for_volume_status(volume_id, 'available')
+            waiters.wait_for_volume_status(self.volumes_client,
+                                           volume_id, 'available')
 
     def _delete_volume(self):
         # Delete the created Volumes
@@ -77,15 +78,16 @@
         self.volume = self.volumes_client.create_volume(
             size=CONF.volume.volume_size, display_name='test')['volume']
         self.addCleanup(self._delete_volume)
-        self.volumes_client.wait_for_volume_status(self.volume['id'],
-                                                   'available')
+        waiters.wait_for_volume_status(self.volumes_client,
+                                       self.volume['id'], 'available')
 
         # Attach the volume to the server
         self.attachment = self.servers_client.attach_volume(
             self.server['id'],
             volumeId=self.volume['id'],
             device='/dev/%s' % self.device)['volumeAttachment']
-        self.volumes_client.wait_for_volume_status(self.volume['id'], 'in-use')
+        waiters.wait_for_volume_status(self.volumes_client,
+                                       self.volume['id'], 'in-use')
 
         self.addCleanup(self._detach, self.server['id'], self.volume['id'])
 
diff --git a/tempest/api/network/admin/test_dhcp_agent_scheduler.py b/tempest/api/network/admin/test_dhcp_agent_scheduler.py
index fcb6fce..d2ab237 100644
--- a/tempest/api/network/admin/test_dhcp_agent_scheduler.py
+++ b/tempest/api/network/admin/test_dhcp_agent_scheduler.py
@@ -36,12 +36,12 @@
 
     @test.idempotent_id('5032b1fe-eb42-4a64-8f3b-6e189d8b5c7d')
     def test_list_dhcp_agent_hosting_network(self):
-        self.admin_client.list_dhcp_agent_hosting_network(
+        self.admin_networks_client.list_dhcp_agents_on_hosting_network(
             self.network['id'])
 
     @test.idempotent_id('30c48f98-e45d-4ffb-841c-b8aad57c7587')
     def test_list_networks_hosted_by_one_dhcp(self):
-        body = self.admin_client.list_dhcp_agent_hosting_network(
+        body = self.admin_networks_client.list_dhcp_agents_on_hosting_network(
             self.network['id'])
         agents = body['agents']
         self.assertIsNotNone(agents)
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index d78fc04..71edf74 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -257,7 +257,7 @@
 
     @classmethod
     def delete_router(cls, router):
-        body = cls.client.list_router_interfaces(router['id'])
+        body = cls.ports_client.list_ports(device_id=router['id'])
         interfaces = body['ports']
         for i in interfaces:
             try:
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index 5ff23c6..e302284 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -131,12 +131,15 @@
         body = self.ports_client.show_port(self.port['id'])
         port = body['port']
         self.assertIn('id', port)
-        # TODO(Santosh)- This is a temporary workaround to compare create_port
-        # and show_port dict elements.Remove this once extra_dhcp_opts issue
-        # gets fixed in neutron.( bug - 1365341.)
+        # NOTE(rfolco): created_at and updated_at may get inconsistent values
+        # due to possible delay between POST request and resource creation.
+        # TODO(rfolco): Neutron Bug #1365341 is fixed, can remove the key
+        # extra_dhcp_opts in the O release (K/L gate jobs still need it).
         self.assertThat(self.port,
                         custom_matchers.MatchesDictExceptForKeys
-                        (port, excluded_keys=['extra_dhcp_opts']))
+                        (port, excluded_keys=['extra_dhcp_opts',
+                                              'created_at',
+                                              'updated_at']))
 
     @test.idempotent_id('45fcdaf2-dab0-4c13-ac6c-fcddfb579dbd')
     def test_show_port_fields(self):
diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py
index 60e6e6c..f19717e 100644
--- a/tempest/api/volume/admin/test_multi_backend.py
+++ b/tempest/api/volume/admin/test_multi_backend.py
@@ -13,6 +13,7 @@
 import six
 from tempest.api.volume import base
 from tempest.common.utils import data_utils
+from tempest.common import waiters
 from tempest import config
 from tempest import test
 
@@ -80,8 +81,8 @@
         else:
             self.volume_id_list_without_prefix.append(
                 self.volume['id'])
-        self.admin_volume_client.wait_for_volume_status(
-            self.volume['id'], 'available')
+        waiters.wait_for_volume_status(self.admin_volume_client,
+                                       self.volume['id'], 'available')
 
     @classmethod
     def resource_cleanup(cls):
diff --git a/tempest/api/volume/admin/test_snapshots_actions.py b/tempest/api/volume/admin/test_snapshots_actions.py
index f2bf613..26a5a45 100644
--- a/tempest/api/volume/admin/test_snapshots_actions.py
+++ b/tempest/api/volume/admin/test_snapshots_actions.py
@@ -15,6 +15,7 @@
 
 from tempest.api.volume import base
 from tempest.common.utils import data_utils
+from tempest.common import waiters
 from tempest import config
 from tempest import test
 
@@ -41,18 +42,17 @@
         vol_name = data_utils.rand_name(cls.__name__ + '-Volume')
         cls.name_field = cls.special_fields['name_field']
         params = {cls.name_field: vol_name}
-        cls.volume = \
-            cls.volumes_client.create_volume(**params)['volume']
-        cls.volumes_client.wait_for_volume_status(cls.volume['id'],
-                                                  'available')
+        cls.volume = cls.volumes_client.create_volume(**params)['volume']
+        waiters.wait_for_volume_status(cls.volumes_client,
+                                       cls.volume['id'], 'available')
 
         # Create a test shared snapshot for tests
         snap_name = data_utils.rand_name(cls.__name__ + '-Snapshot')
         params = {cls.name_field: snap_name}
         cls.snapshot = cls.client.create_snapshot(
             volume_id=cls.volume['id'], **params)['snapshot']
-        cls.client.wait_for_snapshot_status(cls.snapshot['id'],
-                                            'available')
+        waiters.wait_for_snapshot_status(cls.client,
+                                         cls.snapshot['id'], 'available')
 
     @classmethod
     def resource_cleanup(cls):
diff --git a/tempest/api/volume/admin/test_volume_types.py b/tempest/api/volume/admin/test_volume_types.py
index c032d9c..7202881 100644
--- a/tempest/api/volume/admin/test_volume_types.py
+++ b/tempest/api/volume/admin/test_volume_types.py
@@ -15,6 +15,7 @@
 
 from tempest.api.volume import base
 from tempest.common.utils import data_utils
+from tempest.common import waiters
 from tempest import config
 from tempest import test
 
@@ -66,12 +67,14 @@
                          "to the requested name")
         self.assertIsNotNone(volume['id'],
                              "Field volume id is empty or not found.")
-        self.volumes_client.wait_for_volume_status(volume['id'], 'available')
+        waiters.wait_for_volume_status(self.volumes_client,
+                                       volume['id'], 'available')
 
         # Update volume with new volume_type
         self.volumes_client.retype_volume(volume['id'],
                                           new_type=volume_types[1]['id'])
-        self.volumes_client.wait_for_volume_status(volume['id'], 'available')
+        waiters.wait_for_volume_status(self.volumes_client,
+                                       volume['id'], 'available')
 
         # Get volume details and Verify
         fetched_volume = self.volumes_client.show_volume(
diff --git a/tempest/api/volume/admin/test_volumes_actions.py b/tempest/api/volume/admin/test_volumes_actions.py
index 253a3e1..bdb313f 100644
--- a/tempest/api/volume/admin/test_volumes_actions.py
+++ b/tempest/api/volume/admin/test_volumes_actions.py
@@ -15,6 +15,7 @@
 
 from tempest.api.volume import base
 from tempest.common.utils import data_utils as utils
+from tempest.common import waiters
 from tempest import test
 
 
@@ -35,7 +36,8 @@
         params = {cls.name_field: vol_name}
 
         cls.volume = cls.client.create_volume(**params)['volume']
-        cls.client.wait_for_volume_status(cls.volume['id'], 'available')
+        waiters.wait_for_volume_status(cls.client,
+                                       cls.volume['id'], 'available')
 
     @classmethod
     def resource_cleanup(cls):
@@ -61,7 +63,8 @@
         vol_name = utils.rand_name('Volume')
         params = {self.name_field: vol_name}
         temp_volume = self.client.create_volume(**params)['volume']
-        self.client.wait_for_volume_status(temp_volume['id'], 'available')
+        waiters.wait_for_volume_status(self.client,
+                                       temp_volume['id'], 'available')
 
         return temp_volume
 
diff --git a/tempest/api/volume/admin/test_volumes_backup.py b/tempest/api/volume/admin/test_volumes_backup.py
index 30c6a15..b09cd2c 100644
--- a/tempest/api/volume/admin/test_volumes_backup.py
+++ b/tempest/api/volume/admin/test_volumes_backup.py
@@ -15,6 +15,7 @@
 
 from tempest.api.volume import base
 from tempest.common.utils import data_utils
+from tempest.common import waiters
 from tempest import config
 from tempest.lib import decorators
 from tempest import test
@@ -50,8 +51,8 @@
         self.addCleanup(self.backups_adm_client.delete_backup,
                         backup['id'])
         self.assertEqual(backup_name, backup['name'])
-        self.admin_volume_client.wait_for_volume_status(
-            self.volume['id'], 'available')
+        waiters.wait_for_volume_status(self.admin_volume_client,
+                                       self.volume['id'], 'available')
         self.backups_adm_client.wait_for_backup_status(backup['id'],
                                                        'available')
 
@@ -74,8 +75,8 @@
         self.assertEqual(backup['id'], restore['backup_id'])
         self.backups_adm_client.wait_for_backup_status(backup['id'],
                                                        'available')
-        self.admin_volume_client.wait_for_volume_status(
-            restore['volume_id'], 'available')
+        waiters.wait_for_volume_status(self.admin_volume_client,
+                                       restore['volume_id'], 'available')
 
     @decorators.skip_because(bug='1455043')
     @test.idempotent_id('a99c54a1-dd80-4724-8a13-13bf58d4068d')
@@ -117,8 +118,8 @@
         self.addCleanup(self.admin_volume_client.delete_volume,
                         restore['volume_id'])
         self.assertEqual(import_backup['id'], restore['backup_id'])
-        self.admin_volume_client.wait_for_volume_status(restore['volume_id'],
-                                                        'available')
+        waiters.wait_for_volume_status(self.admin_volume_client,
+                                       restore['volume_id'], 'available')
 
         # Verify if restored volume is there in volume list
         volumes = self.admin_volume_client.list_volumes()['volumes']
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 82dc2c9..14819e3 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -15,6 +15,7 @@
 
 from tempest.common import compute
 from tempest.common.utils import data_utils
+from tempest.common import waiters
 from tempest import config
 from tempest import exceptions
 from tempest.lib import exceptions as lib_exc
@@ -112,7 +113,8 @@
         volume = cls.volumes_client.create_volume(**kwargs)['volume']
 
         cls.volumes.append(volume)
-        cls.volumes_client.wait_for_volume_status(volume['id'], 'available')
+        waiters.wait_for_volume_status(cls.volumes_client,
+                                       volume['id'], 'available')
         return volume
 
     @classmethod
@@ -121,8 +123,8 @@
         snapshot = cls.snapshots_client.create_snapshot(
             volume_id=volume_id, **kwargs)['snapshot']
         cls.snapshots.append(snapshot)
-        cls.snapshots_client.wait_for_snapshot_status(snapshot['id'],
-                                                      'available')
+        waiters.wait_for_snapshot_status(cls.snapshots_client,
+                                         snapshot['id'], 'available')
         return snapshot
 
     # NOTE(afazekas): these create_* and clean_* could be defined
diff --git a/tempest/api/volume/test_volume_transfers.py b/tempest/api/volume/test_volume_transfers.py
index 7046dcf..866db3d 100644
--- a/tempest/api/volume/test_volume_transfers.py
+++ b/tempest/api/volume/test_volume_transfers.py
@@ -16,6 +16,7 @@
 from testtools import matchers
 
 from tempest.api.volume import base
+from tempest.common import waiters
 from tempest import config
 from tempest import test
 
@@ -51,8 +52,8 @@
             volume_id=volume['id'])['transfer']
         transfer_id = transfer['id']
         auth_key = transfer['auth_key']
-        self.client.wait_for_volume_status(volume['id'],
-                                           'awaiting-transfer')
+        waiters.wait_for_volume_status(self.client,
+                                       volume['id'], 'awaiting-transfer')
 
         # Get a volume transfer
         body = self.client.show_volume_transfer(transfer_id)['transfer']
@@ -66,7 +67,8 @@
         # Accept a volume transfer by alt_tenant
         body = self.alt_client.accept_volume_transfer(
             transfer_id, auth_key=auth_key)['transfer']
-        self.alt_client.wait_for_volume_status(volume['id'], 'available')
+        waiters.wait_for_volume_status(self.alt_client,
+                                       volume['id'], 'available')
 
     @test.idempotent_id('ab526943-b725-4c07-b875-8e8ef87a2c30')
     def test_create_list_delete_volume_transfer(self):
@@ -78,8 +80,8 @@
         body = self.client.create_volume_transfer(
             volume_id=volume['id'])['transfer']
         transfer_id = body['id']
-        self.client.wait_for_volume_status(volume['id'],
-                                           'awaiting-transfer')
+        waiters.wait_for_volume_status(self.client,
+                                       volume['id'], 'awaiting-transfer')
 
         # List all volume transfers (looking for the one we created)
         body = self.client.list_volume_transfers()['transfers']
@@ -91,7 +93,7 @@
 
         # Delete a volume transfer
         self.client.delete_volume_transfer(transfer_id)
-        self.client.wait_for_volume_status(volume['id'], 'available')
+        waiters.wait_for_volume_status(self.client, volume['id'], 'available')
 
 
 class VolumesV1TransfersTest(VolumesV2TransfersTest):
diff --git a/tempest/api/volume/test_volumes_actions.py b/tempest/api/volume/test_volumes_actions.py
index 5f9ea7f..9c67579 100644
--- a/tempest/api/volume/test_volumes_actions.py
+++ b/tempest/api/volume/test_volumes_actions.py
@@ -43,7 +43,8 @@
 
         # Create a test shared volume for attach/detach tests
         cls.volume = cls.create_volume()
-        cls.client.wait_for_volume_status(cls.volume['id'], 'available')
+        waiters.wait_for_volume_status(cls.client,
+                                       cls.volume['id'], 'available')
 
     @classmethod
     def resource_cleanup(cls):
@@ -64,9 +65,11 @@
         self.client.attach_volume(self.volume['id'],
                                   instance_uuid=self.server['id'],
                                   mountpoint=mountpoint)
-        self.client.wait_for_volume_status(self.volume['id'], 'in-use')
+        waiters.wait_for_volume_status(self.client,
+                                       self.volume['id'], 'in-use')
         self.client.detach_volume(self.volume['id'])
-        self.client.wait_for_volume_status(self.volume['id'], 'available')
+        waiters.wait_for_volume_status(self.client,
+                                       self.volume['id'], 'available')
 
     @test.idempotent_id('63e21b4c-0a0c-41f6-bfc3-7c2816815599')
     @testtools.skipUnless(CONF.volume_feature_enabled.bootable,
@@ -91,10 +94,11 @@
         self.client.attach_volume(self.volume['id'],
                                   instance_uuid=self.server['id'],
                                   mountpoint=mountpoint)
-        self.client.wait_for_volume_status(self.volume['id'], 'in-use')
+        waiters.wait_for_volume_status(self.client,
+                                       self.volume['id'], 'in-use')
         # NOTE(gfidente): added in reverse order because functions will be
         # called in reverse order to the order they are added (LIFO)
-        self.addCleanup(self.client.wait_for_volume_status,
+        self.addCleanup(waiters.wait_for_volume_status, self.client,
                         self.volume['id'],
                         'available')
         self.addCleanup(self.client.detach_volume, self.volume['id'])
@@ -120,7 +124,8 @@
         image_id = body["image_id"]
         self.addCleanup(self.image_client.delete_image, image_id)
         self.image_client.wait_for_image_status(image_id, 'active')
-        self.client.wait_for_volume_status(self.volume['id'], 'available')
+        waiters.wait_for_volume_status(self.client,
+                                       self.volume['id'], 'available')
 
     @test.idempotent_id('92c4ef64-51b2-40c0-9f7e-4749fbaaba33')
     def test_reserve_unreserve_volume(self):
diff --git a/tempest/api/volume/test_volumes_extend.py b/tempest/api/volume/test_volumes_extend.py
index ed1e5c5..1947779 100644
--- a/tempest/api/volume/test_volumes_extend.py
+++ b/tempest/api/volume/test_volumes_extend.py
@@ -14,6 +14,7 @@
 #    under the License.
 
 from tempest.api.volume import base
+from tempest.common import waiters
 from tempest import config
 from tempest import test
 
@@ -33,7 +34,8 @@
         self.volume = self.create_volume()
         extend_size = int(self.volume['size']) + 1
         self.client.extend_volume(self.volume['id'], new_size=extend_size)
-        self.client.wait_for_volume_status(self.volume['id'], 'available')
+        waiters.wait_for_volume_status(self.client,
+                                       self.volume['id'], 'available')
         volume = self.client.show_volume(self.volume['id'])['volume']
         self.assertEqual(int(volume['size']), extend_size)
 
diff --git a/tempest/api/volume/test_volumes_get.py b/tempest/api/volume/test_volumes_get.py
index aa3ef2f..5d83bb0 100644
--- a/tempest/api/volume/test_volumes_get.py
+++ b/tempest/api/volume/test_volumes_get.py
@@ -18,6 +18,7 @@
 
 from tempest.api.volume import base
 from tempest.common.utils import data_utils
+from tempest.common import waiters
 from tempest import config
 from tempest import test
 
@@ -53,7 +54,7 @@
         volume = self.client.create_volume(**kwargs)['volume']
         self.assertIn('id', volume)
         self.addCleanup(self._delete_volume, volume['id'])
-        self.client.wait_for_volume_status(volume['id'], 'available')
+        waiters.wait_for_volume_status(self.client, volume['id'], 'available')
         self.assertIn(self.name_field, volume)
         self.assertEqual(volume[self.name_field], v_name,
                          "The created volume name is not equal "
@@ -113,7 +114,8 @@
         new_volume = self.client.create_volume(**params)['volume']
         self.assertIn('id', new_volume)
         self.addCleanup(self._delete_volume, new_volume['id'])
-        self.client.wait_for_volume_status(new_volume['id'], 'available')
+        waiters.wait_for_volume_status(self.client,
+                                       new_volume['id'], 'available')
 
         params = {self.name_field: volume[self.name_field],
                   self.descrip_field: volume[self.descrip_field]}
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index c79235a..347877c 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -12,6 +12,7 @@
 
 from tempest.api.volume import base
 from tempest.common.utils import data_utils
+from tempest.common import waiters
 from tempest import config
 from tempest import test
 
@@ -37,7 +38,8 @@
     def _detach(self, volume_id):
         """Detach volume."""
         self.volumes_client.detach_volume(volume_id)
-        self.volumes_client.wait_for_volume_status(volume_id, 'available')
+        waiters.wait_for_volume_status(self.volumes_client,
+                                       volume_id, 'available')
 
     def _list_by_param_values_and_assert(self, with_detail=False, **params):
         """list or list_details with given params and validates result."""
@@ -70,9 +72,9 @@
         self.servers_client.attach_volume(
             server['id'], volumeId=self.volume_origin['id'],
             device=mountpoint)
-        self.volumes_client.wait_for_volume_status(self.volume_origin['id'],
-                                                   'in-use')
-        self.addCleanup(self.volumes_client.wait_for_volume_status,
+        waiters.wait_for_volume_status(self.volumes_client,
+                                       self.volume_origin['id'], 'in-use')
+        self.addCleanup(waiters.wait_for_volume_status, self.volumes_client,
                         self.volume_origin['id'], 'available')
         self.addCleanup(self.servers_client.detach_volume, server['id'],
                         self.volume_origin['id'])
@@ -171,7 +173,8 @@
         # NOTE(gfidente): size is required also when passing snapshot_id
         volume = self.volumes_client.create_volume(
             snapshot_id=snapshot['id'])['volume']
-        self.volumes_client.wait_for_volume_status(volume['id'], 'available')
+        waiters.wait_for_volume_status(self.volumes_client,
+                                       volume['id'], 'available')
         self.volumes_client.delete_volume(volume['id'])
         self.volumes_client.wait_for_resource_deletion(volume['id'])
         self.cleanup_snapshot(snapshot)
diff --git a/tempest/clients.py b/tempest/clients.py
index 8931706..fc0cc89 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -87,6 +87,8 @@
 from tempest.lib.services.network.ports_client import PortsClient
 from tempest.lib.services.network.quotas_client import QuotasClient \
     as NetworkQuotasClient
+from tempest.lib.services.network.security_group_rules_client import \
+    SecurityGroupRulesClient
 from tempest.lib.services.network.security_groups_client import \
     SecurityGroupsClient
 from tempest.lib.services.network.subnetpools_client import SubnetpoolsClient
@@ -132,8 +134,6 @@
 from tempest.services.image.v2.json.images_client import ImagesClientV2
 from tempest.services.network.json.network_client import NetworkClient
 from tempest.services.network.json.routers_client import RoutersClient
-from tempest.services.network.json.security_group_rules_client import \
-    SecurityGroupRulesClient
 from tempest.services.object_storage.account_client import AccountClient
 from tempest.services.object_storage.container_client import ContainerClient
 from tempest.services.object_storage.object_client import ObjectClient
diff --git a/tempest/cmd/cleanup_service.py b/tempest/cmd/cleanup_service.py
index a0676b6..99933dd 100644
--- a/tempest/cmd/cleanup_service.py
+++ b/tempest/cmd/cleanup_service.py
@@ -462,12 +462,13 @@
 
     def delete(self):
         client = self.routers_client
+        ports_client = self.ports_client
         routers = self.list()
         for router in routers:
             try:
                 rid = router['id']
                 ports = [port for port
-                         in client.list_router_interfaces(rid)['ports']
+                         in ports_client.list_ports(device_id=rid)['ports']
                          if port["device_owner"] == "network:router_interface"]
                 for port in ports:
                     client.remove_router_interface(rid, port_id=port['id'])
diff --git a/tempest/cmd/javelin.py b/tempest/cmd/javelin.py
index 057c227..e3c1c64 100755
--- a/tempest/cmd/javelin.py
+++ b/tempest/cmd/javelin.py
@@ -127,6 +127,7 @@
 from tempest.lib.services.compute import security_group_rules_client
 from tempest.lib.services.compute import security_groups_client
 from tempest.lib.services.compute import servers_client
+from tempest.lib.services.network import ports_client
 from tempest.lib.services.network import subnets_client
 from tempest.services.identity.v2.json import identity_client
 from tempest.services.identity.v2.json import roles_client
@@ -271,6 +272,14 @@
             build_interval=CONF.network.build_interval,
             build_timeout=CONF.network.build_timeout,
             **default_params)
+        self.ports = ports_client.PortsClient(
+            _auth,
+            CONF.network.catalog_type,
+            CONF.network.region or CONF.identity.region,
+            endpoint_type=CONF.network.endpoint_type,
+            build_interval=CONF.network.build_interval,
+            build_timeout=CONF.network.build_timeout,
+            **default_params)
         self.routers = routers_client.RoutersClient(
             _auth,
             CONF.network.catalog_type,
@@ -751,7 +760,7 @@
     n_body = client.routers.list_routers()
     for router in n_body['routers']:
         router_id = router['id']
-        r_body = client.networks.list_router_interfaces(router_id)
+        r_body = client.ports.list_ports(device_id=router_id)
         for port in r_body['ports']:
             if port['network_id'] == network_id:
                 return "qrouter-%s" % router_id
@@ -1022,7 +1031,7 @@
         v_name = volume['name']
         body = client.volumes.create_volume(size=size,
                                             display_name=v_name)['volume']
-        client.volumes.wait_for_volume_status(body['id'], 'available')
+        waiters.wait_for_volume_status(client.volumes, body['id'], 'available')
 
 
 def destroy_volumes(volumes):
diff --git a/tempest/cmd/run_stress.py b/tempest/cmd/run_stress.py
old mode 100644
new mode 100755
index 6fe3928..9c8552f
--- a/tempest/cmd/run_stress.py
+++ b/tempest/cmd/run_stress.py
@@ -23,6 +23,7 @@
     # unittest in python 2.6 does not contain loader, so uses unittest2
     from unittest2 import loader
 import traceback
+import warnings
 
 from cliff import command
 from oslo_log import log as logging
@@ -74,7 +75,17 @@
 
 class TempestRunStress(command.Command):
 
+    @staticmethod
+    def display_deprecation_warning():
+        warnings.simplefilter('once', category=DeprecationWarning)
+        warnings.warn(
+            'Stress tests are deprecated and will be removed from Tempest '
+            'in the Newton release.',
+            DeprecationWarning)
+        warnings.resetwarnings()
+
     def get_parser(self, prog_name):
+        self.display_deprecation_warning()
         pa = super(TempestRunStress, self).get_parser(prog_name)
         pa = add_arguments(pa)
         return pa
@@ -146,6 +157,7 @@
 
 
 def main():
+    TempestRunStress.display_deprecation_warning()
     parser = argparse.ArgumentParser(description='Run stress tests')
     pa = add_arguments(parser)
     ns = pa.parse_args()
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
index 9049886..0ba322d 100644
--- a/tempest/cmd/verify_tempest_config.py
+++ b/tempest/cmd/verify_tempest_config.py
@@ -97,7 +97,14 @@
                              ca_certs=ca_certs)
     __, body = raw_http.request(endpoint, 'GET')
     client_dict[service].reset_path()
-    body = json.loads(body)
+    try:
+        body = json.loads(body)
+    except ValueError:
+        LOG.error(
+            'Failed to get a JSON response from unversioned endpoint %s '
+            '(versioned endpoint was %s). Response is:\n%s',
+            endpoint, client_dict[service].base_url, body[:100])
+        raise
     if service == 'keystone':
         versions = map(lambda x: x['id'], body['versions']['values'])
     else:
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index 2d2909a..b5c4547 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -98,8 +98,8 @@
         volume = volumes_client.create_volume(
             display_name=volume_name,
             imageRef=image_id)
-        volumes_client.wait_for_volume_status(volume['volume']['id'],
-                                              'available')
+        waiters.wait_for_volume_status(volumes_client,
+                                       volume['volume']['id'], 'available')
 
         bd_map_v2 = [{
             'uuid': volume['volume']['id'],
diff --git a/tempest/common/glance_http.py b/tempest/common/glance_http.py
index baf796d..00062de 100644
--- a/tempest/common/glance_http.py
+++ b/tempest/common/glance_http.py
@@ -306,13 +306,32 @@
 
     def connect(self):
         """Connect to SSL port and apply per-connection parameters."""
-        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-        if self.timeout is not None:
-            # '0' microseconds
-            sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO,
-                            struct.pack('LL', self.timeout, 0))
-        self.sock = OpenSSLConnectionDelegator(self.context, sock)
-        self.sock.connect((self.host, self.port))
+        try:
+            addresses = socket.getaddrinfo(self.host,
+                                           self.port,
+                                           socket.AF_UNSPEC,
+                                           socket.SOCK_STREAM)
+        except OSError as msg:
+            raise exc.RestClientException(msg)
+        for res in addresses:
+            af, socktype, proto, canonname, sa = res
+            sock = socket.socket(af, socket.SOCK_STREAM)
+
+            if self.timeout is not None:
+                # '0' microseconds
+                sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO,
+                                struct.pack('LL', self.timeout, 0))
+            self.sock = OpenSSLConnectionDelegator(self.context, sock)
+            try:
+                self.sock.connect(sa)
+            except OSError as msg:
+                if self.sock:
+                    self.sock = None
+                    continue
+            break
+        if self.sock is None:
+            # Happens only when all results have failed.
+            raise exc.RestClientException('Cannot connect to %s' % self.host)
 
     def close(self):
         if self.sock:
diff --git a/tempest/common/validation_resources.py b/tempest/common/validation_resources.py
index 84d0143..c3c9a41 100644
--- a/tempest/common/validation_resources.py
+++ b/tempest/common/validation_resources.py
@@ -59,7 +59,9 @@
                 create_ssh_security_group(os, add_rule)
         if validation_resources['floating_ip']:
             floating_client = os.compute_floating_ips_client
-            validation_data.update(floating_client.create_floating_ip())
+            validation_data.update(
+                floating_client.create_floating_ip(
+                    pool=CONF.network.floating_network_name))
     return validation_data
 
 
diff --git a/tempest/hacking/checks.py b/tempest/hacking/checks.py
index c666c96..f4b76e5 100644
--- a/tempest/hacking/checks.py
+++ b/tempest/hacking/checks.py
@@ -225,6 +225,27 @@
         yield (0, msg)
 
 
+def dont_import_local_tempest_into_lib(logical_line, filename):
+    """Check that tempest.lib should not import local tempest code
+
+    T112
+    """
+    if 'tempest/lib/' not in filename:
+        return
+
+    if not ('from tempest' in logical_line
+            or 'import tempest' in logical_line):
+        return
+
+    if ('from tempest.lib' in logical_line
+        or 'import tempest.lib' in logical_line):
+        return
+
+    msg = ("T112: tempest.lib should not import local tempest code to avoid "
+           "circular dependency")
+    yield (0, msg)
+
+
 def factory(register):
     register(import_no_clients_in_api_and_scenario_tests)
     register(scenario_tests_need_service_tags)
@@ -236,3 +257,4 @@
     register(no_testtools_skip_decorator)
     register(get_resources_on_service_clients)
     register(delete_resources_on_service_clients)
+    register(dont_import_local_tempest_into_lib)
diff --git a/tempest/lib/common/rest_client.py b/tempest/lib/common/rest_client.py
index 7ce05e3..7d2eda0 100644
--- a/tempest/lib/common/rest_client.py
+++ b/tempest/lib/common/rest_client.py
@@ -548,7 +548,7 @@
         :param str method: The HTTP verb to use for the request
         :param str headers: Headers to use for the request if none are specifed
                             the headers
-        :param str body: Body to to send with the request
+        :param str body: Body to send with the request
         :rtype: tuple
         :return: a tuple with the first entry containing the response headers
                  and the second the response body
@@ -584,7 +584,7 @@
                              specifed the headers returned from the
                              get_headers() method are used. If the request
                              explicitly requires no headers use an empty dict.
-        :param str body: Body to to send with the request
+        :param str body: Body to send with the request
         :rtype: tuple
         :return: a tuple with the first entry containing the response headers
                  and the second the response body
diff --git a/tempest/lib/services/network/networks_client.py b/tempest/lib/services/network/networks_client.py
index 0926634..24c2ec5 100644
--- a/tempest/lib/services/network/networks_client.py
+++ b/tempest/lib/services/network/networks_client.py
@@ -45,3 +45,7 @@
         """
         uri = '/networks'
         return self.create_resource(uri, kwargs)
+
+    def list_dhcp_agents_on_hosting_network(self, network_id):
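+        """List DHCP agents hosting the given network."""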
+        uri = '/networks/%s/dhcp-agents' % network_id
+        return self.list_resources(uri)
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 8ba5f9a..1217dc9 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -283,7 +283,8 @@
             self.assertEqual(name, volume['display_name'])
         else:
             self.assertEqual(name, volume['name'])
-        self.volumes_client.wait_for_volume_status(volume['id'], 'available')
+        waiters.wait_for_volume_status(self.volumes_client,
+                                       volume['id'], 'available')
         # The volume retrieved on creation has a non-up-to-date status.
         # Retrieval after it becomes active ensures correct details.
         volume = self.volumes_client.show_volume(volume['id'])['volume']
@@ -478,8 +479,8 @@
                 self.addCleanup(
                     self.delete_wrapper, self.snapshots_client.delete_snapshot,
                     snapshot_id)
-                self.snapshots_client.wait_for_snapshot_status(snapshot_id,
-                                                               'available')
+                waiters.wait_for_snapshot_status(self.snapshots_client,
+                                                 snapshot_id, 'available')
 
         image_name = snapshot_image['name']
         self.assertEqual(name, image_name)
@@ -492,14 +493,16 @@
             server['id'], volumeId=volume_to_attach['id'], device='/dev/%s'
             % CONF.compute.volume_device_name)['volumeAttachment']
         self.assertEqual(volume_to_attach['id'], volume['id'])
-        self.volumes_client.wait_for_volume_status(volume['id'], 'in-use')
+        waiters.wait_for_volume_status(self.volumes_client,
+                                       volume['id'], 'in-use')
 
         # Return the updated volume after the attachment
         return self.volumes_client.show_volume(volume['id'])['volume']
 
     def nova_volume_detach(self, server, volume):
         self.servers_client.detach_volume(server['id'], volume['id'])
-        self.volumes_client.wait_for_volume_status(volume['id'], 'available')
+        waiters.wait_for_volume_status(self.volumes_client,
+                                       volume['id'], 'available')
 
         volume = self.volumes_client.show_volume(volume['id'])['volume']
         self.assertEqual('available', volume['status'])
@@ -600,6 +603,8 @@
     def create_floating_ip(self, thing, pool_name=None):
         """Create a floating IP and associates to a server on Nova"""
 
+        if not pool_name:
+            pool_name = CONF.network.floating_network_name
         floating_ip = (self.compute_floating_ips_client.
                        create_floating_ip(pool=pool_name)['floating_ip'])
         self.addCleanup(self.delete_wrapper,
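
The recurring pattern in this file is that status polling moves off the
service clients and onto the shared ``tempest.common.waiters`` module, which
takes the client as its first argument. A minimal sketch of the new call
style inside a scenario test method, assuming ``self.volumes_client`` and
``self.snapshots_client`` fixtures are available::

    from tempest.common import waiters

    # Previously: self.volumes_client.wait_for_volume_status(...)
    waiters.wait_for_volume_status(self.volumes_client,
                                   volume['id'], 'available')
    # Previously: self.snapshots_client.wait_for_snapshot_status(...)
    waiters.wait_for_snapshot_status(self.snapshots_client,
                                     snapshot['id'], 'available')
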
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index dcb095b..a9f2dff 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -119,7 +119,7 @@
     @test.idempotent_id('7fff3fb3-91d8-4fd0-bd7d-0204f1f180ba')
     @test.attr(type='smoke')
     @test.services('compute', 'network')
-    def test_server_basicops(self):
+    def test_server_basic_ops(self):
         keypair = self.create_keypair()
         self.security_group = self._create_security_group()
         security_groups = [{'name': self.security_group['name']}]
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index 6121a90..e7223c7 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -19,6 +19,7 @@
 import testtools
 
 from tempest.common.utils import data_utils
+from tempest.common import waiters
 from tempest import config
 from tempest import exceptions
 from tempest.lib import decorators
@@ -67,14 +68,15 @@
             self.snapshots_client.delete_snapshot(snapshot['id'])
             try:
                 while self.snapshots_client.show_snapshot(
-                    snapshot['id'])['snapshot']:
+                        snapshot['id'])['snapshot']:
                     time.sleep(1)
             except lib_exc.NotFound:
                 pass
         self.addCleanup(cleaner)
-        self.volumes_client.wait_for_volume_status(volume['id'], 'available')
-        self.snapshots_client.wait_for_snapshot_status(snapshot['id'],
-                                                       'available')
+        waiters.wait_for_volume_status(self.volumes_client,
+                                       volume['id'], 'available')
+        waiters.wait_for_snapshot_status(self.snapshots_client,
+                                         snapshot['id'], 'available')
         self.assertEqual(snapshot_name, snapshot['display_name'])
         return snapshot
 
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index 71bb50e..25d825a 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -87,7 +87,8 @@
         self.addCleanup(
             self.snapshots_client.wait_for_resource_deletion, snap['id'])
         self.addCleanup(self.snapshots_client.delete_snapshot, snap['id'])
-        self.snapshots_client.wait_for_snapshot_status(snap['id'], 'available')
+        waiters.wait_for_snapshot_status(self.snapshots_client,
+                                         snap['id'], 'available')
 
         # NOTE(e0ne): Cinder API v2 uses name instead of display_name
         if 'display_name' in snap:
diff --git a/tempest/services/network/json/network_client.py b/tempest/services/network/json/network_client.py
index bcef36b..5080657 100644
--- a/tempest/services/network/json/network_client.py
+++ b/tempest/services/network/json/network_client.py
@@ -13,7 +13,6 @@
 import time
 
 from tempest import exceptions
-from tempest.lib.common.utils import misc
 from tempest.lib import exceptions as lib_exc
 from tempest.lib.services.network import base
 
@@ -55,48 +54,3 @@
         except lib_exc.NotFound:
             return True
         return False
-
-    def wait_for_resource_status(self, fetch, status, interval=None,
-                                 timeout=None):
-        """Waits for a network resource to reach a status
-
-        @param fetch: the callable to be used to query the resource status
-        @type fecth: callable that takes no parameters and returns the resource
-        @param status: the status that the resource has to reach
-        @type status: String
-        @param interval: the number of seconds to wait between each status
-          query
-        @type interval: Integer
-        @param timeout: the maximum number of seconds to wait for the resource
-          to reach the desired status
-        @type timeout: Integer
-        """
-        if not interval:
-            interval = self.build_interval
-        if not timeout:
-            timeout = self.build_timeout
-        start_time = time.time()
-
-        while time.time() - start_time <= timeout:
-            resource = fetch()
-            if resource['status'] == status:
-                return
-            time.sleep(interval)
-
-        # At this point, the wait has timed out
-        message = 'Resource %s' % (str(resource))
-        message += ' failed to reach status %s' % status
-        message += ' (current: %s)' % resource['status']
-        message += ' within the required time %s' % timeout
-        caller = misc.find_test_caller()
-        if caller:
-            message = '(%s) %s' % (caller, message)
-        raise exceptions.TimeoutException(message)
-
-    def list_router_interfaces(self, uuid):
-        uri = '/ports?device_id=%s' % uuid
-        return self.list_resources(uri)
-
-    def list_dhcp_agent_hosting_network(self, network_id):
-        uri = '/networks/%s/dhcp-agents' % network_id
-        return self.list_resources(uri)
diff --git a/tempest/services/network/json/security_group_rules_client.py b/tempest/services/network/json/security_group_rules_client.py
deleted file mode 100644
index 944eba6..0000000
--- a/tempest/services/network/json/security_group_rules_client.py
+++ /dev/null
@@ -1,33 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest.lib.services.network import base
-
-
-class SecurityGroupRulesClient(base.BaseNetworkClient):
-
-    def create_security_group_rule(self, **kwargs):
-        uri = '/security-group-rules'
-        post_data = {'security_group_rule': kwargs}
-        return self.create_resource(uri, post_data)
-
-    def show_security_group_rule(self, security_group_rule_id, **fields):
-        uri = '/security-group-rules/%s' % security_group_rule_id
-        return self.show_resource(uri, **fields)
-
-    def delete_security_group_rule(self, security_group_rule_id):
-        uri = '/security-group-rules/%s' % security_group_rule_id
-        return self.delete_resource(uri)
-
-    def list_security_group_rules(self, **filters):
-        uri = '/security-group-rules'
-        return self.list_resources(uri, **filters)
diff --git a/tempest/services/network/resources.py b/tempest/services/network/resources.py
index 5512075..e78fcfe 100644
--- a/tempest/services/network/resources.py
+++ b/tempest/services/network/resources.py
@@ -14,9 +14,13 @@
 #    under the License.
 
 import abc
+import time
 
 import six
 
+from tempest import exceptions
+from tempest.lib.common.utils import misc
+
 
 class AttributeDict(dict):
     """Provide attribute access (dict.key) to dictionary values."""
@@ -67,7 +71,35 @@
             self.refresh()
             return self
 
-        return self.client.wait_for_resource_status(helper_get, status)
+        return self.wait_for_resource_status(helper_get, status)
+
+    def wait_for_resource_status(self, fetch, status):
+        """Waits for a network resource to reach a status
+
+        @param fetch: the callable to be used to query the resource status
+        @type fetch: callable that takes no parameters and returns the resource
+        @param status: the status that the resource has to reach
+        @type status: String
+        """
+        interval = self.build_interval
+        timeout = self.build_timeout
+        start_time = time.time()
+
+        while time.time() - start_time <= timeout:
+            resource = fetch()
+            if resource['status'] == status:
+                return
+            time.sleep(interval)
+
+        # At this point, the wait has timed out
+        message = 'Resource %s' % (str(resource))
+        message += ' failed to reach status %s' % status
+        message += ' (current: %s)' % resource['status']
+        message += ' within the required time %s' % timeout
+        caller = misc.find_test_caller()
+        if caller:
+            message = '(%s) %s' % (caller, message)
+        raise exceptions.TimeoutException(message)
 
 
 class DeletableNetwork(DeletableResource):
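
With the waiter relocated from the network client onto ``DeletableResource``
itself, callers only need a fetch callable that returns the latest resource
body. A minimal sketch of that contract, assuming a hypothetical router
resource with a ``status`` field and a ``refresh()`` method::

    # `router` stands in for any DeletableResource subclass instance.
    def fetch():
        router.refresh()   # re-read the resource from the API
        return router      # must be indexable as resource['status']

    # Polls every build_interval seconds until 'ACTIVE' is reached or
    # build_timeout expires, then raises TimeoutException.
    router.wait_for_resource_status(fetch, 'ACTIVE')
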
diff --git a/tempest/services/object_storage/object_client.py b/tempest/services/object_storage/object_client.py
index 9ad8c27..78bda5d 100644
--- a/tempest/services/object_storage/object_client.py
+++ b/tempest/services/object_storage/object_client.py
@@ -210,7 +210,7 @@
     :param contents: a string or a file like object to read object data
                      from; if None, a zero-byte put will be done
     :param chunk_size: chunk size of data to write; it defaults to 65536;
-                       used only if the the contents object has a 'read'
+                       used only if the contents object has a 'read'
                        method, eg. file-like objects, ignored otherwise
     :param headers: additional headers to include in the request, if any
     :param query_string: if set will be appended with '?' to generated path
diff --git a/tempest/services/orchestration/json/orchestration_client.py b/tempest/services/orchestration/json/orchestration_client.py
index 6019cf5..ea5dbe5 100644
--- a/tempest/services/orchestration/json/orchestration_client.py
+++ b/tempest/services/orchestration/json/orchestration_client.py
@@ -151,42 +151,6 @@
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp)
 
-    def wait_for_resource_status(self, stack_identifier, resource_name,
-                                 status, failure_pattern='^.*_FAILED$'):
-        """Waits for a Resource to reach a given status."""
-        start = int(time.time())
-        fail_regexp = re.compile(failure_pattern)
-
-        while True:
-            try:
-                body = self.show_resource(
-                    stack_identifier, resource_name)['resource']
-            except lib_exc.NotFound:
-                # ignore this, as the resource may not have
-                # been created yet
-                pass
-            else:
-                resource_name = body['resource_name']
-                resource_status = body['resource_status']
-                if resource_status == status:
-                    return
-                if fail_regexp.search(resource_status):
-                    raise exceptions.StackResourceBuildErrorException(
-                        resource_name=resource_name,
-                        stack_identifier=stack_identifier,
-                        resource_status=resource_status,
-                        resource_status_reason=body['resource_status_reason'])
-
-            if int(time.time()) - start >= self.build_timeout:
-                message = ('Resource %s failed to reach %s status '
-                           '(current %s) within the required time (%s s).' %
-                           (resource_name,
-                            status,
-                            resource_status,
-                            self.build_timeout))
-                raise exceptions.TimeoutException(message)
-            time.sleep(self.build_interval)
-
     def wait_for_stack_status(self, stack_identifier, status,
                               failure_pattern='^.*_FAILED$'):
         """Waits for a Stack to reach a given status."""
diff --git a/tempest/services/volume/base/base_snapshots_client.py b/tempest/services/volume/base/base_snapshots_client.py
index 5e5637a..68503dd 100644
--- a/tempest/services/volume/base/base_snapshots_client.py
+++ b/tempest/services/volume/base/base_snapshots_client.py
@@ -10,13 +10,10 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import time
-
 from oslo_log import log as logging
 from oslo_serialization import jsonutils as json
 from six.moves.urllib import parse as urllib
 
-from tempest import exceptions
 from tempest.lib.common import rest_client
 from tempest.lib import exceptions as lib_exc
 
@@ -70,43 +67,6 @@
         self.expected_success(200, resp.status)
         return rest_client.ResponseBody(resp, body)
 
-    # NOTE(afazekas): just for the wait function
-    def _get_snapshot_status(self, snapshot_id):
-        body = self.show_snapshot(snapshot_id)['snapshot']
-        status = body['status']
-        # NOTE(afazekas): snapshot can reach an "error"
-        # state in a "normal" lifecycle
-        if (status == 'error'):
-            raise exceptions.SnapshotBuildErrorException(
-                snapshot_id=snapshot_id)
-
-        return status
-
-    # NOTE(afazkas): Wait reinvented again. It is not in the correct layer
-    def wait_for_snapshot_status(self, snapshot_id, status):
-        """Waits for a Snapshot to reach a given status."""
-        start_time = time.time()
-        old_value = value = self._get_snapshot_status(snapshot_id)
-        while True:
-            dtime = time.time() - start_time
-            time.sleep(self.build_interval)
-            if value != old_value:
-                LOG.info('Value transition from "%s" to "%s"'
-                         'in %d second(s).', old_value,
-                         value, dtime)
-            if (value == status):
-                return value
-
-            if dtime > self.build_timeout:
-                message = ('Time Limit Exceeded! (%ds)'
-                           'while waiting for %s, '
-                           'but we got %s.' %
-                           (self.build_timeout, status, value))
-                raise exceptions.TimeoutException(message)
-            time.sleep(self.build_interval)
-            old_value = value
-            value = self._get_snapshot_status(snapshot_id)
-
     def delete_snapshot(self, snapshot_id):
         """Delete Snapshot."""
         resp, body = self.delete("snapshots/%s" % str(snapshot_id))
diff --git a/tempest/services/volume/base/base_volumes_client.py b/tempest/services/volume/base/base_volumes_client.py
index f638bb6..4344802 100644
--- a/tempest/services/volume/base/base_volumes_client.py
+++ b/tempest/services/volume/base/base_volumes_client.py
@@ -17,7 +17,6 @@
 import six
 from six.moves.urllib import parse as urllib
 
-from tempest.common import waiters
 from tempest.lib.common import rest_client
 from tempest.lib import exceptions as lib_exc
 
@@ -148,10 +147,6 @@
         self.expected_success(202, resp.status)
         return rest_client.ResponseBody(resp, body)
 
-    def wait_for_volume_status(self, volume_id, status):
-        """Waits for a Volume to reach a given status."""
-        waiters.wait_for_volume_status(self, volume_id, status)
-
     def is_resource_deleted(self, id):
         try:
             self.show_volume(id)
diff --git a/tempest/stress/__init__.py b/tempest/stress/__init__.py
index 987a023..e69de29 100644
--- a/tempest/stress/__init__.py
+++ b/tempest/stress/__init__.py
@@ -1,22 +0,0 @@
-# Copyright 2016 NEC Corporation.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import warnings
-
-warnings.simplefilter('once', category=DeprecationWarning)
-warnings.warn(
-    'Stress tests are deprecated and will be removed from Tempest '
-    'in the Newton release.',
-    DeprecationWarning)
-warnings.resetwarnings()
diff --git a/tempest/stress/cleanup.py b/tempest/stress/cleanup.py
index 1c1fb46..3b0a937 100644
--- a/tempest/stress/cleanup.py
+++ b/tempest/stress/cleanup.py
@@ -88,8 +88,8 @@
     LOG.info("Cleanup::remove %s snapshots" % len(snaps))
     for v in snaps:
         try:
-            admin_manager.snapshots_client.\
-                wait_for_snapshot_status(v['id'], 'available')
+            waiters.wait_for_snapshot_status(
+                admin_manager.snapshots_client, v['id'], 'available')
             admin_manager.snapshots_client.delete_snapshot(v['id'])
         except Exception:
             pass
@@ -105,8 +105,8 @@
     LOG.info("Cleanup::remove %s volumes" % len(vols))
     for v in vols:
         try:
-            admin_manager.volumes_client.\
-                wait_for_volume_status(v['id'], 'available')
+            waiters.wait_for_volume_status(
+                admin_manager.volumes_client, v['id'], 'available')
             admin_manager.volumes_client.delete_volume(v['id'])
         except Exception:
             pass
diff --git a/tempest/test.py b/tempest/test.py
index fe3c770..6ba4962 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -611,13 +611,25 @@
                 'dhcp': dhcp}
 
     @classmethod
-    def get_tenant_network(cls):
+    def get_tenant_network(cls, credentials_type='primary'):
         """Get the network to be used in testing
 
+        :param credentials_type: The type of credentials for which to get the
+                                 tenant network
+
         :return: network dict including 'id' and 'name'
         """
+        # Get a manager for the given credentials_type, but at least
+        # always fall back on getting the manager for primary credentials
+        if isinstance(credentials_type, six.string_types):
+            manager = cls.get_client_manager(credential_type=credentials_type)
+        elif isinstance(credentials_type, list):
+            manager = cls.get_client_manager(roles=credentials_type[1:])
+        else:
+            manager = cls.get_client_manager()
+
         # Make sure cred_provider exists and get a network client
-        networks_client = cls.get_client_manager().compute_networks_client
+        networks_client = manager.compute_networks_client
         cred_provider = cls._get_credentials_provider()
         # In case of nova network, isolated tenants are not able to list the
         # network configured in fixed_network_name, even if they can use it
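
The new ``credentials_type`` argument selects which credential set's tenant
network to return: a string picks a credential type, while a list is treated
as role-based credentials with the first element skipped and the remainder
passed as roles (the new ``test_base_test.py`` cases below exercise both
forms). A minimal sketch, assuming a class derived from ``BaseTestCase`` (the
``'member'`` role is an illustrative value)::

    # Default: primary credentials, unchanged behaviour.
    network = cls.get_tenant_network()

    # Alternate credentials.
    alt_network = cls.get_tenant_network(credentials_type='alt')

    # Role-based credentials: only elements after the first are used
    # as roles when requesting the client manager.
    role_network = cls.get_tenant_network(
        credentials_type=['primary', 'member'])
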
diff --git a/tempest/tests/cmd/test_javelin.py b/tempest/tests/cmd/test_javelin.py
index 57cfe97..2d0256a 100644
--- a/tempest/tests/cmd/test_javelin.py
+++ b/tempest/tests/cmd/test_javelin.py
@@ -213,8 +213,8 @@
                                                 name=self.fake_object['name'],
                                                 ip_version=fake_version)
 
-    def test_create_volumes(self):
-
+    @mock.patch("tempest.common.waiters.wait_for_volume_status")
+    def test_create_volumes(self, mock_wait_for_volume_status):
         self.useFixture(mockpatch.PatchObject(javelin, "client_for_user",
                                               return_value=self.fake_client))
         self.useFixture(mockpatch.PatchObject(javelin, "_get_volume_by_name",
@@ -228,12 +228,12 @@
         mocked_function.assert_called_once_with(
             size=self.fake_object['gb'],
             display_name=self.fake_object['name'])
-        mocked_function = self.fake_client.volumes.wait_for_volume_status
-        mocked_function.assert_called_once_with(
-            self.fake_object.body['volume']['id'],
+        mock_wait_for_volume_status.assert_called_once_with(
+            self.fake_client.volumes, self.fake_object.body['volume']['id'],
             'available')
 
-    def test_create_volume_existing(self):
+    @mock.patch("tempest.common.waiters.wait_for_volume_status")
+    def test_create_volume_existing(self, mock_wait_for_volume_status):
         self.useFixture(mockpatch.PatchObject(javelin, "client_for_user",
                                               return_value=self.fake_client))
         self.useFixture(mockpatch.PatchObject(javelin, "_get_volume_by_name",
@@ -245,8 +245,7 @@
 
         mocked_function = self.fake_client.volumes.create_volume
         self.assertFalse(mocked_function.called)
-        mocked_function = self.fake_client.volumes.wait_for_volume_status
-        self.assertFalse(mocked_function.called)
+        self.assertFalse(mock_wait_for_volume_status.called)
 
     def test_create_router(self):
 
diff --git a/tempest/tests/cmd/test_verify_tempest_config.py b/tempest/tests/cmd/test_verify_tempest_config.py
index 193abc7..dc0ba6f 100644
--- a/tempest/tests/cmd/test_verify_tempest_config.py
+++ b/tempest/tests/cmd/test_verify_tempest_config.py
@@ -82,6 +82,28 @@
         self.assertIn('v2.0', versions)
         self.assertIn('v3.0', versions)
 
+    def test_get_versions_invalid_response(self):
+        # When the response body is not valid JSON, an error is
+        # logged.
+        mock_log_error = self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config.LOG, 'error')).mock
+
+        self.useFixture(mockpatch.PatchObject(
+            verify_tempest_config, '_get_unversioned_endpoint'))
+
+        # Simulated response is not JSON.
+        sample_body = (
+            '<html><head>Sample Response</head><body>This is the sample page '
+            'for the web server. Why are you requesting it?</body></html>')
+        self.useFixture(mockpatch.Patch('httplib2.Http.request',
+                                        return_value=(None, sample_body)))
+
+        # service value doesn't matter, just needs to match what
+        # _get_api_versions puts in its client_dict.
+        self.assertRaises(ValueError, verify_tempest_config._get_api_versions,
+                          os=mock.MagicMock(), service='keystone')
+        self.assertTrue(mock_log_error.called)
+
     def test_verify_api_versions(self):
         api_services = ['cinder', 'glance', 'keystone']
         fake_os = mock.MagicMock()
diff --git a/tempest/tests/lib/test_auth.py b/tempest/tests/lib/test_auth.py
index 6a01490..55f0c4e 100644
--- a/tempest/tests/lib/test_auth.py
+++ b/tempest/tests/lib/test_auth.py
@@ -251,7 +251,7 @@
         """Test empty alternate auth data with no effect
 
         Assert that when alt_part is defined, no auth_data is provided,
-        and the the corresponding original request element was not going to
+        and the corresponding original request element was not going to
         be changed anyways, and exception is raised
         """
         filters = {
diff --git a/tempest/tests/test_base_test.py b/tempest/tests/test_base_test.py
new file mode 100644
index 0000000..dc355b4
--- /dev/null
+++ b/tempest/tests/test_base_test.py
@@ -0,0 +1,110 @@
+# Copyright 2016 IBM Corp.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import mock
+
+from tempest import clients
+from tempest.common import credentials_factory as credentials
+from tempest.common import fixed_network
+from tempest import config
+from tempest import test
+from tempest.tests import base
+from tempest.tests import fake_config
+
+
+class TestBaseTestCase(base.TestCase):
+    def setUp(self):
+        super(TestBaseTestCase, self).setUp()
+        self.useFixture(fake_config.ConfigFixture())
+        self.fixed_network_name = 'fixed-net'
+        config.CONF.compute.fixed_network_name = self.fixed_network_name
+        config.CONF.service_available.neutron = True
+
+    @mock.patch.object(test.BaseTestCase, 'get_client_manager')
+    @mock.patch.object(test.BaseTestCase, '_get_credentials_provider')
+    @mock.patch.object(fixed_network, 'get_tenant_network')
+    def test_get_tenant_network(self, mock_gtn, mock_gprov, mock_gcm):
+        net_client = mock.Mock()
+        mock_prov = mock.Mock()
+        mock_gcm.return_value.compute_networks_client = net_client
+        mock_gprov.return_value = mock_prov
+
+        test.BaseTestCase.get_tenant_network()
+
+        mock_gcm.assert_called_once_with(credential_type='primary')
+        mock_gprov.assert_called_once_with()
+        mock_gtn.assert_called_once_with(mock_prov, net_client,
+                                         self.fixed_network_name)
+
+    @mock.patch.object(test.BaseTestCase, 'get_client_manager')
+    @mock.patch.object(test.BaseTestCase, '_get_credentials_provider')
+    @mock.patch.object(fixed_network, 'get_tenant_network')
+    @mock.patch.object(test.BaseTestCase, 'get_identity_version')
+    @mock.patch.object(credentials, 'is_admin_available')
+    @mock.patch.object(clients, 'Manager')
+    def test_get_tenant_network_with_nova_net(self, mock_man, mock_iaa,
+                                              mock_giv, mock_gtn, mock_gcp,
+                                              mock_gcm):
+        config.CONF.service_available.neutron = False
+        mock_prov = mock.Mock()
+        mock_admin_man = mock.Mock()
+        mock_iaa.return_value = True
+        mock_gcp.return_value = mock_prov
+        mock_man.return_value = mock_admin_man
+
+        test.BaseTestCase.get_tenant_network()
+
+        mock_man.assert_called_once_with(
+            mock_prov.get_admin_creds.return_value)
+        mock_iaa.assert_called_once_with(
+            identity_version=mock_giv.return_value)
+        mock_gcp.assert_called_once_with()
+        mock_gtn.assert_called_once_with(
+            mock_prov, mock_admin_man.compute_networks_client,
+            self.fixed_network_name)
+
+    @mock.patch.object(test.BaseTestCase, 'get_client_manager')
+    @mock.patch.object(test.BaseTestCase, '_get_credentials_provider')
+    @mock.patch.object(fixed_network, 'get_tenant_network')
+    def test_get_tenant_network_with_alt_creds(self, mock_gtn, mock_gprov,
+                                               mock_gcm):
+        net_client = mock.Mock()
+        mock_prov = mock.Mock()
+        mock_gcm.return_value.compute_networks_client = net_client
+        mock_gprov.return_value = mock_prov
+
+        test.BaseTestCase.get_tenant_network(credentials_type='alt')
+
+        mock_gcm.assert_called_once_with(credential_type='alt')
+        mock_gprov.assert_called_once_with()
+        mock_gtn.assert_called_once_with(mock_prov, net_client,
+                                         self.fixed_network_name)
+
+    @mock.patch.object(test.BaseTestCase, 'get_client_manager')
+    @mock.patch.object(test.BaseTestCase, '_get_credentials_provider')
+    @mock.patch.object(fixed_network, 'get_tenant_network')
+    def test_get_tenant_network_with_role_creds(self, mock_gtn, mock_gprov,
+                                                mock_gcm):
+        net_client = mock.Mock()
+        mock_prov = mock.Mock()
+        mock_gcm.return_value.compute_networks_client = net_client
+        mock_gprov.return_value = mock_prov
+        creds = ['foo_type', 'role1']
+
+        test.BaseTestCase.get_tenant_network(credentials_type=creds)
+
+        mock_gcm.assert_called_once_with(roles=['role1'])
+        mock_gprov.assert_called_once_with()
+        mock_gtn.assert_called_once_with(mock_prov, net_client,
+                                         self.fixed_network_name)
diff --git a/tempest/tests/test_glance_http.py b/tempest/tests/test_glance_http.py
index db9db34..1811141 100644
--- a/tempest/tests/test_glance_http.py
+++ b/tempest/tests/test_glance_http.py
@@ -13,6 +13,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import socket
+
 import mock
 from oslotest import mockpatch
 import six
@@ -101,6 +103,20 @@
         self.assertTrue(isinstance(self.client._get_connection(),
                                    glance_http.VerifiedHTTPSConnection))
 
+    def test_get_connection_ipv4_https(self):
+        endpoint = 'https://127.0.0.1'
+        self.fake_auth.base_url = mock.MagicMock(return_value=endpoint)
+        self.client = glance_http.HTTPClient(self.fake_auth, {})
+        self.assertTrue(isinstance(self.client._get_connection(),
+                                   glance_http.VerifiedHTTPSConnection))
+
+    def test_get_connection_ipv6_https(self):
+        endpoint = 'https://[::1]'
+        self.fake_auth.base_url = mock.MagicMock(return_value=endpoint)
+        self.client = glance_http.HTTPClient(self.fake_auth, {})
+        self.assertTrue(isinstance(self.client._get_connection(),
+                                   glance_http.VerifiedHTTPSConnection))
+
     def test_get_connection_url_not_fount(self):
         self.useFixture(mockpatch.PatchObject(self.client, 'connection_class',
                                               side_effect=httplib.InvalidURL()
@@ -146,6 +162,64 @@
         self.assertEqual(6, len(kwargs.keys()))
 
 
+class TestVerifiedHTTPSConnection(base.TestCase):
+
+    @mock.patch('socket.socket')
+    @mock.patch('tempest.common.glance_http.OpenSSLConnectionDelegator')
+    def test_connect_ipv4(self, mock_delegator, mock_socket):
+        connection = glance_http.VerifiedHTTPSConnection('127.0.0.1')
+        connection.connect()
+
+        mock_socket.assert_called_once_with(socket.AF_INET, socket.SOCK_STREAM)
+        mock_delegator.assert_called_once_with(connection.context,
+                                               mock_socket.return_value)
+        mock_delegator.return_value.connect.assert_called_once_with(
+            (connection.host, 443))
+
+    @mock.patch('socket.socket')
+    @mock.patch('tempest.common.glance_http.OpenSSLConnectionDelegator')
+    def test_connect_ipv6(self, mock_delegator, mock_socket):
+        connection = glance_http.VerifiedHTTPSConnection('[::1]')
+        connection.connect()
+
+        mock_socket.assert_called_once_with(socket.AF_INET6,
+                                            socket.SOCK_STREAM)
+        mock_delegator.assert_called_once_with(connection.context,
+                                               mock_socket.return_value)
+        mock_delegator.return_value.connect.assert_called_once_with(
+            (connection.host, 443, 0, 0))
+
+    @mock.patch('tempest.common.glance_http.OpenSSLConnectionDelegator')
+    @mock.patch('socket.getaddrinfo',
+                side_effect=OSError('getaddrinfo failed'))
+    def test_connect_with_address_lookup_failure(self, mock_getaddrinfo,
+                                                 mock_delegator):
+        connection = glance_http.VerifiedHTTPSConnection('127.0.0.1')
+        self.assertRaises(exceptions.RestClientException, connection.connect)
+
+        mock_getaddrinfo.assert_called_once_with(
+            connection.host, connection.port, 0, socket.SOCK_STREAM)
+
+    @mock.patch('socket.socket')
+    @mock.patch('socket.getaddrinfo',
+                return_value=[(2, 1, 6, '', ('127.0.0.1', 443))])
+    @mock.patch('tempest.common.glance_http.OpenSSLConnectionDelegator')
+    def test_connect_with_socket_failure(self, mock_delegator,
+                                         mock_getaddrinfo,
+                                         mock_socket):
+        mock_delegator.return_value.connect.side_effect = \
+            OSError('Connect failed')
+
+        connection = glance_http.VerifiedHTTPSConnection('127.0.0.1')
+        self.assertRaises(exceptions.RestClientException, connection.connect)
+
+        mock_getaddrinfo.assert_called_once_with(
+            connection.host, connection.port, 0, socket.SOCK_STREAM)
+        mock_socket.assert_called_once_with(socket.AF_INET, socket.SOCK_STREAM)
+        mock_delegator.return_value.connect.\
+            assert_called_once_with((connection.host, 443))
+
+
 class TestResponseBodyIterator(base.TestCase):
 
     def test_iter_default_chunk_size_64k(self):
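
The new ``TestVerifiedHTTPSConnection`` cases above pin down how ``connect()``
is expected to pick the socket family: resolve the host first, then open a
socket matching the resolved family and hand it to the OpenSSL delegator. A
minimal sketch of that address-family selection (not the actual
``glance_http`` implementation)::

    import socket

    def _connect_sketch(host, port=443):
        # getaddrinfo returns (family, type, proto, canonname, sockaddr);
        # for IPv6 the sockaddr is a 4-tuple, matching the assertions above.
        family, _, _, _, sockaddr = socket.getaddrinfo(
            host, port, 0, socket.SOCK_STREAM)[0]
        sock = socket.socket(family, socket.SOCK_STREAM)
        sock.connect(sockaddr)
        return sock
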
diff --git a/tempest/tests/test_hacking.py b/tempest/tests/test_hacking.py
index 55f00ef..aba2aab 100644
--- a/tempest/tests/test_hacking.py
+++ b/tempest/tests/test_hacking.py
@@ -147,3 +147,23 @@
             " @testtools.skipUnless(CONF.something, 'msg')"))))
         self.assertEqual(0, len(list(checks.no_testtools_skip_decorator(
             " @testtools.skipIf(CONF.something, 'msg')"))))
+
+    def test_dont_import_local_tempest_code_into_lib(self):
+        self.assertEqual(0, len(list(checks.dont_import_local_tempest_into_lib(
+            "from tempest.common import waiters",
+            './tempest/common/compute.py'))))
+        self.assertEqual(0, len(list(checks.dont_import_local_tempest_into_lib(
+            "from tempest import config",
+            './tempest/common/compute.py'))))
+        self.assertEqual(0, len(list(checks.dont_import_local_tempest_into_lib(
+            "import tempest.exception",
+            './tempest/common/compute.py'))))
+        self.assertEqual(1, len(list(checks.dont_import_local_tempest_into_lib(
+            "from tempest.common import waiters",
+            './tempest/lib/common/compute.py'))))
+        self.assertEqual(1, len(list(checks.dont_import_local_tempest_into_lib(
+            "from tempest import config",
+            './tempest/lib/common/compute.py'))))
+        self.assertEqual(1, len(list(checks.dont_import_local_tempest_into_lib(
+            "import tempest.exception",
+            './tempest/lib/common/compute.py'))))
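
These expectations describe the T112 check: any import of local tempest code
is flagged when it appears in a file under ``tempest/lib/``, while
``tempest.lib`` imports and files outside the library are left alone. A
minimal sketch of how such a hacking check could be structured (the regex and
body are an assumption, not the actual ``tempest/hacking/checks.py``
implementation)::

    import re

    # Assumed pattern: `import tempest.X` or `from tempest[.X] import`,
    # excluding tempest.lib itself.
    _LOCAL_TEMPEST_IMPORT = re.compile(
        r'(from|import)\s+tempest(\.(?!lib)|\s+import)')

    def dont_import_local_tempest_into_lib(logical_line, filename):
        """T112: tempest.lib should not import local tempest code."""
        if 'tempest/lib/' not in filename:
            return
        if _LOCAL_TEMPEST_IMPORT.search(logical_line):
            yield (0, 'T112: tempest.lib should not import local tempest')
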