Merge "Split out Neutron security groups client"
diff --git a/README.rst b/README.rst
index 45cb4c0..71e185f 100644
--- a/README.rst
+++ b/README.rst
@@ -87,7 +87,7 @@
    be done with testr directly or any `testr`_ based test runner, like
    `ostestr`_. For example, from the working dir running::
 
-     $ ostestr --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty))'
+     $ ostestr --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario))'
 
    will run the same set of tests as the default gate jobs.
 
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 1b2b6d2..a16f3b7 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -9,16 +9,6 @@
 config file which explains the purpose of each individual option. You can see
 the sample config file here: :ref:`tempest-sampleconf`
 
-Lock Path
----------
-
-There are some tests and operations inside of tempest that need to be
-externally locked when running in parallel to prevent them from running at
-the same time. This is a mandatory step for configuring tempest and is still
-needed even when running serially. All that is needed to do this is:
-
- #. Set the lock_path option in the oslo_concurrency group
-
 Auth/Credentials
 ----------------
 
diff --git a/doc/source/field_guide/thirdparty.rst b/doc/source/field_guide/thirdparty.rst
deleted file mode 120000
index 3fd6a51..0000000
--- a/doc/source/field_guide/thirdparty.rst
+++ /dev/null
@@ -1 +0,0 @@
-../../../tempest/thirdparty/README.rst
\ No newline at end of file
diff --git a/doc/source/index.rst b/doc/source/index.rst
index fe6074f..32e6e51 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -26,7 +26,6 @@
    field_guide/api
    field_guide/scenario
    field_guide/stress
-   field_guide/thirdparty
    field_guide/unit_tests
 
 ---------------------------
diff --git a/requirements.txt b/requirements.txt
index d470c30..1f225c3 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -7,7 +7,6 @@
 httplib2>=0.7.5
 jsonschema!=2.5.0,<3.0.0,>=2.0.0
 testtools>=1.4.0
-boto>=2.32.1
 paramiko>=1.13.0
 netaddr!=0.7.16,>=0.7.12
 testrepository>=0.0.18
@@ -22,6 +21,6 @@
 iso8601>=0.1.9
 fixtures>=1.3.1
 testscenarios>=0.4
-tempest-lib>=0.11.0
+tempest-lib>=0.12.0
 PyYAML>=3.1.0
 stevedore>=1.5.0 # Apache-2.0
diff --git a/run_tempest.sh b/run_tempest.sh
index a704684..8c8f25f 100755
--- a/run_tempest.sh
+++ b/run_tempest.sh
@@ -14,8 +14,6 @@
   echo "  -C, --config             Config file location"
   echo "  -h, --help               Print this usage message"
   echo "  -d, --debug              Run tests with testtools instead of testr. This allows you to use PDB"
-  echo "  -l, --logging            Enable logging"
-  echo "  -L, --logging-config     Logging config file location.  Default is etc/logging.conf"
   echo "  -- [TESTROPTIONS]        After the first '--' you can pass arbitrary arguments to testr "
 }
 
@@ -31,10 +29,8 @@
 wrapper=""
 config_file=""
 update=0
-logging=0
-logging_config=etc/logging.conf
 
-if ! options=$(getopt -o VNnfusthdC:lL: -l virtual-env,no-virtual-env,no-site-packages,force,update,smoke,serial,help,debug,config:,logging,logging-config: -- "$@")
+if ! options=$(getopt -o VNnfusthdC: -l virtual-env,no-virtual-env,no-site-packages,force,update,smoke,serial,help,debug,config: -- "$@")
 then
     # parse error
     usage
@@ -55,8 +51,6 @@
     -C|--config) config_file=$2; shift;;
     -s|--smoke) testrargs+="smoke";;
     -t|--serial) serial=1;;
-    -l|--logging) logging=1;;
-    -L|--logging-config) logging_config=$2; shift;;
     --) [ "yes" == "$first_uu" ] || testrargs="$testrargs $1"; first_uu=no  ;;
     *) testrargs="$testrargs $1";;
   esac
@@ -69,16 +63,6 @@
     export TEMPEST_CONFIG=`basename "$config_file"`
 fi
 
-if [ $logging -eq 1 ]; then
-    if [ ! -f "$logging_config" ]; then
-        echo "No such logging config file: $logging_config"
-        exit 1
-    fi
-    logging_config=`readlink -f "$logging_config"`
-    export TEMPEST_LOG_CONFIG_DIR=`dirname "$logging_config"`
-    export TEMPEST_LOG_CONFIG=`basename "$logging_config"`
-fi
-
 cd `dirname "$0"`
 
 if [ $no_site_packages -eq 1 ]; then
diff --git a/tempest/README.rst b/tempest/README.rst
index f93a173..113b191 100644
--- a/tempest/README.rst
+++ b/tempest/README.rst
@@ -16,7 +16,6 @@
 |    api/ - API tests
 |    scenario/ - complex scenario tests
 |    stress/ - stress tests
-|    thirdparty/ - 3rd party api tests
 
 Each of these directories contains different types of tests. What
 belongs in each directory, the rules and examples for good tests, are
@@ -56,14 +55,6 @@
 several test jobs in parallel and can run any existing test in Tempest as a
 stress job.
 
-:ref:`third_party_field_guide`
-------------------------------
-
-Many openstack components include 3rdparty API support. It is
-completely legitimate for Tempest to include tests of 3rdparty APIs,
-but those should be kept separate from the normal OpenStack
-validation.
-
 :ref:`unit_tests_field_guide`
 -----------------------------
 
diff --git a/tempest/api/identity/admin/v3/test_endpoints.py b/tempest/api/identity/admin/v3/test_endpoints.py
index 429e2e3..2ac832e 100644
--- a/tempest/api/identity/admin/v3/test_endpoints.py
+++ b/tempest/api/identity/admin/v3/test_endpoints.py
@@ -34,8 +34,8 @@
         s_type = data_utils.rand_name('type')
         s_description = data_utils.rand_name('description')
         cls.service_data = (
-            cls.service_client.create_service(name=s_name, type=s_type,
-                                              description=s_description))
+            cls.services_client.create_service(name=s_name, type=s_type,
+                                               description=s_description))
         cls.service_data = cls.service_data['service']
         cls.service_id = cls.service_data['id']
         cls.service_ids.append(cls.service_id)
@@ -56,7 +56,7 @@
         for e in cls.setup_endpoints:
             cls.client.delete_endpoint(e['id'])
         for s in cls.service_ids:
-            cls.service_client.delete_service(s)
+            cls.services_client.delete_service(s)
         super(EndPointsTestJSON, cls).resource_cleanup()
 
     @test.idempotent_id('c19ecf90-240e-4e23-9966-21cee3f6a618')
@@ -113,8 +113,8 @@
         s_type = data_utils.rand_name('type')
         s_description = data_utils.rand_name('description')
         service2 = (
-            self.service_client.create_service(name=s_name, type=s_type,
-                                               description=s_description))
+            self.services_client.create_service(name=s_name, type=s_type,
+                                                description=s_description))
         service2 = service2['service']
         self.service_ids.append(service2['id'])
         # Updating endpoint with new values
diff --git a/tempest/api/identity/admin/v3/test_endpoints_negative.py b/tempest/api/identity/admin/v3/test_endpoints_negative.py
index 8f9bf2a..372254f 100644
--- a/tempest/api/identity/admin/v3/test_endpoints_negative.py
+++ b/tempest/api/identity/admin/v3/test_endpoints_negative.py
@@ -37,8 +37,8 @@
         s_type = data_utils.rand_name('type')
         s_description = data_utils.rand_name('description')
         cls.service_data = (
-            cls.service_client.create_service(name=s_name, type=s_type,
-                                              description=s_description)
+            cls.services_client.create_service(name=s_name, type=s_type,
+                                               description=s_description)
             ['service'])
         cls.service_id = cls.service_data['id']
         cls.service_ids.append(cls.service_id)
@@ -46,7 +46,7 @@
     @classmethod
     def resource_cleanup(cls):
         for s in cls.service_ids:
-            cls.service_client.delete_service(s)
+            cls.services_client.delete_service(s)
         super(EndpointsNegativeTestJSON, cls).resource_cleanup()
 
     @test.attr(type=['negative'])
diff --git a/tempest/api/identity/admin/v3/test_services.py b/tempest/api/identity/admin/v3/test_services.py
index d1595dd..c6e3df4 100644
--- a/tempest/api/identity/admin/v3/test_services.py
+++ b/tempest/api/identity/admin/v3/test_services.py
@@ -24,9 +24,9 @@
 
     def _del_service(self, service_id):
         # Used for deleting the services created in this class
-        self.service_client.delete_service(service_id)
+        self.services_client.delete_service(service_id)
         # Checking whether service is deleted successfully
-        self.assertRaises(lib_exc.NotFound, self.service_client.show_service,
+        self.assertRaises(lib_exc.NotFound, self.services_client.show_service,
                           service_id)
 
     @test.attr(type='smoke')
@@ -36,7 +36,7 @@
         name = data_utils.rand_name('service')
         serv_type = data_utils.rand_name('type')
         desc = data_utils.rand_name('description')
-        create_service = self.service_client.create_service(
+        create_service = self.services_client.create_service(
             type=serv_type, name=name, description=desc)['service']
         self.addCleanup(self._del_service, create_service['id'])
         self.assertIsNotNone(create_service['id'])
@@ -49,14 +49,14 @@
         s_id = create_service['id']
         resp1_desc = create_service['description']
         s_desc2 = data_utils.rand_name('desc2')
-        update_service = self.service_client.update_service(
+        update_service = self.services_client.update_service(
             s_id, description=s_desc2)['service']
         resp2_desc = update_service['description']
 
         self.assertNotEqual(resp1_desc, resp2_desc)
 
         # Get service
-        fetched_service = self.service_client.show_service(s_id)['service']
+        fetched_service = self.services_client.show_service(s_id)['service']
         resp3_desc = fetched_service['description']
 
         self.assertEqual(resp2_desc, resp3_desc)
@@ -67,9 +67,9 @@
         # Create a service only with name and type
         name = data_utils.rand_name('service')
         serv_type = data_utils.rand_name('type')
-        service = self.service_client.create_service(
+        service = self.services_client.create_service(
             type=serv_type, name=name)['service']
-        self.addCleanup(self.service_client.delete_service, service['id'])
+        self.addCleanup(self.services_client.delete_service, service['id'])
         self.assertIn('id', service)
         expected_data = {'name': name, 'type': serv_type}
         self.assertDictContainsSubset(expected_data, service)
@@ -81,14 +81,14 @@
         for _ in range(3):
             name = data_utils.rand_name('service')
             serv_type = data_utils.rand_name('type')
-            create_service = self.service_client.create_service(
+            create_service = self.services_client.create_service(
                 type=serv_type, name=name)['service']
-            self.addCleanup(self.service_client.delete_service,
+            self.addCleanup(self.services_client.delete_service,
                             create_service['id'])
             service_ids.append(create_service['id'])
 
         # List and Verify Services
-        services = self.service_client.list_services()['services']
+        services = self.services_client.list_services()['services']
         fetched_ids = [service['id'] for service in services]
         found = [s for s in fetched_ids if s in service_ids]
         self.assertEqual(len(found), len(service_ids))
diff --git a/tempest/api/identity/base.py b/tempest/api/identity/base.py
index 1a4c8bb..d98e3b1 100644
--- a/tempest/api/identity/base.py
+++ b/tempest/api/identity/base.py
@@ -144,7 +144,7 @@
         cls.token = cls.os_adm.token_v3_client
         cls.endpoints_client = cls.os_adm.endpoints_client
         cls.region_client = cls.os_adm.region_client
-        cls.service_client = cls.os_adm.service_client
+        cls.services_client = cls.os_adm.identity_services_client
         cls.policy_client = cls.os_adm.policy_client
         cls.creds_client = cls.os_adm.credentials_client
         cls.groups_client = cls.os_adm.groups_client
diff --git a/tempest/api/image/base.py b/tempest/api/image/base.py
index da0ce83..c3205ce 100644
--- a/tempest/api/image/base.py
+++ b/tempest/api/image/base.py
@@ -70,8 +70,10 @@
         container_format = kwargs.pop('container_format')
         disk_format = kwargs.pop('disk_format')
 
-        image = cls.client.create_image(name, container_format,
-                                        disk_format, **kwargs)
+        image = cls.client.create_image(name=name,
+                                        container_format=container_format,
+                                        disk_format=disk_format,
+                                        **kwargs)
         # Image objects returned by the v1 client have the image
         # data inside a dict that is keyed against 'image'.
         if 'image' in image:
@@ -156,7 +158,7 @@
 
     def _create_image(self):
         name = data_utils.rand_name('image')
-        image = self.os_img_client.create_image(name,
+        image = self.os_img_client.create_image(name=name,
                                                 container_format='bare',
                                                 disk_format='raw')
         image_id = image['id']
diff --git a/tempest/api/image/v2/test_images_negative.py b/tempest/api/image/v2/test_images_negative.py
index 71c8c7a..485942e 100644
--- a/tempest/api/image/v2/test_images_negative.py
+++ b/tempest/api/image/v2/test_images_negative.py
@@ -90,10 +90,12 @@
     def test_register_with_invalid_container_format(self):
         # Negative tests for invalid data supplied to POST /images
         self.assertRaises(lib_exc.BadRequest, self.client.create_image,
-                          'test', 'wrong', 'vhd')
+                          name='test', container_format='wrong',
+                          disk_format='vhd')
 
     @test.attr(type=['negative'])
     @test.idempotent_id('70c6040c-5a97-4111-9e13-e73665264ce1')
     def test_register_with_invalid_disk_format(self):
         self.assertRaises(lib_exc.BadRequest, self.client.create_image,
-                          'test', 'bare', 'wrong')
+                          name='test', container_format='bare',
+                          disk_format='wrong')
diff --git a/tempest/api/network/admin/test_agent_management.py b/tempest/api/network/admin/test_agent_management.py
index c5d0d57..64802aa 100644
--- a/tempest/api/network/admin/test_agent_management.py
+++ b/tempest/api/network/admin/test_agent_management.py
@@ -63,7 +63,7 @@
         # one to avoid the negative effect.
         agent_status = {'admin_state_up': origin_status}
         body = self.admin_client.update_agent(agent_id=self.agent['id'],
-                                              agent_info=agent_status)
+                                              agent=agent_status)
         updated_status = body['agent']['admin_state_up']
         self.assertEqual(origin_status, updated_status)
 
@@ -73,7 +73,7 @@
         description = 'description for update agent.'
         agent_description = {'description': description}
         body = self.admin_client.update_agent(agent_id=self.agent['id'],
-                                              agent_info=agent_description)
+                                              agent=agent_description)
         self.addCleanup(self._restore_agent)
         updated_description = body['agent']['description']
         self.assertEqual(updated_description, description)
@@ -84,4 +84,4 @@
         description = self.agent['description'] or ''
         origin_agent = {'description': description}
         self.admin_client.update_agent(agent_id=self.agent['id'],
-                                       agent_info=origin_agent)
+                                       agent=origin_agent)
diff --git a/tempest/api/network/admin/test_dhcp_agent_scheduler.py b/tempest/api/network/admin/test_dhcp_agent_scheduler.py
index 7692b56..f186b36 100644
--- a/tempest/api/network/admin/test_dhcp_agent_scheduler.py
+++ b/tempest/api/network/admin/test_dhcp_agent_scheduler.py
@@ -92,6 +92,6 @@
 
     def _add_dhcp_agent_to_network(self, network_id, agent):
         self.admin_client.add_dhcp_agent_to_network(agent['id'],
-                                                    network_id)
+                                                    network_id=network_id)
         self.assertTrue(self._check_network_in_dhcp_agent(
             network_id, agent))
diff --git a/tempest/api/network/test_routers.py b/tempest/api/network/test_routers.py
index ed191b6..406ad44 100644
--- a/tempest/api/network/test_routers.py
+++ b/tempest/api/network/test_routers.py
@@ -301,7 +301,7 @@
 
         test_routes.sort(key=lambda x: x['destination'])
         extra_route = self.client.update_extra_routes(router['id'],
-                                                      test_routes)
+                                                      routes=test_routes)
         show_body = self.client.show_router(router['id'])
         # Assert the number of routes
         self.assertEqual(routes_num, len(extra_route['router']['routes']))
diff --git a/tempest/api/telemetry/base.py b/tempest/api/telemetry/base.py
index bbd01f0..bbf6db2 100644
--- a/tempest/api/telemetry/base.py
+++ b/tempest/api/telemetry/base.py
@@ -75,10 +75,11 @@
         return body
 
     @classmethod
-    def create_image(cls, client):
-        body = client.create_image(
-            data_utils.rand_name('image'), container_format='bare',
-            disk_format='raw', visibility='private')
+    def create_image(cls, client, **kwargs):
+        body = client.create_image(name=data_utils.rand_name('image'),
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   **kwargs)
         # TODO(jswarren) Move ['image'] up to initial body value assignment
         # once both v1 and v2 glance clients include the full response
         # object.
diff --git a/tempest/api/telemetry/test_telemetry_notification_api.py b/tempest/api/telemetry/test_telemetry_notification_api.py
index 7511505..a575125 100644
--- a/tempest/api/telemetry/test_telemetry_notification_api.py
+++ b/tempest/api/telemetry/test_telemetry_notification_api.py
@@ -39,7 +39,7 @@
     @testtools.skipIf(not CONF.image_feature_enabled.api_v1,
                       "Glance api v1 is disabled")
     def test_check_glance_v1_notifications(self):
-        body = self.create_image(self.image_client)
+        body = self.create_image(self.image_client, is_public=False)
         self.image_client.update_image(body['id'], data='data')
 
         query = 'resource', 'eq', body['id']
@@ -55,7 +55,7 @@
     @testtools.skipIf(not CONF.image_feature_enabled.api_v2,
                       "Glance api v2 is disabled")
     def test_check_glance_v2_notifications(self):
-        body = self.create_image(self.image_client_v2)
+        body = self.create_image(self.image_client_v2, visibility='private')
 
         self.image_client_v2.store_image_file(body['id'], "file")
         self.image_client_v2.show_image_file(body['id'])
diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py
index 6d2aaea..8e43b00 100644
--- a/tempest/api/volume/admin/test_multi_backend.py
+++ b/tempest/api/volume/admin/test_multi_backend.py
@@ -71,7 +71,7 @@
         else:
             extra_specs = {spec_key_without_prefix: backend_name_key}
         self.type = self.volume_types_client.create_volume_type(
-            type_name, extra_specs=extra_specs)['volume_type']
+            name=type_name, extra_specs=extra_specs)['volume_type']
         self.volume_type_id_list.append(self.type['id'])
 
         params = {self.name_field: vol_name, 'volume_type': type_name}
diff --git a/tempest/api/volume/admin/test_volume_types.py b/tempest/api/volume/admin/test_volume_types.py
index 2d9019a..acb591d 100644
--- a/tempest/api/volume/admin/test_volume_types.py
+++ b/tempest/api/volume/admin/test_volume_types.py
@@ -32,7 +32,7 @@
 
     @test.idempotent_id('9d9b28e3-1b2e-4483-a2cc-24aa0ea1de54')
     def test_volume_type_list(self):
-        # List Volume types.
+        # List volume types.
         body = self.volume_types_client.list_volume_types()['volume_types']
         self.assertIsInstance(body, list)
 
@@ -50,7 +50,7 @@
         for i in range(2):
             vol_type_name = data_utils.rand_name("volume-type")
             vol_type = self.volume_types_client.create_volume_type(
-                vol_type_name,
+                name=vol_type_name,
                 extra_specs=extra_specs)['volume_type']
             volume_types.append(vol_type)
             self.addCleanup(self._delete_volume_type, vol_type['id'])
@@ -97,7 +97,7 @@
         extra_specs = {"storage_protocol": proto,
                        "vendor_name": vendor}
         body = self.volume_types_client.create_volume_type(
-            name,
+            name=name,
             extra_specs=extra_specs)['volume_type']
         self.assertIn('id', body)
         self.addCleanup(self._delete_volume_type, body['id'])
@@ -125,7 +125,8 @@
         provider = "LuksEncryptor"
         control_location = "front-end"
         name = data_utils.rand_name("volume-type")
-        body = self.volume_types_client.create_volume_type(name)['volume_type']
+        body = self.volume_types_client.create_volume_type(
+            name=name)['volume_type']
         self.addCleanup(self._delete_volume_type, body['id'])
 
         # Create encryption type
diff --git a/tempest/api/volume/admin/test_volume_types_extra_specs.py b/tempest/api/volume/admin/test_volume_types_extra_specs.py
index bec803c..502cd86 100644
--- a/tempest/api/volume/admin/test_volume_types_extra_specs.py
+++ b/tempest/api/volume/admin/test_volume_types_extra_specs.py
@@ -25,7 +25,7 @@
         super(VolumeTypesExtraSpecsV2Test, cls).resource_setup()
         vol_type_name = data_utils.rand_name('Volume-type')
         cls.volume_type = cls.volume_types_client.create_volume_type(
-            vol_type_name)['volume_type']
+            name=vol_type_name)['volume_type']
 
     @classmethod
     def resource_cleanup(cls):
diff --git a/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py b/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
index 040ef53..6483af3 100644
--- a/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
+++ b/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
@@ -30,7 +30,7 @@
         vol_type_name = data_utils.rand_name('Volume-type')
         cls.extra_specs = {"spec1": "val1"}
         cls.volume_type = cls.volume_types_client.create_volume_type(
-            vol_type_name,
+            name=vol_type_name,
             extra_specs=cls.extra_specs)['volume_type']
 
     @classmethod
@@ -70,7 +70,7 @@
     def test_update_multiple_extra_spec(self):
         # Should not update volume type extra specs with multiple specs as
             # body.
-        extra_spec = {"spec1": "val2", 'spec2': 'val1'}
+        extra_spec = {"spec1": "val2", "spec2": "val1"}
         self.assertRaises(
             lib_exc.BadRequest,
             self.volume_types_client.update_volume_type_extra_specs,
@@ -101,7 +101,7 @@
         self.assertRaises(
             lib_exc.BadRequest,
             self.volume_types_client.create_volume_type_extra_specs,
-            self.volume_type['id'], ['invalid'])
+            self.volume_type['id'], extra_specs=['invalid'])
 
     @test.idempotent_id('031cda8b-7d23-4246-8bf6-bbe73fd67074')
     def test_delete_nonexistent_volume_type_id(self):
diff --git a/tempest/api/volume/admin/test_volume_types_negative.py b/tempest/api/volume/admin/test_volume_types_negative.py
index 2694b63..bc32fc9 100644
--- a/tempest/api/volume/admin/test_volume_types_negative.py
+++ b/tempest/api/volume/admin/test_volume_types_negative.py
@@ -36,7 +36,7 @@
     def test_create_with_empty_name(self):
         # Should not be able to create volume type with an empty name.
         self.assertRaises(lib_exc.BadRequest,
-                          self.volume_types_client.create_volume_type, '')
+                          self.volume_types_client.create_volume_type, name='')
 
     @test.idempotent_id('994610d6-0476-4018-a644-a2602ef5d4aa')
     def test_get_nonexistent_type_id(self):
diff --git a/tempest/api/volume/test_qos.py b/tempest/api/volume/test_qos.py
index 2f7c3df..722a39a 100644
--- a/tempest/api/volume/test_qos.py
+++ b/tempest/api/volume/test_qos.py
@@ -53,7 +53,7 @@
     def _create_test_volume_type(self):
         vol_type_name = utils.rand_name("volume-type")
         vol_type = self.volume_types_client.create_volume_type(
-            vol_type_name)['volume_type']
+            name=vol_type_name)['volume_type']
         self.addCleanup(self.volume_types_client.delete_volume_type,
                         vol_type['id'])
         return vol_type
diff --git a/tempest/api_schema/response/compute/v2_1/floating_ips.py b/tempest/api_schema/response/compute/v2_1/floating_ips.py
deleted file mode 100644
index 3551681..0000000
--- a/tempest/api_schema/response/compute/v2_1/floating_ips.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# Copyright 2014 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest.api_schema.response.compute.v2_1 import parameter_types
-
-common_floating_ip_info = {
-    'type': 'object',
-    'properties': {
-        # NOTE: Now the type of 'id' is integer, but
-        # here allows 'string' also because we will be
-        # able to change it to 'uuid' in the future.
-        'id': {'type': ['integer', 'string']},
-        'pool': {'type': ['string', 'null']},
-        'instance_id': {'type': ['string', 'null']},
-        'ip': parameter_types.ip_address,
-        'fixed_ip': parameter_types.ip_address
-    },
-    'additionalProperties': False,
-    'required': ['id', 'pool', 'instance_id',
-                 'ip', 'fixed_ip'],
-
-}
-list_floating_ips = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'floating_ips': {
-                'type': 'array',
-                'items': common_floating_ip_info
-            },
-        },
-        'additionalProperties': False,
-        'required': ['floating_ips'],
-    }
-}
-
-create_get_floating_ip = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'floating_ip': common_floating_ip_info
-        },
-        'additionalProperties': False,
-        'required': ['floating_ip'],
-    }
-}
-
-list_floating_ip_pools = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'floating_ip_pools': {
-                'type': 'array',
-                'items': {
-                    'type': 'object',
-                    'properties': {
-                        'name': {'type': 'string'}
-                    },
-                    'additionalProperties': False,
-                    'required': ['name'],
-                }
-            }
-        },
-        'additionalProperties': False,
-        'required': ['floating_ip_pools'],
-    }
-}
-
-add_remove_floating_ip = {
-    'status_code': [202]
-}
-
-create_floating_ips_bulk = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'floating_ips_bulk_create': {
-                'type': 'object',
-                'properties': {
-                    'interface': {'type': ['string', 'null']},
-                    'ip_range': {'type': 'string'},
-                    'pool': {'type': ['string', 'null']},
-                },
-                'additionalProperties': False,
-                'required': ['interface', 'ip_range', 'pool'],
-            }
-        },
-        'additionalProperties': False,
-        'required': ['floating_ips_bulk_create'],
-    }
-}
-
-delete_floating_ips_bulk = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'floating_ips_bulk_delete': {'type': 'string'}
-        },
-        'additionalProperties': False,
-        'required': ['floating_ips_bulk_delete'],
-    }
-}
-
-list_floating_ips_bulk = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'floating_ip_info': {
-                'type': 'array',
-                'items': {
-                    'type': 'object',
-                    'properties': {
-                        'address': parameter_types.ip_address,
-                        'instance_uuid': {'type': ['string', 'null']},
-                        'interface': {'type': ['string', 'null']},
-                        'pool': {'type': ['string', 'null']},
-                        'project_id': {'type': ['string', 'null']},
-                        'fixed_ip': parameter_types.ip_address
-                    },
-                    'additionalProperties': False,
-                    # NOTE: fixed_ip is introduced after JUNO release,
-                    # So it is not defined as 'required'.
-                    'required': ['address', 'instance_uuid', 'interface',
-                                 'pool', 'project_id'],
-                }
-            }
-        },
-        'additionalProperties': False,
-        'required': ['floating_ip_info'],
-    }
-}
diff --git a/tempest/api_schema/response/compute/v2_1/security_groups.py b/tempest/api_schema/response/compute/v2_1/security_groups.py
deleted file mode 100644
index 5ed5a5c..0000000
--- a/tempest/api_schema/response/compute/v2_1/security_groups.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# Copyright 2014 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-common_security_group_rule = {
-    'from_port': {'type': ['integer', 'null']},
-    'to_port': {'type': ['integer', 'null']},
-    'group': {
-        'type': 'object',
-        'properties': {
-            'tenant_id': {'type': 'string'},
-            'name': {'type': 'string'}
-        },
-        'additionalProperties': False,
-    },
-    'ip_protocol': {'type': ['string', 'null']},
-    # 'parent_group_id' can be UUID so defining it as 'string' also.
-    'parent_group_id': {'type': ['string', 'integer', 'null']},
-    'ip_range': {
-        'type': 'object',
-        'properties': {
-            'cidr': {'type': 'string'}
-        },
-        'additionalProperties': False,
-        # When optional argument is provided in request body
-        # like 'group_id' then, attribute 'cidr' does not
-        # comes in response body. So it is not 'required'.
-    },
-    'id': {'type': ['string', 'integer']}
-}
-
-common_security_group = {
-    'type': 'object',
-    'properties': {
-        'id': {'type': ['integer', 'string']},
-        'name': {'type': 'string'},
-        'tenant_id': {'type': 'string'},
-        'rules': {
-            'type': 'array',
-            'items': {
-                'type': ['object', 'null'],
-                'properties': common_security_group_rule,
-                'additionalProperties': False,
-            }
-        },
-        'description': {'type': 'string'},
-    },
-    'additionalProperties': False,
-    'required': ['id', 'name', 'tenant_id', 'rules', 'description'],
-}
-
-list_security_groups = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'security_groups': {
-                'type': 'array',
-                'items': common_security_group
-            }
-        },
-        'additionalProperties': False,
-        'required': ['security_groups']
-    }
-}
-
-get_security_group = create_security_group = update_security_group = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'security_group': common_security_group
-        },
-        'additionalProperties': False,
-        'required': ['security_group']
-    }
-}
-
-delete_security_group = {
-    'status_code': [202]
-}
-
-create_security_group_rule = {
-    'status_code': [200],
-    'response_body': {
-        'type': 'object',
-        'properties': {
-            'security_group_rule': {
-                'type': 'object',
-                'properties': common_security_group_rule,
-                'additionalProperties': False,
-                'required': ['from_port', 'to_port', 'group', 'ip_protocol',
-                             'parent_group_id', 'id', 'ip_range']
-            }
-        },
-        'additionalProperties': False,
-        'required': ['security_group_rule']
-    }
-}
-
-delete_security_group_rule = {
-    'status_code': [202]
-}
diff --git a/tempest/clients.py b/tempest/clients.py
index 53f4006..1c56edb 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -32,6 +32,8 @@
     FloatingIPPoolsClient
 from tempest_lib.services.compute.floating_ips_bulk_client import \
     FloatingIPsBulkClient
+from tempest_lib.services.compute.floating_ips_client import \
+    FloatingIPsClient as ComputeFloatingIPsClient
 from tempest_lib.services.compute.hosts_client import HostsClient
 from tempest_lib.services.compute.hypervisor_client import \
     HypervisorClient
@@ -49,8 +51,12 @@
 from tempest_lib.services.compute.quotas_client import QuotasClient
 from tempest_lib.services.compute.security_group_default_rules_client import \
     SecurityGroupDefaultRulesClient
+from tempest_lib.services.compute.security_group_rules_client import \
+    SecurityGroupRulesClient
 from tempest_lib.services.compute.security_groups_client import \
     SecurityGroupsClient as ComputeSecurityGroupsClient
+from tempest_lib.services.compute.server_groups_client import \
+    ServerGroupsClient
 from tempest_lib.services.compute.services_client import ServicesClient
 from tempest_lib.services.compute.snapshots_client import \
     SnapshotsClient as ComputeSnapshotsClient
@@ -70,14 +76,7 @@
 from tempest import manager
 from tempest.services.baremetal.v1.json.baremetal_client import \
     BaremetalClient
-from tempest.services import botoclients
-from tempest.services.compute.json.floating_ips_client import \
-    FloatingIPsClient as ComputeFloatingIPsClient
 from tempest.services.compute.json.keypairs_client import KeyPairsClient
-from tempest.services.compute.json.security_group_rules_client import \
-    SecurityGroupRulesClient
-from tempest.services.compute.json.server_groups_client import \
-    ServerGroupsClient
 from tempest.services.compute.json.servers_client import ServersClient
 from tempest.services.data_processing.v1_1.data_processing_client import \
     DataProcessingClient
@@ -104,8 +103,8 @@
     PolicyClient as PolicyV3Client
 from tempest.services.identity.v3.json.region_client import \
     RegionClient as RegionV3Client
-from tempest.services.identity.v3.json.service_client import \
-    ServiceClient as ServiceV3Client
+from tempest.services.identity.v3.json.services_client import \
+    ServicesClient as IdentityServicesV3Client
 from tempest.services.image.v1.json.images_client import ImagesClient
 from tempest.services.image.v2.json.images_client import ImagesClientV2
 from tempest.services.messaging.json.messaging_client import \
@@ -330,15 +329,6 @@
         self.negative_client = negative_rest_client.NegativeRestClient(
             self.auth_provider, service, **self.default_params)
 
-        # Generating EC2 credentials in tempest is only supported
-        # with identity v2
-        if CONF.identity_feature_enabled.api_v2 and \
-                CONF.identity.auth_version == 'v2':
-            # EC2 and S3 clients, if used, will check configured AWS
-            # credentials and generate new ones if needed
-            self.ec2api_client = botoclients.APIClientEC2(self.identity_client)
-            self.s3_client = botoclients.ObjectClientS3(self.identity_client)
-
     def _set_compute_clients(self):
         params = {
             'service': CONF.compute.catalog_type,
@@ -468,7 +458,8 @@
                                                    **params_v3)
         self.endpoints_client = EndPointV3Client(self.auth_provider,
                                                  **params_v3)
-        self.service_client = ServiceV3Client(self.auth_provider, **params_v3)
+        self.identity_services_client = IdentityServicesV3Client(
+            self.auth_provider, **params_v3)
         self.policy_client = PolicyV3Client(self.auth_provider, **params_v3)
         self.region_client = RegionV3Client(self.auth_provider, **params_v3)
         self.credentials_client = CredentialsV3Client(self.auth_provider,
diff --git a/tempest/cmd/account_generator.py b/tempest/cmd/account_generator.py
index 297a066..b90ee04 100755
--- a/tempest/cmd/account_generator.py
+++ b/tempest/cmd/account_generator.py
@@ -357,7 +357,7 @@
                 resources['users'].append({
                     'tenant': tenant,
                     'name': user,
-                    'pass': data_utils.rand_name(),
+                    'pass': data_utils.rand_password(),
                     'prefix': user_group['prefix'],
                     'roles': user_group['roles']
                 })
diff --git a/tempest/cmd/cleanup.py b/tempest/cmd/cleanup.py
index 3c32d48..b79bff4 100644
--- a/tempest/cmd/cleanup.py
+++ b/tempest/cmd/cleanup.py
@@ -176,14 +176,14 @@
             svc.run()
 
     def _init_admin_ids(self):
-        id_cl = self.admin_mgr.identity_client
+        tn_cl = self.admin_mgr.tenants_client
         rl_cl = self.admin_mgr.roles_client
 
-        tenant = identity.get_tenant_by_name(id_cl,
+        tenant = identity.get_tenant_by_name(tn_cl,
                                              CONF.auth.admin_tenant_name)
         self.admin_tenant_id = tenant['id']
 
-        user = identity.get_user_by_username(id_cl, self.admin_tenant_id,
+        user = identity.get_user_by_username(tn_cl, self.admin_tenant_id,
                                              CONF.auth.admin_username)
         self.admin_id = user['id']
 
@@ -249,9 +249,9 @@
                               "exists, exception: %s" % ex)
 
     def _tenant_exists(self, tenant_id):
-        id_cl = self.admin_mgr.identity_client
+        tn_cl = self.admin_mgr.tenants_client
         try:
-            t = id_cl.show_tenant(tenant_id)
+            t = tn_cl.show_tenant(tenant_id)
             LOG.debug("Tenant is: %s" % str(t))
             return True
         except Exception as ex:
diff --git a/tempest/cmd/cleanup_service.py b/tempest/cmd/cleanup_service.py
index 4158cc3..2aeb5b1 100644
--- a/tempest/cmd/cleanup_service.py
+++ b/tempest/cmd/cleanup_service.py
@@ -856,7 +856,7 @@
             self.data['users'][user['id']] = user['name']
 
 
-class RoleService(IdentityService):
+class RoleService(BaseService):
 
     def __init__(self, manager, **kwargs):
         super(RoleService, self).__init__(kwargs)
@@ -896,7 +896,7 @@
             self.data['roles'][role['id']] = role['name']
 
 
-class TenantService(IdentityService):
+class TenantService(BaseService):
 
     def __init__(self, manager, **kwargs):
         super(TenantService, self).__init__(kwargs)
diff --git a/tempest/cmd/javelin.py b/tempest/cmd/javelin.py
index cdd0044..c0c645c 100755
--- a/tempest/cmd/javelin.py
+++ b/tempest/cmd/javelin.py
@@ -118,14 +118,14 @@
 from tempest_lib import auth
 from tempest_lib import exceptions as lib_exc
 from tempest_lib.services.compute import flavors_client
+from tempest_lib.services.compute import floating_ips_client
+from tempest_lib.services.compute import security_group_rules_client
 from tempest_lib.services.compute import security_groups_client
 import yaml
 
 from tempest.common import identity
 from tempest.common import waiters
 from tempest import config
-from tempest.services.compute.json import floating_ips_client
-from tempest.services.compute.json import security_group_rules_client
 from tempest.services.compute.json import servers_client
 from tempest.services.identity.v2.json import identity_client
 from tempest.services.identity.v2.json import roles_client
diff --git a/tempest/cmd/run_stress.py b/tempest/cmd/run_stress.py
index f99e5d9..5c6f200 100644
--- a/tempest/cmd/run_stress.py
+++ b/tempest/cmd/run_stress.py
@@ -81,6 +81,9 @@
     def take_action(self, pa):
         return action(pa)
 
+    def get_description(self):
+        return 'Run tempest stress tests'
+
 
 def add_arguments(parser):
     parser.add_argument('-d', '--duration', default=300, type=int,
diff --git a/tempest/common/utils/linux/remote_client.py b/tempest/common/utils/linux/remote_client.py
index 6150b4d..b76c356 100644
--- a/tempest/common/utils/linux/remote_client.py
+++ b/tempest/common/utils/linux/remote_client.py
@@ -73,7 +73,7 @@
             return output.split()[1]
 
     def get_number_of_vcpus(self):
-        output = self.exec_command('grep -c processor /proc/cpuinfo')
+        output = self.exec_command('grep -c ^processor /proc/cpuinfo')
         return int(output)
 
     def get_partitions(self):
diff --git a/tempest/config.py b/tempest/config.py
index ac2f2a0..8f2ca4b 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -17,9 +17,10 @@
 
 import logging as std_logging
 import os
+import tempfile
 
+from oslo_concurrency import lockutils
 from oslo_config import cfg
-
 from oslo_log import log as logging
 
 from tempest.test_discover import plugins
@@ -387,9 +388,6 @@
                 default=True,
                 help='Does the test environment support creating snapshot '
                      'images of running instances?'),
-    cfg.BoolOpt('ec2_api',
-                default=True,
-                help='Does the test environment have the ec2 api running?'),
     cfg.BoolOpt('nova_cert',
                 default=True,
                 help='Does the test environment have the nova cert running?'),
@@ -1003,54 +1001,6 @@
                 help="List of enabled data processing plugins")
 ]
 
-
-boto_group = cfg.OptGroup(name='boto',
-                          title='EC2/S3 options')
-BotoGroup = [
-    cfg.StrOpt('ec2_url',
-               default="http://localhost:8773/services/Cloud",
-               help="EC2 URL"),
-    cfg.StrOpt('s3_url',
-               default="http://localhost:8080",
-               help="S3 URL"),
-    cfg.StrOpt('aws_secret',
-               help="AWS Secret Key",
-               secret=True),
-    cfg.StrOpt('aws_access',
-               help="AWS Access Key"),
-    cfg.StrOpt('aws_zone',
-               default="nova",
-               help="AWS Zone for EC2 tests"),
-    cfg.StrOpt('s3_materials_path',
-               default="/opt/stack/devstack/files/images/"
-                       "s3-materials/cirros-0.3.0",
-               help="S3 Materials Path"),
-    cfg.StrOpt('ari_manifest',
-               default="cirros-0.3.0-x86_64-initrd.manifest.xml",
-               help="ARI Ramdisk Image manifest"),
-    cfg.StrOpt('ami_manifest',
-               default="cirros-0.3.0-x86_64-blank.img.manifest.xml",
-               help="AMI Machine Image manifest"),
-    cfg.StrOpt('aki_manifest',
-               default="cirros-0.3.0-x86_64-vmlinuz.manifest.xml",
-               help="AKI Kernel Image manifest"),
-    cfg.StrOpt('instance_type',
-               default="m1.tiny",
-               help="Instance type"),
-    cfg.IntOpt('http_socket_timeout',
-               default=3,
-               help="boto Http socket timeout"),
-    cfg.IntOpt('num_retries',
-               default=1,
-               help="boto num_retries on error"),
-    cfg.IntOpt('build_timeout',
-               default=60,
-               help="Status Change Timeout"),
-    cfg.IntOpt('build_interval',
-               default=1,
-               help="Status Change Test Interval"),
-]
-
 stress_group = cfg.OptGroup(name='stress', title='Stress Test Options')
 
 StressGroup = [
@@ -1309,7 +1259,6 @@
     (dashboard_group, DashboardGroup),
     (data_processing_group, DataProcessingGroup),
     (data_processing_feature_group, DataProcessingFeaturesGroup),
-    (boto_group, BotoGroup),
     (stress_group, StressGroup),
     (scenario_group, ScenarioGroup),
     (service_available_group, ServiceAvailableGroup),
@@ -1379,7 +1328,6 @@
         self.data_processing = _CONF['data-processing']
         self.data_processing_feature_enabled = _CONF[
             'data-processing-feature-enabled']
-        self.boto = _CONF.boto
         self.stress = _CONF.stress
         self.scenario = _CONF.scenario
         self.service_available = _CONF.service_available
@@ -1424,6 +1372,13 @@
             _CONF([], project='tempest', default_config_files=config_files)
         else:
             _CONF([], project='tempest')
+
+        logging_cfg_path = "%s/logging.conf" % os.path.dirname(path)
+        if (not hasattr(_CONF, 'log_config_append') and
+            os.path.isfile(logging_cfg_path)):
+            # if logging conf is in place we need to set log_config_append
+            _CONF.log_config_append = logging_cfg_path
+
         logging.setup(_CONF, 'tempest')
         LOG = logging.getLogger('tempest')
         LOG.info("Using tempest config file %s" % path)
@@ -1450,6 +1405,8 @@
     def __getattr__(self, attr):
         if not self._config:
             self._fix_log_levels()
+            lock_dir = os.path.join(tempfile.gettempdir(), 'tempest-lock')
+            lockutils.set_defaults(lock_dir)
             self._config = TempestConfigPrivate(config_path=self._path)
 
         return getattr(self._config, attr)
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 6983036..8a00c65 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -1378,7 +1378,7 @@
         randomized_name = data_utils.rand_name('scenario-type-' + name)
         LOG.debug("Creating a volume type: %s", randomized_name)
         body = client.create_volume_type(
-            randomized_name)['volume_type']
+            name=randomized_name)['volume_type']
         self.assertIn('id', body)
         self.addCleanup(client.delete_volume_type, body['id'])
         return body
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index a304081..44942b0 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -744,15 +744,23 @@
     def test_port_security_macspoofing_port(self):
         """Tests port_security extension enforces mac spoofing
 
-        1. create a new network
-        2. connect VM to new network
-        4. check VM can ping new network DHCP port
-        5. spoof mac on new new network interface
-        6. check Neutron enforces mac spoofing and blocks pings via spoofed
-            interface
-        7. disable port-security on the spoofed port
-        8. check Neutron allows pings via spoofed interface
+        Neutron security groups always apply anti-spoof rules to the VMs. This
+        allows traffic to originate and terminate at the VM as expected, but
+        prevents traffic from passing through the VM. Anti-spoof rules are not
+        required in cases where the VM routes traffic through itself.
+
+        The test steps are:
+        1. Create a new network.
+        2. Connect (hotplug) the VM to the new network.
+        3. Check that the VM can ping the DHCP interface of this network.
+        4. Spoof the MAC address of the new VM interface.
+        5. Check that the security group enforces MAC spoofing and blocks pings
+           via the spoofed interface (the VM cannot ping the DHCP interface).
+        6. Disable port-security on the spoofed port (set the flag to False).
+        7. Retest step 3 and check that the security group now allows pings via
+           the spoofed interface.
         """
+
         spoof_mac = "00:00:00:00:00:01"
 
         # Create server
diff --git a/tempest/services/botoclients.py b/tempest/services/botoclients.py
deleted file mode 100644
index 9d452ff..0000000
--- a/tempest/services/botoclients.py
+++ /dev/null
@@ -1,217 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import contextlib
-import types
-
-import boto
-import boto.ec2
-import boto.s3.connection
-from six.moves import configparser as ConfigParser
-from six.moves.urllib import parse as urlparse
-from tempest_lib import exceptions as lib_exc
-
-from tempest import config
-
-CONF = config.CONF
-
-
-class BotoClientBase(object):
-
-    ALLOWED_METHODS = set()
-
-    def __init__(self, identity_client):
-        self.identity_client = identity_client
-
-        self.ca_cert = CONF.identity.ca_certificates_file
-        self.connection_timeout = str(CONF.boto.http_socket_timeout)
-        self.num_retries = str(CONF.boto.num_retries)
-        self.build_timeout = CONF.boto.build_timeout
-
-        self.connection_data = {}
-
-    def _config_boto_timeout(self, timeout, retries):
-        try:
-            boto.config.add_section("Boto")
-        except ConfigParser.DuplicateSectionError:
-            pass
-        boto.config.set("Boto", "http_socket_timeout", timeout)
-        boto.config.set("Boto", "num_retries", retries)
-
-    def _config_boto_ca_certificates_file(self, ca_cert):
-        if ca_cert is None:
-            return
-
-        try:
-            boto.config.add_section("Boto")
-        except ConfigParser.DuplicateSectionError:
-            pass
-        boto.config.set("Boto", "ca_certificates_file", ca_cert)
-
-    def __getattr__(self, name):
-        """Automatically creates methods for the allowed methods set."""
-        if name in self.ALLOWED_METHODS:
-            def func(self, *args, **kwargs):
-                with contextlib.closing(self.get_connection()) as conn:
-                    return getattr(conn, name)(*args, **kwargs)
-
-            func.__name__ = name
-            setattr(self, name, types.MethodType(func, self, self.__class__))
-            setattr(self.__class__, name,
-                    types.MethodType(func, None, self.__class__))
-            return getattr(self, name)
-        else:
-            raise AttributeError(name)
-
-    def get_connection(self):
-        self._config_boto_timeout(self.connection_timeout, self.num_retries)
-        self._config_boto_ca_certificates_file(self.ca_cert)
-
-        ec2_client_args = {'aws_access_key_id': CONF.boto.aws_access,
-                           'aws_secret_access_key': CONF.boto.aws_secret}
-        if not all(ec2_client_args.values()):
-            ec2_client_args = self.get_aws_credentials(self.identity_client)
-
-        self.connection_data.update(ec2_client_args)
-        return self.connect_method(**self.connection_data)
-
-    def get_aws_credentials(self, identity_client):
-        """Obtain existing, or create new AWS credentials
-
-        :param identity_client: identity client with embedded credentials
-        :return: EC2 credentials
-        """
-        ec2_cred_list = identity_client.list_user_ec2_credentials(
-            identity_client.user_id)['credentials']
-        for cred in ec2_cred_list:
-            if cred['tenant_id'] == identity_client.tenant_id:
-                ec2_cred = cred
-                break
-        else:
-            ec2_cred = (identity_client.create_user_ec2_credentials(
-                identity_client.user_id,
-                tenant_id=identity_client.tenant_id)['credential'])
-        if not all((ec2_cred, ec2_cred['access'], ec2_cred['secret'])):
-            raise lib_exc.NotFound("Unable to get access and secret keys")
-        else:
-            ec2_cred_aws = {}
-            ec2_cred_aws['aws_access_key_id'] = ec2_cred['access']
-            ec2_cred_aws['aws_secret_access_key'] = ec2_cred['secret']
-        return ec2_cred_aws
-
-
-class APIClientEC2(BotoClientBase):
-
-    def connect_method(self, *args, **kwargs):
-        return boto.connect_ec2(*args, **kwargs)
-
-    def __init__(self, identity_client):
-        super(APIClientEC2, self).__init__(identity_client)
-        insecure_ssl = CONF.identity.disable_ssl_certificate_validation
-        purl = urlparse.urlparse(CONF.boto.ec2_url)
-
-        region_name = CONF.compute.region
-        if not region_name:
-            region_name = CONF.identity.region
-        region = boto.ec2.regioninfo.RegionInfo(name=region_name,
-                                                endpoint=purl.hostname)
-        port = purl.port
-        if port is None:
-            if purl.scheme is not "https":
-                port = 80
-            else:
-                port = 443
-        else:
-            port = int(port)
-        self.connection_data.update({"is_secure": purl.scheme == "https",
-                                     "validate_certs": not insecure_ssl,
-                                     "region": region,
-                                     "host": purl.hostname,
-                                     "port": port,
-                                     "path": purl.path})
-
-    ALLOWED_METHODS = set(('create_key_pair', 'get_key_pair',
-                           'delete_key_pair', 'import_key_pair',
-                           'get_all_key_pairs',
-                           'get_all_tags',
-                           'create_image', 'get_image',
-                           'register_image', 'deregister_image',
-                           'get_all_images', 'get_image_attribute',
-                           'modify_image_attribute', 'reset_image_attribute',
-                           'get_all_kernels',
-                           'create_volume', 'delete_volume',
-                           'get_all_volume_status', 'get_all_volumes',
-                           'get_volume_attribute', 'modify_volume_attribute'
-                           'bundle_instance', 'cancel_spot_instance_requests',
-                           'confirm_product_instanc',
-                           'get_all_instance_status', 'get_all_instances',
-                           'get_all_reserved_instances',
-                           'get_all_spot_instance_requests',
-                           'get_instance_attribute', 'monitor_instance',
-                           'monitor_instances', 'unmonitor_instance',
-                           'unmonitor_instances',
-                           'purchase_reserved_instance_offering',
-                           'reboot_instances', 'request_spot_instances',
-                           'reset_instance_attribute', 'run_instances',
-                           'start_instances', 'stop_instances',
-                           'terminate_instances',
-                           'attach_network_interface', 'attach_volume',
-                           'detach_network_interface', 'detach_volume',
-                           'get_console_output',
-                           'delete_network_interface', 'create_subnet',
-                           'create_network_interface', 'delete_subnet',
-                           'get_all_network_interfaces',
-                           'allocate_address', 'associate_address',
-                           'disassociate_address', 'get_all_addresses',
-                           'release_address',
-                           'create_snapshot', 'delete_snapshot',
-                           'get_all_snapshots', 'get_snapshot_attribute',
-                           'modify_snapshot_attribute',
-                           'reset_snapshot_attribute', 'trim_snapshots',
-                           'get_all_regions', 'get_all_zones',
-                           'get_all_security_groups', 'create_security_group',
-                           'delete_security_group', 'authorize_security_group',
-                           'authorize_security_group_egress',
-                           'revoke_security_group',
-                           'revoke_security_group_egress'))
-
-
-class ObjectClientS3(BotoClientBase):
-
-    def connect_method(self, *args, **kwargs):
-        return boto.connect_s3(*args, **kwargs)
-
-    def __init__(self, identity_client):
-        super(ObjectClientS3, self).__init__(identity_client)
-        insecure_ssl = CONF.identity.disable_ssl_certificate_validation
-        purl = urlparse.urlparse(CONF.boto.s3_url)
-        port = purl.port
-        if port is None:
-            if purl.scheme is not "https":
-                port = 80
-            else:
-                port = 443
-        else:
-            port = int(port)
-        self.connection_data.update({"is_secure": purl.scheme == "https",
-                                     "validate_certs": not insecure_ssl,
-                                     "host": purl.hostname,
-                                     "port": port,
-                                     "calling_format": boto.s3.connection.
-                                     OrdinaryCallingFormat()})
-
-    ALLOWED_METHODS = set(('create_bucket', 'delete_bucket', 'generate_url',
-                           'get_all_buckets', 'get_bucket', 'delete_key',
-                           'lookup'))
diff --git a/tempest/services/compute/json/floating_ips_client.py b/tempest/services/compute/json/floating_ips_client.py
deleted file mode 100644
index b3e2f2f..0000000
--- a/tempest/services/compute/json/floating_ips_client.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
-from tempest_lib import exceptions as lib_exc
-
-from tempest.api_schema.response.compute.v2_1 import floating_ips as schema
-from tempest.common import service_client
-
-
-class FloatingIPsClient(service_client.ServiceClient):
-
-    def list_floating_ips(self, **params):
-        """Returns a list of all floating IPs filtered by any parameters."""
-        url = 'os-floating-ips'
-        if params:
-            url += '?%s' % urllib.urlencode(params)
-
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.validate_response(schema.list_floating_ips, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def show_floating_ip(self, floating_ip_id):
-        """Get the details of a floating IP."""
-        url = "os-floating-ips/%s" % floating_ip_id
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.validate_response(schema.create_get_floating_ip, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def create_floating_ip(self, **kwargs):
-        """Allocate a floating IP to the project.
-
-        Available params: see http://developer.openstack.org/
-                              api-ref-compute-v2.1.html#createFloatingIP
-        """
-        url = 'os-floating-ips'
-        post_body = json.dumps(kwargs)
-        resp, body = self.post(url, post_body)
-        body = json.loads(body)
-        self.validate_response(schema.create_get_floating_ip, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def delete_floating_ip(self, floating_ip_id):
-        """Deletes the provided floating IP from the project."""
-        url = "os-floating-ips/%s" % floating_ip_id
-        resp, body = self.delete(url)
-        self.validate_response(schema.add_remove_floating_ip, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def associate_floating_ip_to_server(self, floating_ip, server_id):
-        """Associate the provided floating IP to a specific server."""
-        url = "servers/%s/action" % server_id
-        post_body = {
-            'addFloatingIp': {
-                'address': floating_ip,
-            }
-        }
-
-        post_body = json.dumps(post_body)
-        resp, body = self.post(url, post_body)
-        self.validate_response(schema.add_remove_floating_ip, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def disassociate_floating_ip_from_server(self, floating_ip, server_id):
-        """Disassociate the provided floating IP from a specific server."""
-        url = "servers/%s/action" % server_id
-        post_body = {
-            'removeFloatingIp': {
-                'address': floating_ip,
-            }
-        }
-
-        post_body = json.dumps(post_body)
-        resp, body = self.post(url, post_body)
-        self.validate_response(schema.add_remove_floating_ip, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def is_resource_deleted(self, id):
-        try:
-            self.show_floating_ip(id)
-        except lib_exc.NotFound:
-            return True
-        return False
-
-    @property
-    def resource_type(self):
-        """Returns the primary type of resource this client works with."""
-        return 'floating_ip'
diff --git a/tempest/services/compute/json/security_group_rules_client.py b/tempest/services/compute/json/security_group_rules_client.py
deleted file mode 100644
index 314b1ed..0000000
--- a/tempest/services/compute/json/security_group_rules_client.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-
-from tempest.api_schema.response.compute.v2_1 import security_groups as schema
-from tempest.common import service_client
-
-
-class SecurityGroupRulesClient(service_client.ServiceClient):
-
-    def create_security_group_rule(self, **kwargs):
-        """Creating a new security group rules.
-
-        parent_group_id :ID of Security group
-        ip_protocol : ip_proto (icmp, tcp, udp).
-        from_port: Port at start of range.
-        to_port  : Port at end of range.
-        Following optional keyword arguments are accepted:
-        cidr     : CIDR for address range.
-        group_id : ID of the Source group
-        """
-        post_body = json.dumps({'security_group_rule': kwargs})
-        url = 'os-security-group-rules'
-        resp, body = self.post(url, post_body)
-        body = json.loads(body)
-        self.validate_response(schema.create_security_group_rule, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def delete_security_group_rule(self, group_rule_id):
-        """Deletes the provided Security Group rule."""
-        resp, body = self.delete('os-security-group-rules/%s' %
-                                 group_rule_id)
-        self.validate_response(schema.delete_security_group_rule, resp, body)
-        return service_client.ResponseBody(resp, body)
diff --git a/tempest/services/compute/json/server_groups_client.py b/tempest/services/compute/json/server_groups_client.py
deleted file mode 100644
index 44ac015..0000000
--- a/tempest/services/compute/json/server_groups_client.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# Copyright 2013 Hewlett-Packard Development Company, L.P.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-
-from tempest.api_schema.response.compute.v2_1 import servers as schema
-from tempest.common import service_client
-
-
-class ServerGroupsClient(service_client.ServiceClient):
-
-    def create_server_group(self, **kwargs):
-        """Create the server group
-
-        name : Name of the server-group
-        policies : List of the policies - affinity/anti-affinity)
-        """
-        post_body = json.dumps({'server_group': kwargs})
-        resp, body = self.post('os-server-groups', post_body)
-
-        body = json.loads(body)
-        self.validate_response(schema.create_show_server_group, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def delete_server_group(self, server_group_id):
-        """Delete the given server-group."""
-        resp, body = self.delete("os-server-groups/%s" % server_group_id)
-        self.validate_response(schema.delete_server_group, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def list_server_groups(self):
-        """List the server-groups."""
-        resp, body = self.get("os-server-groups")
-        body = json.loads(body)
-        self.validate_response(schema.list_server_groups, resp, body)
-        return service_client.ResponseBody(resp, body)
-
-    def show_server_group(self, server_group_id):
-        """Get the details of given server_group."""
-        resp, body = self.get("os-server-groups/%s" % server_group_id)
-        body = json.loads(body)
-        self.validate_response(schema.create_show_server_group, resp, body)
-        return service_client.ResponseBody(resp, body)
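
The three compute service clients removed above (floating IPs, security group
rules, server groups) are not dropped outright: presumably they are now
consumed from tempest-lib instead. The module paths in the sketch below are an
assumption about tempest-lib's layout rather than something shown in this
diff; the (auth_provider, service, region) constructor mirrors the unit tests
removed further down in this change::

    # Assumed tempest-lib locations for the clients deleted above; the
    # FakeAuthProvider is the same stand-in auth provider the removed
    # unit tests used.
    from tempest_lib.services.compute import floating_ips_client
    from tempest_lib.services.compute import security_group_rules_client
    from tempest_lib.services.compute import server_groups_client

    from tempest.tests import fake_auth_provider

    auth = fake_auth_provider.FakeAuthProvider()
    floating_ips = floating_ips_client.FloatingIPsClient(
        auth, 'compute', 'regionOne')
    sg_rules = security_group_rules_client.SecurityGroupRulesClient(
        auth, 'compute', 'regionOne')
    server_groups = server_groups_client.ServerGroupsClient(
        auth, 'compute', 'regionOne')
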
diff --git a/tempest/services/identity/v3/json/service_client.py b/tempest/services/identity/v3/json/services_client.py
similarity index 97%
rename from tempest/services/identity/v3/json/service_client.py
rename to tempest/services/identity/v3/json/services_client.py
index 3dbfe5e..dd65f1d 100644
--- a/tempest/services/identity/v3/json/service_client.py
+++ b/tempest/services/identity/v3/json/services_client.py
@@ -22,7 +22,7 @@
 from tempest.common import service_client
 
 
-class ServiceClient(service_client.ServiceClient):
+class ServicesClient(service_client.ServiceClient):
     api_version = "v3"
 
     def update_service(self, service_id, **kwargs):
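
The identity v3 service client is only renamed here (service_client.py becomes
services_client.py, ServiceClient becomes ServicesClient); its behaviour is
unchanged. A minimal usage sketch with the new names, assuming the same
(auth_provider, service, region) constructor used by the other service clients
in this change, with illustrative argument values::

    from tempest.services.identity.v3.json import services_client
    from tempest.tests import fake_auth_provider

    # The service type and region are placeholders, not taken from this diff.
    auth = fake_auth_provider.FakeAuthProvider()
    client = services_client.ServicesClient(auth, 'identity', 'RegionOne')
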
diff --git a/tempest/services/image/v2/json/images_client.py b/tempest/services/image/v2/json/images_client.py
index 44062ea..72b203a 100644
--- a/tempest/services/image/v2/json/images_client.py
+++ b/tempest/services/image/v2/json/images_client.py
@@ -55,6 +55,11 @@
         return self._http
 
     def update_image(self, image_id, patch):
+        """Update an image.
+
+        Available params: see http://developer.openstack.org/
+                              api-ref-image-v2.html#updateImage-v2
+        """
         data = json.dumps(patch)
         headers = {"Content-Type": "application/openstack-images-v2.0"
                                    "-json-patch"}
@@ -63,21 +68,13 @@
         body = json.loads(body)
         return service_client.ResponseBody(resp, body)
 
-    def create_image(self, name, container_format, disk_format, **kwargs):
-        params = {
-            "name": name,
-            "container_format": container_format,
-            "disk_format": disk_format,
-        }
+    def create_image(self, **kwargs):
+        """Create an image.
 
-        for option in kwargs:
-            value = kwargs.get(option)
-            if isinstance(value, dict) or isinstance(value, tuple):
-                params.update(value)
-            else:
-                params[option] = value
-
-        data = json.dumps(params)
+        Available params: see http://developer.openstack.org/
+                              api-ref-image-v2.html#createImage-v2
+        """
+        data = json.dumps(kwargs)
         resp, body = self.post('v2/images', data)
         self.expected_success(201, resp.status)
         body = json.loads(body)
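
With this change create_image() no longer assembles the request body from
positional arguments; whatever keyword arguments the caller supplies are
serialized as the image body. Callers therefore switch from positional to
keyword style. A sketch of the new calling convention ('client' stands for an
already-constructed ImagesClientV2 instance and the field values are
illustrative)::

    # Before: client.create_image('cirros', 'bare', 'qcow2')
    image = client.create_image(name='cirros',
                                container_format='bare',
                                disk_format='qcow2')

    # update_image() still takes a JSON-patch document, sent with the
    # openstack-images-v2.0-json-patch content type shown above.
    client.update_image(image['id'], [{'op': 'replace',
                                       'path': '/name',
                                       'value': 'cirros-renamed'}])
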
diff --git a/tempest/services/network/json/network_client.py b/tempest/services/network/json/network_client.py
index e8ac9cb..f1e80f2 100644
--- a/tempest/services/network/json/network_client.py
+++ b/tempest/services/network/json/network_client.py
@@ -218,15 +218,18 @@
         uri = '/ports?device_id=%s' % uuid
         return self.list_resources(uri)
 
-    def update_agent(self, agent_id, agent_info):
+    def update_agent(self, agent_id, **kwargs):
         """Update agent
 
         :param agent_info: Agent update information.
         E.g {"admin_state_up": True}
         """
+        # TODO(piyush): The api-site doesn't document this API yet; once it
+        # does, add a link to the API reference here.
+        # LP: https://bugs.launchpad.net/openstack-api-site/+bug/1526673
         uri = '/agents/%s' % agent_id
-        agent = {"agent": agent_info}
-        return self.update_resource(uri, agent)
+        return self.update_resource(uri, kwargs)
 
     def show_agent(self, agent_id, **fields):
         uri = '/agents/%s' % agent_id
@@ -248,6 +251,7 @@
         # TODO(piyush): Current api-site doesn't contain this API description.
         # After fixing the api-site, we need to fix here also for putting the
         # link to api-site.
+        # LP: https://bugs.launchpad.net/openstack-api-site/+bug/1526670
         uri = '/agents/%s/l3-routers' % agent_id
         return self.create_resource(uri, kwargs)
 
@@ -268,13 +272,14 @@
                                                network_id)
         return self.delete_resource(uri)
 
-    def update_extra_routes(self, router_id, routes):
+    def update_extra_routes(self, router_id, **kwargs):
+        """Update Extra routes.
+
+        Available params: see http://developer.openstack.org/
+                              api-ref-networking-v2-ext.html#updateExtraRoutes
+        """
         uri = '/routers/%s' % router_id
-        put_body = {
-            'router': {
-                'routes': routes
-            }
-        }
+        put_body = {'router': kwargs}
         return self.update_resource(uri, put_body)
 
     def delete_extra_routes(self, router_id):
@@ -286,10 +291,13 @@
         }
         return self.update_resource(uri, put_body)
 
-    def add_dhcp_agent_to_network(self, agent_id, network_id):
-        post_body = {'network_id': network_id}
+    def add_dhcp_agent_to_network(self, agent_id, **kwargs):
+        # TODO(piyush): The api-site doesn't document this API yet; once it
+        # does, add a link to the API reference here.
+        # LP: https://bugs.launchpad.net/openstack-api-site/+bug/1526212
         uri = '/agents/%s/dhcp-networks' % agent_id
-        return self.create_resource(uri, post_body)
+        return self.create_resource(uri, kwargs)
 
     def list_subnetpools(self, **filters):
         uri = '/subnetpools'
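
update_agent(), update_extra_routes() and add_dhcp_agent_to_network() now
accept **kwargs, so callers spell out the request body themselves instead of
passing pre-shaped arguments. Calls equivalent to the previous behaviour look
roughly like this ('client' stands for an already-constructed NetworkClient;
the IDs and values are illustrative)::

    # kwargs become the update body for /agents/{id}, so the caller now
    # supplies the top-level 'agent' key the method used to add itself.
    client.update_agent(agent_id, agent={'admin_state_up': False})

    # kwargs are wrapped in {'router': ...}, so routes are passed directly.
    client.update_extra_routes(
        router_id,
        routes=[{'destination': '10.0.3.0/24', 'nexthop': '10.0.0.13'}])

    # kwargs are posted as-is to /agents/{id}/dhcp-networks.
    client.add_dhcp_agent_to_network(agent_id, network_id=network_id)
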
diff --git a/tempest/services/volume/base/admin/base_types_client.py b/tempest/services/volume/base/admin/base_types_client.py
index de6ea8a..867273e 100644
--- a/tempest/services/volume/base/admin/base_types_client.py
+++ b/tempest/services/volume/base/admin/base_types_client.py
@@ -47,10 +47,10 @@
         """Returns the primary type of resource this client works with."""
         return 'volume-type/encryption-type'
 
-    def list_volume_types(self, params=None):
+    def list_volume_types(self, **params):
         """List all the volume_types created."""
         url = 'types'
-        if params is not None:
+        if params:
             url += '?%s' % urllib.urlencode(params)
 
         resp, body = self.get(url)
@@ -66,19 +66,13 @@
         self.expected_success(200, resp.status)
         return service_client.ResponseBody(resp, body)
 
-    def create_volume_type(self, name, **kwargs):
-        """Creates a new Volume_type.
+    def create_volume_type(self, **kwargs):
+        """Create volume type.
 
-        name(Required): Name of volume_type.
-        Following optional keyword arguments are accepted:
-        extra_specs: A dictionary of values to be used as extra_specs.
+        Available params: see http://developer.openstack.org/
+                              api-ref-blockstorage-v2.html#createVolumeType
         """
-        post_body = {
-            'name': name,
-            'extra_specs': kwargs.get('extra_specs'),
-        }
-
-        post_body = json.dumps({'volume_type': post_body})
+        post_body = json.dumps({'volume_type': kwargs})
         resp, body = self.post('types', post_body)
         body = json.loads(body)
         self.expected_success(200, resp.status)
@@ -90,10 +84,17 @@
         self.expected_success(202, resp.status)
         return service_client.ResponseBody(resp, body)
 
-    def list_volume_types_extra_specs(self, vol_type_id, params=None):
-        """List all the volume_types extra specs created."""
+    def list_volume_types_extra_specs(self, vol_type_id, **params):
+        """List all the volume_types extra specs created.
+
+        TODO: The api-site doesn't document this API yet; once it does,
+        add a link to the API reference here.
+        """
         url = 'types/%s/extra_specs' % str(vol_type_id)
-        if params is not None:
+        if params:
             url += '?%s' % urllib.urlencode(params)
 
         resp, body = self.get(url)
@@ -101,23 +102,23 @@
         self.expected_success(200, resp.status)
         return service_client.ResponseBody(resp, body)
 
-    def show_volume_type_extra_specs(self, vol_type_id, extra_spec_name):
+    def show_volume_type_extra_specs(self, vol_type_id, extra_specs_name):
         """Returns the details of a single volume_type extra spec."""
         url = "types/%s/extra_specs/%s" % (str(vol_type_id),
-                                           str(extra_spec_name))
+                                           str(extra_specs_name))
         resp, body = self.get(url)
         body = json.loads(body)
         self.expected_success(200, resp.status)
         return service_client.ResponseBody(resp, body)
 
-    def create_volume_type_extra_specs(self, vol_type_id, extra_spec):
+    def create_volume_type_extra_specs(self, vol_type_id, extra_specs):
         """Creates a new Volume_type extra spec.
 
         vol_type_id: Id of volume_type.
         extra_specs: A dictionary of values to be used as extra_specs.
         """
         url = "types/%s/extra_specs" % str(vol_type_id)
-        post_body = json.dumps({'extra_specs': extra_spec})
+        post_body = json.dumps({'extra_specs': extra_specs})
         resp, body = self.post(url, post_body)
         body = json.loads(body)
         self.expected_success(200, resp.status)
@@ -131,7 +132,7 @@
         return service_client.ResponseBody(resp, body)
 
     def update_volume_type_extra_specs(self, vol_type_id, extra_spec_name,
-                                       extra_spec):
+                                       extra_specs):
         """Update a volume_type extra spec.
 
         vol_type_id: Id of volume_type.
@@ -141,7 +142,7 @@
         """
         url = "types/%s/extra_specs/%s" % (str(vol_type_id),
                                            str(extra_spec_name))
-        put_body = json.dumps(extra_spec)
+        put_body = json.dumps(extra_specs)
         resp, body = self.put(url, put_body)
         body = json.loads(body)
         self.expected_success(200, resp.status)
@@ -159,18 +160,14 @@
         return service_client.ResponseBody(resp, body)
 
     def create_encryption_type(self, vol_type_id, **kwargs):
-        """Create a new encryption type for the specified volume type.
+        """Create encryption type.
 
-        vol_type_id: Id of volume_type.
-        provider: Class providing encryption support.
-        cipher: Encryption algorithm/mode to use.
-        key_size: Size of the encryption key, in bits.
-        control_location: Notional service where encryption is performed.
+        TODO: The api-site doesn't document this API yet; once it does,
+        add a link to the API reference here.
         """
         url = "/types/%s/encryption" % str(vol_type_id)
-        post_body = {}
-        post_body.update(kwargs)
-        post_body = json.dumps({'encryption': post_body})
+        post_body = json.dumps({'encryption': kwargs})
         resp, body = self.post(url, post_body)
         body = json.loads(body)
         self.expected_success(200, resp.status)
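
The volume types client follows the same pattern: keyword arguments are
wrapped straight into the request body, so the fields that used to be listed
in the docstrings (name and extra_specs for volume types; provider, cipher,
key_size and control_location for encryption types) are now chosen by the
caller. Roughly equivalent calls ('client' stands for an already-constructed
volume types client; the values are illustrative)::

    vol_type = client.create_volume_type(
        name='fast', extra_specs={'volume_backend_name': 'ssd'})

    client.create_encryption_type(
        vol_type['volume_type']['id'],
        provider='nova.volume.encryptors.luks.LuksEncryptor',
        control_location='front-end')

    # Listing now takes filters as **params instead of a params dict.
    types = client.list_volume_types(limit=10)
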
diff --git a/tempest/test_discover/test_discover.py b/tempest/test_discover/test_discover.py
index dac7d91..330f370 100644
--- a/tempest/test_discover/test_discover.py
+++ b/tempest/test_discover/test_discover.py
@@ -30,8 +30,7 @@
     base_path = os.path.split(os.path.dirname(os.path.abspath(__file__)))[0]
     base_path = os.path.split(base_path)[0]
     # Load local tempest tests
-    for test_dir in ['tempest/api', 'tempest/scenario',
-                     'tempest/thirdparty']:
+    for test_dir in ['tempest/api', 'tempest/scenario']:
         full_test_dir = os.path.join(base_path, test_dir)
         if not pattern:
             suite.addTests(loader.discover(full_test_dir,
diff --git a/tempest/tests/common/test_service_clients.py b/tempest/tests/common/test_service_clients.py
index c313071..430ef0d 100644
--- a/tempest/tests/common/test_service_clients.py
+++ b/tempest/tests/common/test_service_clients.py
@@ -17,9 +17,6 @@
 import six
 
 from tempest.services.baremetal.v1.json import baremetal_client
-from tempest.services.compute.json import floating_ips_client
-from tempest.services.compute.json import security_group_rules_client
-from tempest.services.compute.json import server_groups_client
 from tempest.services.compute.json import servers_client
 from tempest.services.data_processing.v1_1 import data_processing_client
 from tempest.services.database.json import flavors_client as db_flavor_client
@@ -32,7 +29,7 @@
     identity_v3_identity_client
 from tempest.services.identity.v3.json import policy_client
 from tempest.services.identity.v3.json import region_client
-from tempest.services.identity.v3.json import service_client
+from tempest.services.identity.v3.json import services_client
 from tempest.services.image.v1.json import images_client
 from tempest.services.image.v2.json import images_client as images_v2_client
 from tempest.services.messaging.json import messaging_client
@@ -87,9 +84,6 @@
     def test_service_client_creations_with_specified_args(self, mock_init):
         test_clients = [
             baremetal_client.BaremetalClient,
-            floating_ips_client.FloatingIPsClient,
-            security_group_rules_client.SecurityGroupRulesClient,
-            server_groups_client.ServerGroupsClient,
             servers_client.ServersClient,
             data_processing_client.DataProcessingClient,
             db_flavor_client.DatabaseFlavorsClient,
@@ -128,7 +122,7 @@
             identity_v3_identity_client.IdentityV3Client,
             policy_client.PolicyClient,
             region_client.RegionClient,
-            service_client.ServiceClient,
+            services_client.ServicesClient,
             images_client.ImagesClient,
             images_v2_client.ImagesClientV2
         ]
diff --git a/tempest/tests/common/utils/linux/test_remote_client.py b/tempest/tests/common/utils/linux/test_remote_client.py
index 6b02d02..9c2b99e 100644
--- a/tempest/tests/common/utils/linux/test_remote_client.py
+++ b/tempest/tests/common/utils/linux/test_remote_client.py
@@ -79,7 +79,7 @@
     def test_get_number_of_vcpus(self):
         self.ssh_mock.mock.exec_command.return_value = '16'
         self.assertEqual(self.conn.get_number_of_vcpus(), 16)
-        self._assert_exec_called_with('grep -c processor /proc/cpuinfo')
+        self._assert_exec_called_with('grep -c ^processor /proc/cpuinfo')
 
     def test_get_partitions(self):
         proc_partitions = """major minor  #blocks  name
diff --git a/tempest/tests/services/compute/test_floating_ips_client.py b/tempest/tests/services/compute/test_floating_ips_client.py
deleted file mode 100644
index ee22004..0000000
--- a/tempest/tests/services/compute/test_floating_ips_client.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# Copyright 2015 IBM Corp.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslotest import mockpatch
-from tempest_lib import exceptions as lib_exc
-
-from tempest.services.compute.json import floating_ips_client
-from tempest.tests import fake_auth_provider
-from tempest.tests.services.compute import base
-
-
-class TestFloatingIpsClient(base.BaseComputeServiceTest):
-
-    floating_ip = {"fixed_ip": None,
-                   "id": "46d61064-13ba-4bf0-9557-69de824c3d6f",
-                   "instance_id": "a1daa443-a6bb-463e-aea2-104b7d912eb8",
-                   "ip": "10.10.10.1",
-                   "pool": "nova"}
-
-    def setUp(self):
-        super(TestFloatingIpsClient, self).setUp()
-        fake_auth = fake_auth_provider.FakeAuthProvider()
-        self.client = floating_ips_client.FloatingIPsClient(
-            fake_auth, 'compute', 'regionOne')
-
-    def _test_list_floating_ips(self, bytes_body=False):
-        expected = {'floating_ips': [TestFloatingIpsClient.floating_ip]}
-        self.check_service_client_function(
-            self.client.list_floating_ips,
-            'tempest.common.service_client.ServiceClient.get',
-            expected,
-            bytes_body)
-
-    def test_list_floating_ips_str_body(self):
-        self._test_list_floating_ips(bytes_body=False)
-
-    def test_list_floating_ips_byte_body(self):
-        self._test_list_floating_ips(bytes_body=True)
-
-    def _test_show_floating_ip(self, bytes_body=False):
-        expected = {"floating_ip": TestFloatingIpsClient.floating_ip}
-        self.check_service_client_function(
-            self.client.show_floating_ip,
-            'tempest.common.service_client.ServiceClient.get',
-            expected,
-            bytes_body,
-            floating_ip_id='a1daa443-a6bb-463e-aea2-104b7d912eb8')
-
-    def test_show_floating_ip_str_body(self):
-        self._test_show_floating_ip(bytes_body=False)
-
-    def test_show_floating_ip_byte_body(self):
-        self._test_show_floating_ip(bytes_body=True)
-
-    def _test_create_floating_ip(self, bytes_body=False):
-        expected = {"floating_ip": TestFloatingIpsClient.floating_ip}
-        self.check_service_client_function(
-            self.client.create_floating_ip,
-            'tempest.common.service_client.ServiceClient.post',
-            expected,
-            bytes_body,
-            pool_name='nova')
-
-    def test_create_floating_ip_str_body(self):
-        self._test_create_floating_ip(bytes_body=False)
-
-    def test_create_floating_ip_byte_body(self):
-        self._test_create_floating_ip(bytes_body=True)
-
-    def test_delete_floating_ip(self):
-        self.check_service_client_function(
-            self.client.delete_floating_ip,
-            'tempest.common.service_client.ServiceClient.delete',
-            {}, status=202, floating_ip_id='fake-id')
-
-    def test_associate_floating_ip_to_server(self):
-        self.check_service_client_function(
-            self.client.associate_floating_ip_to_server,
-            'tempest.common.service_client.ServiceClient.post',
-            {}, status=202, floating_ip='10.10.10.1',
-            server_id='c782b7a9-33cd-45f0-b795-7f87f456408b')
-
-    def test_disassociate_floating_ip_from_server(self):
-        self.check_service_client_function(
-            self.client.disassociate_floating_ip_from_server,
-            'tempest.common.service_client.ServiceClient.post',
-            {}, status=202, floating_ip='10.10.10.1',
-            server_id='c782b7a9-33cd-45f0-b795-7f87f456408b')
-
-    def test_is_resource_deleted_true(self):
-        self.useFixture(mockpatch.Patch(
-            'tempest.services.compute.json.floating_ips_client.'
-            'FloatingIPsClient.show_floating_ip',
-            side_effect=lib_exc.NotFound()))
-        self.assertTrue(self.client.is_resource_deleted('fake-id'))
-
-    def test_is_resource_deleted_false(self):
-        self.useFixture(mockpatch.Patch(
-            'tempest.services.compute.json.floating_ips_client.'
-            'FloatingIPsClient.show_floating_ip',
-            return_value={"floating_ip": TestFloatingIpsClient.floating_ip}))
-        self.assertFalse(self.client.is_resource_deleted('fake-id'))
diff --git a/tempest/tests/services/compute/test_security_group_rules_client.py b/tempest/tests/services/compute/test_security_group_rules_client.py
deleted file mode 100644
index c182742..0000000
--- a/tempest/tests/services/compute/test_security_group_rules_client.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# Copyright 2015 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest.services.compute.json import security_group_rules_client
-from tempest.tests import fake_auth_provider
-from tempest.tests.services.compute import base
-
-
-class TestSecurityGroupRulesClient(base.BaseComputeServiceTest):
-
-    FAKE_SECURITY_GROUP_RULE = {
-        "security_group_rule": {
-            "id": "2d021cf1-ce4b-4292-994f-7a785d62a144",
-            "ip_range": {
-                "cidr": "0.0.0.0/0"
-            },
-            "parent_group_id": "48700ff3-30b8-4e63-845f-a79c9633e9fb",
-            "to_port": 443,
-            "ip_protocol": "tcp",
-            "group": {},
-            "from_port": 443
-        }
-    }
-
-    def setUp(self):
-        super(TestSecurityGroupRulesClient, self).setUp()
-        fake_auth = fake_auth_provider.FakeAuthProvider()
-        self.client = security_group_rules_client.SecurityGroupRulesClient(
-            fake_auth, 'compute', 'regionOne')
-
-    def _test_create_security_group_rule(self, bytes_body=False):
-        req_body = {
-            "from_port": "443",
-            "ip_protocol": "tcp",
-            "to_port": "443",
-            "cidr": "0.0.0.0/0",
-            "parent_group_id": "48700ff3-30b8-4e63-845f-a79c9633e9fb"
-        }
-        self.check_service_client_function(
-            self.client.create_security_group_rule,
-            'tempest.common.service_client.ServiceClient.post',
-            self.FAKE_SECURITY_GROUP_RULE,
-            to_utf=bytes_body, **req_body)
-
-    def test_create_security_group_rule_with_str_body(self):
-        self._test_create_security_group_rule()
-
-    def test_create_security_group_rule_with_bytes_body(self):
-        self._test_create_security_group_rule(bytes_body=True)
-
-    def test_delete_security_group_rule(self):
-        self.check_service_client_function(
-            self.client.delete_security_group_rule,
-            'tempest.common.service_client.ServiceClient.delete',
-            {}, status=202, group_rule_id='group-id')
diff --git a/tempest/tests/services/compute/test_server_groups_client.py b/tempest/tests/services/compute/test_server_groups_client.py
deleted file mode 100644
index e531e2f..0000000
--- a/tempest/tests/services/compute/test_server_groups_client.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# Copyright 2015 IBM Corp.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import httplib2
-
-from oslotest import mockpatch
-from tempest_lib.tests import fake_auth_provider
-
-from tempest.services.compute.json import server_groups_client
-from tempest.tests.services.compute import base
-
-
-class TestServerGroupsClient(base.BaseComputeServiceTest):
-
-    server_group = {
-        "id": "5bbcc3c4-1da2-4437-a48a-66f15b1b13f9",
-        "name": "test",
-        "policies": ["anti-affinity"],
-        "members": [],
-        "metadata": {}}
-
-    def setUp(self):
-        super(TestServerGroupsClient, self).setUp()
-        fake_auth = fake_auth_provider.FakeAuthProvider()
-        self.client = server_groups_client.ServerGroupsClient(
-            fake_auth, 'compute', 'regionOne')
-
-    def _test_create_server_group(self, bytes_body=False):
-        expected = {"server_group": TestServerGroupsClient.server_group}
-        self.check_service_client_function(
-            self.client.create_server_group,
-            'tempest.common.service_client.ServiceClient.post', expected,
-            bytes_body, name='fake-group', policies=['affinity'])
-
-    def test_create_server_group_str_body(self):
-        self._test_create_server_group(bytes_body=False)
-
-    def test_create_server_group_byte_body(self):
-        self._test_create_server_group(bytes_body=True)
-
-    def test_delete_server_group(self):
-        response = (httplib2.Response({'status': 204}), None)
-        self.useFixture(mockpatch.Patch(
-            'tempest.common.service_client.ServiceClient.delete',
-            return_value=response))
-        self.client.delete_server_group('fake-group')
-
-    def _test_list_server_groups(self, bytes_body=False):
-        expected = {"server_groups": [TestServerGroupsClient.server_group]}
-        self.check_service_client_function(
-            self.client.list_server_groups,
-            'tempest.common.service_client.ServiceClient.get',
-            expected, bytes_body)
-
-    def test_list_server_groups_str_body(self):
-        self._test_list_server_groups(bytes_body=False)
-
-    def test_list_server_groups_byte_body(self):
-        self._test_list_server_groups(bytes_body=True)
-
-    def _test_show_server_group(self, bytes_body=False):
-        expected = {"server_group": TestServerGroupsClient.server_group}
-        self.check_service_client_function(
-            self.client.show_server_group,
-            'tempest.common.service_client.ServiceClient.get',
-            expected, bytes_body,
-            server_group_id='5bbcc3c4-1da2-4437-a48a-66f15b1b13f9')
-
-    def test_show_server_group_str_body(self):
-        self._test_show_server_group(bytes_body=False)
-
-    def test_show_server_group_byte_body(self):
-        self._test_show_server_group(bytes_body=True)
diff --git a/tempest/thirdparty/README.rst b/tempest/thirdparty/README.rst
deleted file mode 100644
index b0bfdf7..0000000
--- a/tempest/thirdparty/README.rst
+++ /dev/null
@@ -1,35 +0,0 @@
-.. _third_party_field_guide:
-
-Tempest Field Guide to Third Party API tests
-============================================
-
-
-What are these tests?
----------------------
-
-Third party tests are tests for non native OpenStack APIs that are
-part of OpenStack projects. If we ship an API, we're really required
-to ensure that it's working.
-
-An example is that Nova Compute currently has EC2 API support in tree,
-which should be tested as part of normal process.
-
-
-Why are these tests in tempest?
--------------------------------
-
-If we ship an API in an OpenStack component, there should be tests in
-tempest to exercise it in some way.
-
-
-Scope of these tests
---------------------
-
-Third party API testing should be limited to the functional testing of
-third party API compliance. Complex scenarios should be avoided, and
-instead exercised with the OpenStack API, unless the third party API
-can't be tested without those scenarios.
-
-Whenever possible third party API testing should use a client as close
-to the third party API as possible. The point of these tests is API
-validation.
diff --git a/tempest/thirdparty/__init__.py b/tempest/thirdparty/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tempest/thirdparty/__init__.py
+++ /dev/null
diff --git a/tempest/thirdparty/boto/__init__.py b/tempest/thirdparty/boto/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tempest/thirdparty/boto/__init__.py
+++ /dev/null
diff --git a/tempest/thirdparty/boto/test.py b/tempest/thirdparty/boto/test.py
deleted file mode 100644
index cfd3747..0000000
--- a/tempest/thirdparty/boto/test.py
+++ /dev/null
@@ -1,694 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import contextlib
-import logging as orig_logging
-import os
-import re
-
-import boto
-from boto import ec2
-from boto import exception
-from boto import s3
-from oslo_log import log as logging
-import six
-from six.moves.urllib import parse as urlparse
-from tempest_lib import exceptions as lib_exc
-
-from tempest.common import credentials_factory as credentials
-from tempest.common.utils import file_utils
-from tempest import config
-from tempest import exceptions
-import tempest.test
-from tempest.thirdparty.boto.utils import wait
-
-CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
-
-def decision_maker():
-    A_I_IMAGES_READY = True  # ari,ami,aki
-    S3_CAN_CONNECT_ERROR = None
-    EC2_CAN_CONNECT_ERROR = None
-    secret_matcher = re.compile("[A-Za-z0-9+/]{32,}")  # 40 in other system
-    id_matcher = re.compile("[A-Za-z0-9]{20,}")
-
-    def all_read(*args):
-        return all(map(file_utils.have_effective_read_access, args))
-
-    materials_path = CONF.boto.s3_materials_path
-    ami_path = materials_path + os.sep + CONF.boto.ami_manifest
-    aki_path = materials_path + os.sep + CONF.boto.aki_manifest
-    ari_path = materials_path + os.sep + CONF.boto.ari_manifest
-
-    A_I_IMAGES_READY = all_read(ami_path, aki_path, ari_path)
-    boto_logger = logging.getLogger('boto')
-    level = boto_logger.logger.level
-    # suppress logging for boto
-    boto_logger.logger.setLevel(orig_logging.CRITICAL)
-
-    def _cred_sub_check(connection_data):
-        if not id_matcher.match(connection_data["aws_access_key_id"]):
-            raise Exception("Invalid AWS access Key")
-        if not secret_matcher.match(connection_data["aws_secret_access_key"]):
-            raise Exception("Invalid AWS secret Key")
-        raise Exception("Unknown (Authentication?) Error")
-    # NOTE(andreaf) Setting up an extra manager here is redundant,
-    # and should be removed.
-    openstack = credentials.ConfiguredUserManager()
-    try:
-        if urlparse.urlparse(CONF.boto.ec2_url).hostname is None:
-            raise Exception("Failed to get hostname from the ec2_url")
-        ec2client = openstack.ec2api_client
-        try:
-            ec2client.get_all_regions()
-        except exception.BotoServerError as exc:
-                if exc.error_code is None:
-                    raise Exception("EC2 target does not looks EC2 service")
-                _cred_sub_check(ec2client.connection_data)
-
-    except lib_exc.Unauthorized:
-        EC2_CAN_CONNECT_ERROR = "AWS credentials not set," +\
-                                " also failed to get it from keystone"
-    except Exception as exc:
-        EC2_CAN_CONNECT_ERROR = str(exc)
-
-    try:
-        if urlparse.urlparse(CONF.boto.s3_url).hostname is None:
-            raise Exception("Failed to get hostname from the s3_url")
-        s3client = openstack.s3_client
-        try:
-            s3client.get_bucket("^INVALID*#()@INVALID.")
-        except exception.BotoServerError as exc:
-            if exc.status == 403:
-                _cred_sub_check(s3client.connection_data)
-    except Exception as exc:
-        S3_CAN_CONNECT_ERROR = str(exc)
-    except lib_exc.Unauthorized:
-        S3_CAN_CONNECT_ERROR = "AWS credentials not set," +\
-                               " failed to get them even by keystoneclient"
-    boto_logger.logger.setLevel(level)
-    return {'A_I_IMAGES_READY': A_I_IMAGES_READY,
-            'S3_CAN_CONNECT_ERROR': S3_CAN_CONNECT_ERROR,
-            'EC2_CAN_CONNECT_ERROR': EC2_CAN_CONNECT_ERROR}
-
-
-class BotoExceptionMatcher(object):
-    STATUS_RE = r'[45]\d\d'
-    CODE_RE = '.*'  # regexp makes sense in group match
-
-    def match(self, exc):
-        """Check boto exception
-
-        :returns: Returns with an error string if it does not match,
-                  returns with None when it matches.
-        """
-        if not isinstance(exc, exception.BotoServerError):
-            return "%r not an BotoServerError instance" % exc
-        LOG.info("Status: %s , error_code: %s", exc.status, exc.error_code)
-        if re.match(self.STATUS_RE, str(exc.status)) is None:
-            return ("Status code (%s) does not match"
-                    "the expected re pattern \"%s\""
-                    % (exc.status, self.STATUS_RE))
-        if re.match(self.CODE_RE, str(exc.error_code)) is None:
-            return ("Error code (%s) does not match" +
-                    "the expected re pattern \"%s\"") %\
-                   (exc.error_code, self.CODE_RE)
-        return None
-
-
-class ClientError(BotoExceptionMatcher):
-    STATUS_RE = r'4\d\d'
-
-
-class ServerError(BotoExceptionMatcher):
-    STATUS_RE = r'5\d\d'
-
-
-def _add_matcher_class(error_cls, error_data, base=BotoExceptionMatcher):
-    """Usable for adding an ExceptionMatcher(s) into the exception tree.
-
-       The not leaf elements does wildcard match
-    """
-    # in error_code just literal and '.' characters expected
-    if not isinstance(error_data, six.string_types):
-        (error_code, status_code) = map(str, error_data)
-    else:
-        status_code = None
-        error_code = error_data
-    parts = error_code.split('.')
-    basematch = ""
-    num_parts = len(parts)
-    max_index = num_parts - 1
-    add_cls = error_cls
-    for i_part in six.moves.xrange(num_parts):
-        part = parts[i_part]
-        leaf = i_part == max_index
-        if not leaf:
-            match = basematch + part + "[.].*"
-        else:
-            match = basematch + part
-
-        basematch += part + "[.]"
-        if not hasattr(add_cls, part):
-            cls_dict = {"CODE_RE": match}
-            if leaf and status_code is not None:
-                cls_dict["STATUS_RE"] = status_code
-            cls = type(part, (base, ), cls_dict)
-            setattr(add_cls, part, cls())
-            add_cls = cls
-        elif leaf:
-            raise LookupError("Tries to redefine an error code \"%s\"" % part)
-        else:
-            add_cls = getattr(add_cls, part)
-
-
-# TODO(afazekas): classmethod handling
-def friendly_function_name_simple(call_able):
-    name = ""
-    if hasattr(call_able, "im_class"):
-        name += call_able.im_class.__name__ + "."
-    name += call_able.__name__
-    return name
-
-
-def friendly_function_call_str(call_able, *args, **kwargs):
-    string = friendly_function_name_simple(call_able)
-    string += "(" + ", ".join(map(str, args))
-    if len(kwargs):
-        if len(args):
-            string += ", "
-    string += ", ".join("=".join(map(str, (key, value)))
-                        for (key, value) in kwargs.items())
-    return string + ")"
-
-
-class BotoTestCase(tempest.test.BaseTestCase):
-    """Recommended to use as base class for boto related test."""
-
-    credentials = ['primary']
-
-    @classmethod
-    def skip_checks(cls):
-        super(BotoTestCase, cls).skip_checks()
-        if not CONF.compute_feature_enabled.ec2_api:
-            raise cls.skipException("The EC2 API is not available")
-        if not CONF.identity_feature_enabled.api_v2 or \
-                not CONF.identity.auth_version == 'v2':
-            raise cls.skipException("Identity v2 is not available")
-
-    @classmethod
-    def resource_setup(cls):
-        super(BotoTestCase, cls).resource_setup()
-        cls.conclusion = decision_maker()
-        # The trash contains cleanup functions and parameters in tuples
-        # (function, *args, **kwargs)
-        cls._resource_trash_bin = {}
-        cls._sequence = -1
-        if (hasattr(cls, "EC2") and
-            cls.conclusion['EC2_CAN_CONNECT_ERROR'] is not None):
-            raise cls.skipException("EC2 " + cls.__name__ + ": " +
-                                    cls.conclusion['EC2_CAN_CONNECT_ERROR'])
-        if (hasattr(cls, "S3") and
-            cls.conclusion['S3_CAN_CONNECT_ERROR'] is not None):
-            raise cls.skipException("S3 " + cls.__name__ + ": " +
-                                    cls.conclusion['S3_CAN_CONNECT_ERROR'])
-
-    @classmethod
-    def addResourceCleanUp(cls, function, *args, **kwargs):
-        """Adds CleanUp callable, used by tearDownClass.
-
-        Recommended to a use (deep)copy on the mutable args.
-        """
-        cls._sequence = cls._sequence + 1
-        cls._resource_trash_bin[cls._sequence] = (function, args, kwargs)
-        return cls._sequence
-
-    @classmethod
-    def cancelResourceCleanUp(cls, key):
-        """Cancel Clean up request."""
-        del cls._resource_trash_bin[key]
-
-    # TODO(afazekas): Add "with" context handling
-    def assertBotoError(self, excMatcher, callableObj,
-                        *args, **kwargs):
-        """Example usage:
-
-            self.assertBotoError(self.ec2_error_code.client.
-                                 InvalidKeyPair.Duplicate,
-                                 self.client.create_keypair,
-                                 key_name)
-        """
-        try:
-            callableObj(*args, **kwargs)
-        except exception.BotoServerError as exc:
-            error_msg = excMatcher.match(exc)
-            if error_msg is not None:
-                raise self.failureException(error_msg)
-        else:
-            raise self.failureException("BotoServerError not raised")
-
-    @classmethod
-    def resource_cleanup(cls):
-        """Calls the callables added by addResourceCleanUp
-
-        when you overwrite this function don't forget to call this too.
-        """
-        fail_count = 0
-        trash_keys = sorted(cls._resource_trash_bin, reverse=True)
-        for key in trash_keys:
-            (function, pos_args, kw_args) = cls._resource_trash_bin[key]
-            try:
-                func_name = friendly_function_call_str(function, *pos_args,
-                                                       **kw_args)
-                LOG.debug("Cleaning up: %s" % func_name)
-                function(*pos_args, **kw_args)
-            except BaseException:
-                fail_count += 1
-                LOG.exception("Cleanup failed %s" % func_name)
-            finally:
-                del cls._resource_trash_bin[key]
-        super(BotoTestCase, cls).resource_cleanup()
-        # NOTE(afazekas): let the super called even on exceptions
-        # The real exceptions already logged, if the super throws another,
-        # does not causes hidden issues
-        if fail_count:
-            raise exceptions.TearDownException(num=fail_count)
-
-    ec2_error_code = BotoExceptionMatcher()
-    # InsufficientInstanceCapacity can be both server and client error
-    ec2_error_code.server = ServerError()
-    ec2_error_code.client = ClientError()
-    s3_error_code = BotoExceptionMatcher()
-    s3_error_code.server = ServerError()
-    s3_error_code.client = ClientError()
-    valid_image_state = set(('available', 'pending', 'failed'))
-    # NOTE(afazekas): 'paused' is not valid status in EC2, but it does not have
-    # a good mapping, because it uses memory, but not really a running machine
-    valid_instance_state = set(('pending', 'running', 'shutting-down',
-                                'terminated', 'stopping', 'stopped', 'paused'))
-    valid_volume_status = set(('creating', 'available', 'in-use',
-                               'deleting', 'deleted', 'error'))
-    valid_snapshot_status = set(('pending', 'completed', 'error'))
-
-    gone_set = set(('_GONE',))
-
-    @classmethod
-    def get_lfunction_gone(cls, obj):
-        # NOTE: If the object is instance of a well know type returns back with
-        # with the corresponding function otherwise it assumes the obj itself
-        # is the function.
-        ec = cls.ec2_error_code
-        if isinstance(obj, ec2.instance.Instance):
-            colusure_matcher = ec.client.InvalidInstanceID.NotFound
-            status_attr = "state"
-        elif isinstance(obj, ec2.image.Image):
-            colusure_matcher = ec.client.InvalidAMIID.NotFound
-            status_attr = "state"
-        elif isinstance(obj, ec2.snapshot.Snapshot):
-            colusure_matcher = ec.client.InvalidSnapshot.NotFound
-            status_attr = "status"
-        elif isinstance(obj, ec2.volume.Volume):
-            colusure_matcher = ec.client.InvalidVolume.NotFound
-            status_attr = "status"
-        else:
-            return obj
-
-        def _status():
-            try:
-                obj.update(validate=True)
-            except ValueError:
-                return "_GONE"
-            except exception.EC2ResponseError as exc:
-                if colusure_matcher.match(exc) is None:
-                    return "_GONE"
-                else:
-                    raise
-            return getattr(obj, status_attr)
-
-        return _status
-
-    def state_wait_gone(self, lfunction, final_set, valid_set):
-        if not isinstance(final_set, set):
-            final_set = set((final_set,))
-        final_set |= self.gone_set
-        lfunction = self.get_lfunction_gone(lfunction)
-        state = wait.state_wait(lfunction, final_set, valid_set)
-        self.assertIn(state, valid_set | self.gone_set)
-        return state
-
-    def waitImageState(self, lfunction, wait_for):
-        return self.state_wait_gone(lfunction, wait_for,
-                                    self.valid_image_state)
-
-    def waitInstanceState(self, lfunction, wait_for):
-        return self.state_wait_gone(lfunction, wait_for,
-                                    self.valid_instance_state)
-
-    def waitSnapshotStatus(self, lfunction, wait_for):
-        return self.state_wait_gone(lfunction, wait_for,
-                                    self.valid_snapshot_status)
-
-    def waitVolumeStatus(self, lfunction, wait_for):
-        return self.state_wait_gone(lfunction, wait_for,
-                                    self.valid_volume_status)
-
-    def assertImageStateWait(self, lfunction, wait_for):
-        state = self.waitImageState(lfunction, wait_for)
-        self.assertIn(state, wait_for)
-
-    def assertInstanceStateWait(self, lfunction, wait_for):
-        state = self.waitInstanceState(lfunction, wait_for)
-        self.assertIn(state, wait_for)
-
-    def assertVolumeStatusWait(self, lfunction, wait_for):
-        state = self.waitVolumeStatus(lfunction, wait_for)
-        self.assertIn(state, wait_for)
-
-    def assertSnapshotStatusWait(self, lfunction, wait_for):
-        state = self.waitSnapshotStatus(lfunction, wait_for)
-        self.assertIn(state, wait_for)
-
-    def assertAddressDisassociatedWait(self, address):
-
-        def _disassociate():
-            cli = self.ec2_client
-            addresses = cli.get_all_addresses(addresses=(address.public_ip,))
-            if len(addresses) != 1:
-                return "INVALID"
-            if addresses[0].instance_id:
-                LOG.info("%s associated to %s",
-                         address.public_ip,
-                         addresses[0].instance_id)
-                return "ASSOCIATED"
-            return "DISASSOCIATED"
-
-        state = wait.state_wait(_disassociate, "DISASSOCIATED",
-                                set(("ASSOCIATED", "DISASSOCIATED")))
-        self.assertEqual(state, "DISASSOCIATED")
-
-    def assertAddressReleasedWait(self, address):
-
-        def _address_delete():
-            # NOTE(afazekas): the filter gives back IP
-            # even if it is not associated to my tenant
-            if (address.public_ip not in map(lambda a: a.public_ip,
-                self.ec2_client.get_all_addresses())):
-                    return "DELETED"
-            return "NOTDELETED"
-
-        state = wait.state_wait(_address_delete, "DELETED")
-        self.assertEqual(state, "DELETED")
-
-    def assertReSearch(self, regexp, string):
-        if re.search(regexp, string) is None:
-            raise self.failureException("regexp: '%s' not found in '%s'" %
-                                        (regexp, string))
-
-    def assertNotReSearch(self, regexp, string):
-        if re.search(regexp, string) is not None:
-            raise self.failureException("regexp: '%s' found in '%s'" %
-                                        (regexp, string))
-
-    def assertReMatch(self, regexp, string):
-        if re.match(regexp, string) is None:
-            raise self.failureException("regexp: '%s' not matches on '%s'" %
-                                        (regexp, string))
-
-    def assertNotReMatch(self, regexp, string):
-        if re.match(regexp, string) is not None:
-            raise self.failureException("regexp: '%s' matches on '%s'" %
-                                        (regexp, string))
-
-    @classmethod
-    def destroy_bucket(cls, connection_data, bucket):
-        """Destroys the bucket and its content, just for teardown."""
-        exc_num = 0
-        try:
-            with contextlib.closing(
-                    boto.connect_s3(**connection_data)) as conn:
-                if isinstance(bucket, basestring):
-                    bucket = conn.lookup(bucket)
-                    assert isinstance(bucket, s3.bucket.Bucket)
-                for obj in bucket.list():
-                    try:
-                        bucket.delete_key(obj.key)
-                        obj.close()
-                    except BaseException:
-                        LOG.exception("Failed to delete key %s " % obj.key)
-                        exc_num += 1
-            conn.delete_bucket(bucket)
-        except BaseException:
-            LOG.exception("Failed to destroy bucket %s " % bucket)
-            exc_num += 1
-        if exc_num:
-            raise exceptions.TearDownException(num=exc_num)
-
-    @classmethod
-    def destroy_reservation(cls, reservation):
-        """Terminate instances in a reservation, just for teardown."""
-        exc_num = 0
-
-        def _instance_state():
-            try:
-                instance.update(validate=True)
-            except ValueError:
-                return "_GONE"
-            except exception.EC2ResponseError as exc:
-                if cls.ec2_error_code.\
-                        client.InvalidInstanceID.NotFound.match(exc) is None:
-                    return "_GONE"
-                # NOTE(afazekas): incorrect code,
-                # but the resource must be destroyed
-                if exc.error_code == "InstanceNotFound":
-                    return "_GONE"
-
-            return instance.state
-
-        for instance in reservation.instances:
-            try:
-                instance.terminate()
-                wait.re_search_wait(_instance_state, "_GONE")
-            except BaseException:
-                LOG.exception("Failed to terminate instance %s " % instance)
-                exc_num += 1
-        if exc_num:
-            raise exceptions.TearDownException(num=exc_num)
-
-    # NOTE(afazekas): The incorrect ErrorCodes make it very difficult
-    # to write a better teardown
-
-    @classmethod
-    def destroy_security_group_wait(cls, group):
-        """Delete group.
-
-           Use just for teardown!
-        """
-        # NOTE(afazekas): should wait/try until all related instances terminate
-        group.delete()
-
-    @classmethod
-    def destroy_volume_wait(cls, volume):
-        """Delete volume, tries to detach first.
-
-           Use just for teardown!
-        """
-        exc_num = 0
-        snaps = volume.snapshots()
-        if len(snaps):
-            LOG.critical("%s Volume has %s snapshot(s)", volume.id,
-                         map(snaps.id, snaps))
-
-        # NOTE(afazekas): detaching/attaching are not valid EC2 statuses
-        def _volume_state():
-            volume.update(validate=True)
-            try:
-                # NOTE(gmann): Make sure volume is attached.
-                # Checking status as 'not "available"' is not enough to make
-                # sure volume is attached as it can be in "error" state
-                if volume.status == "in-use":
-                    volume.detach(force=True)
-            except BaseException:
-                LOG.exception("Failed to detach volume %s" % volume)
-                # exc_num += 1 "nonlocal" not in python2
-            return volume.status
-
-        try:
-            wait.re_search_wait(_volume_state, "available")
-            # does not validate the status
-            LOG.info(_volume_state())
-            volume.delete()
-        except BaseException:
-            LOG.exception("Failed to delete volume %s" % volume)
-            exc_num += 1
-        if exc_num:
-            raise exceptions.TearDownException(num=exc_num)
-
-    @classmethod
-    def destroy_snapshot_wait(cls, snapshot):
-        """delete snapshot, wait until it ceases to exist."""
-        snapshot.delete()
-
-        def _update():
-            snapshot.update(validate=True)
-
-        wait.wait_exception(_update)
-
-
-# A (code, status) tuple may be given when the status pattern also needs
-# to be specified
-for code in ('AddressLimitExceeded', 'AttachmentLimitExceeded', 'AuthFailure',
-             'Blocked', 'CustomerGatewayLimitExceeded', 'DependencyViolation',
-             'DiskImageSizeTooLarge', 'FilterLimitExceeded',
-             'Gateway.NotAttached', 'IdempotentParameterMismatch',
-             'IncorrectInstanceState', 'IncorrectState',
-             'InstanceLimitExceeded', 'InsufficientInstanceCapacity',
-             'InsufficientReservedInstancesCapacity',
-             'InternetGatewayLimitExceeded', 'InvalidAMIAttributeItemValue',
-             'InvalidAMIID.Malformed', 'InvalidAMIID.NotFound',
-             'InvalidAMIID.Unavailable', 'InvalidAssociationID.NotFound',
-             'InvalidAttachment.NotFound', 'InvalidConversionTaskId',
-             'InvalidCustomerGateway.DuplicateIpAddress',
-             'InvalidCustomerGatewayID.NotFound', 'InvalidDevice.InUse',
-             'InvalidDhcpOptionsID.NotFound', 'InvalidFormat',
-             'InvalidFilter', 'InvalidGatewayID.NotFound',
-             'InvalidGroup.Duplicate', 'InvalidGroupId.Malformed',
-             'InvalidGroup.InUse', 'InvalidGroup.NotFound',
-             'InvalidGroup.Reserved', 'InvalidInstanceID.Malformed',
-             'InvalidInstanceID.NotFound',
-             'InvalidInternetGatewayID.NotFound', 'InvalidIPAddress.InUse',
-             'InvalidKeyPair.Duplicate', 'InvalidKeyPair.Format',
-             'InvalidKeyPair.NotFound', 'InvalidManifest',
-             'InvalidNetworkAclEntry.NotFound',
-             'InvalidNetworkAclID.NotFound', 'InvalidParameterCombination',
-             'InvalidParameterValue', 'InvalidPermission.Duplicate',
-             'InvalidPermission.Malformed', 'InvalidReservationID.Malformed',
-             'InvalidReservationID.NotFound', 'InvalidRoute.NotFound',
-             'InvalidRouteTableID.NotFound',
-             'InvalidSecurity.RequestHasExpired',
-             'InvalidSnapshotID.Malformed', 'InvalidSnapshot.NotFound',
-             'InvalidUserID.Malformed', 'InvalidReservedInstancesId',
-             'InvalidReservedInstancesOfferingId',
-             'InvalidSubnetID.NotFound', 'InvalidVolumeID.Duplicate',
-             'InvalidVolumeID.Malformed', 'InvalidVolumeID.ZoneMismatch',
-             'InvalidVolume.NotFound', 'InvalidVpcID.NotFound',
-             'InvalidVpnConnectionID.NotFound',
-             'InvalidVpnGatewayID.NotFound',
-             'InvalidZone.NotFound', 'LegacySecurityGroup',
-             'MissingParameter', 'NetworkAclEntryAlreadyExists',
-             'NetworkAclEntryLimitExceeded', 'NetworkAclLimitExceeded',
-             'NonEBSInstance', 'PendingSnapshotLimitExceeded',
-             'PendingVerification', 'OptInRequired', 'RequestLimitExceeded',
-             'ReservedInstancesLimitExceeded', 'Resource.AlreadyAssociated',
-             'ResourceLimitExceeded', 'RouteAlreadyExists',
-             'RouteLimitExceeded', 'RouteTableLimitExceeded',
-             'RulesPerSecurityGroupLimitExceeded',
-             'SecurityGroupLimitExceeded',
-             'SecurityGroupsPerInstanceLimitExceeded',
-             'SnapshotLimitExceeded', 'SubnetLimitExceeded',
-             'UnknownParameter', 'UnsupportedOperation',
-             'VolumeLimitExceeded', 'VpcLimitExceeded',
-             'VpnConnectionLimitExceeded',
-             'VpnGatewayAttachmentLimitExceeded', 'VpnGatewayLimitExceeded'):
-    _add_matcher_class(BotoTestCase.ec2_error_code.client,
-                       code, base=ClientError)
-
-for code in ('InsufficientAddressCapacity', 'InsufficientInstanceCapacity',
-             'InsufficientReservedInstanceCapacity', 'InternalError',
-             'Unavailable'):
-    _add_matcher_class(BotoTestCase.ec2_error_code.server,
-                       code, base=ServerError)
-
-
-for code in (('AccessDenied', 403),
-             ('AccountProblem', 403),
-             ('AmbiguousGrantByEmailAddress', 400),
-             ('BadDigest', 400),
-             ('BucketAlreadyExists', 409),
-             ('BucketAlreadyOwnedByYou', 409),
-             ('BucketNotEmpty', 409),
-             ('CredentialsNotSupported', 400),
-             ('CrossLocationLoggingProhibited', 403),
-             ('EntityTooSmall', 400),
-             ('EntityTooLarge', 400),
-             ('ExpiredToken', 400),
-             ('IllegalVersioningConfigurationException', 400),
-             ('IncompleteBody', 400),
-             ('IncorrectNumberOfFilesInPostRequest', 400),
-             ('InlineDataTooLarge', 400),
-             ('InvalidAccessKeyId', 403),
-             'InvalidAddressingHeader',
-             ('InvalidArgument', 400),
-             ('InvalidBucketName', 400),
-             ('InvalidBucketState', 409),
-             ('InvalidDigest', 400),
-             ('InvalidLocationConstraint', 400),
-             ('InvalidPart', 400),
-             ('InvalidPartOrder', 400),
-             ('InvalidPayer', 403),
-             ('InvalidPolicyDocument', 400),
-             ('InvalidRange', 416),
-             ('InvalidRequest', 400),
-             ('InvalidSecurity', 403),
-             ('InvalidSOAPRequest', 400),
-             ('InvalidStorageClass', 400),
-             ('InvalidTargetBucketForLogging', 400),
-             ('InvalidToken', 400),
-             ('InvalidURI', 400),
-             ('KeyTooLong', 400),
-             ('MalformedACLError', 400),
-             ('MalformedPOSTRequest', 400),
-             ('MalformedXML', 400),
-             ('MaxMessageLengthExceeded', 400),
-             ('MaxPostPreDataLengthExceededError', 400),
-             ('MetadataTooLarge', 400),
-             ('MethodNotAllowed', 405),
-             ('MissingAttachment'),
-             ('MissingContentLength', 411),
-             ('MissingRequestBodyError', 400),
-             ('MissingSecurityElement', 400),
-             ('MissingSecurityHeader', 400),
-             ('NoLoggingStatusForKey', 400),
-             ('NoSuchBucket', 404),
-             ('NoSuchKey', 404),
-             ('NoSuchLifecycleConfiguration', 404),
-             ('NoSuchUpload', 404),
-             ('NoSuchVersion', 404),
-             ('NotSignedUp', 403),
-             ('NotSuchBucketPolicy', 404),
-             ('OperationAborted', 409),
-             ('PermanentRedirect', 301),
-             ('PreconditionFailed', 412),
-             ('Redirect', 307),
-             ('RequestIsNotMultiPartContent', 400),
-             ('RequestTimeout', 400),
-             ('RequestTimeTooSkewed', 403),
-             ('RequestTorrentOfBucketError', 400),
-             ('SignatureDoesNotMatch', 403),
-             ('TemporaryRedirect', 307),
-             ('TokenRefreshRequired', 400),
-             ('TooManyBuckets', 400),
-             ('UnexpectedContent', 400),
-             ('UnresolvableGrantByEmailAddress', 400),
-             ('UserKeyMustBeSpecified', 400)):
-    _add_matcher_class(BotoTestCase.s3_error_code.client,
-                       code, base=ClientError)
-
-
-for code in (('InternalError', 500),
-             ('NotImplemented', 501),
-             ('ServiceUnavailable', 503),
-             ('SlowDown', 503)):
-    _add_matcher_class(BotoTestCase.s3_error_code.server,
-                       code, base=ServerError)
diff --git a/tempest/thirdparty/boto/test_ec2_instance_run.py b/tempest/thirdparty/boto/test_ec2_instance_run.py
deleted file mode 100644
index 8fe9406..0000000
--- a/tempest/thirdparty/boto/test_ec2_instance_run.py
+++ /dev/null
@@ -1,363 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_log import log as logging
-
-from tempest.common.utils import data_utils
-from tempest.common.utils.linux import remote_client
-from tempest import config
-from tempest import exceptions
-from tempest import test
-from tempest.thirdparty.boto import test as boto_test
-from tempest.thirdparty.boto.utils import s3
-from tempest.thirdparty.boto.utils import wait
-
-CONF = config.CONF
-
-LOG = logging.getLogger(__name__)
-
-
-class InstanceRunTest(boto_test.BotoTestCase):
-
-    @classmethod
-    def setup_clients(cls):
-        super(InstanceRunTest, cls).setup_clients()
-        cls.s3_client = cls.os.s3_client
-        cls.ec2_client = cls.os.ec2api_client
-
-    @classmethod
-    def resource_setup(cls):
-        super(InstanceRunTest, cls).resource_setup()
-        if not cls.conclusion['A_I_IMAGES_READY']:
-            raise cls.skipException("".join(("EC2 ", cls.__name__,
-                                    ": requires ami/aki/ari manifest")))
-        cls.zone = CONF.boto.aws_zone
-        cls.materials_path = CONF.boto.s3_materials_path
-        ami_manifest = CONF.boto.ami_manifest
-        aki_manifest = CONF.boto.aki_manifest
-        ari_manifest = CONF.boto.ari_manifest
-        cls.instance_type = CONF.boto.instance_type
-        cls.bucket_name = data_utils.rand_name("s3bucket")
-        cls.keypair_name = data_utils.rand_name("keypair")
-        cls.keypair = cls.ec2_client.create_key_pair(cls.keypair_name)
-        cls.addResourceCleanUp(cls.ec2_client.delete_key_pair,
-                               cls.keypair_name)
-        bucket = cls.s3_client.create_bucket(cls.bucket_name)
-        cls.addResourceCleanUp(cls.destroy_bucket,
-                               cls.s3_client.connection_data,
-                               cls.bucket_name)
-        s3.s3_upload_dir(bucket, cls.materials_path)
-        cls.images = {"ami":
-                      {"name": data_utils.rand_name("ami-name"),
-                       "location": cls.bucket_name + "/" + ami_manifest},
-                      "aki":
-                      {"name": data_utils.rand_name("aki-name"),
-                       "location": cls.bucket_name + "/" + aki_manifest},
-                      "ari":
-                      {"name": data_utils.rand_name("ari-name"),
-                       "location": cls.bucket_name + "/" + ari_manifest}}
-        for image_type in ("aki", "ari"):
-            image = cls.images[image_type]
-            image["image_id"] = cls.ec2_client.register_image(
-                name=image["name"],
-                image_location=image["location"])
-            cls.addResourceCleanUp(cls.ec2_client.deregister_image,
-                                   image["image_id"])
-        image = cls.images["ami"]
-        image["image_id"] = cls.ec2_client.register_image(
-            name=image["name"],
-            image_location=image["location"],
-            kernel_id=cls.images["aki"]["image_id"],
-            ramdisk_id=cls.images["ari"]["image_id"])
-        cls.addResourceCleanUp(cls.ec2_client.deregister_image,
-                               image["image_id"])
-
-        for image in cls.images.itervalues():
-            def _state():
-                retr = cls.ec2_client.get_image(image["image_id"])
-                return retr.state
-            state = wait.state_wait(_state, "available")
-            if state != "available":
-                for _image in cls.images.itervalues():
-                    cls.ec2_client.deregister_image(_image["image_id"])
-                raise exceptions.EC2RegisterImageException(
-                    image_id=image["image_id"])
-
-    def _terminate_reservation(self, reservation, rcuk):
-        for instance in reservation.instances:
-            instance.terminate()
-        for instance in reservation.instances:
-            self.assertInstanceStateWait(instance, '_GONE')
-        self.cancelResourceCleanUp(rcuk)
-
-    @test.idempotent_id('c881fbb7-d56e-4054-9d76-1c3a60a207b0')
-    def test_run_idempotent_instances(self):
-        # EC2 run instances idempotently
-
-        def _run_instance(client_token):
-            reservation = self.ec2_client.run_instances(
-                image_id=self.images["ami"]["image_id"],
-                kernel_id=self.images["aki"]["image_id"],
-                ramdisk_id=self.images["ari"]["image_id"],
-                instance_type=self.instance_type,
-                client_token=client_token)
-            rcuk = self.addResourceCleanUp(self.destroy_reservation,
-                                           reservation)
-            return (reservation, rcuk)
-
-        reservation_1, rcuk_1 = _run_instance('token_1')
-        reservation_2, rcuk_2 = _run_instance('token_2')
-        reservation_1a, rcuk_1a = _run_instance('token_1')
-
-        self.assertIsNotNone(reservation_1)
-        self.assertIsNotNone(reservation_2)
-        self.assertIsNotNone(reservation_1a)
-
-        # same reservation for token_1
-        self.assertEqual(reservation_1.id, reservation_1a.id)
-
-        # Cancel cleanup -- since it's a duplicate, it's
-        # handled by rcuk_1
-        self.cancelResourceCleanUp(rcuk_1a)
-
-        self._terminate_reservation(reservation_1, rcuk_1)
-        self._terminate_reservation(reservation_2, rcuk_2)
-
-    @test.idempotent_id('2ea26a39-f96c-48fc-8374-5c10ec184c67')
-    def test_run_stop_terminate_instance(self):
-        # EC2 run, stop and terminate instance
-        image_ami = self.ec2_client.get_image(self.images["ami"]
-                                              ["image_id"])
-        reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
-                                    ramdisk_id=self.images["ari"]["image_id"],
-                                    instance_type=self.instance_type)
-        rcuk = self.addResourceCleanUp(self.destroy_reservation, reservation)
-
-        for instance in reservation.instances:
-            LOG.info("state: %s", instance.state)
-            if instance.state != "running":
-                self.assertInstanceStateWait(instance, "running")
-
-        for instance in reservation.instances:
-            instance.stop()
-            LOG.info("state: %s", instance.state)
-            if instance.state != "stopped":
-                self.assertInstanceStateWait(instance, "stopped")
-
-        self._terminate_reservation(reservation, rcuk)
-
-    @test.idempotent_id('3d77225a-5cec-4e54-a017-9ebf11a266e6')
-    def test_run_stop_terminate_instance_with_tags(self):
-        # EC2 run, stop and terminate instance with tags
-        image_ami = self.ec2_client.get_image(self.images["ami"]
-                                              ["image_id"])
-        reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
-                                    ramdisk_id=self.images["ari"]["image_id"],
-                                    instance_type=self.instance_type)
-        rcuk = self.addResourceCleanUp(self.destroy_reservation, reservation)
-
-        for instance in reservation.instances:
-            LOG.info("state: %s", instance.state)
-            if instance.state != "running":
-                self.assertInstanceStateWait(instance, "running")
-            instance.add_tag('key1', value='value1')
-
-        tags = self.ec2_client.get_all_tags()
-        td = dict((item.name, item.value) for item in tags)
-
-        self.assertIn('key1', td)
-        self.assertEqual('value1', td['key1'])
-
-        tags = self.ec2_client.get_all_tags(filters={'key': 'key1'})
-        td = dict((item.name, item.value) for item in tags)
-        self.assertIn('key1', td)
-        self.assertEqual('value1', td['key1'])
-
-        tags = self.ec2_client.get_all_tags(filters={'value': 'value1'})
-        td = dict((item.name, item.value) for item in tags)
-        self.assertIn('key1', td)
-        self.assertEqual('value1', td['key1'])
-
-        tags = self.ec2_client.get_all_tags(filters={'key': 'value2'})
-        td = dict((item.name, item.value) for item in tags)
-        self.assertNotIn('key1', td)
-
-        for instance in reservation.instances:
-            instance.remove_tag('key1', value='value1')
-
-        tags = self.ec2_client.get_all_tags()
-
-        # NOTE: Volume attach and detach cause metadata (tags) to be created
-        # for the volume, so exclude them while asserting.
-        self.assertNotIn('key1', tags)
-
-        for instance in reservation.instances:
-            instance.stop()
-            LOG.info("state: %s", instance.state)
-            if instance.state != "stopped":
-                self.assertInstanceStateWait(instance, "stopped")
-
-        self._terminate_reservation(reservation, rcuk)
-
-    @test.idempotent_id('252945b5-3294-4fda-ae21-928a42f63f76')
-    def test_run_terminate_instance(self):
-        # EC2 run, terminate immediately
-        image_ami = self.ec2_client.get_image(self.images["ami"]
-                                              ["image_id"])
-        reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
-                                    ramdisk_id=self.images["ari"]["image_id"],
-                                    instance_type=self.instance_type)
-
-        for instance in reservation.instances:
-            instance.terminate()
-        self.assertInstanceStateWait(instance, '_GONE')
-
-    @test.idempotent_id('ab836c29-737b-4101-9fb9-87045eaf89e9')
-    def test_compute_with_volumes(self):
-        # EC2 integration test #1 (not strict)
-        image_ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
-        sec_group_name = data_utils.rand_name("securitygroup")
-        group_desc = sec_group_name + " security group description "
-        security_group = self.ec2_client.create_security_group(sec_group_name,
-                                                               group_desc)
-        self.addResourceCleanUp(self.destroy_security_group_wait,
-                                security_group)
-        self.assertTrue(
-            self.ec2_client.authorize_security_group(
-                sec_group_name,
-                ip_protocol="icmp",
-                cidr_ip="0.0.0.0/0",
-                from_port=-1,
-                to_port=-1))
-        self.assertTrue(
-            self.ec2_client.authorize_security_group(
-                sec_group_name,
-                ip_protocol="tcp",
-                cidr_ip="0.0.0.0/0",
-                from_port=22,
-                to_port=22))
-        reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
-                                    ramdisk_id=self.images["ari"]["image_id"],
-                                    instance_type=self.instance_type,
-                                    key_name=self.keypair_name,
-                                    security_groups=(sec_group_name,))
-
-        LOG.debug("Instance booted - state: %s",
-                  reservation.instances[0].state)
-
-        self.addResourceCleanUp(self.destroy_reservation,
-                                reservation)
-        volume = self.ec2_client.create_volume(CONF.volume.volume_size,
-                                               self.zone)
-        LOG.debug("Volume created - status: %s", volume.status)
-
-        self.addResourceCleanUp(self.destroy_volume_wait, volume)
-        instance = reservation.instances[0]
-        if instance.state != "running":
-            self.assertInstanceStateWait(instance, "running")
-        LOG.debug("Instance now running - state: %s", instance.state)
-
-        address = self.ec2_client.allocate_address()
-        rcuk_a = self.addResourceCleanUp(address.delete)
-        self.assertTrue(address.associate(instance.id))
-
-        rcuk_da = self.addResourceCleanUp(address.disassociate)
-        # TODO(afazekas): ping test. dependency/permission?
-
-        self.assertVolumeStatusWait(volume, "available")
-        # NOTE(afazekas): it may report "available" before it actually is
-
-        ssh = remote_client.RemoteClient(address.public_ip,
-                                         CONF.validation.image_ssh_user,
-                                         pkey=self.keypair.material)
-        text = data_utils.rand_name("Pattern text for console output")
-        try:
-            resp = ssh.write_to_console(text)
-        except Exception:
-            if not CONF.compute_feature_enabled.console_output:
-                LOG.debug('Console output not supported, cannot log')
-            else:
-                console_output = instance.get_console_output().output
-                LOG.debug('Console output for %s\nbody=\n%s',
-                          instance.id, console_output)
-            raise
-
-        self.assertFalse(resp)
-
-        def _output():
-            output = instance.get_console_output()
-            return output.output
-
-        wait.re_search_wait(_output, text)
-        part_lines = ssh.get_partitions().split('\n')
-        volume.attach(instance.id, "/dev/vdh")
-
-        def _volume_state():
-            """Return volume state realizing that 'in-use' is overloaded."""
-            volume.update(validate=True)
-            status = volume.status
-            attached = volume.attach_data.status
-            LOG.debug("Volume %s is in status: %s, attach_status: %s",
-                      volume.id, status, attached)
-            # Nova reports 'in-use' on 'attaching' volumes because we
-            # have a single volume status, and EC2 has 2. Ensure that
-            # if we aren't attached yet we return something other than
-            # 'in-use'
-            if status == 'in-use' and attached != 'attached':
-                return 'attaching'
-            else:
-                return status
-
-        wait.re_search_wait(_volume_state, "in-use")
-
-        # NOTE(afazekas): Different hypervisor backends name the devices
-        # differently, so for now we just test whether the partition count
-        # increased/decreased
-
-        def _part_state():
-            current = ssh.get_partitions().split('\n')
-            LOG.debug("Partition map for instance: %s", current)
-            if current > part_lines:
-                return 'INCREASE'
-            if current < part_lines:
-                return 'DECREASE'
-            return 'EQUAL'
-
-        wait.state_wait(_part_state, 'INCREASE')
-        part_lines = ssh.get_partitions().split('\n')
-
-        # TODO(afazekas): Resource compare to the flavor settings
-
-        volume.detach()
-
-        self.assertVolumeStatusWait(volume, "available")
-
-        wait.state_wait(_part_state, 'DECREASE')
-
-        instance.stop()
-        address.disassociate()
-        self.assertAddressDisassociatedWait(address)
-        self.cancelResourceCleanUp(rcuk_da)
-        address.release()
-        self.assertAddressReleasedWait(address)
-        self.cancelResourceCleanUp(rcuk_a)
-
-        LOG.debug("Instance %s state: %s", instance.id, instance.state)
-        if instance.state != "stopped":
-            self.assertInstanceStateWait(instance, "stopped")
-        # TODO(afazekas): move steps from teardown to the test case
-
-
-# TODO(afazekas): Snapshot/volume read/write test case
diff --git a/tempest/thirdparty/boto/test_ec2_keys.py b/tempest/thirdparty/boto/test_ec2_keys.py
deleted file mode 100644
index 1b58cb4..0000000
--- a/tempest/thirdparty/boto/test_ec2_keys.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest.common.utils import data_utils
-from tempest import test
-from tempest.thirdparty.boto import test as boto_test
-
-
-def compare_key_pairs(a, b):
-    return (a.name == b.name and
-            a.fingerprint == b.fingerprint)
-
-
-class EC2KeysTest(boto_test.BotoTestCase):
-
-    @classmethod
-    def setup_clients(cls):
-        super(EC2KeysTest, cls).setup_clients()
-        cls.client = cls.os.ec2api_client
-
-    @classmethod
-    def resource_setup(cls):
-        super(EC2KeysTest, cls).resource_setup()
-        cls.ec = cls.ec2_error_code
-
-# TODO(afazekas): merge create, delete, get test cases
-    @test.idempotent_id('54236804-01b7-4cfe-a6f9-bce1340feec8')
-    def test_create_ec2_keypair(self):
-        # EC2 create KeyPair
-        key_name = data_utils.rand_name("keypair")
-        self.addResourceCleanUp(self.client.delete_key_pair, key_name)
-        keypair = self.client.create_key_pair(key_name)
-        self.assertTrue(compare_key_pairs(keypair,
-                        self.client.get_key_pair(key_name)))
-
-    @test.idempotent_id('3283b898-f90c-4952-b238-3e42b8c3f34f')
-    def test_delete_ec2_keypair(self):
-        # EC2 delete KeyPair
-        key_name = data_utils.rand_name("keypair")
-        self.client.create_key_pair(key_name)
-        self.client.delete_key_pair(key_name)
-        self.assertIsNone(self.client.get_key_pair(key_name))
-
-    @test.idempotent_id('fd89bd26-4d4d-4cf3-a303-65dd9158fcdc')
-    def test_get_ec2_keypair(self):
-        # EC2 get KeyPair
-        key_name = data_utils.rand_name("keypair")
-        self.addResourceCleanUp(self.client.delete_key_pair, key_name)
-        keypair = self.client.create_key_pair(key_name)
-        self.assertTrue(compare_key_pairs(keypair,
-                        self.client.get_key_pair(key_name)))
-
-    @test.idempotent_id('daa73da1-e11c-4558-8d76-a716be79a401')
-    def test_duplicate_ec2_keypair(self):
-        # EC2 duplicate KeyPair
-        key_name = data_utils.rand_name("keypair")
-        self.addResourceCleanUp(self.client.delete_key_pair, key_name)
-        keypair = self.client.create_key_pair(key_name)
-        self.assertBotoError(self.ec.client.InvalidKeyPair.Duplicate,
-                             self.client.create_key_pair,
-                             key_name)
-        self.assertTrue(compare_key_pairs(keypair,
-                        self.client.get_key_pair(key_name)))
diff --git a/tempest/thirdparty/boto/test_ec2_security_groups.py b/tempest/thirdparty/boto/test_ec2_security_groups.py
deleted file mode 100644
index 594dc8b..0000000
--- a/tempest/thirdparty/boto/test_ec2_security_groups.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest.common.utils import data_utils
-from tempest import test
-from tempest.thirdparty.boto import test as boto_test
-
-
-class EC2SecurityGroupTest(boto_test.BotoTestCase):
-
-    @classmethod
-    def setup_clients(cls):
-        super(EC2SecurityGroupTest, cls).setup_clients()
-        cls.client = cls.os.ec2api_client
-
-    @test.idempotent_id('519b566e-0c38-4629-905e-7d6b6355f524')
-    def test_create_authorize_security_group(self):
-        # EC2 Create, authorize/revoke security group
-        group_name = data_utils.rand_name("securty_group")
-        group_description = group_name + " security group description "
-        group = self.client.create_security_group(group_name,
-                                                  group_description)
-        self.addResourceCleanUp(self.client.delete_security_group, group_name)
-        groups_get = self.client.get_all_security_groups(
-            groupnames=(group_name,))
-        self.assertEqual(len(groups_get), 1)
-        group_get = groups_get[0]
-        self.assertEqual(group.name, group_get.name)
-        # ping (icmp_echo) and other icmp allowed from everywhere
-        # from_port and to_port act as icmp type
-        success = self.client.authorize_security_group(group_name,
-                                                       ip_protocol="icmp",
-                                                       cidr_ip="0.0.0.0/0",
-                                                       from_port=-1,
-                                                       to_port=-1)
-        self.assertTrue(success)
-        # allow standard ssh port from anywhere
-        success = self.client.authorize_security_group(group_name,
-                                                       ip_protocol="tcp",
-                                                       cidr_ip="0.0.0.0/0",
-                                                       from_port=22,
-                                                       to_port=22)
-        self.assertTrue(success)
-        # TODO(afazekas): Duplicate tests
-        group_get = self.client.get_all_security_groups(
-            groupnames=(group_name,))[0]
-        # remove listed rules
-        for ip_permission in group_get.rules:
-            for cidr in ip_permission.grants:
-                self.assertTrue(self.client.revoke_security_group(group_name,
-                                ip_protocol=ip_permission.ip_protocol,
-                                cidr_ip=cidr,
-                                from_port=ip_permission.from_port,
-                                to_port=ip_permission.to_port))
-
-        group_get = self.client.get_all_security_groups(
-            groupnames=(group_name,))[0]
-        # all rules should be removed now
-        self.assertEqual(0, len(group_get.rules))
diff --git a/tempest/thirdparty/boto/test_ec2_volumes.py b/tempest/thirdparty/boto/test_ec2_volumes.py
deleted file mode 100644
index 483d4c3..0000000
--- a/tempest/thirdparty/boto/test_ec2_volumes.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_log import log as logging
-
-from tempest import config
-from tempest import test
-from tempest.thirdparty.boto import test as boto_test
-
-CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
-
-def compare_volumes(a, b):
-    return (a.id == b.id and
-            a.size == b.size)
-
-
-class EC2VolumesTest(boto_test.BotoTestCase):
-
-    @classmethod
-    def skip_checks(cls):
-        super(EC2VolumesTest, cls).skip_checks()
-        if not CONF.service_available.cinder:
-            skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
-            raise cls.skipException(skip_msg)
-
-    @classmethod
-    def setup_clients(cls):
-        super(EC2VolumesTest, cls).setup_clients()
-        cls.client = cls.os.ec2api_client
-
-    @classmethod
-    def resource_setup(cls):
-        super(EC2VolumesTest, cls).resource_setup()
-        cls.zone = CONF.boto.aws_zone
-
-    @test.idempotent_id('663f0077-c743-48ad-8ae0-46821cbc0918')
-    def test_create_get_delete(self):
-        # EC2 Create, get, delete Volume
-        volume = self.client.create_volume(CONF.volume.volume_size, self.zone)
-        cuk = self.addResourceCleanUp(self.client.delete_volume, volume.id)
-        self.assertIn(volume.status, self.valid_volume_status)
-        retrieved = self.client.get_all_volumes((volume.id,))
-        self.assertEqual(1, len(retrieved))
-        self.assertTrue(compare_volumes(volume, retrieved[0]))
-        self.assertVolumeStatusWait(volume, "available")
-        self.client.delete_volume(volume.id)
-        self.cancelResourceCleanUp(cuk)
-
-    @test.idempotent_id('c6b60d7a-1af7-4f8e-af21-d539d9496149')
-    def test_create_volume_from_snapshot(self):
-        # EC2 Create volume from snapshot
-        volume = self.client.create_volume(CONF.volume.volume_size, self.zone)
-        self.addResourceCleanUp(self.client.delete_volume, volume.id)
-        self.assertVolumeStatusWait(volume, "available")
-        snap = self.client.create_snapshot(volume.id)
-        self.addResourceCleanUp(self.destroy_snapshot_wait, snap)
-        self.assertSnapshotStatusWait(snap, "completed")
-
-        svol = self.client.create_volume(CONF.volume.volume_size, self.zone,
-                                         snapshot=snap)
-        cuk = self.addResourceCleanUp(svol.delete)
-        self.assertVolumeStatusWait(svol, "available")
-        svol.delete()
-        self.cancelResourceCleanUp(cuk)
diff --git a/tempest/thirdparty/boto/test_s3_buckets.py b/tempest/thirdparty/boto/test_s3_buckets.py
deleted file mode 100644
index f008973..0000000
--- a/tempest/thirdparty/boto/test_s3_buckets.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest.common.utils import data_utils
-from tempest import test
-from tempest.thirdparty.boto import test as boto_test
-
-
-class S3BucketsTest(boto_test.BotoTestCase):
-
-    @classmethod
-    def setup_clients(cls):
-        super(S3BucketsTest, cls).setup_clients()
-        cls.client = cls.os.s3_client
-
-    @test.idempotent_id('4678525d-8da0-4518-81c1-f1f67d595b00')
-    def test_create_and_get_delete_bucket(self):
-        # S3 Create, get and delete bucket
-        bucket_name = data_utils.rand_name("s3bucket")
-        cleanup_key = self.addResourceCleanUp(self.client.delete_bucket,
-                                              bucket_name)
-        bucket = self.client.create_bucket(bucket_name)
-        self.assertTrue(bucket.name == bucket_name)
-        bucket = self.client.get_bucket(bucket_name)
-        self.assertTrue(bucket.name == bucket_name)
-        self.client.delete_bucket(bucket_name)
-        self.assertBotoError(self.s3_error_code.client.NoSuchBucket,
-                             self.client.get_bucket, bucket_name)
-        self.cancelResourceCleanUp(cleanup_key)
diff --git a/tempest/thirdparty/boto/test_s3_ec2_images.py b/tempest/thirdparty/boto/test_s3_ec2_images.py
deleted file mode 100644
index c41c7ac..0000000
--- a/tempest/thirdparty/boto/test_s3_ec2_images.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os
-
-from tempest.common.utils import data_utils
-from tempest import config
-from tempest import test
-from tempest.thirdparty.boto import test as boto_test
-from tempest.thirdparty.boto.utils import s3
-
-CONF = config.CONF
-
-
-class S3ImagesTest(boto_test.BotoTestCase):
-
-    @classmethod
-    def setup_clients(cls):
-        super(S3ImagesTest, cls).setup_clients()
-        cls.s3_client = cls.os.s3_client
-        cls.images_client = cls.os.ec2api_client
-
-    @classmethod
-    def resource_setup(cls):
-        super(S3ImagesTest, cls).resource_setup()
-        if not cls.conclusion['A_I_IMAGES_READY']:
-            raise cls.skipException("".join(("EC2 ", cls.__name__,
-                                    ": requires ami/aki/ari manifest")))
-        cls.materials_path = CONF.boto.s3_materials_path
-        cls.ami_manifest = CONF.boto.ami_manifest
-        cls.aki_manifest = CONF.boto.aki_manifest
-        cls.ari_manifest = CONF.boto.ari_manifest
-        cls.ami_path = cls.materials_path + os.sep + cls.ami_manifest
-        cls.aki_path = cls.materials_path + os.sep + cls.aki_manifest
-        cls.ari_path = cls.materials_path + os.sep + cls.ari_manifest
-        cls.bucket_name = data_utils.rand_name("bucket")
-        bucket = cls.s3_client.create_bucket(cls.bucket_name)
-        cls.addResourceCleanUp(cls.destroy_bucket,
-                               cls.s3_client.connection_data,
-                               cls.bucket_name)
-        s3.s3_upload_dir(bucket, cls.materials_path)
-
-    @test.idempotent_id('f9d360a5-0188-4c77-9db2-4c34c28d12a5')
-    def test_register_get_deregister_ami_image(self):
-        # Register and deregister ami image
-        image = {"name": data_utils.rand_name("ami-name"),
-                 "location": self.bucket_name + "/" + self.ami_manifest,
-                 "type": "ami"}
-        image["image_id"] = self.images_client.register_image(
-            name=image["name"],
-            image_location=image["location"])
-        # NOTE(afazekas): delete_snapshot=True might trigger a boto library bug
-        image["cleanUp"] = self.addResourceCleanUp(
-            self.images_client.deregister_image,
-            image["image_id"])
-        self.assertEqual(image["image_id"][0:3], image["type"])
-        retrieved_image = self.images_client.get_image(image["image_id"])
-        self.assertTrue(retrieved_image.name == image["name"])
-        self.assertTrue(retrieved_image.id == image["image_id"])
-        if retrieved_image.state != "available":
-            self.assertImageStateWait(retrieved_image, "available")
-        self.images_client.deregister_image(image["image_id"])
-        self.assertNotIn(image["image_id"], str(
-            self.images_client.get_all_images()))
-        self.cancelResourceCleanUp(image["cleanUp"])
-
-    @test.idempotent_id('42cca5b0-453b-4618-b99f-dbc039db426f')
-    def test_register_get_deregister_aki_image(self):
-        # Register and deregister aki image
-        image = {"name": data_utils.rand_name("aki-name"),
-                 "location": self.bucket_name + "/" + self.aki_manifest,
-                 "type": "aki"}
-        image["image_id"] = self.images_client.register_image(
-            name=image["name"],
-            image_location=image["location"])
-        image["cleanUp"] = self.addResourceCleanUp(
-            self.images_client.deregister_image,
-            image["image_id"])
-        self.assertEqual(image["image_id"][0:3], image["type"])
-        retrieved_image = self.images_client.get_image(image["image_id"])
-        self.assertTrue(retrieved_image.name == image["name"])
-        self.assertTrue(retrieved_image.id == image["image_id"])
-        self.assertIn(retrieved_image.state, self.valid_image_state)
-        if retrieved_image.state != "available":
-            self.assertImageStateWait(retrieved_image, "available")
-        self.images_client.deregister_image(image["image_id"])
-        self.assertNotIn(image["image_id"], str(
-            self.images_client.get_all_images()))
-        self.cancelResourceCleanUp(image["cleanUp"])
-
-    @test.idempotent_id('1359e860-841c-43bb-80f3-bb389cbfd81d')
-    def test_register_get_deregister_ari_image(self):
-        # Register and deregister ari image
-        image = {"name": data_utils.rand_name("ari-name"),
-                 "location": "/" + self.bucket_name + "/" + self.ari_manifest,
-                 "type": "ari"}
-        image["image_id"] = self.images_client.register_image(
-            name=image["name"],
-            image_location=image["location"])
-        image["cleanUp"] = self.addResourceCleanUp(
-            self.images_client.deregister_image,
-            image["image_id"])
-        self.assertEqual(image["image_id"][0:3], image["type"])
-        retrieved_image = self.images_client.get_image(image["image_id"])
-        self.assertIn(retrieved_image.state, self.valid_image_state)
-        if retrieved_image.state != "available":
-            self.assertImageStateWait(retrieved_image, "available")
-        self.assertIn(retrieved_image.state, self.valid_image_state)
-        self.assertTrue(retrieved_image.name == image["name"])
-        self.assertTrue(retrieved_image.id == image["image_id"])
-        self.images_client.deregister_image(image["image_id"])
-        self.cancelResourceCleanUp(image["cleanUp"])
-
-# TODO(afazekas): less copy-paste style
diff --git a/tempest/thirdparty/boto/test_s3_objects.py b/tempest/thirdparty/boto/test_s3_objects.py
deleted file mode 100644
index c42d85c..0000000
--- a/tempest/thirdparty/boto/test_s3_objects.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import contextlib
-
-import boto.s3.key
-
-from tempest.common.utils import data_utils
-from tempest import test
-from tempest.thirdparty.boto import test as boto_test
-
-
-class S3BucketsTest(boto_test.BotoTestCase):
-
-    @classmethod
-    def setup_clients(cls):
-        super(S3BucketsTest, cls).setup_clients()
-        cls.client = cls.os.s3_client
-
-    @test.idempotent_id('4eea567a-b46a-405b-a475-6097e1faebde')
-    def test_create_get_delete_object(self):
-        # S3 Create, get and delete object
-        bucket_name = data_utils.rand_name("s3bucket")
-        object_name = data_utils.rand_name("s3object")
-        content = 'x' * 42
-        bucket = self.client.create_bucket(bucket_name)
-        self.addResourceCleanUp(self.destroy_bucket,
-                                self.client.connection_data,
-                                bucket_name)
-
-        self.assertTrue(bucket.name == bucket_name)
-        with contextlib.closing(boto.s3.key.Key(bucket)) as key:
-            key.key = object_name
-            key.set_contents_from_string(content)
-            readback = key.get_contents_as_string()
-            self.assertTrue(readback == content)
-            bucket.delete_key(key)
-            self.assertBotoError(self.s3_error_code.client.NoSuchKey,
-                                 key.get_contents_as_string)
diff --git a/tempest/thirdparty/boto/utils/__init__.py b/tempest/thirdparty/boto/utils/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tempest/thirdparty/boto/utils/__init__.py
+++ /dev/null
diff --git a/tempest/thirdparty/boto/utils/s3.py b/tempest/thirdparty/boto/utils/s3.py
deleted file mode 100644
index 55c1b0a..0000000
--- a/tempest/thirdparty/boto/utils/s3.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import contextlib
-import os
-import re
-
-import boto
-import boto.s3.key
-
-from oslo_log import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def s3_upload_dir(bucket, path, prefix="", connection_data=None):
-    if isinstance(bucket, basestring):
-        with contextlib.closing(boto.connect_s3(**connection_data)) as conn:
-            bucket = conn.lookup(bucket)
-    for root, dirs, files in os.walk(path):
-        for fil in files:
-            with contextlib.closing(boto.s3.key.Key(bucket)) as key:
-                source = root + os.sep + fil
-                target = re.sub("^" + re.escape(path) + "?/", prefix, source)
-                if os.sep != '/':
-                    target = re.sub(re.escape(os.sep), '/', target)
-                key.key = target
-                LOG.info("Uploading %s to %s/%s", source, bucket.name, target)
-                key.set_contents_from_filename(source)
diff --git a/tempest/thirdparty/boto/utils/wait.py b/tempest/thirdparty/boto/utils/wait.py
deleted file mode 100644
index 8771ed7..0000000
--- a/tempest/thirdparty/boto/utils/wait.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import re
-import time
-
-import boto.exception
-from oslo_log import log as logging
-import testtools
-
-from tempest import config
-
-CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
-
-def state_wait(lfunction, final_set=set(), valid_set=None):
-    # TODO(afazekas): evaluate using ABC here
-    if not isinstance(final_set, set):
-        final_set = set((final_set,))
-    if not isinstance(valid_set, set) and valid_set is not None:
-        valid_set = set((valid_set,))
-    start_time = time.time()
-    old_status = status = lfunction()
-    while True:
-        if status != old_status:
-            LOG.info('State transition "%s" ==> "%s" %d second', old_status,
-                     status, time.time() - start_time)
-        if status in final_set:
-            return status
-        if valid_set is not None and status not in valid_set:
-            return status
-        dtime = time.time() - start_time
-        if dtime > CONF.boto.build_timeout:
-            raise testtools.TestCase\
-                .failureException("State change timeout exceeded!"
-                                  '(%ds) While waiting'
-                                  'for %s at "%s"' %
-                                  (dtime, final_set, status))
-        time.sleep(CONF.boto.build_interval)
-        old_status = status
-        status = lfunction()
-
-
-def re_search_wait(lfunction, regexp):
-    """Stops waiting on success."""
-    start_time = time.time()
-    while True:
-        text = lfunction()
-        result = re.search(regexp, text)
-        if result is not None:
-            LOG.info('Pattern "%s" found in %d second in "%s"',
-                     regexp,
-                     time.time() - start_time,
-                     text)
-            return result
-        dtime = time.time() - start_time
-        if dtime > CONF.boto.build_timeout:
-            raise testtools.TestCase\
-                .failureException('Pattern find timeout exceeded! '
-                                  '(%ds) While waiting for '
-                                  '"%s" pattern in "%s"' %
-                                  (dtime, regexp, text))
-        time.sleep(CONF.boto.build_interval)
-
-
-def wait_no_exception(lfunction, exc_class=None, exc_matcher=None):
-    """Stops waiting on success."""
-    start_time = time.time()
-    if exc_matcher is not None:
-        exc_class = boto.exception.BotoServerError
-
-    if exc_class is None:
-        exc_class = BaseException
-    while True:
-        result = None
-        try:
-            result = lfunction()
-            LOG.info('No exception in %d seconds',
-                     time.time() - start_time)
-            return result
-        except exc_class as exc:
-            if exc_matcher is not None:
-                res = exc_matcher.match(exc)
-                if res is not None:
-                    LOG.info(res)
-                    raise exc
-        # Let the other exceptions propagate
-        dtime = time.time() - start_time
-        if dtime > CONF.boto.build_timeout:
-            raise testtools.TestCase\
-                .failureException("Wait timeout exceeded! (%ds)" % dtime)
-        time.sleep(CONF.boto.build_interval)
-
-
-# NOTE(afazekas): EC2/boto normally raise exception instead of empty list
-def wait_exception(lfunction):
-    """Returns with the exception or raises one."""
-    start_time = time.time()
-    while True:
-        try:
-            lfunction()
-        except BaseException as exc:
-            LOG.info('Exception in %d seconds',
-                     time.time() - start_time)
-            return exc
-        dtime = time.time() - start_time
-        if dtime > CONF.boto.build_timeout:
-            raise testtools.TestCase\
-                .failureException("Wait timeout exceeded! (%ds)" % dtime)
-        time.sleep(CONF.boto.build_interval)
-
-# TODO(afazekas): consider strategy design pattern..