Merge "Add unit test for method list_keypairs"
diff --git a/HACKING.rst b/HACKING.rst
index c776c49..45c35df 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -314,6 +314,39 @@
          * Check written content in the instance booted from snapshot
         """
 
+Test Identification with Idempotent ID
+--------------------------------------
+
+Every function that provides a test must have an ``idempotent_id`` decorator
+with a unique ``uuid-4`` value. This ID is used to complement the fully
+qualified test name and track test functionality through refactoring. The
+format of the metadata looks like::
+
+    @test.idempotent_id('585e934c-448e-43c4-acbf-d06a9b899997')
+    def test_list_servers_with_detail(self):
+        # The created server should be in the detailed list of all servers
+        ...
+
+Tempest includes a ``check_uuid.py`` tool that checks for the existence
+and uniqueness of ``idempotent_id`` metadata for every test. By default the
+tool runs against the Tempest package by calling::
+
+    python check_uuid.py
+
+It can be invoked against any test suite by passing a package name::
+
+    python check_uuid.py --package <package_name>
+
+Tests without an ``idempotent_id`` can be fixed automatically by running
+the command with the ``--fix`` flag, which modifies the source package by
+inserting a randomly generated uuid for every test that does not have
+one::
+
+    python check_uuid.py --fix
+
+The ``check_uuid.py`` tool is run as part of the Tempest gate job
+to ensure that all tests have an ``idempotent_id`` decorator.
+
 Branchless Tempest Considerations
 ---------------------------------
 
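As a quick illustration of the ``idempotent_id`` workflow documented in the
HACKING.rst addition above, a fresh ``uuid-4`` value can also be generated by
hand with the Python standard library and pasted into the decorator;
``check_uuid.py --fix`` automates the same step for any test that is missing
one::

    import uuid

    # Paste the printed value into the new test's
    # @test.idempotent_id('<generated value>') decorator.
    print(uuid.uuid4())
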
diff --git a/README.rst b/README.rst
index af24569..d7063ba 100644
--- a/README.rst
+++ b/README.rst
@@ -107,7 +107,7 @@
 ----------
 
 Tempest also has a set of unit tests which test the Tempest code itself. These
-tests can be run by specifing the test discovery path::
+tests can be run by specifying the test discovery path::
 
     $> OS_TEST_PATH=./tempest/tests testr run --parallel
 
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 0805544..3e6013d 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -142,7 +142,7 @@
  #. alt_password
  #. alt_tenant_name
 
-And in the auth secion:
+And in the auth section:
 
  #. allow_tenant_isolation = False
  #. comment out 'test_accounts_file' or keep it as empty
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index 1095e77..724bff4 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -72,7 +72,7 @@
 #logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
 
 # List of logger=LEVEL pairs. (list value)
-#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN
 
 # Enables or disables publication of error events. (boolean value)
 #publish_errors = false
@@ -432,6 +432,11 @@
 # value)
 #attach_encrypted_volume = true
 
+# Does the test environment support creating instances with multiple
+# ports on the same network? This is only valid when using Neutron.
+# (boolean value)
+#allow_duplicate_networks = false
+
 
 [dashboard]
 
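The new sample option above is read by the tests as
``CONF.compute_feature_enabled.allow_duplicate_networks``, so enabling it in a
local ``tempest.conf`` would look roughly like this (assuming it is registered
under the ``[compute-feature-enabled]`` section, which is where the related
``tempest/config.py`` change in this patch appears to put it)::

    [compute-feature-enabled]
    allow_duplicate_networks = true
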
diff --git a/setup.cfg b/setup.cfg
index 5c78632..f28c481 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -18,6 +18,12 @@
     Programming Language :: Python :: 3
     Programming Language :: Python :: 3.4
 
+[files]
+packages =
+    tempest
+data_files =
+    /etc/tempest = etc/*
+
 [entry_points]
 console_scripts =
     verify-tempest-config = tempest.cmd.verify_tempest_config:main
@@ -26,7 +32,8 @@
     tempest-cleanup = tempest.cmd.cleanup:main
     tempest-account-generator = tempest.cmd.account_generator:main
     tempest = tempest.cmd.main:main
-
+tempest.cm =
+    init = tempest.cmd.init:TempestInit
 oslo.config.opts =
     tempest.config = tempest.config:list_opts
 
diff --git a/tempest/api/compute/admin/test_aggregates.py b/tempest/api/compute/admin/test_aggregates.py
index 91e55d6..9334fb6 100644
--- a/tempest/api/compute/admin/test_aggregates.py
+++ b/tempest/api/compute/admin/test_aggregates.py
@@ -106,7 +106,7 @@
 
         # set the metadata of the aggregate
         meta = {"key": "value"}
-        body = self.client.set_metadata(aggregate['id'], meta)
+        body = self.client.set_metadata(aggregate['id'], metadata=meta)
         self.assertEqual(meta, body["metadata"])
 
         # verify the metadata has been set
@@ -130,9 +130,10 @@
         new_aggregate_name = aggregate_name + '_new'
         new_az_name = az_name + '_new'
 
-        resp_aggregate = self.client.update_aggregate(aggregate_id,
-                                                      new_aggregate_name,
-                                                      new_az_name)
+        resp_aggregate = self.client.update_aggregate(
+            aggregate_id,
+            name=new_aggregate_name,
+            availability_zone=new_az_name)
         self.assertEqual(new_aggregate_name, resp_aggregate['name'])
         self.assertEqual(new_az_name, resp_aggregate['availability_zone'])
 
@@ -150,13 +151,13 @@
         aggregate = self.client.create_aggregate(name=aggregate_name)
         self.addCleanup(self.client.delete_aggregate, aggregate['id'])
 
-        body = self.client.add_host(aggregate['id'], self.host)
+        body = self.client.add_host(aggregate['id'], host=self.host)
         self.assertEqual(aggregate_name, body['name'])
         self.assertEqual(aggregate['availability_zone'],
                          body['availability_zone'])
         self.assertIn(self.host, body['hosts'])
 
-        body = self.client.remove_host(aggregate['id'], self.host)
+        body = self.client.remove_host(aggregate['id'], host=self.host)
         self.assertEqual(aggregate_name, body['name'])
         self.assertEqual(aggregate['availability_zone'],
                          body['availability_zone'])
@@ -169,8 +170,9 @@
         aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
         aggregate = self.client.create_aggregate(name=aggregate_name)
         self.addCleanup(self.client.delete_aggregate, aggregate['id'])
-        self.client.add_host(aggregate['id'], self.host)
-        self.addCleanup(self.client.remove_host, aggregate['id'], self.host)
+        self.client.add_host(aggregate['id'], host=self.host)
+        self.addCleanup(self.client.remove_host, aggregate['id'],
+                        host=self.host)
 
         aggregates = self.client.list_aggregates()
         aggs = filter(lambda x: x['id'] == aggregate['id'], aggregates)
@@ -187,8 +189,9 @@
         aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
         aggregate = self.client.create_aggregate(name=aggregate_name)
         self.addCleanup(self.client.delete_aggregate, aggregate['id'])
-        self.client.add_host(aggregate['id'], self.host)
-        self.addCleanup(self.client.remove_host, aggregate['id'], self.host)
+        self.client.add_host(aggregate['id'], host=self.host)
+        self.addCleanup(self.client.remove_host, aggregate['id'],
+                        host=self.host)
 
         body = self.client.show_aggregate(aggregate['id'])
         self.assertEqual(aggregate_name, body['name'])
@@ -204,8 +207,9 @@
         aggregate = self.client.create_aggregate(
             name=aggregate_name, availability_zone=az_name)
         self.addCleanup(self.client.delete_aggregate, aggregate['id'])
-        self.client.add_host(aggregate['id'], self.host)
-        self.addCleanup(self.client.remove_host, aggregate['id'], self.host)
+        self.client.add_host(aggregate['id'], host=self.host)
+        self.addCleanup(self.client.remove_host, aggregate['id'],
+                        host=self.host)
         server_name = data_utils.rand_name('test_server')
         admin_servers_client = self.os_adm.servers_client
         server = self.create_test_server(name=server_name,
diff --git a/tempest/api/compute/admin/test_aggregates_negative.py b/tempest/api/compute/admin/test_aggregates_negative.py
index 74a8547..231c88f 100644
--- a/tempest/api/compute/admin/test_aggregates_negative.py
+++ b/tempest/api/compute/admin/test_aggregates_negative.py
@@ -143,7 +143,7 @@
         self.addCleanup(self.client.delete_aggregate, aggregate['id'])
 
         self.assertRaises(lib_exc.NotFound, self.client.add_host,
-                          aggregate['id'], non_exist_host)
+                          aggregate['id'], host=non_exist_host)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('7324c334-bd13-4c93-8521-5877322c3d51')
@@ -155,7 +155,7 @@
 
         self.assertRaises(lib_exc.Forbidden,
                           self.user_client.add_host,
-                          aggregate['id'], self.host)
+                          aggregate['id'], host=self.host)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('19dd44e1-c435-4ee1-a402-88c4f90b5950')
@@ -165,11 +165,12 @@
         aggregate = self.client.create_aggregate(name=aggregate_name)
         self.addCleanup(self.client.delete_aggregate, aggregate['id'])
 
-        self.client.add_host(aggregate['id'], self.host)
-        self.addCleanup(self.client.remove_host, aggregate['id'], self.host)
+        self.client.add_host(aggregate['id'], host=self.host)
+        self.addCleanup(self.client.remove_host, aggregate['id'],
+                        host=self.host)
 
         self.assertRaises(lib_exc.Conflict, self.client.add_host,
-                          aggregate['id'], self.host)
+                          aggregate['id'], host=self.host)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('7a53af20-137a-4e44-a4ae-e19260e626d9')
@@ -179,12 +180,13 @@
         aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
         aggregate = self.client.create_aggregate(name=aggregate_name)
         self.addCleanup(self.client.delete_aggregate, aggregate['id'])
-        self.client.add_host(aggregate['id'], self.host)
-        self.addCleanup(self.client.remove_host, aggregate['id'], self.host)
+        self.client.add_host(aggregate['id'], host=self.host)
+        self.addCleanup(self.client.remove_host, aggregate['id'],
+                        host=self.host)
 
         self.assertRaises(lib_exc.Forbidden,
                           self.user_client.remove_host,
-                          aggregate['id'], self.host)
+                          aggregate['id'], host=self.host)
 
     @test.attr(type=['negative'])
     @test.idempotent_id('95d6a6fa-8da9-4426-84d0-eec0329f2e4d')
@@ -195,4 +197,4 @@
         self.addCleanup(self.client.delete_aggregate, aggregate['id'])
 
         self.assertRaises(lib_exc.NotFound, self.client.remove_host,
-                          aggregate['id'], non_exist_host)
+                          aggregate['id'], host=non_exist_host)
diff --git a/tempest/api/compute/admin/test_fixed_ips.py b/tempest/api/compute/admin/test_fixed_ips.py
index dc9a7ef..3e20b46 100644
--- a/tempest/api/compute/admin/test_fixed_ips.py
+++ b/tempest/api/compute/admin/test_fixed_ips.py
@@ -56,11 +56,9 @@
     @test.idempotent_id('5485077b-7e46-4cec-b402-91dc3173433b')
     @test.services('network')
     def test_set_reserve(self):
-        body = {"reserve": "None"}
-        self.client.reserve_fixed_ip(self.ip, body)
+        self.client.reserve_fixed_ip(self.ip, reserve="None")
 
     @test.idempotent_id('7476e322-b9ff-4710-bf82-49d51bac6e2e')
     @test.services('network')
     def test_set_unreserve(self):
-        body = {"unreserve": "None"}
-        self.client.reserve_fixed_ip(self.ip, body)
+        self.client.reserve_fixed_ip(self.ip, unreserve="None")
diff --git a/tempest/api/compute/admin/test_fixed_ips_negative.py b/tempest/api/compute/admin/test_fixed_ips_negative.py
index 6698638..e67936c 100644
--- a/tempest/api/compute/admin/test_fixed_ips_negative.py
+++ b/tempest/api/compute/admin/test_fixed_ips_negative.py
@@ -60,19 +60,17 @@
     @test.idempotent_id('ce60042c-fa60-4836-8d43-1c8e3359dc47')
     @test.services('network')
     def test_set_reserve_with_non_admin_user(self):
-        body = {"reserve": "None"}
         self.assertRaises(lib_exc.Forbidden,
                           self.non_admin_client.reserve_fixed_ip,
-                          self.ip, body)
+                          self.ip, reserve="None")
 
     @test.attr(type=['negative'])
     @test.idempotent_id('f1f7a35b-0390-48c5-9803-5f27461439db')
     @test.services('network')
     def test_set_unreserve_with_non_admin_user(self):
-        body = {"unreserve": "None"}
         self.assertRaises(lib_exc.Forbidden,
                           self.non_admin_client.reserve_fixed_ip,
-                          self.ip, body)
+                          self.ip, unreserve="None")
 
     @test.attr(type=['negative'])
     @test.idempotent_id('f51cf464-7fc5-4352-bc3e-e75cfa2cb717')
@@ -80,19 +78,17 @@
     def test_set_reserve_with_invalid_ip(self):
         # NOTE(maurosr): since this exercises the same code snippet, we do it
         # only for reserve action
-        body = {"reserve": "None"}
         # NOTE(eliqiao): in Juno, the exception is NotFound, but in master, we
         # change the error code to BadRequest, both exceptions should be
         # accepted by tempest
         self.assertRaises((lib_exc.NotFound, lib_exc.BadRequest),
                           self.client.reserve_fixed_ip,
-                          "my.invalid.ip", body)
+                          "my.invalid.ip", reserve="None")
 
     @test.attr(type=['negative'])
     @test.idempotent_id('fd26ef50-f135-4232-9d32-281aab3f9176')
     @test.services('network')
     def test_fixed_ip_with_invalid_action(self):
-        body = {"invalid_action": "None"}
         self.assertRaises(lib_exc.BadRequest,
                           self.client.reserve_fixed_ip,
-                          self.ip, body)
+                          self.ip, invalid_action="None")
diff --git a/tempest/api/compute/admin/test_simple_tenant_usage.py b/tempest/api/compute/admin/test_simple_tenant_usage.py
index 1c9e5d7..204281c 100644
--- a/tempest/api/compute/admin/test_simple_tenant_usage.py
+++ b/tempest/api/compute/admin/test_simple_tenant_usage.py
@@ -50,7 +50,7 @@
     def test_list_usage_all_tenants(self):
         # Get usage for all tenants
         tenant_usage = self.adm_client.list_tenant_usages(
-            start=self.start, end=self.end, detailed=int(bool(True)))
+            start=self.start, end=self.end, detailed="1")
         self.assertEqual(len(tenant_usage), 8)
 
     @test.idempotent_id('94135049-a4c5-4934-ad39-08fa7da4f22e')
diff --git a/tempest/api/compute/admin/test_simple_tenant_usage_negative.py b/tempest/api/compute/admin/test_simple_tenant_usage_negative.py
index 934fc31..e9b4ad4 100644
--- a/tempest/api/compute/admin/test_simple_tenant_usage_negative.py
+++ b/tempest/api/compute/admin/test_simple_tenant_usage_negative.py
@@ -66,6 +66,6 @@
         # Get usage for all tenants with non admin user
         params = {'start': self.start,
                   'end': self.end,
-                  'detailed': int(bool(True))}
+                  'detailed': "1"}
         self.assertRaises(lib_exc.Forbidden,
                           self.client.list_tenant_usages, **params)
diff --git a/tempest/api/compute/servers/test_attach_interfaces.py b/tempest/api/compute/servers/test_attach_interfaces.py
index 396327b..9e27f33 100644
--- a/tempest/api/compute/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/servers/test_attach_interfaces.py
@@ -98,7 +98,7 @@
     def _test_create_interface_by_network_id(self, server, ifs):
         network_id = ifs[0]['net_id']
         iface = self.client.create_interface(server['id'],
-                                             network_id=network_id)
+                                             net_id=network_id)
         iface = self.wait_for_interface_status(
             server['id'], iface['port_id'], 'ACTIVE')
         self._check_interface(iface, network_id=network_id)
@@ -179,7 +179,7 @@
         self.assertTrue(interface_count > 0)
         self._check_interface(ifs[0])
         network_id = ifs[0]['net_id']
-        self.client.add_fixed_ip(server['id'], network_id)
+        self.client.add_fixed_ip(server['id'], networkId=network_id)
         # Remove the fixed IP from server.
         server_detail = self.os.servers_client.show_server(
             server['id'])
@@ -192,4 +192,4 @@
                     break
             if fixed_ip is not None:
                 break
-        self.client.remove_fixed_ip(server['id'], fixed_ip)
+        self.client.remove_fixed_ip(server['id'], address=fixed_ip)
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index 39447b8..23a9cb3 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -65,6 +65,20 @@
         cls.password = cls.server_initial['adminPass']
         cls.server = cls.client.show_server(cls.server_initial['id'])
 
+    def _create_net_subnet_ret_net_from_cidr(self, cidr):
+        name_net = data_utils.rand_name(self.__class__.__name__)
+        net = self.network_client.create_network(name=name_net)
+        self.addCleanup(self.network_client.delete_network,
+                        net['network']['id'])
+
+        subnet = self.network_client.create_subnet(
+            network_id=net['network']['id'],
+            cidr=cidr,
+            ip_version=4)
+        self.addCleanup(self.network_client.delete_subnet,
+                        subnet['subnet']['id'])
+        return net
+
     @test.attr(type='smoke')
     @test.idempotent_id('5de47127-9977-400a-936f-abcfbec1218f')
     def test_verify_server_details(self):
@@ -147,29 +161,8 @@
     def test_verify_multiple_nics_order(self):
         # Verify that the networks order given at the server creation is
         # preserved within the server.
-        name_net1 = data_utils.rand_name(self.__class__.__name__)
-        net1 = self.network_client.create_network(name=name_net1)
-        self.addCleanup(self.network_client.delete_network,
-                        net1['network']['id'])
-
-        name_net2 = data_utils.rand_name(self.__class__.__name__)
-        net2 = self.network_client.create_network(name=name_net2)
-        self.addCleanup(self.network_client.delete_network,
-                        net2['network']['id'])
-
-        subnet1 = self.network_client.create_subnet(
-            network_id=net1['network']['id'],
-            cidr='19.80.0.0/24',
-            ip_version=4)
-        self.addCleanup(self.network_client.delete_subnet,
-                        subnet1['subnet']['id'])
-
-        subnet2 = self.network_client.create_subnet(
-            network_id=net2['network']['id'],
-            cidr='19.86.0.0/24',
-            ip_version=4)
-        self.addCleanup(self.network_client.delete_subnet,
-                        subnet2['subnet']['id'])
+        net1 = self._create_net_subnet_ret_net_from_cidr('19.80.0.0/24')
+        net2 = self._create_net_subnet_ret_net_from_cidr('19.86.0.0/24')
 
         networks = [{'uuid': net1['network']['id']},
                     {'uuid': net2['network']['id']}]
@@ -196,13 +189,50 @@
         # other times ['19.80.0.3', '19.86.0.3']. So we check if the first
         # address is in first network, similarly second address is in second
         # network.
-        addr = [addresses[name_net1][0]['addr'],
-                addresses[name_net2][0]['addr']]
+        addr = [addresses[net1['network']['name']][0]['addr'],
+                addresses[net2['network']['name']][0]['addr']]
         networks = [netaddr.IPNetwork('19.80.0.0/24'),
                     netaddr.IPNetwork('19.86.0.0/24')]
         for address, network in zip(addr, networks):
             self.assertIn(address, network)
 
+    @test.idempotent_id('1678d144-ed74-43f8-8e57-ab10dbf9b3c2')
+    @testtools.skipUnless(CONF.service_available.neutron,
+                          'Neutron service must be available.')
+    # The below skipUnless should be removed once Kilo-eol happens.
+    @testtools.skipUnless(CONF.compute_feature_enabled.
+                          allow_duplicate_networks,
+                          'Duplicate networks must be allowed')
+    def test_verify_duplicate_network_nics(self):
+        # Verify that server creation does not fail when more than one nic
+        # is created on the same network.
+        net1 = self._create_net_subnet_ret_net_from_cidr('19.80.0.0/24')
+        net2 = self._create_net_subnet_ret_net_from_cidr('19.86.0.0/24')
+
+        networks = [{'uuid': net1['network']['id']},
+                    {'uuid': net2['network']['id']},
+                    {'uuid': net1['network']['id']}]
+
+        server_multi_nics = self.create_test_server(
+            networks=networks, wait_until='ACTIVE')
+
+        def cleanup_server():
+            self.client.delete_server(server_multi_nics['id'])
+            self.client.wait_for_server_termination(server_multi_nics['id'])
+
+        self.addCleanup(cleanup_server)
+
+        addresses = self.client.list_addresses(server_multi_nics['id'])
+
+        addr = [addresses[net1['network']['name']][0]['addr'],
+                addresses[net2['network']['name']][0]['addr'],
+                addresses[net1['network']['name']][1]['addr']]
+        networks = [netaddr.IPNetwork('19.80.0.0/24'),
+                    netaddr.IPNetwork('19.86.0.0/24'),
+                    netaddr.IPNetwork('19.80.0.0/24')]
+        for address, network in zip(addr, networks):
+            self.assertIn(address, network)
+
 
 class ServersWithSpecificFlavorTestJSON(base.BaseV2ComputeAdminTest):
     disk_config = 'AUTO'
diff --git a/tempest/api/network/admin/test_quotas.py b/tempest/api/network/admin/test_quotas.py
index cb113a7..63395cc 100644
--- a/tempest/api/network/admin/test_quotas.py
+++ b/tempest/api/network/admin/test_quotas.py
@@ -61,7 +61,7 @@
 
         # Change quotas for tenant
         quota_set = self.admin_client.update_quotas(tenant_id,
-                                                    **new_quotas)
+                                                    **new_quotas)['quota']
         self.addCleanup(self.admin_client.reset_quotas, tenant_id)
         for key, value in six.iteritems(new_quotas):
             self.assertEqual(value, quota_set[key])
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index 4f54562..6a8fbec 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -156,26 +156,21 @@
         network = self.create_network()
         subnet = self.create_subnet(network)
         self.addCleanup(self.client.delete_subnet, subnet['id'])
-        # Create two ports specifying a fixed_ips
-        address = self._get_ipaddress_from_tempest_conf()
-        _fixed_ip_1 = str(address + 3)
-        _fixed_ip_2 = str(address + 4)
-        fixed_ips_1 = [{'ip_address': _fixed_ip_1}]
-        port_1 = self.client.create_port(network_id=network['id'],
-                                         fixed_ips=fixed_ips_1)
+        # Create two ports
+        port_1 = self.client.create_port(network_id=network['id'])
         self.addCleanup(self.client.delete_port, port_1['port']['id'])
-        fixed_ips_2 = [{'ip_address': _fixed_ip_2}]
-        port_2 = self.client.create_port(network_id=network['id'],
-                                         fixed_ips=fixed_ips_2)
+        port_2 = self.client.create_port(network_id=network['id'])
         self.addCleanup(self.client.delete_port, port_2['port']['id'])
         # List ports filtered by fixed_ips
-        fixed_ips = 'ip_address=' + _fixed_ip_1
+        port_1_fixed_ip = port_1['port']['fixed_ips'][0]['ip_address']
+        fixed_ips = 'ip_address=' + port_1_fixed_ip
         port_list = self.client.list_ports(fixed_ips=fixed_ips)
+        # Check that we got the desired port
         ports = port_list['ports']
         self.assertEqual(len(ports), 1)
         self.assertEqual(ports[0]['id'], port_1['port']['id'])
         self.assertEqual(ports[0]['fixed_ips'][0]['ip_address'],
-                         _fixed_ip_1)
+                         port_1_fixed_ip)
         self.assertEqual(ports[0]['network_id'], network['id'])
 
     @test.idempotent_id('5ad01ed0-0e6e-4c5d-8194-232801b15c72')
diff --git a/tempest/api/volume/admin/test_volumes_backup.py b/tempest/api/volume/admin/test_volumes_backup.py
index 2c545a7..8015c35 100644
--- a/tempest/api/volume/admin/test_volumes_backup.py
+++ b/tempest/api/volume/admin/test_volumes_backup.py
@@ -38,6 +38,10 @@
 
         cls.volume = cls.create_volume()
 
+    def _delete_backup(self, backup_id):
+        self.backups_adm_client.delete_backup(backup_id)
+        self.backups_adm_client.wait_for_backup_deletion(backup_id)
+
     @test.idempotent_id('a66eb488-8ee1-47d4-8e9f-575a095728c6')
     def test_volume_backup_create_get_detailed_list_restore_delete(self):
         # Create backup
@@ -74,6 +78,52 @@
         self.admin_volume_client.wait_for_volume_status(
             restore['volume_id'], 'available')
 
+    @test.idempotent_id('a99c54a1-dd80-4724-8a13-13bf58d4068d')
+    def test_volume_backup_export_import(self):
+        # Create backup
+        backup_name = data_utils.rand_name('Backup')
+        backup = self.backups_adm_client.create_backup(self.volume['id'],
+                                                       name=backup_name)
+        self.addCleanup(self._delete_backup, backup['id'])
+        self.assertEqual(backup_name, backup['name'])
+        self.backups_adm_client.wait_for_backup_status(backup['id'],
+                                                       'available')
+
+        # Export Backup
+        export_backup = self.backups_adm_client.export_backup(backup['id'])
+        self.assertIn('backup_service', export_backup)
+        self.assertIn('backup_url', export_backup)
+        self.assertTrue(export_backup['backup_service'].startswith(
+                        'cinder.backup.drivers'))
+        self.assertIsNotNone(export_backup['backup_url'])
+
+        # Import Backup
+        import_backup = self.backups_adm_client.import_backup(
+            backup_service=export_backup['backup_service'],
+            backup_url=export_backup['backup_url'])
+        self.addCleanup(self._delete_backup, import_backup['id'])
+        self.assertIn("id", import_backup)
+        self.backups_adm_client.wait_for_backup_status(import_backup['id'],
+                                                       'available')
+
+        # Verify Import Backup
+        backups = self.backups_adm_client.list_backups(detail=True)
+        self.assertIn(import_backup['id'], [b['id'] for b in backups])
+
+        # Restore backup
+        restore = self.backups_adm_client.restore_backup(import_backup['id'])
+        self.addCleanup(self.admin_volume_client.delete_volume,
+                        restore['volume_id'])
+        self.assertEqual(import_backup['id'], restore['backup_id'])
+        self.admin_volume_client.wait_for_volume_status(restore['volume_id'],
+                                                        'available')
+
+        # Verify if restored volume is there in volume list
+        volumes = self.admin_volume_client.list_volumes()
+        self.assertIn(restore['volume_id'], [v['id'] for v in volumes])
+        self.backups_adm_client.wait_for_backup_status(import_backup['id'],
+                                                       'available')
+
 
 class VolumesBackupsV1Test(VolumesBackupsV2Test):
     _api_version = 1
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index f8ae5eb..b67a6d2 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -62,6 +62,7 @@
         super(BaseVolumeTest, cls).setup_clients()
         cls.servers_client = cls.os.servers_client
         cls.networks_client = cls.os.networks_client
+        cls.images_client = cls.os.images_client
 
         if cls._api_version == 1:
             cls.snapshots_client = cls.os.snapshots_client
diff --git a/tempest/api/volume/test_volumes_get.py b/tempest/api/volume/test_volumes_get.py
index 29c21ed..a90f9ca 100644
--- a/tempest/api/volume/test_volumes_get.py
+++ b/tempest/api/volume/test_volumes_get.py
@@ -131,7 +131,11 @@
     @test.idempotent_id('54a01030-c7fc-447c-86ee-c1182beae638')
     @test.services('image')
     def test_volume_create_get_update_delete_from_image(self):
-        self._volume_create_get_update_delete(imageRef=CONF.compute.image_ref)
+        image = self.images_client.show_image(CONF.compute.image_ref)
+        min_disk = image.get('minDisk')
+        disk_size = max(min_disk, CONF.volume.volume_size)
+        self._volume_create_get_update_delete(
+            imageRef=CONF.compute.image_ref, size=disk_size)
 
     @test.idempotent_id('3f591b4a-7dc6-444c-bd51-77469506b3a1')
     def test_volume_create_get_update_delete_as_clone(self):
diff --git a/tempest/api_schema/response/compute/v2_1/migrations.py b/tempest/api_schema/response/compute/v2_1/migrations.py
index 722372c..b7d66ea 100644
--- a/tempest/api_schema/response/compute/v2_1/migrations.py
+++ b/tempest/api_schema/response/compute/v2_1/migrations.py
@@ -23,15 +23,15 @@
                     'type': 'object',
                     'properties': {
                         'id': {'type': 'integer'},
-                        'status': {'type': 'string'},
-                        'instance_uuid': {'type': 'string'},
-                        'source_node': {'type': 'string'},
-                        'source_compute': {'type': 'string'},
-                        'dest_node': {'type': 'string'},
-                        'dest_compute': {'type': 'string'},
-                        'dest_host': {'type': 'string'},
-                        'old_instance_type_id': {'type': 'integer'},
-                        'new_instance_type_id': {'type': 'integer'},
+                        'status': {'type': ['string', 'null']},
+                        'instance_uuid': {'type': ['string', 'null']},
+                        'source_node': {'type': ['string', 'null']},
+                        'source_compute': {'type': ['string', 'null']},
+                        'dest_node': {'type': ['string', 'null']},
+                        'dest_compute': {'type': ['string', 'null']},
+                        'dest_host': {'type': ['string', 'null']},
+                        'old_instance_type_id': {'type': ['integer', 'null']},
+                        'new_instance_type_id': {'type': ['integer', 'null']},
                         'created_at': {'type': 'string'},
                         'updated_at': {'type': ['string', 'null']}
                     },
diff --git a/tempest/cmd/cleanup_service.py b/tempest/cmd/cleanup_service.py
index 1de20d6..dcdf7c5 100644
--- a/tempest/cmd/cleanup_service.py
+++ b/tempest/cmd/cleanup_service.py
@@ -1,12 +1,12 @@
 #!/usr/bin/env python
 
-# Copyright 2014 Dell Inc.
+# Copyright 2015 Dell Inc.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
 #    not use this file except in compliance with the License. You may obtain
 #    a copy of the License at
 #
-#         http://www.apache.org/licenses/LICENSE-2.0
+#        http://www.apache.org/licenses/LICENSE-2.0
 #
 #    Unless required by applicable law or agreed to in writing, software
 #    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
@@ -153,9 +153,8 @@
         for snap in snaps:
             try:
                 client.delete_snapshot(snap['id'])
-            except Exception as e:
-                LOG.exception("Delete Snapshot exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Snapshot exception.")
 
     def dry_run(self):
         snaps = self.list()
@@ -180,9 +179,8 @@
         for server in servers:
             try:
                 client.delete_server(server['id'])
-            except Exception as e:
-                LOG.exception("Delete Server exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Server exception.")
 
     def dry_run(self):
         servers = self.list()
@@ -203,9 +201,8 @@
         for sg in sgs:
             try:
                 client.delete_server_group(sg['id'])
-            except Exception as e:
-                LOG.exception("Delete Server Group exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Server Group exception.")
 
     def dry_run(self):
         sgs = self.list()
@@ -229,9 +226,8 @@
         for stack in stacks:
             try:
                 client.delete_stack(stack['id'])
-            except Exception as e:
-                LOG.exception("Delete Stack exception: %s " % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Stack exception.")
 
     def dry_run(self):
         stacks = self.list()
@@ -256,9 +252,8 @@
             try:
                 name = k['keypair']['name']
                 client.delete_keypair(name)
-            except Exception as e:
-                LOG.exception("Delete Keypairs exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Keypairs exception.")
 
     def dry_run(self):
         keypairs = self.list()
@@ -283,8 +278,8 @@
         for g in secgrp_del:
             try:
                 client.delete_security_group(g['id'])
-            except Exception as e:
-                LOG.exception("Delete Security Groups exception: %s" % e)
+            except Exception:
+                LOG.exception("Delete Security Groups exception.")
 
     def dry_run(self):
         secgrp_del = self.list()
@@ -308,9 +303,8 @@
         for f in floating_ips:
             try:
                 client.delete_floating_ip(f['id'])
-            except Exception as e:
-                LOG.exception("Delete Floating IPs exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Floating IPs exception.")
 
     def dry_run(self):
         floating_ips = self.list()
@@ -334,9 +328,8 @@
         for v in vols:
             try:
                 client.delete_volume(v['id'])
-            except Exception as e:
-                LOG.exception("Delete Volume exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Volume exception.")
 
     def dry_run(self):
         vols = self.list()
@@ -352,9 +345,8 @@
         client = self.client
         try:
             client.delete_quota_set(self.tenant_id)
-        except Exception as e:
-            LOG.exception("Delete Volume Quotas exception: %s" % e)
-            pass
+        except Exception:
+            LOG.exception("Delete Volume Quotas exception.")
 
     def dry_run(self):
         quotas = self.client.show_quota_usage(self.tenant_id)
@@ -371,9 +363,8 @@
         client = self.client
         try:
             client.delete_quota_set(self.tenant_id)
-        except Exception as e:
-            LOG.exception("Delete Quotas exception: %s" % e)
-            pass
+        except Exception:
+            LOG.exception("Delete Quotas exception.")
 
     def dry_run(self):
         client = self.limits_client
@@ -411,9 +402,8 @@
         for n in networks:
             try:
                 client.delete_network(n['id'])
-            except Exception as e:
-                LOG.exception("Delete Network exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Network exception.")
 
     def dry_run(self):
         networks = self.list()
@@ -436,9 +426,8 @@
         for flip in flips:
             try:
                 client.delete_floatingip(flip['id'])
-            except Exception as e:
-                LOG.exception("Delete Network Floating IP exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Network Floating IP exception.")
 
     def dry_run(self):
         flips = self.list()
@@ -471,9 +460,8 @@
                     subid = port['fixed_ips'][0]['subnet_id']
                     client.remove_router_interface_with_subnet_id(rid, subid)
                 client.delete_router(rid)
-            except Exception as e:
-                LOG.exception("Delete Router exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Router exception.")
 
     def dry_run(self):
         routers = self.list()
@@ -496,9 +484,8 @@
         for hm in hms:
             try:
                 client.delete_health_monitor(hm['id'])
-            except Exception as e:
-                LOG.exception("Delete Health Monitor exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Health Monitor exception.")
 
     def dry_run(self):
         hms = self.list()
@@ -521,9 +508,8 @@
         for member in members:
             try:
                 client.delete_member(member['id'])
-            except Exception as e:
-                LOG.exception("Delete Member exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Member exception.")
 
     def dry_run(self):
         members = self.list()
@@ -546,9 +532,8 @@
         for vip in vips:
             try:
                 client.delete_vip(vip['id'])
-            except Exception as e:
-                LOG.exception("Delete VIP exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete VIP exception.")
 
     def dry_run(self):
         vips = self.list()
@@ -571,9 +556,8 @@
         for pool in pools:
             try:
                 client.delete_pool(pool['id'])
-            except Exception as e:
-                LOG.exception("Delete Pool exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Pool exception.")
 
     def dry_run(self):
         pools = self.list()
@@ -596,9 +580,8 @@
         for rule in rules:
             try:
                 client.delete_metering_label_rule(rule['id'])
-            except Exception as e:
-                LOG.exception("Delete Metering Label Rule exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Metering Label Rule exception.")
 
     def dry_run(self):
         rules = self.list()
@@ -621,9 +604,8 @@
         for label in labels:
             try:
                 client.delete_metering_label(label['id'])
-            except Exception as e:
-                LOG.exception("Delete Metering Label exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Metering Label exception.")
 
     def dry_run(self):
         labels = self.list()
@@ -648,9 +630,8 @@
         for port in ports:
             try:
                 client.delete_port(port['id'])
-            except Exception as e:
-                LOG.exception("Delete Port exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Port exception.")
 
     def dry_run(self):
         ports = self.list()
@@ -675,9 +656,8 @@
         for subnet in subnets:
             try:
                 client.delete_subnet(subnet['id'])
-            except Exception as e:
-                LOG.exception("Delete Subnet exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Subnet exception.")
 
     def dry_run(self):
         subnets = self.list()
@@ -702,9 +682,8 @@
         for alarm in alarms:
             try:
                 client.delete_alarm(alarm['id'])
-            except Exception as e:
-                LOG.exception("Delete Alarms exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Alarms exception.")
 
     def dry_run(self):
         alarms = self.list()
@@ -737,9 +716,8 @@
         for flavor in flavors:
             try:
                 client.delete_flavor(flavor['id'])
-            except Exception as e:
-                LOG.exception("Delete Flavor exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Flavor exception.")
 
     def dry_run(self):
         flavors = self.list()
@@ -775,9 +753,8 @@
         for image in images:
             try:
                 client.delete_image(image['id'])
-            except Exception as e:
-                LOG.exception("Delete Image exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Image exception.")
 
     def dry_run(self):
         images = self.list()
@@ -823,9 +800,8 @@
         for user in users:
             try:
                 client.delete_user(user['id'])
-            except Exception as e:
-                LOG.exception("Delete User exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete User exception.")
 
     def dry_run(self):
         users = self.list()
@@ -852,8 +828,8 @@
                           and role['name'] != CONF.identity.admin_role)]
                 LOG.debug("List count, %s Roles after reconcile" % len(roles))
             return roles
-        except Exception as ex:
-            LOG.exception("Cannot retrieve Roles, exception: %s" % ex)
+        except Exception:
+            LOG.exception("Cannot retrieve Roles.")
             return []
 
     def delete(self):
@@ -862,9 +838,8 @@
         for role in roles:
             try:
                 client.delete_role(role['id'])
-            except Exception as e:
-                LOG.exception("Delete Role exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Role exception.")
 
     def dry_run(self):
         roles = self.list()
@@ -900,9 +875,8 @@
         for tenant in tenants:
             try:
                 client.delete_tenant(tenant['id'])
-            except Exception as e:
-                LOG.exception("Delete Tenant exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Tenant exception.")
 
     def dry_run(self):
         tenants = self.list()
@@ -938,9 +912,8 @@
             try:
                 client.update_domain(domain['id'], enabled=False)
                 client.delete_domain(domain['id'])
-            except Exception as e:
-                LOG.exception("Delete Domain exception: %s" % e)
-                pass
+            except Exception:
+                LOG.exception("Delete Domain exception.")
 
     def dry_run(self):
         domains = self.list()
diff --git a/tempest/cmd/init.py b/tempest/cmd/init.py
new file mode 100644
index 0000000..c13fbe5
--- /dev/null
+++ b/tempest/cmd/init.py
@@ -0,0 +1,99 @@
+# Copyright 2015 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import shutil
+import subprocess
+
+from cliff import command
+from oslo_log import log as logging
+from six import moves
+
+LOG = logging.getLogger(__name__)
+
+TESTR_CONF = """[DEFAULT]
+test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \\
+    OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \\
+    OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-500} \\
+    ${PYTHON:-python} -m subunit.run discover -t %s %s $LISTOPT $IDOPTION
+test_id_option=--load-list $IDFILE
+test_list_option=--list
+group_regex=([^\.]*\.)*
+"""
+
+
+class TempestInit(command.Command):
+    """Set up a local working environment for running Tempest."""
+
+    def get_parser(self, prog_name):
+        parser = super(TempestInit, self).get_parser(prog_name)
+        parser.add_argument('dir', nargs='?', default=os.getcwd())
+        parser.add_argument('--config-dir', '-c', default='/etc/tempest')
+        return parser
+
+    def generate_testr_conf(self, local_path):
+        testr_conf_path = os.path.join(local_path, '.testr.conf')
+        top_level_path = os.path.dirname(os.path.dirname(__file__))
+        discover_path = os.path.join(top_level_path, 'test_discover')
+        testr_conf = TESTR_CONF % (top_level_path, discover_path)
+        with open(testr_conf_path, 'w+') as testr_conf_file:
+            testr_conf_file.write(testr_conf)
+
+    def update_local_conf(self, conf_path, lock_dir, log_dir):
+        config_parse = moves.configparser.SafeConfigParser()
+        config_parse.optionxform = str
+        # Read the existing config copied into the working dir
+        with open(conf_path, 'r') as conf_file:
+            config_parse.readfp(conf_file)
+        # Set local lock_dir in tempest conf
+        if not config_parse.has_section('oslo_concurrency'):
+            config_parse.add_section('oslo_concurrency')
+        config_parse.set('oslo_concurrency', 'lock_path', lock_dir)
+        # Set local log_dir in tempest conf
+        config_parse.set('DEFAULT', 'log_dir', log_dir)
+        # Set default log filename to tempest.log
+        config_parse.set('DEFAULT', 'log_file', 'tempest.log')
+        # Write the updated options back to the local tempest.conf
+        with open(conf_path, 'w') as conf_file:
+            config_parse.write(conf_file)
+
+    def copy_config(self, etc_dir, config_dir):
+        shutil.copytree(config_dir, etc_dir)
+
+    def create_working_dir(self, local_dir, config_dir):
+        # Create local dir if missing
+        if not os.path.isdir(local_dir):
+            LOG.debug('Creating local working dir: %s' % local_dir)
+            os.mkdir(local_dir)
+        lock_dir = os.path.join(local_dir, 'tempest_lock')
+        etc_dir = os.path.join(local_dir, 'etc')
+        config_path = os.path.join(etc_dir, 'tempest.conf')
+        log_dir = os.path.join(local_dir, 'logs')
+        testr_dir = os.path.join(local_dir, '.testrepository')
+        # Create lock dir
+        if not os.path.isdir(lock_dir):
+            LOG.debug('Creating lock dir: %s' % lock_dir)
+            os.mkdir(lock_dir)
+        # Create log dir
+        if not os.path.isdir(log_dir):
+            LOG.debug('Creating log dir: %s' % log_dir)
+            os.mkdir(log_dir)
+        # Create and copy local etc dir
+        self.copy_config(etc_dir, config_dir)
+        # Update local confs to reflect local paths
+        self.update_local_conf(config_path, lock_dir, log_dir)
+        # Generate a testr conf file
+        self.generate_testr_conf(local_dir)
+        # setup local testr working dir
+        if not os.path.isdir(testr_dir):
+            subprocess.call(['testr', 'init'], cwd=local_dir)
+
+    def take_action(self, parsed_args):
+        self.create_working_dir(parsed_args.dir, parsed_args.config_dir)
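Combined with the ``tempest.cm`` entry point added to setup.cfg, the new
command would presumably be invoked along these lines, based on the argument
parser above (``my-workspace`` is just a placeholder directory)::

    tempest init my-workspace
    tempest init --config-dir /etc/tempest my-workspace
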
diff --git a/tempest/common/accounts.py b/tempest/common/accounts.py
index 650faf1..78e0e72 100644
--- a/tempest/common/accounts.py
+++ b/tempest/common/accounts.py
@@ -31,8 +31,8 @@
 
 
 def read_accounts_yaml(path):
-    yaml_file = open(path, 'r')
-    accounts = yaml.load(yaml_file)
+    with open(path, 'r') as yaml_file:
+        accounts = yaml.load(yaml_file)
     return accounts
 
 
diff --git a/tempest/config.py b/tempest/config.py
index 7382088..5ea4d10 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -22,6 +22,8 @@
 
 from oslo_log import log as logging
 
+from tempest.test_discover import plugins
+
 
 # TODO(marun) Replace use of oslo_config's global ConfigOpts
 # (cfg.CONF) instance with a local instance (cfg.ConfigOpts()) once
@@ -388,6 +390,14 @@
                      'encrypted volume to a running server instance? This may '
                      'depend on the combination of compute_driver in nova and '
                      'the volume_driver(s) in cinder.'),
+    # TODO(mriedem): Remove allow_duplicate_networks once kilo-eol happens
+    # since the option was removed from nova in Liberty and is the default
+    # behavior starting in Liberty.
+    cfg.BoolOpt('allow_duplicate_networks',
+                default=False,
+                help='Does the test environment support creating instances '
+                     'with multiple ports on the same network? This is only '
+                     'valid when using Neutron.'),
 ]
 
 
@@ -1184,8 +1194,12 @@
 
 
 def register_opts():
+    ext_plugins = plugins.TempestTestPluginManager()
+    # Register in-tree tempest config options
     for g, o in _opts:
         register_opt_group(_CONF, g, o)
+    # Call external plugin config option registration
+    ext_plugins.register_plugin_opts(_CONF)
 
 
 def list_opts():
diff --git a/tempest/scenario/test_aggregates_basic_ops.py b/tempest/scenario/test_aggregates_basic_ops.py
index 02d1171..f5f4a61 100644
--- a/tempest/scenario/test_aggregates_basic_ops.py
+++ b/tempest/scenario/test_aggregates_basic_ops.py
@@ -64,12 +64,12 @@
         return computes[0]['host_name']
 
     def _add_host(self, aggregate_id, host):
-        aggregate = self.aggregates_client.add_host(aggregate_id, host)
+        aggregate = self.aggregates_client.add_host(aggregate_id, host=host)
         self.addCleanup(self._remove_host, aggregate['id'], host)
         self.assertIn(host, aggregate['hosts'])
 
     def _remove_host(self, aggregate_id, host):
-        aggregate = self.aggregates_client.remove_host(aggregate_id, host)
+        aggregate = self.aggregates_client.remove_host(aggregate_id, host=host)
         self.assertNotIn(host, aggregate['hosts'])
 
     def _check_aggregate_details(self, aggregate, aggregate_name, azone,
@@ -85,7 +85,7 @@
 
     def _set_aggregate_metadata(self, aggregate, meta):
         aggregate = self.aggregates_client.set_metadata(aggregate['id'],
-                                                        meta)
+                                                        metadata=meta)
 
         for key, value in meta.items():
             self.assertEqual(meta[key], aggregate['metadata'][key])
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index ff58eea..e676063 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -244,7 +244,7 @@
         old_port = port_list[0]
         interface = self.interface_client.create_interface(
             server_id=server['id'],
-            network_id=self.new_net.id)
+            net_id=self.new_net.id)
         self.addCleanup(self.network_client.wait_for_resource_deletion,
                         'port',
                         interface['port_id'])
diff --git a/tempest/services/compute/json/aggregates_client.py b/tempest/services/compute/json/aggregates_client.py
index 28d4ff5..4114b8b 100644
--- a/tempest/services/compute/json/aggregates_client.py
+++ b/tempest/services/compute/json/aggregates_client.py
@@ -45,13 +45,9 @@
         self.validate_response(schema.create_aggregate, resp, body)
         return service_client.ResponseBody(resp, body['aggregate'])
 
-    def update_aggregate(self, aggregate_id, name, availability_zone=None):
+    def update_aggregate(self, aggregate_id, **kwargs):
         """Update a aggregate."""
-        put_body = {
-            'name': name,
-            'availability_zone': availability_zone
-        }
-        put_body = json.dumps({'aggregate': put_body})
+        put_body = json.dumps({'aggregate': kwargs})
         resp, body = self.put('os-aggregates/%s' % aggregate_id, put_body)
 
         body = json.loads(body)
@@ -76,36 +72,27 @@
         """Returns the primary type of resource this client works with."""
         return 'aggregate'
 
-    def add_host(self, aggregate_id, host):
+    def add_host(self, aggregate_id, **kwargs):
         """Adds a host to the given aggregate."""
-        post_body = {
-            'host': host,
-        }
-        post_body = json.dumps({'add_host': post_body})
+        post_body = json.dumps({'add_host': kwargs})
         resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
                                post_body)
         body = json.loads(body)
         self.validate_response(schema.aggregate_add_remove_host, resp, body)
         return service_client.ResponseBody(resp, body['aggregate'])
 
-    def remove_host(self, aggregate_id, host):
+    def remove_host(self, aggregate_id, **kwargs):
         """Removes a host from the given aggregate."""
-        post_body = {
-            'host': host,
-        }
-        post_body = json.dumps({'remove_host': post_body})
+        post_body = json.dumps({'remove_host': kwargs})
         resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
                                post_body)
         body = json.loads(body)
         self.validate_response(schema.aggregate_add_remove_host, resp, body)
         return service_client.ResponseBody(resp, body['aggregate'])
 
-    def set_metadata(self, aggregate_id, meta):
+    def set_metadata(self, aggregate_id, **kwargs):
         """Replaces the aggregate's existing metadata with new metadata."""
-        post_body = {
-            'metadata': meta,
-        }
-        post_body = json.dumps({'set_metadata': post_body})
+        post_body = json.dumps({'set_metadata': kwargs})
         resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
                                post_body)
         body = json.loads(body)
diff --git a/tempest/services/compute/json/fixed_ips_client.py b/tempest/services/compute/json/fixed_ips_client.py
index 69de79f..d0d9ca1 100644
--- a/tempest/services/compute/json/fixed_ips_client.py
+++ b/tempest/services/compute/json/fixed_ips_client.py
@@ -28,9 +28,9 @@
         self.validate_response(schema.get_fixed_ip, resp, body)
         return service_client.ResponseBody(resp, body['fixed_ip'])
 
-    def reserve_fixed_ip(self, fixed_ip, body):
+    def reserve_fixed_ip(self, fixed_ip, **kwargs):
         """This reserves and unreserves fixed ips."""
         url = "os-fixed-ips/%s/action" % fixed_ip
-        resp, body = self.post(url, json.dumps(body))
+        resp, body = self.post(url, json.dumps(kwargs))
         self.validate_response(schema.reserve_fixed_ip, resp, body)
         return service_client.ResponseBody(resp)
diff --git a/tempest/services/compute/json/interfaces_client.py b/tempest/services/compute/json/interfaces_client.py
index e8b2b64..c437c08 100644
--- a/tempest/services/compute/json/interfaces_client.py
+++ b/tempest/services/compute/json/interfaces_client.py
@@ -29,16 +29,8 @@
         return service_client.ResponseBodyList(resp,
                                                body['interfaceAttachments'])
 
-    def create_interface(self, server_id, port_id=None, network_id=None,
-                         fixed_ip=None):
-        post_body = dict(interfaceAttachment=dict())
-        if port_id:
-            post_body['interfaceAttachment']['port_id'] = port_id
-        if network_id:
-            post_body['interfaceAttachment']['net_id'] = network_id
-        if fixed_ip:
-            fip = dict(ip_address=fixed_ip)
-            post_body['interfaceAttachment']['fixed_ips'] = [fip]
+    def create_interface(self, server_id, **kwargs):
+        post_body = {'interfaceAttachment': kwargs}
         post_body = json.dumps(post_body)
         resp, body = self.post('servers/%s/os-interface' % server_id,
                                body=post_body)
@@ -59,26 +51,18 @@
         self.validate_response(schema.delete_interface, resp, body)
         return service_client.ResponseBody(resp, body)
 
-    def add_fixed_ip(self, server_id, network_id):
+    def add_fixed_ip(self, server_id, **kwargs):
         """Add a fixed IP to input server instance."""
-        post_body = json.dumps({
-            'addFixedIp': {
-                'networkId': network_id
-            }
-        })
+        post_body = json.dumps({'addFixedIp': kwargs})
         resp, body = self.post('servers/%s/action' % server_id,
                                post_body)
         self.validate_response(servers_schema.server_actions_common_schema,
                                resp, body)
         return service_client.ResponseBody(resp, body)
 
-    def remove_fixed_ip(self, server_id, ip_address):
+    def remove_fixed_ip(self, server_id, **kwargs):
         """Remove input fixed IP from input server instance."""
-        post_body = json.dumps({
-            'removeFixedIp': {
-                'address': ip_address
-            }
-        })
+        post_body = json.dumps({'removeFixedIp': kwargs})
         resp, body = self.post('servers/%s/action' % server_id,
                                post_body)
         self.validate_response(servers_schema.server_actions_common_schema,
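
Because the request bodies are now built straight from kwargs, callers use
the API field names (net_id, port_id, fixed_ips, networkId, address) rather
than the old Python parameter names. A minimal sketch with placeholder IDs::

    client.create_interface(server_id, net_id=network_id)
    client.add_fixed_ip(server_id, networkId=network_id)
    client.remove_fixed_ip(server_id, address='10.0.0.5')
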
diff --git a/tempest/services/compute/json/quotas_client.py b/tempest/services/compute/json/quotas_client.py
index 4ea47ed..88d0567 100644
--- a/tempest/services/compute/json/quotas_client.py
+++ b/tempest/services/compute/json/quotas_client.py
@@ -41,59 +41,11 @@
         self.validate_response(schema.get_quota_set, resp, body)
         return service_client.ResponseBody(resp, body['quota_set'])
 
-    def update_quota_set(self, tenant_id, user_id=None,
-                         force=None, injected_file_content_bytes=None,
-                         metadata_items=None, ram=None, floating_ips=None,
-                         fixed_ips=None, key_pairs=None, instances=None,
-                         security_group_rules=None, injected_files=None,
-                         cores=None, injected_file_path_bytes=None,
-                         security_groups=None):
+    def update_quota_set(self, tenant_id, user_id=None, **kwargs):
         """
         Updates the tenant's quota limits for one or more resources
         """
-        post_body = {}
-
-        if force is not None:
-            post_body['force'] = force
-
-        if injected_file_content_bytes is not None:
-            post_body['injected_file_content_bytes'] = \
-                injected_file_content_bytes
-
-        if metadata_items is not None:
-            post_body['metadata_items'] = metadata_items
-
-        if ram is not None:
-            post_body['ram'] = ram
-
-        if floating_ips is not None:
-            post_body['floating_ips'] = floating_ips
-
-        if fixed_ips is not None:
-            post_body['fixed_ips'] = fixed_ips
-
-        if key_pairs is not None:
-            post_body['key_pairs'] = key_pairs
-
-        if instances is not None:
-            post_body['instances'] = instances
-
-        if security_group_rules is not None:
-            post_body['security_group_rules'] = security_group_rules
-
-        if injected_files is not None:
-            post_body['injected_files'] = injected_files
-
-        if cores is not None:
-            post_body['cores'] = cores
-
-        if injected_file_path_bytes is not None:
-            post_body['injected_file_path_bytes'] = injected_file_path_bytes
-
-        if security_groups is not None:
-            post_body['security_groups'] = security_groups
-
-        post_body = json.dumps({'quota_set': post_body})
+        post_body = json.dumps({'quota_set': kwargs})
 
         if user_id:
             resp, body = self.put('os-quota-sets/%s?user_id=%s' %
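
update_quota_set now forwards arbitrary quota fields into the quota_set body,
so a caller simply names the limits to change; a minimal sketch with
hypothetical values::

    client.update_quota_set(tenant_id, ram=8192, cores=20, instances=10)
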
diff --git a/tempest/services/network/json/network_client.py b/tempest/services/network/json/network_client.py
index 65f3aa7..ce200d2 100644
--- a/tempest/services/network/json/network_client.py
+++ b/tempest/services/network/json/network_client.py
@@ -39,46 +39,12 @@
     version = '2.0'
     uri_prefix = "v2.0"
 
-    def get_uri(self, plural_name):
-        # get service prefix from resource name
-
-        # the following map is used to construct proper URI
-        # for the given neutron resource
-        service_resource_prefix_map = {
-            'networks': '',
-            'subnets': '',
-            'ports': '',
-            'metering_labels': 'metering',
-            'metering_label_rules': 'metering',
-        }
-        service_prefix = service_resource_prefix_map.get(
-            plural_name)
-        plural_name = plural_name.replace("_", "-")
-        if service_prefix:
-            uri = '%s/%s/%s' % (self.uri_prefix, service_prefix,
-                                plural_name)
-        else:
-            uri = '%s/%s' % (self.uri_prefix, plural_name)
-        return uri
-
-    def pluralize(self, resource_name):
-        # get plural from map or just add 's'
-
-        # map from resource name to a plural name
-        # needed only for those which can't be constructed as name + 's'
-        resource_plural_map = {
-            'security_groups': 'security_groups',
-            'security_group_rules': 'security_group_rules',
-            'quotas': 'quotas',
-        }
-        return resource_plural_map.get(resource_name, resource_name + 's')
-
     def _list_resources(self, uri, **filters):
         req_uri = self.uri_prefix + uri
         if filters:
             req_uri += '?' + urllib.urlencode(filters, doseq=1)
         resp, body = self.get(req_uri)
-        body = self.deserialize_list(body)
+        body = json.loads(body)
         self.expected_success(200, resp.status)
         return service_client.ResponseBody(resp, body)
 
@@ -96,23 +62,23 @@
         if fields:
             req_uri += '?' + urllib.urlencode(fields, doseq=1)
         resp, body = self.get(req_uri)
-        body = self.deserialize_single(body)
+        body = json.loads(body)
         self.expected_success(200, resp.status)
         return service_client.ResponseBody(resp, body)
 
     def _create_resource(self, uri, post_data):
         req_uri = self.uri_prefix + uri
-        req_post_data = self.serialize(post_data)
+        req_post_data = json.dumps(post_data)
         resp, body = self.post(req_uri, req_post_data)
-        body = self.deserialize_single(body)
+        body = json.loads(body)
         self.expected_success(201, resp.status)
         return service_client.ResponseBody(resp, body)
 
     def _update_resource(self, uri, post_data):
         req_uri = self.uri_prefix + uri
-        req_post_data = self.serialize(post_data)
+        req_post_data = json.dumps(post_data)
         resp, body = self.put(req_uri, req_post_data)
-        body = self.deserialize_single(body)
+        body = json.loads(body)
         self.expected_success(200, resp.status)
         return service_client.ResponseBody(resp, body)
 
@@ -285,34 +251,21 @@
         uri = '/extensions'
         return self._list_resources(uri, **filters)
 
-    # Common methods that are hard to automate
     def create_bulk_network(self, names):
         network_list = [{'name': name} for name in names]
         post_data = {'networks': network_list}
-        body = self.serialize_list(post_data, "networks", "network")
-        uri = self.get_uri("networks")
-        resp, body = self.post(uri, body)
-        body = self.deserialize_list(body)
-        self.expected_success(201, resp.status)
-        return service_client.ResponseBody(resp, body)
+        uri = '/networks'
+        return self._create_resource(uri, post_data)
 
     def create_bulk_subnet(self, subnet_list):
         post_data = {'subnets': subnet_list}
-        body = self.serialize_list(post_data, 'subnets', 'subnet')
-        uri = self.get_uri('subnets')
-        resp, body = self.post(uri, body)
-        body = self.deserialize_list(body)
-        self.expected_success(201, resp.status)
-        return service_client.ResponseBody(resp, body)
+        uri = '/subnets'
+        return self._create_resource(uri, post_data)
 
     def create_bulk_port(self, port_list):
         post_data = {'ports': port_list}
-        body = self.serialize_list(post_data, 'ports', 'port')
-        uri = self.get_uri('ports')
-        resp, body = self.post(uri, body)
-        body = self.deserialize_list(body)
-        self.expected_success(201, resp.status)
-        return service_client.ResponseBody(resp, body)
+        uri = '/ports'
+        return self._create_resource(uri, post_data)
 
     def wait_for_resource_deletion(self, resource_type, id):
         """Waits for a resource to be deleted."""
@@ -371,32 +324,14 @@
             message = '(%s) %s' % (caller, message)
         raise exceptions.TimeoutException(message)
 
-    def deserialize_single(self, body):
-        return json.loads(body)
-
-    def deserialize_list(self, body):
-        return json.loads(body)
-
-    def serialize(self, data):
-        return json.dumps(data)
-
-    def serialize_list(self, data, root=None, item=None):
-        return self.serialize(data)
-
     def update_quotas(self, tenant_id, **kwargs):
         put_body = {'quota': kwargs}
-        body = json.dumps(put_body)
-        uri = '%s/quotas/%s' % (self.uri_prefix, tenant_id)
-        resp, body = self.put(uri, body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body['quota'])
+        uri = '/quotas/%s' % tenant_id
+        return self._update_resource(uri, put_body)
 
     def reset_quotas(self, tenant_id):
-        uri = '%s/quotas/%s' % (self.uri_prefix, tenant_id)
-        resp, body = self.delete(uri)
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
+        uri = '/quotas/%s' % tenant_id
+        return self._delete_resource(uri)
 
     def show_quotas(self, tenant_id, **fields):
         uri = '/quotas/%s' % tenant_id
@@ -410,18 +345,12 @@
         post_body = {'router': kwargs}
         post_body['router']['name'] = name
         post_body['router']['admin_state_up'] = admin_state_up
-        body = json.dumps(post_body)
-        uri = '%s/routers' % (self.uri_prefix)
-        resp, body = self.post(uri, body)
-        self.expected_success(201, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
+        uri = '/routers'
+        return self._create_resource(uri, post_body)
 
     def _update_router(self, router_id, set_enable_snat, **kwargs):
-        uri = '%s/routers/%s' % (self.uri_prefix, router_id)
-        resp, body = self.get(uri)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
+        uri = '/routers/%s' % router_id
+        body = self._show_resource(uri)
         update_body = {}
         update_body['name'] = kwargs.get('name', body['router']['name'])
         update_body['admin_state_up'] = kwargs.get(
@@ -440,11 +369,7 @@
         if 'distributed' in kwargs:
             update_body['distributed'] = kwargs['distributed']
         update_body = dict(router=update_body)
-        update_body = json.dumps(update_body)
-        resp, body = self.put(uri, update_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
+        return self._update_resource(uri, update_body)
 
     def update_router(self, router_id, **kwargs):
         """Update a router leaving enable_snat to its default value."""
@@ -476,64 +401,37 @@
         return self._update_router(router_id, set_enable_snat=True, **kwargs)
 
     def add_router_interface_with_subnet_id(self, router_id, subnet_id):
-        uri = '%s/routers/%s/add_router_interface' % (self.uri_prefix,
-                                                      router_id)
+        uri = '/routers/%s/add_router_interface' % router_id
         update_body = {"subnet_id": subnet_id}
-        update_body = json.dumps(update_body)
-        resp, body = self.put(uri, update_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
+        return self._update_resource(uri, update_body)
 
     def add_router_interface_with_port_id(self, router_id, port_id):
-        uri = '%s/routers/%s/add_router_interface' % (self.uri_prefix,
-                                                      router_id)
+        uri = '/routers/%s/add_router_interface' % router_id
         update_body = {"port_id": port_id}
-        update_body = json.dumps(update_body)
-        resp, body = self.put(uri, update_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
+        return self._update_resource(uri, update_body)
 
     def remove_router_interface_with_subnet_id(self, router_id, subnet_id):
-        uri = '%s/routers/%s/remove_router_interface' % (self.uri_prefix,
-                                                         router_id)
+        uri = '/routers/%s/remove_router_interface' % router_id
         update_body = {"subnet_id": subnet_id}
-        update_body = json.dumps(update_body)
-        resp, body = self.put(uri, update_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
+        return self._update_resource(uri, update_body)
 
     def remove_router_interface_with_port_id(self, router_id, port_id):
-        uri = '%s/routers/%s/remove_router_interface' % (self.uri_prefix,
-                                                         router_id)
+        uri = '/routers/%s/remove_router_interface' % router_id
         update_body = {"port_id": port_id}
-        update_body = json.dumps(update_body)
-        resp, body = self.put(uri, update_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
+        return self._update_resource(uri, update_body)
 
     def list_router_interfaces(self, uuid):
-        uri = '%s/ports?device_id=%s' % (self.uri_prefix, uuid)
-        resp, body = self.get(uri)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
+        uri = '/ports?device_id=%s' % uuid
+        return self._list_resources(uri)
 
     def update_agent(self, agent_id, agent_info):
         """
         :param agent_info: Agent update information.
         E.g {"admin_state_up": True}
         """
-        uri = '%s/agents/%s' % (self.uri_prefix, agent_id)
+        uri = '/agents/%s' % agent_id
         agent = {"agent": agent_info}
-        body = json.dumps(agent)
-        resp, body = self.put(uri, body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
+        return self._update_resource(uri, agent)
 
     def show_agent(self, agent_id, **fields):
         uri = '/agents/%s' % agent_id
@@ -544,88 +442,54 @@
         return self._list_resources(uri, **filters)
 
     def list_routers_on_l3_agent(self, agent_id):
-        uri = '%s/agents/%s/l3-routers' % (self.uri_prefix, agent_id)
-        resp, body = self.get(uri)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
+        uri = '/agents/%s/l3-routers' % agent_id
+        return self._list_resources(uri)
 
     def list_l3_agents_hosting_router(self, router_id):
-        uri = '%s/routers/%s/l3-agents' % (self.uri_prefix, router_id)
-        resp, body = self.get(uri)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
+        uri = '/routers/%s/l3-agents' % router_id
+        return self._list_resources(uri)
 
     def add_router_to_l3_agent(self, agent_id, router_id):
-        uri = '%s/agents/%s/l3-routers' % (self.uri_prefix, agent_id)
+        uri = '/agents/%s/l3-routers' % agent_id
         post_body = {"router_id": router_id}
-        body = json.dumps(post_body)
-        resp, body = self.post(uri, body)
-        self.expected_success(201, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
+        return self._create_resource(uri, post_body)
 
     def remove_router_from_l3_agent(self, agent_id, router_id):
-        uri = '%s/agents/%s/l3-routers/%s' % (
-            self.uri_prefix, agent_id, router_id)
-        resp, body = self.delete(uri)
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
+        uri = '/agents/%s/l3-routers/%s' % (agent_id, router_id)
+        return self._delete_resource(uri)
 
     def list_dhcp_agent_hosting_network(self, network_id):
-        uri = '%s/networks/%s/dhcp-agents' % (self.uri_prefix, network_id)
-        resp, body = self.get(uri)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
+        uri = '/networks/%s/dhcp-agents' % network_id
+        return self._list_resources(uri)
 
     def list_networks_hosted_by_one_dhcp_agent(self, agent_id):
-        uri = '%s/agents/%s/dhcp-networks' % (self.uri_prefix, agent_id)
-        resp, body = self.get(uri)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
+        uri = '/agents/%s/dhcp-networks' % agent_id
+        return self._list_resources(uri)
 
     def remove_network_from_dhcp_agent(self, agent_id, network_id):
-        uri = '%s/agents/%s/dhcp-networks/%s' % (self.uri_prefix, agent_id,
-                                                 network_id)
-        resp, body = self.delete(uri)
-        self.expected_success(204, resp.status)
-        return service_client.ResponseBody(resp, body)
+        uri = '/agents/%s/dhcp-networks/%s' % (agent_id,
+                                               network_id)
+        return self._delete_resource(uri)
 
     def update_extra_routes(self, router_id, routes):
-        uri = '%s/routers/%s' % (self.uri_prefix, router_id)
+        uri = '/routers/%s' % router_id
         put_body = {
             'router': {
                 'routes': routes
             }
         }
-        body = json.dumps(put_body)
-        resp, body = self.put(uri, body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
+        return self._update_resource(uri, put_body)
 
     def delete_extra_routes(self, router_id):
-        uri = '%s/routers/%s' % (self.uri_prefix, router_id)
-        null_routes = None
+        uri = '/routers/%s' % router_id
         put_body = {
             'router': {
-                'routes': null_routes
+                'routes': None
             }
         }
-        body = json.dumps(put_body)
-        resp, body = self.put(uri, body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
+        return self._update_resource(uri, put_body)
 
     def add_dhcp_agent_to_network(self, agent_id, network_id):
         post_body = {'network_id': network_id}
-        body = json.dumps(post_body)
-        uri = '%s/agents/%s/dhcp-networks' % (self.uri_prefix, agent_id)
-        resp, body = self.post(uri, body)
-        self.expected_success(201, resp.status)
-        body = json.loads(body)
-        return service_client.ResponseBody(resp, body)
+        uri = '/agents/%s/dhcp-networks' % agent_id
+        return self._create_resource(uri, post_body)
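
After this consolidation a resource helper reduces to a URI plus one of the
generic _list/_show/_create/_update/_delete_resource methods. A hypothetical
sketch of the pattern (the 'widgets' resource does not exist in Neutron and
is only illustrative)::

    def list_widgets(self, **filters):
        uri = '/widgets'
        return self._list_resources(uri, **filters)

    def create_widget(self, **kwargs):
        uri = '/widgets'
        return self._create_resource(uri, {'widget': kwargs})
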
diff --git a/tempest/services/volume/json/backups_client.py b/tempest/services/volume/json/backups_client.py
index 0f83b8d..8d34230 100644
--- a/tempest/services/volume/json/backups_client.py
+++ b/tempest/services/volume/json/backups_client.py
@@ -17,6 +17,8 @@
 
 from oslo_serialization import jsonutils as json
 
+from tempest_lib import exceptions as lib_exc
+
 from tempest.common import service_client
 from tempest import exceptions
 
@@ -75,6 +77,24 @@
         self.expected_success(200, resp.status)
         return service_client.ResponseBodyList(resp, body['backups'])
 
+    def export_backup(self, backup_id):
+        """Export backup metadata record."""
+        url = "backups/%s/export_record" % backup_id
+        resp, body = self.get(url)
+        body = json.loads(body)
+        self.expected_success(200, resp.status)
+        return service_client.ResponseBody(resp, body['backup-record'])
+
+    def import_backup(self, backup_service, backup_url):
+        """Import backup metadata record."""
+        post_body = {'backup_service': backup_service,
+                     'backup_url': backup_url}
+        post_body = json.dumps({'backup-record': post_body})
+        resp, body = self.post("backups/import_record", post_body)
+        body = json.loads(body)
+        self.expected_success(201, resp.status)
+        return service_client.ResponseBody(resp, body['backup'])
+
     def wait_for_backup_status(self, backup_id, status):
         """Waits for a Backup to reach a given status."""
         body = self.show_backup(backup_id)
@@ -95,6 +115,18 @@
                             self.build_timeout))
                 raise exceptions.TimeoutException(message)
 
+    def wait_for_backup_deletion(self, backup_id):
+        """Waits for backup deletion"""
+        start_time = int(time.time())
+        while True:
+            try:
+                self.show_backup(backup_id)
+            except lib_exc.NotFound:
+                return
+            if int(time.time()) - start_time >= self.build_timeout:
+                message = ('Backup %s failed to be deleted within '
+                           'the required time.' % backup_id)
+                raise exceptions.TimeoutException(message)
+            time.sleep(self.build_interval)
+
 
 class BackupsClient(BaseBackupsClient):
     """Volume V1 Backups client"""
diff --git a/tempest/test.py b/tempest/test.py
index 0e60041..df6b30d 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -44,7 +44,7 @@
 
 
 def attr(**kwargs):
-    """A decorator which applies the  testtools attr decorator
+    """A decorator which applies the testtools attr decorator
 
     This decorator applies the testtools.testcase.attr if it is in the list of
     attributes to testtools we want to apply.
diff --git a/tempest/test_discover/plugins.py b/tempest/test_discover/plugins.py
index 2701f02..45cd609 100644
--- a/tempest/test_discover/plugins.py
+++ b/tempest/test_discover/plugins.py
@@ -40,6 +40,17 @@
         """
         return
 
+    @abc.abstractmethod
+    def register_opts(self, conf):
+        """Method to add additional configuration options to tempest. This
+        method will be run for the plugin during the register_opts() function
+        in tempest.config
+
+        :param ConfigOpts conf: The conf object that can be used to register
+            additional options on.
+        """
+        return
+
 
 @misc.singleton
 class TempestTestPluginManager(object):
@@ -64,3 +75,7 @@
         for plug in self.ext_plugins:
             load_tests_dict[plug.name] = plug.obj.load_tests()
         return load_tests_dict
+
+    def register_plugin_opts(self, conf):
+        for plug in self.ext_plugins:
+            plug.obj.register_opts(conf)
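
A plugin would typically implement the new hook by registering its own
oslo.config options; a minimal sketch (the plugin class, option, and group
names are hypothetical, and the other abstract methods are omitted for
brevity)::

    from oslo_config import cfg

    from tempest.test_discover import plugins

    my_opts = [
        cfg.BoolOpt('enable_widget_tests',
                    default=False,
                    help='Run the hypothetical widget API tests.'),
    ]

    class MyPlugin(plugins.TempestPlugin):
        def register_opts(self, conf):
            # Register this plugin's options under its own config group.
            conf.register_opts(my_opts, group='my_plugin')
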
diff --git a/tempest/tests/cmd/test_tempest_init.py b/tempest/tests/cmd/test_tempest_init.py
new file mode 100644
index 0000000..6b5af7e
--- /dev/null
+++ b/tempest/tests/cmd/test_tempest_init.py
@@ -0,0 +1,66 @@
+# Copyright 2015 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+import fixtures
+
+from tempest.cmd import init
+from tempest.tests import base
+
+
+class TestTempestInit(base.TestCase):
+
+    def test_generate_testr_conf(self):
+        # Create fake conf dir
+        conf_dir = self.useFixture(fixtures.TempDir())
+
+        init_cmd = init.TempestInit(None, None)
+        init_cmd.generate_testr_conf(conf_dir.path)
+
+        # Generate expected file contents
+        top_level_path = os.path.dirname(os.path.dirname(init.__file__))
+        discover_path = os.path.join(top_level_path, 'test_discover')
+        testr_conf_file = init.TESTR_CONF % (top_level_path, discover_path)
+
+        conf_path = conf_dir.join('.testr.conf')
+        conf_file = open(conf_path, 'r')
+        self.addCleanup(conf_file.close)
+        self.assertEqual(conf_file.read(), testr_conf_file)
+
+    def test_create_working_dir(self):
+        fake_local_dir = self.useFixture(fixtures.TempDir())
+        fake_local_conf_dir = self.useFixture(fixtures.TempDir())
+        # Create a fake conf file
+        fake_file = fake_local_conf_dir.join('conf_file.conf')
+        open(fake_file, 'w').close()
+        init_cmd = init.TempestInit(None, None)
+        init_cmd.create_working_dir(fake_local_dir.path,
+                                    fake_local_conf_dir.path)
+        # Assert directories are created
+        lock_path = os.path.join(fake_local_dir.path, 'tempest_lock')
+        etc_dir = os.path.join(fake_local_dir.path, 'etc')
+        log_dir = os.path.join(fake_local_dir.path, 'logs')
+        testr_dir = os.path.join(fake_local_dir.path, '.testrepository')
+        self.assertTrue(os.path.isdir(lock_path))
+        self.assertTrue(os.path.isdir(etc_dir))
+        self.assertTrue(os.path.isdir(log_dir))
+        self.assertTrue(os.path.isdir(testr_dir))
+        # Assert file creation
+        fake_file_moved = os.path.join(etc_dir, 'conf_file.conf')
+        local_conf_file = os.path.join(etc_dir, 'tempest.conf')
+        local_testr_conf = os.path.join(fake_local_dir.path, '.testr.conf')
+        self.assertTrue(os.path.isfile(fake_file_moved))
+        self.assertTrue(os.path.isfile(local_conf_file))
+        self.assertTrue(os.path.isfile(local_testr_conf))
diff --git a/tempest/tests/services/compute/test_agents_client.py b/tempest/tests/services/compute/test_agents_client.py
index e8ea525..d268a18 100644
--- a/tempest/tests/services/compute/test_agents_client.py
+++ b/tempest/tests/services/compute/test_agents_client.py
@@ -32,10 +32,9 @@
                                                  'compute', 'regionOne')
 
     def _test_list_agents(self, bytes_body=False):
+        body = '{"agents": []}'
         if bytes_body:
-            body = bytes(b'{"agents": []}')
-        else:
-            body = '{"agents": []}'
+            body = body.encode('utf-8')
         expected = []
         response = (httplib2.Response({'status': 200}), body)
         self.useFixture(mockpatch.Patch(
diff --git a/tempest/tests/services/compute/test_aggregates_client.py b/tempest/tests/services/compute/test_aggregates_client.py
new file mode 100644
index 0000000..9fe4544
--- /dev/null
+++ b/tempest/tests/services/compute/test_aggregates_client.py
@@ -0,0 +1,47 @@
+# Copyright 2015 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import httplib2
+
+from oslotest import mockpatch
+
+from tempest.services.compute.json import aggregates_client
+from tempest.tests import base
+from tempest.tests import fake_auth_provider
+
+
+class TestAggregatesClient(base.TestCase):
+
+    def setUp(self):
+        super(TestAggregatesClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = aggregates_client.AggregatesClient(
+            fake_auth, 'compute', 'regionOne')
+
+    def _test_list_aggregates(self, bytes_body=False):
+        body = '{"aggregates": []}'
+        if bytes_body:
+            body = body.encode('utf-8')
+        expected = []
+        response = (httplib2.Response({'status': 200}), body)
+        self.useFixture(mockpatch.Patch(
+            'tempest.common.service_client.ServiceClient.get',
+            return_value=response))
+        self.assertEqual(expected, self.client.list_aggregates())
+
+    def test_list_aggregates_with_str_body(self):
+        self._test_list_aggregates()
+
+    def test_list_aggregates_with_bytes_body(self):
+        self._test_list_aggregates(bytes_body=True)
diff --git a/test-requirements.txt b/test-requirements.txt
index 8fcf071..65e3531 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -7,7 +7,6 @@
 python-subunit>=0.0.18
 oslosphinx>=2.5.0 # Apache-2.0
 mox>=0.5.3
-mock>=1.1;python_version!='2.6'
-mock==1.0.1;python_version=='2.6'
+mock>=1.2
 coverage>=3.6
 oslotest>=1.7.0 # Apache-2.0
diff --git a/tox.ini b/tox.ini
index cf7013d..389fee2 100644
--- a/tox.ini
+++ b/tox.ini
@@ -40,6 +40,16 @@
   find . -type f -name "*.pyc" -delete
   bash tools/pretty_tox.sh '{posargs}'
 
+[testenv:all-plugin]
+sitepackages = True
+# 'all' includes slow tests
+setenv = {[tempestenv]setenv}
+         OS_TEST_TIMEOUT=1200
+deps = {[tempestenv]deps}
+commands =
+  find . -type f -name "*.pyc" -delete
+  bash tools/pretty_tox.sh '{posargs}'
+
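
The new environment is invoked like any other tox target, with the test
selection passed through as posargs (the regex below is a placeholder)::

    tox -e all-plugin -- my_plugin.tests.api
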
 [testenv:full]
 sitepackages = {[tempestenv]sitepackages}
 setenv = {[tempestenv]setenv}