Merge "Use safe_setup instead try block in setUpClass"
diff --git a/HACKING.rst b/HACKING.rst
index 8652971..025bf74 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -12,6 +12,7 @@
 - [T104] Scenario tests require a services decorator
 - [T105] Unit tests cannot use setUpClass
 - [T106] vim configuration should not be kept in source files.
+- [N322] Method's default argument shouldn't be mutable
 
 Test Data/Configuration
 -----------------------
diff --git a/REVIEWING.rst b/REVIEWING.rst
new file mode 100644
index 0000000..d6dc83e
--- /dev/null
+++ b/REVIEWING.rst
@@ -0,0 +1,60 @@
+Reviewing Tempest Code
+======================
+
+To start, read the `OpenStack Common Review Checklist
+<https://wiki.openstack.org/wiki/ReviewChecklist#Common_Review_Checklist>`_
+
+
+Ensuring code is executed
+-------------------------
+
+Any new test, or any change to an existing test, has to be verified in the
+gate. This means that the first thing to check with any change is that a gate
+job actually runs it. Tests which aren't executed, whether because of
+configuration or because of skips, should not be accepted.
+
+
+Unit Tests
+----------
+
+Unit tests are required for any change that adds new functionality to either
+common code or an out-of-band tool. This ensures we don't introduce future
+regressions and covers conditions which we may not hit in the gate runs.
+Tests and service clients aren't required to have unit tests since they
+should be self-verifying when run in the gate.
+
+
+API Stability
+-------------
+Tests should only be added for published, stable APIs. If a patch contains
+tests for an API which hasn't been marked as stable, or for an API which
+doesn't conform to the `API stability guidelines
+<https://wiki.openstack.org/wiki/Governance/Approved/APIStability>`_, then it
+should not be approved.
+
+
+Reject Copy and Paste Test Code
+-------------------------------
+When creating new tests that are similar to existing tests, it is tempting to
+simply copy the code and make a few modifications. This increases code size and
+the maintenance burden. Such changes should not be approved if it is easy to
+abstract the duplicated code into a function or method.
+
+
+Being explicit
+--------------
+When tests are being added that depend on a configurable feature or extension,
+polling the API to discover that it is enabled should not be done. This will
+just result in bugs being masked because the test can be skipped automatically.
+Instead, the config file should be used to determine whether a test should be
+skipped or not. Do not approve changes that depend on an API call to determine
+whether to skip or not.
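+
+For instance, the config-driven pattern reviewers should expect looks roughly
+like this (a sketch; the exact option name depends on the service under
+test)::
+
+    if not CONF.service_available.neutron:
+        raise cls.skipException("Neutron support is required")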
+
+
+When to approve
+---------------
+ * Every patch needs two +2s before being approved.
+ * It's OK to hold off on an approval until a subject matter expert reviews it.
+ * If a patch has already been approved but requires a trivial rebase to merge,
+   you do not have to wait for a second +2, since the patch has already had
+   two +2s.
diff --git a/doc/source/REVIEWING.rst b/doc/source/REVIEWING.rst
new file mode 120000
index 0000000..841e042
--- /dev/null
+++ b/doc/source/REVIEWING.rst
@@ -0,0 +1 @@
+../../REVIEWING.rst
\ No newline at end of file
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 25bc900..d3118ac 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -9,6 +9,7 @@
 
    overview
    HACKING
+   REVIEWING
 
 ------------
 Field Guides
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index 29f80bd..ef56ab3 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -1010,9 +1010,9 @@
 # value)
 #trove=false
 
-# Whether or not Marconi is expected to be available (boolean
+# Whether or not Zaqar is expected to be available (boolean
 # value)
-#marconi=false
+#zaqar=false
 
 
 [stress]
diff --git a/tempest/api/baremetal/admin/test_nodes.py b/tempest/api/baremetal/admin/test_nodes.py
index 43ea1e6..ab6aed3 100644
--- a/tempest/api/baremetal/admin/test_nodes.py
+++ b/tempest/api/baremetal/admin/test_nodes.py
@@ -86,3 +86,23 @@
         core_interfaces = ['power', 'deploy']
         for interface in core_interfaces:
             self.assertIn(interface, body)
+
+    @test.attr(type='smoke')
+    def test_set_node_boot_device(self):
+        body = self.client.set_node_boot_device(self.node['uuid'], 'pxe')
+        # No content
+        self.assertEqual('', body)
+
+    @test.attr(type='smoke')
+    def test_get_node_boot_device(self):
+        body = self.client.get_node_boot_device(self.node['uuid'])
+        self.assertIn('boot_device', body)
+        self.assertIn('persistent', body)
+        self.assertTrue(isinstance(body['boot_device'], six.string_types))
+        self.assertTrue(isinstance(body['persistent'], bool))
+
+    @test.attr(type='smoke')
+    def test_get_node_supported_boot_devices(self):
+        body = self.client.get_node_supported_boot_devices(self.node['uuid'])
+        self.assertIn('supported_boot_devices', body)
+        self.assertTrue(isinstance(body['supported_boot_devices'], list))
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 343a39a..47d1254 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -271,10 +271,10 @@
         return resp, body
 
     @classmethod
-    def create_test_server_group(cls, name="", policy=[]):
+    def create_test_server_group(cls, name="", policy=None):
         if not name:
             name = data_utils.rand_name(cls.__name__ + "-Server-Group")
-        if not policy:
+        if policy is None:
             policy = ['affinity']
         resp, body = cls.servers_client.create_server_group(name, policy)
         cls.server_groups.append(body['id'])
diff --git a/tempest/api/compute/servers/test_servers_negative.py b/tempest/api/compute/servers/test_servers_negative.py
index 792b523..b9ec29e 100644
--- a/tempest/api/compute/servers/test_servers_negative.py
+++ b/tempest/api/compute/servers/test_servers_negative.py
@@ -404,13 +404,6 @@
                           nonexistent_server)
 
     @test.attr(type=['negative', 'gate'])
-    def test_force_delete_server_invalid_state(self):
-        # we can only force-delete a server in 'soft-delete' state
-        self.assertRaises(exceptions.Conflict,
-                          self.client.force_delete_server,
-                          self.server_id)
-
-    @test.attr(type=['negative', 'gate'])
     def test_restore_nonexistent_server_id(self):
         # restore-delete a non existent server
         nonexistent_server = data_utils.rand_uuid()
diff --git a/tempest/api/database/flavors/test_flavors.py b/tempest/api/database/flavors/test_flavors.py
index 7d30f26..a5c8caa 100644
--- a/tempest/api/database/flavors/test_flavors.py
+++ b/tempest/api/database/flavors/test_flavors.py
@@ -27,8 +27,7 @@
     @test.attr(type='smoke')
     def test_get_db_flavor(self):
         # The expected flavor details should be returned
-        resp, flavor = self.client.get_db_flavor_details(self.db_flavor_ref)
-        self.assertEqual(200, resp.status)
+        _, flavor = self.client.get_db_flavor_details(self.db_flavor_ref)
         self.assertEqual(self.db_flavor_ref, str(flavor['id']))
         self.assertIn('ram', flavor)
         self.assertIn('links', flavor)
@@ -36,11 +35,9 @@
 
     @test.attr(type='smoke')
     def test_list_db_flavors(self):
-        resp, flavor = self.client.get_db_flavor_details(self.db_flavor_ref)
-        self.assertEqual(200, resp.status)
+        _, flavor = self.client.get_db_flavor_details(self.db_flavor_ref)
         # List of all flavors should contain the expected flavor
-        resp, flavors = self.client.list_db_flavors()
-        self.assertEqual(200, resp.status)
+        _, flavors = self.client.list_db_flavors()
         self.assertIn(flavor, flavors)
 
     def _check_values(self, names, db_flavor, os_flavor, in_db=True):
@@ -57,17 +54,14 @@
     @test.attr(type='smoke')
     @test.services('compute')
     def test_compare_db_flavors_with_os(self):
-        resp, db_flavors = self.client.list_db_flavors()
-        self.assertEqual(200, resp.status)
-        resp, os_flavors = self.os_flavors_client.list_flavors_with_detail()
-        self.assertEqual(200, resp.status)
+        _, db_flavors = self.client.list_db_flavors()
+        _, os_flavors = self.os_flavors_client.list_flavors_with_detail()
         self.assertEqual(len(os_flavors), len(db_flavors),
                          "OS flavors %s do not match DB flavors %s" %
                          (os_flavors, db_flavors))
         for os_flavor in os_flavors:
-            resp, db_flavor =\
+            _, db_flavor =\
                 self.client.get_db_flavor_details(os_flavor['id'])
-            self.assertEqual(200, resp.status)
             self._check_values(['id', 'name', 'ram'], db_flavor, os_flavor)
             self._check_values(['disk', 'vcpus', 'swap'], db_flavor, os_flavor,
                                in_db=False)
diff --git a/tempest/api/database/versions/test_versions.py b/tempest/api/database/versions/test_versions.py
index 6101f47..453abe7 100644
--- a/tempest/api/database/versions/test_versions.py
+++ b/tempest/api/database/versions/test_versions.py
@@ -27,8 +27,7 @@
 
     @test.attr(type='smoke')
     def test_list_db_versions(self):
-        resp, versions = self.client.list_db_versions()
-        self.assertEqual(200, resp.status)
+        _, versions = self.client.list_db_versions()
         self.assertTrue(len(versions) > 0, "No database versions found")
         # List of all versions should contain the current version, and there
         # should only be one 'current' version
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index d75339c..1a23cb6 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -49,6 +49,7 @@
         neutron as True
     """
 
+    _interface = 'json'
     force_tenant_isolation = False
 
     # Default to ipv4.
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index 206f37b..f3da614 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -374,7 +374,8 @@
 
     @test.attr(type='smoke')
     def test_create_delete_subnet_with_gw(self):
-        gateway = '2003::2'
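+        # Derive the gateway address from the configured tenant IPv6 CIDR
+        # instead of hard-coding it, so any configured range works.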
+        net = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
+        gateway = str(netaddr.IPAddress(net.first + 2))
         name = data_utils.rand_name('network-')
         _, body = self.client.create_network(name=name)
         network = body['network']
@@ -388,13 +389,15 @@
 
     @test.attr(type='smoke')
     def test_create_delete_subnet_without_gw(self):
+        net = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
+        gateway_ip = str(netaddr.IPAddress(net.first + 1))
         name = data_utils.rand_name('network-')
         _, body = self.client.create_network(name=name)
         network = body['network']
         net_id = network['id']
         subnet = self.create_subnet(network)
         # Verifies Subnet GW in IPv6
-        self.assertEqual(subnet['gateway_ip'], '2003::1')
+        self.assertEqual(subnet['gateway_ip'], gateway_ip)
         # Delete network and subnet
         _, body = self.client.delete_network(net_id)
         self.subnets.pop()
diff --git a/tempest/api/object_storage/test_object_temp_url.py b/tempest/api/object_storage/test_object_temp_url.py
index c597255..264a18a 100644
--- a/tempest/api/object_storage/test_object_temp_url.py
+++ b/tempest/api/object_storage/test_object_temp_url.py
@@ -185,3 +185,20 @@
         resp, body = self.object_client.head(url)
         self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
         self.assertHeaders(resp, 'Object', 'HEAD')
+
+    @test.attr(type='gate')
+    @test.requires_ext(extension='tempurl', service='object')
+    def test_get_object_using_temp_url_with_inline_query_parameter(self):
+        expires = self._get_expiry_date()
+
+        # get a temp URL for the created object
+        url = self._get_temp_url(self.container_name, self.object_name, "GET",
+                                 expires, self.key)
+        url = url + '&inline'
+
+        # trying to get object using temp url within expiry time
+        resp, body = self.object_client.get(url)
+        self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+        self.assertHeaders(resp, 'Object', 'GET')
+        self.assertEqual(body, self.content)
+        self.assertEqual(resp['content-disposition'], 'inline')
diff --git a/tempest/api/orchestration/base.py b/tempest/api/orchestration/base.py
index d0fb825..0b22de5 100644
--- a/tempest/api/orchestration/base.py
+++ b/tempest/api/orchestration/base.py
@@ -64,8 +64,10 @@
         return admin_client
 
     @classmethod
-    def create_stack(cls, stack_name, template_data, parameters={},
+    def create_stack(cls, stack_name, template_data, parameters=None,
                      environment=None, files=None):
+        if parameters is None:
+            parameters = {}
         resp, body = cls.client.create_stack(
             stack_name,
             template=template_data,
diff --git a/tempest/api/orchestration/stacks/test_neutron_resources.py b/tempest/api/orchestration/stacks/test_neutron_resources.py
index c1e2d59..ffadb16 100644
--- a/tempest/api/orchestration/stacks/test_neutron_resources.py
+++ b/tempest/api/orchestration/stacks/test_neutron_resources.py
@@ -38,6 +38,7 @@
         os = clients.Manager()
         if not CONF.service_available.neutron:
             raise cls.skipException("Neutron support is required")
+        cls.neutron_basic_template = cls.load_template('neutron_basic')
         cls.network_client = os.network_client
         cls.stack_name = data_utils.rand_name('heat')
         template = cls.read_template('neutron_basic')
@@ -87,10 +88,14 @@
     @test.attr(type='slow')
     def test_created_resources(self):
         """Verifies created neutron resources."""
-        resources = [('Network', 'OS::Neutron::Net'),
-                     ('Subnet', 'OS::Neutron::Subnet'),
-                     ('RouterInterface', 'OS::Neutron::RouterInterface'),
-                     ('Server', 'OS::Nova::Server')]
+        resources = [('Network', self.neutron_basic_template['resources'][
+                      'Network']['type']),
+                     ('Subnet', self.neutron_basic_template['resources'][
+                      'Subnet']['type']),
+                     ('RouterInterface', self.neutron_basic_template[
+                      'resources']['RouterInterface']['type']),
+                     ('Server', self.neutron_basic_template['resources'][
+                      'Server']['type'])]
         for resource_name, resource_type in resources:
             resource = self.test_resources.get(resource_name, None)
             self.assertIsInstance(resource, dict)
@@ -107,7 +112,8 @@
         network = body['network']
         self.assertIsInstance(network, dict)
         self.assertEqual(network_id, network['id'])
-        self.assertEqual('NewNetwork', network['name'])
+        self.assertEqual(self.neutron_basic_template['resources'][
+            'Network']['properties']['name'], network['name'])
 
     @test.attr(type='slow')
     @test.services('network')
@@ -119,10 +125,12 @@
         network_id = self.test_resources.get('Network')['physical_resource_id']
         self.assertEqual(subnet_id, subnet['id'])
         self.assertEqual(network_id, subnet['network_id'])
-        self.assertEqual('NewSubnet', subnet['name'])
+        self.assertEqual(self.neutron_basic_template['resources'][
+            'Subnet']['properties']['name'], subnet['name'])
         self.assertEqual(sorted(CONF.network.dns_servers),
                          sorted(subnet['dns_nameservers']))
-        self.assertEqual(4, subnet['ip_version'])
+        self.assertEqual(self.neutron_basic_template['resources'][
+            'Subnet']['properties']['ip_version'], subnet['ip_version'])
         self.assertEqual(str(self.subnet_cidr), subnet['cidr'])
 
     @test.attr(type='slow')
@@ -132,7 +140,8 @@
         router_id = self.test_resources.get('Router')['physical_resource_id']
         _, body = self.network_client.show_router(router_id)
         router = body['router']
-        self.assertEqual('NewRouter', router['name'])
+        self.assertEqual(self.neutron_basic_template['resources'][
+            'Router']['properties']['name'], router['name'])
         self.assertEqual(self.external_network_id,
                          router['external_gateway_info']['network_id'])
         self.assertEqual(True, router['admin_state_up'])
@@ -168,6 +177,7 @@
         _, server = self.servers_client.get_server(server_id)
         self.assertEqual(self.keypair_name, server['key_name'])
         self.assertEqual('ACTIVE', server['status'])
-        network = server['addresses']['NewNetwork'][0]
+        network = server['addresses'][self.neutron_basic_template['resources'][
+                                      'Network']['properties']['name']][0]
         self.assertEqual(4, network['version'])
         self.assertIn(netaddr.IPAddress(network['addr']), self.subnet_cidr)
diff --git a/tempest/api/orchestration/stacks/test_resource_types.py b/tempest/api/orchestration/stacks/test_resource_types.py
new file mode 100644
index 0000000..e204894
--- /dev/null
+++ b/tempest/api/orchestration/stacks/test_resource_types.py
@@ -0,0 +1,44 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.orchestration import base
+from tempest import test
+
+
+class ResourceTypesTest(base.BaseOrchestrationTest):
+
+    @test.attr(type='smoke')
+    def test_resource_type_list(self):
+        """Verify it is possible to list resource types."""
+        resource_types = self.client.list_resource_types()
+        self.assertIsInstance(resource_types, list)
+        self.assertIn('OS::Nova::Server', resource_types)
+
+    @test.attr(type='smoke')
+    def test_resource_type_show(self):
+        """Verify it is possible to get schema about resource types."""
+        resource_types = self.client.list_resource_types()
+        self.assertNotEmpty(resource_types)
+
+        for resource_type in resource_types:
+            type_schema = self.client.get_resource_type(resource_type)
+            self.assert_fields_in_dict(type_schema, 'properties',
+                                       'attributes', 'resource_type')
+            self.assertEqual(resource_type, type_schema['resource_type'])
+
+    @test.attr(type='smoke')
+    def test_resource_type_template(self):
+        """Verify it is possible to get template about resource types."""
+        type_template = self.client.get_resource_type_template(
+            'OS::Nova::Server')
+        self.assert_fields_in_dict(
+            type_template, 'Outputs', 'Parameters', 'Resources')
diff --git a/tempest/api/queuing/base.py b/tempest/api/queuing/base.py
index f4ff7f1..41a02f2 100644
--- a/tempest/api/queuing/base.py
+++ b/tempest/api/queuing/base.py
@@ -26,7 +26,7 @@
 class BaseQueuingTest(test.BaseTestCase):
 
     """
-    Base class for the Queuing tests that use the Tempest Marconi REST client
+    Base class for the Queuing tests that use the Tempest Zaqar REST client
 
     It is assumed that the following option is defined in the
     [service_available] section of etc/tempest.conf
@@ -37,8 +37,8 @@
     @classmethod
     def setUpClass(cls):
         super(BaseQueuingTest, cls).setUpClass()
-        if not CONF.service_available.marconi:
-            raise cls.skipException("Marconi support is required")
+        if not CONF.service_available.zaqar:
+            raise cls.skipException("Zaqar support is required")
         os = cls.get_client_manager()
         cls.queuing_cfg = CONF.queuing
         cls.client = os.queuing_client
diff --git a/tempest/api_schema/response/compute/servers.py b/tempest/api_schema/response/compute/servers.py
index d6c2ddb..f9c957b 100644
--- a/tempest/api_schema/response/compute/servers.py
+++ b/tempest/api_schema/response/compute/servers.py
@@ -54,14 +54,15 @@
         'id': {'type': 'string'},
         'name': {'type': 'string'},
         'status': {'type': 'string'},
-        'image': {
-            'type': 'object',
-            'properties': {
-                'id': {'type': 'string'},
-                'links': parameter_types.links
-            },
-            'required': ['id', 'links']
-        },
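+        # NOTE: 'image' may be reported as an empty string (or null), e.g. for
+        # servers booted from a volume, so allow either form here.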
+        'image': {'oneOf': [
+            {'type': 'object',
+                'properties': {
+                    'id': {'type': 'string'},
+                    'links': parameter_types.links
+                },
+                'required': ['id', 'links']},
+            {'type': ['string', 'null']}
+        ]},
         'flavor': {
             'type': 'object',
             'properties': {
diff --git a/tempest/cmd/javelin.py b/tempest/cmd/javelin.py
index 6761a69..f37bfdb 100755
--- a/tempest/cmd/javelin.py
+++ b/tempest/cmd/javelin.py
@@ -20,6 +20,7 @@
 """
 
 import argparse
+import datetime
 import logging
 import os
 import sys
@@ -30,12 +31,14 @@
 import tempest.auth
 from tempest import config
 from tempest import exceptions
+from tempest.openstack.common import timeutils
 from tempest.services.compute.json import flavors_client
 from tempest.services.compute.json import servers_client
 from tempest.services.identity.json import identity_client
 from tempest.services.image.v2.json import image_client
 from tempest.services.object_storage import container_client
 from tempest.services.object_storage import object_client
+from tempest.services.telemetry.json import telemetry_client
 from tempest.services.volume.json import volumes_client
 
 OPTS = {}
@@ -44,6 +47,8 @@
 
 LOG = None
 
+JAVELIN_START = datetime.datetime.utcnow()
+
 
 class OSClient(object):
     _creds = None
@@ -62,6 +67,7 @@
         self.containers = container_client.ContainerClient(_auth)
         self.images = image_client.ImageClientV2JSON(_auth)
         self.flavors = flavors_client.FlavorsClientJSON(_auth)
+        self.telemetry = telemetry_client.TelemetryClientJSON(_auth)
         self.volumes = volumes_client.VolumesClientJSON(_auth)
 
 
@@ -196,6 +202,7 @@
         # TODO(sdague): Volumes not yet working, bring it back once the
         # code is self testing.
         # self.check_volumes()
+        self.check_telemetry()
 
     def check_users(self):
         """Check that the users we expect to exist, do.
@@ -252,6 +259,26 @@
                                 "Server %s is not pingable at %s" % (
                                     server['name'], addr))
 
+    def check_telemetry(self):
+        """Check that ceilometer provides a sane sample.
+
+        Confirm that there is at least one sample and that it has the
+        expected metadata.
+
+        If in check mode, confirm that the oldest sample available is from
+        before the upgrade.
+        """
+        LOG.info("checking telemetry")
+        for server in self.res['servers']:
+            client = client_for_user(server['owner'])
+            response, body = client.telemetry.list_samples(
+                'instance',
+                query=('metadata.display_name', 'eq', server['name'])
+            )
+            self.assertEqual(response.status, 200)
+            self.assertTrue(len(body) >= 1, 'expecting at least one sample')
+            self._confirm_telemetry_sample(server, body[-1])
+
     def check_volumes(self):
         """Check that the volumes are still there and attached."""
         if not self.res.get('volumes'):
@@ -270,6 +297,26 @@
             self.assertEqual(volume['id'], attachment['volume_id'])
             self.assertEqual(server_id, attachment['server_id'])
 
+    def _confirm_telemetry_sample(self, server, sample):
+        """Check this sample matches the expected resource metadata."""
+        # Confirm display_name
+        self.assertEqual(server['name'],
+                         sample['resource_metadata']['display_name'])
+        # Confirm instance_type of flavor
+        flavor = sample['resource_metadata'].get(
+            'flavor.name',
+            sample['resource_metadata'].get('instance_type')
+        )
+        self.assertEqual(server['flavor'], flavor)
+        # Confirm the oldest sample was created before upgrade.
+        if OPTS.mode == 'check':
+            oldest_timestamp = timeutils.normalize_time(
+                timeutils.parse_isotime(sample['timestamp']))
+            self.assertTrue(
+                oldest_timestamp < JAVELIN_START,
+                'timestamp should come before start of second javelin run'
+            )
+
 
 #######################
 #
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
index 70fd27b..cd696a9 100755
--- a/tempest/cmd/verify_tempest_config.py
+++ b/tempest/cmd/verify_tempest_config.py
@@ -247,7 +247,7 @@
         'data_processing': 'sahara',
         'baremetal': 'ironic',
         'identity': 'keystone',
-        'queuing': 'marconi',
+        'queuing': 'zaqar',
         'database': 'trove'
     }
     # Get catalog list for endpoints to use for validation
diff --git a/tempest/common/generator/base_generator.py b/tempest/common/generator/base_generator.py
index 57b98f7..0398af1 100644
--- a/tempest/common/generator/base_generator.py
+++ b/tempest/common/generator/base_generator.py
@@ -13,6 +13,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import functools
+
 import jsonschema
 
 from tempest.openstack.common import log as logging
@@ -39,6 +41,7 @@
     """
     Decorator for simple generators that return one value
     """
+    @functools.wraps(fn)
     def wrapped(self, schema):
         result = fn(self, schema)
         if result is not None:
diff --git a/tempest/common/rest_client.py b/tempest/common/rest_client.py
index ff92b67..132d0a6 100644
--- a/tempest/common/rest_client.py
+++ b/tempest/common/rest_client.py
@@ -248,8 +248,10 @@
                 return resp[i]
         return ""
 
-    def _log_request_start(self, method, req_url, req_headers={},
+    def _log_request_start(self, method, req_url, req_headers=None,
                            req_body=None):
+        if req_headers is None:
+            req_headers = {}
         caller_name = misc_utils.find_test_caller()
         trace_regex = CONF.debug.trace_requests
         if trace_regex and re.search(trace_regex, caller_name):
@@ -257,8 +259,10 @@
                            (caller_name, method, req_url))
 
     def _log_request(self, method, req_url, resp,
-                     secs="", req_headers={},
+                     secs="", req_headers=None,
                      req_body=None, resp_body=None):
+        if req_headers is None:
+            req_headers = {}
         # if we have the request id, put it in the right part of the log
         extra = dict(request_id=self._get_request_id(resp))
         # NOTE(sdague): while we still have 6 callers to this function
diff --git a/tempest/config.py b/tempest/config.py
index af45ba5..93d4874 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -52,7 +52,6 @@
                 default=False,
                 help="Set to True if using self-signed SSL certificates."),
     cfg.StrOpt('uri',
-               default=None,
                help="Full URI of the OpenStack Identity API (Keystone), v2"),
     cfg.StrOpt('uri_v3',
                help='Full URI of the OpenStack Identity API (Keystone), v3'),
@@ -72,52 +71,40 @@
                         'publicURL', 'adminURL', 'internalURL'],
                help="The endpoint type to use for the identity service."),
     cfg.StrOpt('username',
-               default=None,
                help="Username to use for Nova API requests."),
     cfg.StrOpt('tenant_name',
-               default=None,
                help="Tenant name to use for Nova API requests."),
     cfg.StrOpt('admin_role',
                default='admin',
                help="Role required to administrate keystone."),
     cfg.StrOpt('password',
-               default=None,
                help="API key to use when authenticating.",
                secret=True),
     cfg.StrOpt('domain_name',
-               default=None,
                help="Domain name for authentication (Keystone V3)."
                     "The same domain applies to user and project"),
     cfg.StrOpt('alt_username',
-               default=None,
                help="Username of alternate user to use for Nova API "
                     "requests."),
     cfg.StrOpt('alt_tenant_name',
-               default=None,
                help="Alternate user's Tenant name to use for Nova API "
                     "requests."),
     cfg.StrOpt('alt_password',
-               default=None,
                help="API key to use when authenticating as alternate user.",
                secret=True),
     cfg.StrOpt('alt_domain_name',
-               default=None,
                help="Alternate domain name for authentication (Keystone V3)."
                     "The same domain applies to user and project"),
     cfg.StrOpt('admin_username',
-               default=None,
                help="Administrative Username to use for "
                     "Keystone API requests."),
     cfg.StrOpt('admin_tenant_name',
-               default=None,
                help="Administrative Tenant name to use for Keystone API "
                     "requests."),
     cfg.StrOpt('admin_password',
-               default=None,
                help="API key to use when authenticating as admin.",
                secret=True),
     cfg.StrOpt('admin_domain_name',
-               default=None,
                help="Admin domain name for authentication (Keystone V3)."
                     "The same domain applies to user and project"),
 ]
@@ -246,7 +233,6 @@
                default='computev3',
                help="Catalog type of the Compute v3 service."),
     cfg.StrOpt('path_to_private_key',
-               default=None,
                help="Path to a private key file for SSH access to remote "
                     "hosts"),
     cfg.StrOpt('volume_device_name',
@@ -360,18 +346,14 @@
 
 ComputeAdminGroup = [
     cfg.StrOpt('username',
-               default=None,
                help="Administrative Username to use for Nova API requests."),
     cfg.StrOpt('tenant_name',
-               default=None,
                help="Administrative Tenant name to use for Nova API "
                     "requests."),
     cfg.StrOpt('password',
-               default=None,
                help="API key to use when authenticating as admin.",
                secret=True),
     cfg.StrOpt('domain_name',
-               default=None,
                help="Domain name for authentication as admin (Keystone V3)."
                     "The same domain applies to user and project"),
 ]
@@ -692,11 +674,9 @@
                help="Instance type for tests. Needs to be big enough for a "
                     "full OS plus the test workload"),
     cfg.StrOpt('image_ref',
-               default=None,
                help="Name of heat-cfntools enabled image to use when "
                     "launching test instances."),
     cfg.StrOpt('keypair_name',
-               default=None,
                help="Name of existing keypair to launch servers with."),
     cfg.IntOpt('max_template_size',
                default=524288,
@@ -765,11 +745,9 @@
                default="http://localhost:8080",
                help="S3 URL"),
     cfg.StrOpt('aws_secret',
-               default=None,
                help="AWS Secret Key",
                secret=True),
     cfg.StrOpt('aws_access',
-               default=None,
                help="AWS Access Key"),
     cfg.StrOpt('aws_zone',
                default="nova",
@@ -808,26 +786,20 @@
 
 StressGroup = [
     cfg.StrOpt('nova_logdir',
-               default=None,
                help='Directory containing log files on the compute nodes'),
     cfg.IntOpt('max_instances',
                default=16,
                help='Maximum number of instances to create during test.'),
     cfg.StrOpt('controller',
-               default=None,
                help='Controller host.'),
     # new stress options
     cfg.StrOpt('target_controller',
-               default=None,
                help='Controller host.'),
     cfg.StrOpt('target_ssh_user',
-               default=None,
                help='ssh user.'),
     cfg.StrOpt('target_private_key_path',
-               default=None,
                help='Path to private key.'),
     cfg.StrOpt('target_logfiles',
-               default=None,
                help='regexp for list of log files.'),
     cfg.IntOpt('log_check_interval',
                default=60,
@@ -921,9 +893,9 @@
     cfg.BoolOpt('trove',
                 default=False,
                 help="Whether or not Trove is expected to be available"),
-    cfg.BoolOpt('marconi',
+    cfg.BoolOpt('zaqar',
                 default=False,
-                help="Whether or not Marconi is expected to be available"),
+                help="Whether or not Zaqar is expected to be available"),
 ]
 
 debug_group = cfg.OptGroup(name="debug",
diff --git a/tempest/hacking/checks.py b/tempest/hacking/checks.py
index 93329bc..abc60cb 100644
--- a/tempest/hacking/checks.py
+++ b/tempest/hacking/checks.py
@@ -20,13 +20,14 @@
 
 PYTHON_CLIENTS = ['cinder', 'glance', 'keystone', 'nova', 'swift', 'neutron',
                   'trove', 'ironic', 'savanna', 'heat', 'ceilometer',
-                  'marconi', 'sahara']
+                  'zaqar', 'sahara']
 
 PYTHON_CLIENT_RE = re.compile('import (%s)client' % '|'.join(PYTHON_CLIENTS))
 TEST_DEFINITION = re.compile(r'^\s*def test.*')
 SETUPCLASS_DEFINITION = re.compile(r'^\s*def setUpClass')
 SCENARIO_DECORATOR = re.compile(r'\s*@.*services\((.*)\)')
 VI_HEADER_RE = re.compile(r"^#\s+vim?:.+")
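+# Matches "def ..." signatures that declare a "={}" or "=[]" default argument.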
+mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
 
 
 def import_no_clients_in_api(physical_line, filename):
@@ -119,6 +120,16 @@
                     'tests')
 
 
+def no_mutable_default_args(logical_line):
+    """Check that mutable object isn't used as default argument
+
+    N322: Method's default argument shouldn't be mutable
+    """
+    msg = "N322: Method's default argument shouldn't be mutable!"
+    if mutable_default_args.match(logical_line):
+        yield (0, msg)
+
+
 def factory(register):
     register(import_no_clients_in_api)
     register(scenario_tests_need_service_tags)
@@ -126,3 +137,4 @@
     register(no_vi_headers)
     register(service_tags_not_in_module_path)
     register(no_official_client_manager_in_api_tests)
+    register(no_mutable_default_args)
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 54f0256..18dc320 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -39,6 +39,7 @@
 from tempest import exceptions
 from tempest.openstack.common import log
 from tempest.openstack.common import timeutils
+from tempest.services.network import resources as net_resources
 import tempest.test
 
 CONF = config.CONF
@@ -87,6 +88,10 @@
         cls.security_groups_client = cls.manager.security_groups_client
         cls.servers_client = cls.manager.servers_client
         cls.volumes_client = cls.manager.volumes_client
+        cls.snapshots_client = cls.manager.snapshots_client
+        cls.interface_client = cls.manager.interfaces_client
+        # Neutron network client
+        cls.network_client = cls.manager.network_client
 
     @classmethod
     def _get_credentials(cls, get_creds, ctype):
@@ -120,23 +125,24 @@
         # not at the end of the class
         self.addCleanup(self._wait_for_cleanups)
 
-    def delete_wrapper(self, delete_thing, thing_id):
+    def delete_wrapper(self, delete_thing, *args, **kwargs):
         """Ignores NotFound exceptions for delete operations.
 
-        @param delete_thing: delete method of a resource
-        @param thing_id: id of the resource to be deleted
+        @param delete_thing: delete method of a resource. The method will be
+            executed as delete_thing(*args, **kwargs).
+
         """
         try:
             # Tempest clients return dicts, so there is no common delete
             # method available. Using a callable instead
-            delete_thing(thing_id)
+            delete_thing(*args, **kwargs)
         except exceptions.NotFound:
             # If the resource is already missing, mission accomplished.
             pass
 
     def addCleanup_with_wait(self, waiter_callable, thing_id, thing_id_param,
-                             cleanup_callable, cleanup_args=[],
-                             cleanup_kwargs={}, ignore_error=True):
+                             cleanup_callable, cleanup_args=None,
+                             cleanup_kwargs=None, ignore_error=True):
         """Adds wait for ansyc resource deletion at the end of cleanups
 
         @param waiter_callable: callable to wait for the resource to delete
@@ -146,6 +152,10 @@
             the following *cleanup_args, **cleanup_kwargs.
             usually a delete method.
         """
+        if cleanup_args is None:
+            cleanup_args = []
+        if cleanup_kwargs is None:
+            cleanup_kwargs = {}
         self.addCleanup(cleanup_callable, *cleanup_args, **cleanup_kwargs)
         wait_dict = {
             'waiter_callable': waiter_callable,
@@ -180,7 +190,7 @@
 
     def create_server(self, name=None, image=None, flavor=None,
                       wait_on_boot=True, wait_on_delete=True,
-                      create_kwargs={}):
+                      create_kwargs=None):
         """Creates VM instance.
 
         @param image: image from which to create the instance
@@ -195,6 +205,8 @@
             image = CONF.compute.image_ref
         if flavor is None:
             flavor = CONF.compute.flavor_ref
+        if create_kwargs is None:
+            create_kwargs = {}
 
         fixed_network_name = CONF.compute.fixed_network_name
         if 'nics' not in create_kwargs and fixed_network_name:
@@ -261,7 +273,7 @@
         _, volume = self.volumes_client.get_volume(volume['id'])
         return volume
 
-    def _create_loginable_secgroup_rule_nova(self, secgroup_id=None):
+    def _create_loginable_secgroup_rule(self, secgroup_id=None):
         _client = self.security_groups_client
         if secgroup_id is None:
             _, sgs = _client.list_security_groups()
@@ -299,7 +311,7 @@
             rules.append(sg_rule)
         return rules
 
-    def _create_security_group_nova(self):
+    def _create_security_group(self):
         # Create security group
         sg_name = data_utils.rand_name(self.__class__.__name__)
         sg_desc = sg_name + " description"
@@ -312,7 +324,7 @@
                         secgroup['id'])
 
         # Add rules to the security group
-        self._create_loginable_secgroup_rule_nova(secgroup['id'])
+        self._create_loginable_secgroup_rule(secgroup['id'])
 
         return secgroup
 
@@ -337,7 +349,9 @@
 
         return linux_client
 
-    def _image_create(self, name, fmt, path, properties={}):
+    def _image_create(self, name, fmt, path, properties=None):
+        if properties is None:
+            properties = {}
         name = data_utils.rand_name('%s-' % name)
         image_file = open(path, 'rb')
         self.addCleanup(image_file.close)
@@ -416,6 +430,492 @@
         return snapshot_image
 
 
+# TODO(yfried): change this class name to NetworkScenarioTest once client
+# migration is complete
+class NeutronScenarioTest(ScenarioTest):
+    """Base class for network scenario tests.
+    This class provide helpers for network scenario tests, using the neutron
+    API. Helpers from ancestor which use the nova network API are overridden
+    with the neutron API.
+
+    This Class also enforces using Neutron instead of novanetwork.
+    Subclassed tests will be skipped if Neutron is not enabled
+
+    """
+
+    @classmethod
+    def check_preconditions(cls):
+        if CONF.service_available.neutron:
+            cls.enabled = True
+            # verify that neutron_available is telling the truth
+            try:
+                cls.network_client.list_networks()
+            except exc.EndpointNotFound:
+                cls.enabled = False
+                raise
+        else:
+            cls.enabled = False
+            msg = 'Neutron not available'
+            raise cls.skipException(msg)
+
+    @classmethod
+    def setUpClass(cls):
+        super(NeutronScenarioTest, cls).setUpClass()
+        cls.tenant_id = cls.manager.identity_client.tenant_id
+        cls.check_preconditions()
+
+    def _create_network(self, tenant_id, namestart='network-smoke-'):
+        name = data_utils.rand_name(namestart)
+        _, result = self.network_client.create_network(name=name,
+                                                       tenant_id=tenant_id)
+        network = net_resources.DeletableNetwork(client=self.network_client,
+                                                 **result['network'])
+        self.assertEqual(network.name, name)
+        self.addCleanup(self.delete_wrapper, network.delete)
+        return network
+
+    def _list_networks(self, *args, **kwargs):
+        """List networks using admin creds """
+        return self._admin_lister('networks')(*args, **kwargs)
+
+    def _list_subnets(self, *args, **kwargs):
+        """List subnets using admin creds """
+        return self._admin_lister('subnets')(*args, **kwargs)
+
+    def _list_routers(self, *args, **kwargs):
+        """List routers using admin creds """
+        return self._admin_lister('routers')(*args, **kwargs)
+
+    def _list_ports(self, *args, **kwargs):
+        """List ports using admin creds """
+        return self._admin_lister('ports')(*args, **kwargs)
+
+    def _admin_lister(self, resource_type):
+        def temp(*args, **kwargs):
+            temp_method = self.admin_manager.network_client.__getattr__(
+                'list_%s' % resource_type)
+            _, resource_list = temp_method(*args, **kwargs)
+            return resource_list[resource_type]
+        return temp
+
+    def _create_subnet(self, network, namestart='subnet-smoke-', **kwargs):
+        """
+        Create a subnet for the given network within the cidr block
+        configured for tenant networks.
+        """
+
+        def cidr_in_use(cidr, tenant_id):
+            """
+            :return: True if a subnet with this cidr already exists in the
+                tenant, False otherwise
+            """
+            cidr_in_use = self._list_subnets(tenant_id=tenant_id, cidr=cidr)
+            return len(cidr_in_use) != 0
+
+        tenant_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
+        result = None
+        # Repeatedly attempt subnet creation with sequential cidr
+        # blocks until an unallocated block is found.
+        for subnet_cidr in tenant_cidr.subnet(
+                CONF.network.tenant_network_mask_bits):
+            str_cidr = str(subnet_cidr)
+            if cidr_in_use(str_cidr, tenant_id=network.tenant_id):
+                continue
+
+            subnet = dict(
+                name=data_utils.rand_name(namestart),
+                ip_version=4,
+                network_id=network.id,
+                tenant_id=network.tenant_id,
+                cidr=str_cidr,
+                **kwargs
+            )
+            try:
+                _, result = self.network_client.create_subnet(**subnet)
+                break
+            except exc.NeutronClientException as e:
+                is_overlapping_cidr = 'overlaps with another subnet' in str(e)
+                if not is_overlapping_cidr:
+                    raise
+        self.assertIsNotNone(result, 'Unable to allocate tenant network')
+        subnet = net_resources.DeletableSubnet(client=self.network_client,
+                                               **result['subnet'])
+        self.assertEqual(subnet.cidr, str_cidr)
+        self.addCleanup(self.delete_wrapper, subnet.delete)
+        return subnet
+
+    def _create_port(self, network, namestart='port-quotatest'):
+        name = data_utils.rand_name(namestart)
+        _, result = self.network_client.create_port(
+            name=name,
+            network_id=network.id,
+            tenant_id=network.tenant_id)
+        self.assertIsNotNone(result, 'Unable to allocate port')
+        port = net_resources.DeletablePort(client=self.network_client,
+                                           **result['port'])
+        self.addCleanup(self.delete_wrapper, port.delete)
+        return port
+
+    def _get_server_port_id(self, server, ip_addr=None):
+        ports = self._list_ports(device_id=server['id'],
+                                 fixed_ip=ip_addr)
+        self.assertEqual(len(ports), 1,
+                         "Unable to determine which port to target.")
+        return ports[0]['id']
+
+    def _get_network_by_name(self, network_name):
+        net = self._list_networks(name=network_name)
+        return net_common.AttributeDict(net[0])
+
+    def _create_floating_ip(self, thing, external_network_id, port_id=None):
+        if not port_id:
+            port_id = self._get_server_port_id(thing)
+        _, result = self.network_client.create_floatingip(
+            floating_network_id=external_network_id,
+            port_id=port_id,
+            tenant_id=thing['tenant_id']
+        )
+        floating_ip = net_resources.DeletableFloatingIp(
+            client=self.network_client,
+            **result['floatingip'])
+        self.addCleanup(self.delete_wrapper, floating_ip.delete)
+        return floating_ip
+
+    def _associate_floating_ip(self, floating_ip, server):
+        port_id = self._get_server_port_id(server)
+        floating_ip.update(port_id=port_id)
+        self.assertEqual(port_id, floating_ip.port_id)
+        return floating_ip
+
+    def _disassociate_floating_ip(self, floating_ip):
+        """
+        :param floating_ip: type DeletableFloatingIp
+        """
+        floating_ip.update(port_id=None)
+        self.assertIsNone(floating_ip.port_id)
+        return floating_ip
+
+    def _ping_ip_address(self, ip_address, should_succeed=True):
+        cmd = ['ping', '-c1', '-w1', ip_address]
+
+        def ping():
+            proc = subprocess.Popen(cmd,
+                                    stdout=subprocess.PIPE,
+                                    stderr=subprocess.PIPE)
+            proc.wait()
+            return (proc.returncode == 0) == should_succeed
+
+        return tempest.test.call_until_true(
+            ping, CONF.compute.ping_timeout, 1)
+
+    def _check_vm_connectivity(self, ip_address,
+                               username=None,
+                               private_key=None,
+                               should_connect=True):
+        """
+        :param ip_address: server to test against
+        :param username: server's ssh username
+        :param private_key: server's ssh private key to be used
+        :param should_connect: True/False indicates positive/negative test
+            positive - attempt ping and ssh
+            negative - attempt ping and fail if it succeeds
+
+        :raises: AssertionError if the result of the connectivity check does
+            not match the value of the should_connect param
+        """
+        if should_connect:
+            msg = "Timed out waiting for %s to become reachable" % ip_address
+        else:
+            msg = "ip address %s is reachable" % ip_address
+        self.assertTrue(self._ping_ip_address(ip_address,
+                                              should_succeed=should_connect),
+                        msg=msg)
+        if should_connect:
+            # no need to check ssh for negative connectivity
+            self.get_remote_client(ip_address, username, private_key)
+
+    def _check_public_network_connectivity(self, ip_address, username,
+                                           private_key, should_connect=True,
+                                           msg=None, servers=None):
+        # The target login is assumed to have been configured for
+        # key-based authentication by cloud-init.
+        LOG.debug('checking network connections to IP %s with user: %s' %
+                  (ip_address, username))
+        try:
+            self._check_vm_connectivity(ip_address,
+                                        username,
+                                        private_key,
+                                        should_connect=should_connect)
+        except Exception as e:
+            ex_msg = 'Public network connectivity check failed'
+            if msg:
+                ex_msg += ": " + msg
+            LOG.exception(ex_msg)
+            self._log_console_output(servers)
+            # network debug is called as part of ssh init
+            if not isinstance(e, exceptions.SSHTimeout):
+                debug.log_net_debug()
+            raise
+
+    def _check_tenant_network_connectivity(self, server,
+                                           username,
+                                           private_key,
+                                           should_connect=True,
+                                           servers_for_debug=None):
+        if not CONF.network.tenant_networks_reachable:
+            msg = 'Tenant networks not configured to be reachable.'
+            LOG.info(msg)
+            return
+        # The target login is assumed to have been configured for
+        # key-based authentication by cloud-init.
+        try:
+            for net_name, ip_addresses in server['networks'].iteritems():
+                for ip_address in ip_addresses:
+                    self._check_vm_connectivity(ip_address,
+                                                username,
+                                                private_key,
+                                                should_connect=should_connect)
+        except Exception as e:
+            LOG.exception('Tenant network connectivity check failed')
+            self._log_console_output(servers_for_debug)
+            # network debug is called as part of ssh init
+            if not isinstance(e, exceptions.SSHTimeout):
+                debug.log_net_debug()
+            raise
+
+    def _check_remote_connectivity(self, source, dest, should_succeed=True):
+        """
+        Check whether dest can be pinged via an ssh connection from source.
+
+        :param source: RemoteClient: an ssh connection from which to ping
+        :param dest: an IP address to ping against
+        :param should_succeed: boolean, whether the ping should succeed or not
+        :returns: boolean -- True if the observed ping result matches
+            should_succeed
+        """
+        def ping_remote():
+            try:
+                source.ping_host(dest)
+            except exceptions.SSHExecCommandFailed:
+                LOG.warn('Failed to ping IP: %s via a ssh connection from: %s.'
+                         % (dest, source.ssh_client.host))
+                return not should_succeed
+            return should_succeed
+
+        return tempest.test.call_until_true(ping_remote,
+                                            CONF.compute.ping_timeout,
+                                            1)
+
+    def _create_security_group(self, tenant_id, client=None,
+                               namestart='secgroup-smoke'):
+        if client is None:
+            client = self.network_client
+        secgroup = self._create_empty_security_group(namestart=namestart,
+                                                     client=client,
+                                                     tenant_id=tenant_id)
+
+        # Add rules to the security group
+        rules = self._create_loginable_secgroup_rule(secgroup=secgroup)
+        for rule in rules:
+            self.assertEqual(tenant_id, rule.tenant_id)
+            self.assertEqual(secgroup.id, rule.security_group_id)
+        return secgroup
+
+    def _create_empty_security_group(self, tenant_id, client=None,
+                                     namestart='secgroup-smoke'):
+        """Create a security group without rules.
+
+        Default rules will be created:
+         - IPv4 egress to any
+         - IPv6 egress to any
+
+        :param tenant_id: secgroup will be created in this tenant
+        :returns: DeletableSecurityGroup -- containing the secgroup created
+        """
+        if client is None:
+            client = self.network_client
+        sg_name = data_utils.rand_name(namestart)
+        sg_desc = sg_name + " description"
+        sg_dict = dict(name=sg_name,
+                       description=sg_desc)
+        sg_dict['tenant_id'] = tenant_id
+        _, result = client.create_security_group(**sg_dict)
+        secgroup = net_resources.DeletableSecurityGroup(
+            client=client,
+            **result['security_group']
+        )
+        self.assertEqual(secgroup.name, sg_name)
+        self.assertEqual(tenant_id, secgroup.tenant_id)
+        self.assertEqual(secgroup.description, sg_desc)
+        self.addCleanup(self.delete_wrapper, secgroup.delete)
+        return secgroup
+
+    def _default_security_group(self, tenant_id, client=None):
+        """Get default secgroup for given tenant_id.
+
+        :returns: DeletableSecurityGroup -- default secgroup for given tenant
+        """
+        if client is None:
+            client = self.network_client
+        sgs = [
+            sg for sg in client.list_security_groups().values()[0]
+            if sg['tenant_id'] == tenant_id and sg['name'] == 'default'
+        ]
+        msg = "No default security group for tenant %s." % (tenant_id)
+        self.assertTrue(len(sgs) > 0, msg)
+        if len(sgs) > 1:
+            msg = "Found %d default security groups" % len(sgs)
+            raise exc.NeutronClientNoUniqueMatch(msg=msg)
+        return net_resources.DeletableSecurityGroup(client=client,
+                                                    **sgs[0])
+
+    def _create_security_group_rule(self, client=None, secgroup=None,
+                                    tenant_id=None, **kwargs):
+        """Create a rule from a dictionary of rule parameters.
+
+        Create a rule in a secgroup. If secgroup is not defined, the default
+        secgroup in tenant_id will be used.
+
+        :param secgroup: type DeletableSecurityGroup.
+        :param secgroup_id: search for secgroup by id
+            default -- choose default secgroup for given tenant_id
+        :param tenant_id: if secgroup not passed -- the tenant in which to
+            search for default secgroup
+        :param kwargs: a dictionary containing rule parameters:
+            for example, to allow incoming ssh:
+            rule = {
+                    direction: 'ingress'
+                    protocol:'tcp',
+                    port_range_min: 22,
+                    port_range_max: 22
+                    }
+        """
+        if client is None:
+            client = self.network_client
+        if secgroup is None:
+            secgroup = self._default_security_group(tenant_id)
+
+        ruleset = dict(security_group_id=secgroup.id,
+                       tenant_id=secgroup.tenant_id)
+        ruleset.update(kwargs)
+
+        _, sg_rule = client.create_security_group_rule(**ruleset)
+        sg_rule = net_resources.DeletableSecurityGroupRule(
+            client=client,
+            **sg_rule['security_group_rule']
+        )
+        self.addCleanup(self.delete_wrapper, sg_rule.delete)
+        self.assertEqual(secgroup.tenant_id, sg_rule.tenant_id)
+        self.assertEqual(secgroup.id, sg_rule.security_group_id)
+
+        return sg_rule
+
+    def _create_loginable_secgroup_rule(self, client=None, secgroup=None):
+        """These rules are intended to permit inbound ssh and icmp
+        traffic from all sources, so no group_id is provided.
+        Setting a group_id would only permit traffic from ports
+        belonging to the same security group.
+        """
+
+        if client is None:
+            client = self.network_client
+        rules = []
+        rulesets = [
+            dict(
+                # ssh
+                protocol='tcp',
+                port_range_min=22,
+                port_range_max=22,
+            ),
+            dict(
+                # ping
+                protocol='icmp',
+            )
+        ]
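+        # each ruleset below is created twice, once per direction; a
+        # Conflict raised for an already-existing rule is tolerated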
+        for ruleset in rulesets:
+            for r_direction in ['ingress', 'egress']:
+                ruleset['direction'] = r_direction
+                try:
+                    sg_rule = self._create_security_group_rule(
+                        client=client, secgroup=secgroup, **ruleset)
+                except exceptions.Conflict as ex:
+                    # if the rule already exists, skip it and continue
+                    msg = 'Security group rule already exists'
+                    if msg not in ex._error_string:
+                        raise ex
+                else:
+                    self.assertEqual(r_direction, sg_rule.direction)
+                    rules.append(sg_rule)
+
+        return rules
+
+    def _ssh_to_server(self, server, private_key):
+        ssh_login = CONF.compute.image_ssh_user
+        return self.get_remote_client(server,
+                                      username=ssh_login,
+                                      private_key=private_key)
+
+    def _get_router(self, tenant_id):
+        """Retrieve a router for the given tenant id.
+
+        If a public router has been configured, it will be returned.
+
+        If a public router has not been configured, but a public
+        network has, a tenant router will be created and returned that
+        routes traffic to the public network.
+        """
+        router_id = CONF.network.public_router_id
+        network_id = CONF.network.public_network_id
+        if router_id:
+            result = self.network_client.show_router(router_id)
+            return net_resources.AttributeDict(**result['router'])
+        elif network_id:
+            router = self._create_router(tenant_id)
+            router.set_gateway(network_id)
+            return router
+        else:
+            raise Exception("Neither of 'public_router_id' or "
+                            "'public_network_id' has been defined.")
+
+    def _create_router(self, tenant_id, namestart='router-smoke-'):
+        name = data_utils.rand_name(namestart)
+        _, result = self.network_client.create_router(name=name,
+                                                      admin_state_up=True,
+                                                      tenant_id=tenant_id, )
+        router = net_resources.DeletableRouter(client=self.network_client,
+                                               **result['router'])
+        self.assertEqual(router.name, name)
+        self.addCleanup(self.delete_wrapper, router.delete)
+        return router
+
+    def create_networks(self, tenant_id=None):
+        """Create a network with a subnet connected to a router.
+
+        The baremetal driver is a special case since all nodes are
+        on the same shared network.
+
+        :returns: network, subnet, router
+        """
+        if CONF.baremetal.driver_enabled:
+            # NOTE(Shrews): This exception is for environments where tenant
+            # credential isolation is available, but network separation is
+            # not (the current baremetal case). Likely can be removed when
+            # test account mgmt is reworked:
+            # https://blueprints.launchpad.net/tempest/+spec/test-accounts
+            network = self._get_network_by_name(
+                CONF.compute.fixed_network_name)
+            router = None
+            subnet = None
+        else:
+            if tenant_id is None:
+                tenant_id = self.tenant_id
+            network = self._create_network(tenant_id)
+            router = self._get_router(tenant_id)
+            subnet = self._create_subnet(network)
+            subnet.add_to_router(router.id)
+        return network, subnet, router
+
+
 class OfficialClientTest(tempest.test.BaseTestCase):
     """
     Official Client test base class for scenario testing.
@@ -530,8 +1030,8 @@
     def addCleanup_with_wait(self, things, thing_id,
                              error_status='ERROR',
                              exc_type=nova_exceptions.NotFound,
-                             cleanup_callable=None, cleanup_args=[],
-                             cleanup_kwargs={}):
+                             cleanup_callable=None, cleanup_args=None,
+                             cleanup_kwargs=None):
         """Adds wait for ansyc resource deletion at the end of cleanups
 
         @param things: type of the resource to delete
@@ -543,6 +1043,10 @@
             usually a delete method. if not used, will try to use:
             things.delete(thing_id)
         """
+        if cleanup_args is None:
+            cleanup_args = []
+        if cleanup_kwargs is None:
+            cleanup_kwargs = {}
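+        # a fresh list/dict is created per call; a shared mutable default
+        # would leak cleanup arguments between invocations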
         if cleanup_callable is None:
             LOG.debug("no delete method passed. using {rclass}.delete({id}) as"
                       " default".format(rclass=things, id=thing_id))
@@ -724,7 +1228,7 @@
 
     def create_server(self, client=None, name=None, image=None, flavor=None,
                       wait_on_boot=True, wait_on_delete=True,
-                      create_kwargs={}):
+                      create_kwargs=None):
         """Creates VM instance.
 
         @param client: compute client to create the instance
@@ -742,6 +1246,8 @@
             image = CONF.compute.image_ref
         if flavor is None:
             flavor = CONF.compute.flavor_ref
+        if create_kwargs is None:
+            create_kwargs = {}
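+        # likewise, create_kwargs defaults to a new dict for each call
+        # rather than a shared mutable default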
 
         fixed_network_name = CONF.compute.fixed_network_name
         if 'nics' not in create_kwargs and fixed_network_name:
@@ -871,7 +1377,9 @@
         self.status_timeout(
             self.volume_client.volumes, volume_id, status)
 
-    def _image_create(self, name, fmt, path, properties={}):
+    def _image_create(self, name, fmt, path, properties=None):
+        if properties is None:
+            properties = {}
         name = data_utils.rand_name('%s-' % name)
         image_file = open(path, 'rb')
         self.addCleanup(image_file.close)
@@ -1273,6 +1781,10 @@
                          "Unable to determine which port to target.")
         return ports[0]['id']
 
+    def _get_network_by_name(self, network_name):
+        net = self._list_networks(name=network_name)
+        return net_common.AttributeDict(net[0])
+
     def _create_floating_ip(self, thing, external_network_id, port_id=None):
         if not port_id:
             port_id = self._get_server_port_id(thing)
@@ -1670,17 +2182,31 @@
         self.addCleanup(self.delete_wrapper, router)
         return router
 
-    def _create_networks(self, tenant_id=None):
+    def create_networks(self, tenant_id=None):
         """Create a network with a subnet connected to a router.
 
+        The baremetal driver is a special case since all nodes are
+        on the same shared network.
+
         :returns: network, subnet, router
         """
-        if tenant_id is None:
-            tenant_id = self.tenant_id
-        network = self._create_network(tenant_id)
-        router = self._get_router(tenant_id)
-        subnet = self._create_subnet(network)
-        subnet.add_to_router(router.id)
+        if CONF.baremetal.driver_enabled:
+            # NOTE(Shrews): This exception is for environments where tenant
+            # credential isolation is available, but network separation is
+            # not (the current baremetal case). Likely can be removed when
+            # test account mgmt is reworked:
+            # https://blueprints.launchpad.net/tempest/+spec/test-accounts
+            network = self._get_network_by_name(
+                CONF.compute.fixed_network_name)
+            router = None
+            subnet = None
+        else:
+            if tenant_id is None:
+                tenant_id = self.tenant_id
+            network = self._create_network(tenant_id)
+            router = self._get_router(tenant_id)
+            subnet = self._create_subnet(network)
+            subnet.add_to_router(router.id)
         return network, subnet, router
 
 
@@ -1813,3 +2339,86 @@
             self.client.stacks.delete(stack_identifier)
         except heat_exceptions.HTTPNotFound:
             pass
+
+
+class SwiftScenarioTest(ScenarioTest):
+    """
+    Provide a harness for Swift scenario tests.
+
+    Subclasses implement the tests that use the methods provided by this
+    class.
+    """
+
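+    # A hypothetical subclass could wire these helpers together roughly as
+    # follows (sketch only, names are illustrative):
+    #
+    #   class TestSwiftRoundTrip(SwiftScenarioTest):
+    #       @test.services('object_storage')
+    #       def test_round_trip(self):
+    #           container = self._create_container()
+    #           obj_name, obj_data = self._upload_object_to_container(
+    #               container)
+    #           self._download_and_verify(container, obj_name, obj_data)
+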
+    @classmethod
+    def setUpClass(cls):
+        cls.set_network_resources()
+        super(SwiftScenarioTest, cls).setUpClass()
+        if not CONF.service_available.swift:
+            skip_msg = ("%s skipped as swift is not available" %
+                        cls.__name__)
+            raise cls.skipException(skip_msg)
+        # Clients for Swift
+        cls.account_client = cls.manager.account_client
+        cls.container_client = cls.manager.container_client
+        cls.object_client = cls.manager.object_client
+
+    def _get_swift_stat(self):
+        """get swift status for our user account."""
+        self.account_client.list_account_containers()
+        LOG.debug('Swift status information obtained successfully')
+
+    def _create_container(self, container_name=None):
+        name = container_name or data_utils.rand_name(
+            'swift-scenario-container')
+        self.container_client.create_container(name)
+        # list the container to ensure it was created
+        self._list_and_check_container_objects(name)
+        LOG.debug('Container %s created' % (name))
+        return name
+
+    def _delete_container(self, container_name):
+        self.container_client.delete_container(container_name)
+        LOG.debug('Container %s deleted' % (container_name))
+
+    def _upload_object_to_container(self, container_name, obj_name=None):
+        obj_name = obj_name or data_utils.rand_name('swift-scenario-object')
+        obj_data = data_utils.arbitrary_string()
+        self.object_client.create_object(container_name, obj_name, obj_data)
+        return obj_name, obj_data
+
+    def _delete_object(self, container_name, filename):
+        self.object_client.delete_object(container_name, filename)
+        self._list_and_check_container_objects(container_name,
+                                               not_present_obj=[filename])
+
+    def _list_and_check_container_objects(self, container_name,
+                                          present_obj=None,
+                                          not_present_obj=None):
+        """
+        List objects for a given container and assert which are present and
+        which are not.
+        """
+        if present_obj is None:
+            present_obj = []
+        if not_present_obj is None:
+            not_present_obj = []
+        _, object_list = self.container_client.list_container_contents(
+            container_name)
+        if present_obj:
+            for obj in present_obj:
+                self.assertIn(obj, object_list)
+        if not_present_obj:
+            for obj in not_present_obj:
+                self.assertNotIn(obj, object_list)
+
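+    # The ACL is written via the X-Container-Read metadata header; for
+    # example an acl of '.r:*' (illustrative) makes the container publicly
+    # readable.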
+    def _change_container_acl(self, container_name, acl):
+        metadata_param = {'metadata_prefix': 'x-container-',
+                          'metadata': {'read': acl}}
+        self.container_client.update_container_metadata(container_name,
+                                                        **metadata_param)
+        resp, _ = self.container_client.list_container_metadata(container_name)
+        self.assertEqual(resp['x-container-read'], acl)
+
+    def _download_and_verify(self, container_name, obj_name, expected_data):
+        _, obj = self.object_client.get_object(container_name, obj_name)
+        self.assertEqual(obj, expected_data)
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 4bc4a98..8a8e387 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -124,7 +124,7 @@
         self.assertEqual('available', volume['status'])
 
     def create_and_add_security_group(self):
-        secgroup = self._create_security_group_nova()
+        secgroup = self._create_security_group()
         self.servers_client.add_security_group(self.server['id'],
                                                secgroup['name'])
         self.addCleanup(self.servers_client.remove_security_group,
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index 431de9a..aa04752 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -50,16 +50,13 @@
             cls.enabled = False
             raise cls.skipException(msg)
 
-    def setUp(self):
-        super(TestNetworkAdvancedServerOps, self).setUp()
+    def _setup_network_and_servers(self):
         key_name = data_utils.rand_name('keypair-smoke-')
         self.keypair = self.create_keypair(name=key_name)
         security_group =\
             self._create_security_group_neutron(tenant_id=self.tenant_id)
-        network = self._create_network(self.tenant_id)
-        router = self._get_router(self.tenant_id)
-        subnet = self._create_subnet(network)
-        subnet.add_to_router(router.id)
+        network, subnet, router = self.create_networks(self.tenant_id)
+
         public_network_id = CONF.network.public_network_id
         create_kwargs = {
             'nics': [
@@ -68,7 +65,7 @@
             'key_name': self.keypair.name,
             'security_groups': [security_group.name],
         }
-        server_name = data_utils.rand_name('server-smoke-%d-')
+        server_name = data_utils.rand_name('server-smoke')
         self.server = self.create_server(name=server_name,
                                          create_kwargs=create_kwargs)
         self.floating_ip = self._create_floating_ip(self.server,
@@ -92,6 +89,7 @@
 
     @test.services('compute', 'network')
     def test_server_connectivity_stop_start(self):
+        self._setup_network_and_servers()
         self.server.stop()
         self.status_timeout(self.compute_client.servers, self.server.id,
                             'SHUTOFF')
@@ -101,11 +99,13 @@
 
     @test.services('compute', 'network')
     def test_server_connectivity_reboot(self):
+        self._setup_network_and_servers()
         self.server.reboot()
         self._wait_server_status_and_check_network_connectivity()
 
     @test.services('compute', 'network')
     def test_server_connectivity_rebuild(self):
+        self._setup_network_and_servers()
         image_ref_alt = CONF.compute.image_ref_alt
         self.server.rebuild(image_ref_alt)
         self._wait_server_status_and_check_network_connectivity()
@@ -114,6 +114,7 @@
                           'Pause is not available.')
     @test.services('compute', 'network')
     def test_server_connectivity_pause_unpause(self):
+        self._setup_network_and_servers()
         self.server.pause()
         self.status_timeout(self.compute_client.servers, self.server.id,
                             'PAUSED')
@@ -125,6 +126,7 @@
                           'Suspend is not available.')
     @test.services('compute', 'network')
     def test_server_connectivity_suspend_resume(self):
+        self._setup_network_and_servers()
         self.server.suspend()
         self.status_timeout(self.compute_client.servers, self.server.id,
                             'SUSPENDED')
@@ -140,6 +142,7 @@
         if resize_flavor == CONF.compute.flavor_ref:
             msg = "Skipping test - flavor_ref and flavor_ref_alt are identical"
             raise self.skipException(msg)
+        self._setup_network_and_servers()
         resize_flavor = CONF.compute.flavor_ref_alt
         self.server.resize(resize_flavor)
         self.status_timeout(self.compute_client.servers, self.server.id,
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index bba034b..21a5d1b 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -18,13 +18,13 @@
 
 import testtools
 
-from tempest.api.network import common as net_common
 from tempest.common import debug
 from tempest.common.utils import data_utils
 from tempest import config
 from tempest import exceptions
 from tempest.openstack.common import log as logging
 from tempest.scenario import manager
+from tempest.services.network import resources as net_resources
 from tempest import test
 
 CONF = config.CONF
@@ -34,7 +34,7 @@
                                            ['floating_ip', 'server'])
 
 
-class TestNetworkBasicOps(manager.NetworkScenarioTest):
+class TestNetworkBasicOps(manager.NeutronScenarioTest):
 
     """
     This smoke test suite assumes that Nova has been configured to
@@ -96,21 +96,23 @@
             if not test.is_extension_enabled(ext, 'network'):
                 msg = "%s extension not enabled." % ext
                 raise cls.skipException(msg)
-        cls.check_preconditions()
 
     def setUp(self):
         super(TestNetworkBasicOps, self).setUp()
+        self.keypairs = {}
+        self.servers = []
+
+    def _setup_network_and_servers(self):
         self.security_group = \
-            self._create_security_group_neutron(tenant_id=self.tenant_id)
-        self.network, self.subnet, self.router = self._create_networks()
+            self._create_security_group(tenant_id=self.tenant_id)
+        self.network, self.subnet, self.router = self.create_networks()
         self.check_networks()
-        self.servers = {}
+
         name = data_utils.rand_name('server-smoke')
-        serv_dict = self._create_server(name, self.network)
-        self.servers[serv_dict['server']] = serv_dict['keypair']
+        server = self._create_server(name, self.network)
         self._check_tenant_network_connectivity()
 
-        self._create_and_associate_floating_ips()
+        self._create_and_associate_floating_ips(server)
 
     def check_networks(self):
         """
@@ -124,47 +126,53 @@
         self.assertIn(self.network.name, seen_names)
         self.assertIn(self.network.id, seen_ids)
 
-        seen_subnets = self._list_subnets()
-        seen_net_ids = [n['network_id'] for n in seen_subnets]
-        seen_subnet_ids = [n['id'] for n in seen_subnets]
-        self.assertIn(self.network.id, seen_net_ids)
-        self.assertIn(self.subnet.id, seen_subnet_ids)
+        if self.subnet:
+            seen_subnets = self._list_subnets()
+            seen_net_ids = [n['network_id'] for n in seen_subnets]
+            seen_subnet_ids = [n['id'] for n in seen_subnets]
+            self.assertIn(self.network.id, seen_net_ids)
+            self.assertIn(self.subnet.id, seen_subnet_ids)
 
-        seen_routers = self._list_routers()
-        seen_router_ids = [n['id'] for n in seen_routers]
-        seen_router_names = [n['name'] for n in seen_routers]
-        self.assertIn(self.router.name,
-                      seen_router_names)
-        self.assertIn(self.router.id,
-                      seen_router_ids)
+        if self.router:
+            seen_routers = self._list_routers()
+            seen_router_ids = [n['id'] for n in seen_routers]
+            seen_router_names = [n['name'] for n in seen_routers]
+            self.assertIn(self.router.name,
+                          seen_router_names)
+            self.assertIn(self.router.id,
+                          seen_router_ids)
 
     def _create_server(self, name, network):
-        keypair = self.create_keypair(name='keypair-%s' % name)
-        security_groups = [self.security_group.name]
+        keypair = self.create_keypair()
+        self.keypairs[keypair['name']] = keypair
+        security_groups = [self.security_group]
         create_kwargs = {
             'nics': [
                 {'net-id': network.id},
             ],
-            'key_name': keypair.name,
+            'key_name': keypair['name'],
             'security_groups': security_groups,
         }
         server = self.create_server(name=name, create_kwargs=create_kwargs)
-        return dict(server=server, keypair=keypair)
+        self.servers.append(server)
+        return server
+
+    def _get_server_key(self, server):
+        return self.keypairs[server['key_name']]['private_key']
 
     def _check_tenant_network_connectivity(self):
         ssh_login = CONF.compute.image_ssh_user
-        for server, key in self.servers.iteritems():
+        for server in self.servers:
             # call the common method in the parent class
             super(TestNetworkBasicOps, self).\
                 _check_tenant_network_connectivity(
-                    server, ssh_login, key.private_key,
-                    servers_for_debug=self.servers.keys())
+                    server, ssh_login, self._get_server_key(server),
+                    servers_for_debug=self.servers)
 
-    def _create_and_associate_floating_ips(self):
+    def _create_and_associate_floating_ips(self, server):
         public_network_id = CONF.network.public_network_id
-        for server in self.servers.keys():
-            floating_ip = self._create_floating_ip(server, public_network_id)
-            self.floating_ip_tuple = Floating_IP_tuple(floating_ip, server)
+        floating_ip = self._create_floating_ip(server, public_network_id)
+        self.floating_ip_tuple = Floating_IP_tuple(floating_ip, server)
 
     def _check_public_network_connectivity(self, should_connect=True,
                                            msg=None):
@@ -173,11 +181,11 @@
         ip_address = floating_ip.floating_ip_address
         private_key = None
         if should_connect:
-            private_key = self.servers[server].private_key
+            private_key = self._get_server_key(server)
         # call the common method in the parent class
         super(TestNetworkBasicOps, self)._check_public_network_connectivity(
             ip_address, ssh_login, private_key, should_connect, msg,
-            self.servers.keys())
+            self.servers)
 
     def _disassociate_floating_ips(self):
         floating_ip, server = self.floating_ip_tuple
@@ -189,11 +197,10 @@
         floating_ip, server = self.floating_ip_tuple
         name = data_utils.rand_name('new_server-smoke-')
         # create a new server for the floating ip
-        serv_dict = self._create_server(name, self.network)
-        self.servers[serv_dict['server']] = serv_dict['keypair']
-        self._associate_floating_ip(floating_ip, serv_dict['server'])
+        server = self._create_server(name, self.network)
+        self._associate_floating_ip(floating_ip, server)
         self.floating_ip_tuple = Floating_IP_tuple(
-            floating_ip, serv_dict['server'])
+            floating_ip, server)
 
     def _create_new_network(self):
         self.new_net = self._create_network(self.tenant_id)
@@ -204,27 +211,27 @@
     def _hotplug_server(self):
         old_floating_ip, server = self.floating_ip_tuple
         ip_address = old_floating_ip.floating_ip_address
-        private_key = self.servers[server].private_key
+        private_key = self._get_server_key(server)
         ssh_client = self.get_remote_client(ip_address,
                                             private_key=private_key)
         old_nic_list = self._get_server_nics(ssh_client)
         # get a port from a list of one item
-        port_list = self._list_ports(device_id=server.id)
+        port_list = self._list_ports(device_id=server['id'])
         self.assertEqual(1, len(port_list))
         old_port = port_list[0]
-        self.compute_client.servers.interface_attach(server=server,
-                                                     net_id=self.new_net.id,
-                                                     port_id=None,
-                                                     fixed_ip=None)
-        # move server to the head of the cleanup list
-        self.addCleanup(self.delete_timeout,
-                        self.compute_client.servers,
-                        server.id)
-        self.addCleanup(self.delete_wrapper, server)
+        _, interface = self.interface_client.create_interface(
+            server=server['id'],
+            network_id=self.new_net.id)
+        self.addCleanup(self.network_client.wait_for_resource_deletion,
+                        'port',
+                        interface['port_id'])
+        self.addCleanup(self.delete_wrapper,
+                        self.interface_client.delete_interface,
+                        server['id'], interface['port_id'])
 
         def check_ports():
             self.new_port_list = [port for port in
-                                  self._list_ports(device_id=server.id)
+                                  self._list_ports(device_id=server['id'])
                                   if port != old_port]
             return len(self.new_port_list) == 1
 
@@ -233,8 +240,8 @@
             raise exceptions.TimeoutException("No new port attached to the "
                                               "server in time (%s sec) !"
                                               % CONF.network.build_timeout)
-        new_port = net_common.DeletablePort(client=self.network_client,
-                                            **self.new_port_list[0])
+        new_port = net_resources.DeletablePort(client=self.network_client,
+                                               **self.new_port_list[0])
 
         def check_new_nic():
             new_nic_list = self._get_server_nics(ssh_client)
@@ -267,7 +274,7 @@
         # get internal ports' ips:
         # get all network ports in the new network
         internal_ips = (p['fixed_ips'][0]['ip_address'] for p in
-                        self._list_ports(tenant_id=server.tenant_id,
+                        self._list_ports(tenant_id=server['tenant_id'],
                                          network_id=network.id)
                         if p['device_owner'].startswith('network'))
 
@@ -283,8 +290,8 @@
             LOG.info(msg)
             return
 
-        subnet = self.network_client.list_subnets(
-            network_id=CONF.network.public_network_id)['subnets']
+        subnet = self._list_subnets(
+            network_id=CONF.network.public_network_id)
         self.assertEqual(1, len(subnet), "Found %d subnets" % len(subnet))
 
         external_ips = [subnet[0]['gateway_ip']]
@@ -293,7 +300,7 @@
 
     def _check_server_connectivity(self, floating_ip, address_list):
         ip_address = floating_ip.floating_ip_address
-        private_key = self.servers[self.floating_ip_tuple.server].private_key
+        private_key = self._get_server_key(self.floating_ip_tuple.server)
         ssh_source = self._ssh_to_server(ip_address, private_key)
 
         for remote_ip in address_list:
@@ -345,6 +352,7 @@
 
 
         """
+        self._setup_network_and_servers()
         self._check_public_network_connectivity(should_connect=True)
         self._check_network_internal_connectivity(network=self.network)
         self._check_network_external_connectivity()
@@ -370,7 +378,7 @@
         4. check VM can ping new network dhcp port
 
         """
-
+        self._setup_network_and_servers()
         self._check_public_network_connectivity(should_connect=True)
         self._create_new_network()
         self._hotplug_server()
diff --git a/tempest/scenario/test_security_groups_basic_ops.py b/tempest/scenario/test_security_groups_basic_ops.py
index ecb802f..e9ca770 100644
--- a/tempest/scenario/test_security_groups_basic_ops.py
+++ b/tempest/scenario/test_security_groups_basic_ops.py
@@ -131,6 +131,10 @@
                    'public_network_id must be defined.')
             cls.enabled = False
             raise cls.skipException(msg)
+        if CONF.baremetal.driver_enabled:
+            msg = ('Not currently supported by baremetal.')
+            cls.enabled = False
+            raise cls.skipException(msg)
 
     @classmethod
     def setUpClass(cls):
@@ -272,7 +276,7 @@
         self.floating_ips.setdefault(server, floating_ip)
 
     def _create_tenant_network(self, tenant):
-        network, subnet, router = self._create_networks(tenant.creds.tenant_id)
+        network, subnet, router = self.create_networks(tenant.creds.tenant_id)
         tenant.set_network(network, subnet, router)
 
     def _set_compute_context(self, tenant):
diff --git a/tempest/scenario/test_server_advanced_ops.py b/tempest/scenario/test_server_advanced_ops.py
index 5a1dc04..463f5aa 100644
--- a/tempest/scenario/test_server_advanced_ops.py
+++ b/tempest/scenario/test_server_advanced_ops.py
@@ -25,7 +25,7 @@
 LOG = logging.getLogger(__name__)
 
 
-class TestServerAdvancedOps(manager.OfficialClientTest):
+class TestServerAdvancedOps(manager.ScenarioTest):
 
     """
     This test case stresses some advanced server instance operations:
@@ -49,19 +49,19 @@
     def test_resize_server_confirm(self):
         # We create an instance for use in this test
         instance = self.create_server()
-        instance_id = instance.id
+        instance_id = instance['id']
         resize_flavor = CONF.compute.flavor_ref_alt
         LOG.debug("Resizing instance %s from flavor %s to flavor %s",
-                  instance.id, instance.flavor, resize_flavor)
-        instance.resize(resize_flavor)
-        self.status_timeout(self.compute_client.servers, instance_id,
-                            'VERIFY_RESIZE')
+                  instance['id'], instance['flavor']['id'], resize_flavor)
+        self.servers_client.resize(instance_id, resize_flavor)
+        self.servers_client.wait_for_server_status(instance_id,
+                                                   'VERIFY_RESIZE')
 
         LOG.debug("Confirming resize of instance %s", instance_id)
-        instance.confirm_resize()
+        self.servers_client.confirm_resize(instance_id)
 
-        self.status_timeout(
-            self.compute_client.servers, instance_id, 'ACTIVE')
+        self.servers_client.wait_for_server_status(instance_id,
+                                                   'ACTIVE')
 
     @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
                           'Suspend is not available.')
@@ -69,24 +69,27 @@
     def test_server_sequence_suspend_resume(self):
         # We create an instance for use in this test
         instance = self.create_server()
-        instance_id = instance.id
+        instance_id = instance['id']
         LOG.debug("Suspending instance %s. Current status: %s",
-                  instance_id, instance.status)
-        instance.suspend()
-        self.status_timeout(self.compute_client.servers, instance_id,
-                            'SUSPENDED')
+                  instance_id, instance['status'])
+        self.servers_client.suspend_server(instance_id)
+        self.servers_client.wait_for_server_status(instance_id,
+                                                   'SUSPENDED')
+        _, fetched_instance = self.servers_client.get_server(instance_id)
         LOG.debug("Resuming instance %s. Current status: %s",
-                  instance_id, instance.status)
-        instance.resume()
-        self.status_timeout(self.compute_client.servers, instance_id,
-                            'ACTIVE')
+                  instance_id, fetched_instance['status'])
+        self.servers_client.resume_server(instance_id)
+        self.servers_client.wait_for_server_status(instance_id,
+                                                   'ACTIVE')
+        _, fetched_instance = self.servers_client.get_server(instance_id)
         LOG.debug("Suspending instance %s. Current status: %s",
-                  instance_id, instance.status)
-        instance.suspend()
-        self.status_timeout(self.compute_client.servers, instance_id,
-                            'SUSPENDED')
+                  instance_id, fetched_instance['status'])
+        self.servers_client.suspend_server(instance_id)
+        self.servers_client.wait_for_server_status(instance_id,
+                                                   'SUSPENDED')
+        _, fetched_instance = self.servers_client.get_server(instance_id)
         LOG.debug("Resuming instance %s. Current status: %s",
-                  instance_id, instance.status)
-        instance.resume()
-        self.status_timeout(self.compute_client.servers, instance_id,
-                            'ACTIVE')
+                  instance_id, fetched_instance['status'])
+        self.servers_client.resume_server(instance_id)
+        self.servers_client.wait_for_server_status(instance_id,
+                                                   'ACTIVE')
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index 38686d9..b38b1a3 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -26,7 +26,7 @@
 load_tests = test_utils.load_tests_input_scenario_utils
 
 
-class TestServerBasicOps(manager.OfficialClientTest):
+class TestServerBasicOps(manager.ScenarioTest):
 
     """
     This smoke test case follows this basic set of operations:
@@ -69,9 +69,9 @@
 
     def boot_instance(self):
         # Create server with image and flavor from input scenario
-        security_groups = [self.security_group.name]
+        security_groups = [self.security_group]
         create_kwargs = {
-            'key_name': self.keypair.id,
+            'key_name': self.keypair['name'],
             'security_groups': security_groups
         }
         self.instance = self.create_server(image=self.image_ref,
@@ -81,16 +81,19 @@
     def verify_ssh(self):
         if self.run_ssh:
             # Obtain a floating IP
-            floating_ip = self.compute_client.floating_ips.create()
-            self.addCleanup(self.delete_wrapper, floating_ip)
+            _, floating_ip = self.floating_ips_client.create_floating_ip()
+            self.addCleanup(self.delete_wrapper,
+                            self.floating_ips_client.delete_floating_ip,
+                            floating_ip['id'])
             # Attach a floating IP
-            self.instance.add_floating_ip(floating_ip)
+            self.floating_ips_client.associate_floating_ip_to_server(
+                floating_ip['ip'], self.instance['id'])
             # Check ssh
             try:
                 self.get_remote_client(
-                    server_or_ip=floating_ip.ip,
+                    server_or_ip=floating_ip['ip'],
                     username=self.image_utils.ssh_user(self.image_ref),
-                    private_key=self.keypair.private_key)
+                    private_key=self.keypair['private_key'])
             except Exception:
                 LOG.exception('ssh to server failed')
                 self._log_console_output()
@@ -99,7 +102,7 @@
     @test.services('compute', 'network')
     def test_server_basicops(self):
         self.add_keypair()
-        self.security_group = self._create_security_group_nova()
+        self.security_group = self._create_security_group()
         self.boot_instance()
         self.verify_ssh()
-        self.instance.delete()
+        self.servers_client.delete_server(self.instance['id'])
diff --git a/tempest/scenario/test_snapshot_pattern.py b/tempest/scenario/test_snapshot_pattern.py
index d500065..dc32edc 100644
--- a/tempest/scenario/test_snapshot_pattern.py
+++ b/tempest/scenario/test_snapshot_pattern.py
@@ -82,7 +82,7 @@
     def test_snapshot_pattern(self):
         # prepare for booting a instance
         self._add_keypair()
-        self.security_group = self._create_security_group_nova()
+        self.security_group = self._create_security_group()
 
         # boot a instance and create a timestamp file in it
         server = self._boot_image(CONF.compute.image_ref)
diff --git a/tempest/scenario/test_swift_basic_ops.py b/tempest/scenario/test_swift_basic_ops.py
index 3fa6d2c..ad74ec4 100644
--- a/tempest/scenario/test_swift_basic_ops.py
+++ b/tempest/scenario/test_swift_basic_ops.py
@@ -14,7 +14,6 @@
 #    under the License.
 
 from tempest.common import http
-from tempest.common.utils import data_utils
 from tempest import config
 from tempest.openstack.common import log as logging
 from tempest.scenario import manager
@@ -25,9 +24,9 @@
 LOG = logging.getLogger(__name__)
 
 
-class TestSwiftBasicOps(manager.ScenarioTest):
+class TestSwiftBasicOps(manager.SwiftScenarioTest):
     """
-    Test swift with the follow operations:
+    Test swift basic ops.
      * get swift stat.
      * create container.
      * upload a file to the created container.
@@ -40,75 +39,6 @@
      * change ACL of the container and make sure it works successfully
     """
 
-    @classmethod
-    def setUpClass(cls):
-        cls.set_network_resources()
-        super(TestSwiftBasicOps, cls).setUpClass()
-        if not CONF.service_available.swift:
-            skip_msg = ("%s skipped as swift is not available" %
-                        cls.__name__)
-            raise cls.skipException(skip_msg)
-        # Clients for Swift
-        cls.account_client = cls.manager.account_client
-        cls.container_client = cls.manager.container_client
-        cls.object_client = cls.manager.object_client
-
-    def _get_swift_stat(self):
-        """get swift status for our user account."""
-        self.account_client.list_account_containers()
-        LOG.debug('Swift status information obtained successfully')
-
-    def _create_container(self, container_name=None):
-        name = container_name or data_utils.rand_name(
-            'swift-scenario-container')
-        self.container_client.create_container(name)
-        # look for the container to assure it is created
-        self._list_and_check_container_objects(name)
-        LOG.debug('Container %s created' % (name))
-        return name
-
-    def _delete_container(self, container_name):
-        self.container_client.delete_container(container_name)
-        LOG.debug('Container %s deleted' % (container_name))
-
-    def _upload_object_to_container(self, container_name, obj_name=None):
-        obj_name = obj_name or data_utils.rand_name('swift-scenario-object')
-        obj_data = data_utils.arbitrary_string()
-        self.object_client.create_object(container_name, obj_name, obj_data)
-        return obj_name, obj_data
-
-    def _delete_object(self, container_name, filename):
-        self.object_client.delete_object(container_name, filename)
-        self._list_and_check_container_objects(container_name,
-                                               not_present_obj=[filename])
-
-    def _list_and_check_container_objects(self, container_name, present_obj=[],
-                                          not_present_obj=[]):
-        """
-        List objects for a given container and assert which are present and
-        which are not.
-        """
-        _, object_list = self.container_client.list_container_contents(
-            container_name)
-        if present_obj:
-            for obj in present_obj:
-                self.assertIn(obj, object_list)
-        if not_present_obj:
-            for obj in not_present_obj:
-                self.assertNotIn(obj, object_list)
-
-    def _change_container_acl(self, container_name, acl):
-        metadata_param = {'metadata_prefix': 'x-container-',
-                          'metadata': {'read': acl}}
-        self.container_client.update_container_metadata(container_name,
-                                                        **metadata_param)
-        resp, _ = self.container_client.list_container_metadata(container_name)
-        self.assertEqual(resp['x-container-read'], acl)
-
-    def _download_and_verify(self, container_name, obj_name, expected_data):
-        _, obj = self.object_client.get_object(container_name, obj_name)
-        self.assertEqual(obj, expected_data)
-
     @test.services('object_storage')
     def test_swift_basic_ops(self):
         self._get_swift_stat()
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index bf5d1f6..fdda423 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -10,8 +10,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from cinderclient import exceptions as cinder_exc
-
 from tempest.common.utils import data_utils
 from tempest import config
 from tempest.openstack.common import log
@@ -23,7 +21,7 @@
 LOG = log.getLogger(__name__)
 
 
-class TestVolumeBootPattern(manager.OfficialClientTest):
+class TestVolumeBootPattern(manager.ScenarioTest):
 
     """
     This test case attempts to reproduce the following steps:
@@ -54,28 +52,32 @@
         # dev_name=id:type:size:delete_on_terminate
         # where type needs to be "snap" if the server is booted
         # from a snapshot, size instead can be safely left empty
-        bd_map = {
-            'vda': vol_id + ':::0'
-        }
-        security_groups = [self.security_group.name]
+        bd_map = [{
+            'device_name': 'vda',
+            'volume_id': vol_id,
+            'delete_on_termination': '0'}]
+        self.security_group = self._create_security_group()
+        security_groups = [{'name': self.security_group['name']}]
         create_kwargs = {
             'block_device_mapping': bd_map,
-            'key_name': keypair.name,
+            'key_name': keypair['name'],
             'security_groups': security_groups
         }
         return self.create_server(image='', create_kwargs=create_kwargs)
 
     def _create_snapshot_from_volume(self, vol_id):
-        volume_snapshots = self.volume_client.volume_snapshots
         snap_name = data_utils.rand_name('snapshot')
-        snap = volume_snapshots.create(volume_id=vol_id,
-                                       force=True,
-                                       display_name=snap_name)
-        self.addCleanup_with_wait(self.volume_client.volume_snapshots, snap.id,
-                                  exc_type=cinder_exc.NotFound)
-        self.status_timeout(volume_snapshots,
-                            snap.id,
-                            'available')
+        _, snap = self.snapshots_client.create_snapshot(
+            volume_id=vol_id,
+            force=True,
+            display_name=snap_name)
+        self.addCleanup_with_wait(
+            waiter_callable=self.snapshots_client.wait_for_resource_deletion,
+            thing_id=snap['id'], thing_id_param='id',
+            cleanup_callable=self.delete_wrapper,
+            cleanup_args=[self.snapshots_client.delete_snapshot, snap['id']])
+        self.snapshots_client.wait_for_snapshot_status(snap['id'], 'available')
+        self.assertEqual(snap_name, snap['display_name'])
         return snap
 
     def _create_volume_from_snapshot(self, snap_id):
@@ -85,27 +87,26 @@
     def _stop_instances(self, instances):
         # NOTE(gfidente): two loops so we do not wait for the status twice
         for i in instances:
-            self.compute_client.servers.stop(i)
+            self.servers_client.stop(i['id'])
         for i in instances:
-            self.status_timeout(self.compute_client.servers,
-                                i.id,
-                                'SHUTOFF')
+            self.servers_client.wait_for_server_status(i['id'], 'SHUTOFF')
 
     def _detach_volumes(self, volumes):
         # NOTE(gfidente): two loops so we do not wait for the status twice
         for v in volumes:
-            self.volume_client.volumes.detach(v)
+            self.volumes_client.detach_volume(v['id'])
         for v in volumes:
-            self.status_timeout(self.volume_client.volumes,
-                                v.id,
-                                'available')
+            self.volumes_client.wait_for_volume_status(v['id'], 'available')
 
     def _ssh_to_server(self, server, keypair):
         if CONF.compute.use_floatingip_for_ssh:
-            floating_ip = self.compute_client.floating_ips.create()
-            self.addCleanup(self.delete_wrapper, floating_ip)
-            server.add_floating_ip(floating_ip)
-            ip = floating_ip.ip
+            _, floating_ip = self.floating_ips_client.create_floating_ip()
+            self.addCleanup(self.delete_wrapper,
+                            self.floating_ips_client.delete_floating_ip,
+                            floating_ip['id'])
+            self.floating_ips_client.associate_floating_ip_to_server(
+                floating_ip['ip'], server['id'])
+            ip = floating_ip['ip']
         else:
             network_name_for_ssh = CONF.compute.network_for_ssh
             ip = server.networks[network_name_for_ssh][0]
@@ -113,10 +114,10 @@
         try:
             return self.get_remote_client(
                 ip,
-                private_key=keypair.private_key)
+                private_key=keypair['private_key'])
         except Exception:
             LOG.exception('ssh to server failed')
-            self._log_console_output()
+            self._log_console_output(servers=[server])
             raise
 
     def _get_content(self, ssh_client):
@@ -129,8 +130,8 @@
         return self._get_content(ssh_client)
 
     def _delete_server(self, server):
-        self.compute_client.servers.delete(server)
-        self.delete_timeout(self.compute_client.servers, server.id)
+        self.servers_client.delete_server(server['id'])
+        self.servers_client.wait_for_server_termination(server['id'])
 
     def _check_content_of_written_file(self, ssh_client, expected):
         actual = self._get_content(ssh_client)
@@ -139,11 +140,11 @@
     @test.services('compute', 'volume', 'image')
     def test_volume_boot_pattern(self):
         keypair = self.create_keypair()
-        self.security_group = self._create_security_group_nova()
+        self.security_group = self._create_security_group()
 
         # create an instance from volume
         volume_origin = self._create_volume_from_image()
-        instance_1st = self._boot_instance_from_volume(volume_origin.id,
+        instance_1st = self._boot_instance_from_volume(volume_origin['id'],
                                                        keypair)
 
         # write content to volume on instance
@@ -155,7 +156,7 @@
         self._delete_server(instance_1st)
 
         # create a 2nd instance from volume
-        instance_2nd = self._boot_instance_from_volume(volume_origin.id,
+        instance_2nd = self._boot_instance_from_volume(volume_origin['id'],
                                                        keypair)
 
         # check the content of written file
@@ -164,11 +165,11 @@
         self._check_content_of_written_file(ssh_client_for_instance_2nd, text)
 
         # snapshot a volume
-        snapshot = self._create_snapshot_from_volume(volume_origin.id)
+        snapshot = self._create_snapshot_from_volume(volume_origin['id'])
 
         # create a 3rd instance from snapshot
-        volume = self._create_volume_from_snapshot(snapshot.id)
-        instance_from_snapshot = self._boot_instance_from_volume(volume.id,
+        volume = self._create_volume_from_snapshot(snapshot['id'])
+        instance_from_snapshot = self._boot_instance_from_volume(volume['id'],
                                                                  keypair)
 
         # check the content of written file
@@ -186,10 +187,11 @@
         bdms = [{'uuid': vol_id, 'source_type': 'volume',
                  'destination_type': 'volume', 'boot_index': 0,
                  'delete_on_termination': False}]
-        security_groups = [self.security_group.name]
+        self.security_group = self._create_security_group()
+        security_groups = [{'name': self.security_group['name']}]
         create_kwargs = {
             'block_device_mapping_v2': bdms,
-            'key_name': keypair.name,
+            'key_name': keypair['name'],
             'security_groups': security_groups
         }
         return self.create_server(image='', create_kwargs=create_kwargs)
diff --git a/tempest/services/baremetal/v1/base_v1.py b/tempest/services/baremetal/v1/base_v1.py
index 9c753c2..07eee8a 100644
--- a/tempest/services/baremetal/v1/base_v1.py
+++ b/tempest/services/baremetal/v1/base_v1.py
@@ -264,3 +264,47 @@
                                                    postf='validate')
 
         return self._show_request('nodes', node_uuid, uri=uri)
+
+    @base.handle_errors
+    def set_node_boot_device(self, node_uuid, boot_device, persistent=False):
+        """
+        Set the boot device of the specified node.
+
+        :param node_uuid: The unique identifier of the node.
+        :param boot_device: The boot device name.
+        :param persistent: Boolean value. True if the boot device will
+                           persist to all future boots, False if not.
+                           Default: False.
+
+        """
+        request = {'boot_device': boot_device, 'persistent': persistent}
+        resp, body = self._put_request('nodes/%s/management/boot_device' %
+                                       node_uuid, request)
+        self.expected_success(204, resp.status)
+        return body
+
+    @base.handle_errors
+    def get_node_boot_device(self, node_uuid):
+        """
+        Get the current boot device of the specified node.
+
+        :param node_uuid: The unique identifier of the node.
+
+        """
+        path = 'nodes/%s/management/boot_device' % node_uuid
+        resp, body = self._list_request(path)
+        self.expected_success(200, resp.status)
+        return body
+
+    @base.handle_errors
+    def get_node_supported_boot_devices(self, node_uuid):
+        """
+        Get the supported boot devices of the specified node.
+
+        :param node_uuid: The unique identifier of the node.
+
+        """
+        path = 'nodes/%s/management/boot_device/supported' % node_uuid
+        resp, body = self._list_request(path)
+        self.expected_success(200, resp.status)
+        return body
diff --git a/tempest/services/compute/json/servers_client.py b/tempest/services/compute/json/servers_client.py
index f44be29..88b68d3 100644
--- a/tempest/services/compute/json/servers_client.py
+++ b/tempest/services/compute/json/servers_client.py
@@ -58,6 +58,7 @@
         disk_config: Determines if user or admin controls disk configuration.
         return_reservation_id: Enable/Disable the return of reservation id
         block_device_mapping: Block device mapping for the server.
+        block_device_mapping_v2: Block device mapping V2 for the server.
         """
         post_body = {
             'name': name,
@@ -70,7 +71,8 @@
                        'availability_zone', 'accessIPv4', 'accessIPv6',
                        'min_count', 'max_count', ('metadata', 'meta'),
                        ('OS-DCF:diskConfig', 'disk_config'),
-                       'return_reservation_id', 'block_device_mapping']:
+                       'return_reservation_id', 'block_device_mapping',
+                       'block_device_mapping_v2']:
             if isinstance(option, tuple):
                 post_param = option[0]
                 key = option[1]
@@ -80,6 +82,7 @@
             value = kwargs.get(key)
             if value is not None:
                 post_body[post_param] = value
+
         post_body = {'server': post_body}
 
         if 'sched_hints' in kwargs:
diff --git a/tempest/services/database/json/flavors_client.py b/tempest/services/database/json/flavors_client.py
index 2ec0405..f276a45 100644
--- a/tempest/services/database/json/flavors_client.py
+++ b/tempest/services/database/json/flavors_client.py
@@ -33,8 +33,10 @@
             url += '?%s' % urllib.urlencode(params)
 
         resp, body = self.get(url)
+        self.expected_success(200, resp.status)
         return resp, self._parse_resp(body)
 
     def get_db_flavor_details(self, db_flavor_id):
         resp, body = self.get("flavors/%s" % str(db_flavor_id))
+        self.expected_success(200, resp.status)
         return resp, self._parse_resp(body)
diff --git a/tempest/services/database/json/versions_client.py b/tempest/services/database/json/versions_client.py
index 0269c43..81c0e6c 100644
--- a/tempest/services/database/json/versions_client.py
+++ b/tempest/services/database/json/versions_client.py
@@ -35,4 +35,5 @@
             url += '?%s' % urllib.urlencode(params)
 
         resp, body = self.get(url)
+        self.expected_success(200, resp.status)
         return resp, self._parse_resp(body)
diff --git a/tempest/services/network/resources.py b/tempest/services/network/resources.py
new file mode 100644
index 0000000..b2feb87
--- /dev/null
+++ b/tempest/services/network/resources.py
@@ -0,0 +1,163 @@
+# Copyright 2013 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import abc
+
+import six
+
+
+class AttributeDict(dict):
+
+    """
+    Provide attribute access (dict.key) to dictionary values.
+    """
+
+    def __getattr__(self, name):
+        """Allow attribute access for all keys in the dict."""
+        if name in self:
+            return self[name]
+        return super(AttributeDict, self).__getattribute__(name)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class DeletableResource(AttributeDict):
+
+    """
+    Support deletion of neutron resources (networks, subnets) via a
+    delete() method, as is supported by keystone and nova resources.
+    """
+
+    def __init__(self, *args, **kwargs):
+        self.client = kwargs.pop('client', None)
+        super(DeletableResource, self).__init__(*args, **kwargs)
+
+    def __str__(self):
+        return '<%s id="%s" name="%s">' % (self.__class__.__name__,
+                                           self.id, self.name)
+
+    @abc.abstractmethod
+    def delete(self):
+        return
+
+    def __hash__(self):
+        return hash(self.id)
+
+
+class DeletableNetwork(DeletableResource):
+
+    def delete(self):
+        self.client.delete_network(self.id)
+
+
+class DeletableSubnet(DeletableResource):
+
+    def __init__(self, *args, **kwargs):
+        super(DeletableSubnet, self).__init__(*args, **kwargs)
+        self._router_ids = set()
+
+    def update(self, *args, **kwargs):
+        _, result = self.client.update_subnet(subnet=self.id, *args, **kwargs)
+        super(DeletableSubnet, self).update(**result['subnet'])
+
+    def add_to_router(self, router_id):
+        self._router_ids.add(router_id)
+        self.client.add_router_interface_with_subnet_id(router_id,
+                                                        subnet_id=self.id)
+
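+    # delete() detaches the subnet from every router it was added to before
+    # removing the subnet itself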
+    def delete(self):
+        for router_id in self._router_ids.copy():
+            self.client.remove_router_interface_with_subnet_id(
+                router_id,
+                subnet_id=self.id)
+            self._router_ids.remove(router_id)
+        self.client.delete_subnet(self.id)
+
+
+class DeletableRouter(DeletableResource):
+
+    def set_gateway(self, network_id):
+        return self.update(external_gateway_info=dict(network_id=network_id))
+
+    def unset_gateway(self):
+        return self.update(external_gateway_info=dict())
+
+    def update(self, *args, **kwargs):
+        _, result = self.client.update_router(self.id,
+                                              *args,
+                                              **kwargs)
+        return super(DeletableRouter, self).update(**result['router'])
+
+    def delete(self):
+        self.unset_gateway()
+        self.client.delete_router(self.id)
+
+
+class DeletableFloatingIp(DeletableResource):
+
+    def update(self, *args, **kwargs):
+        _, result = self.client.update_floatingip(self.id,
+                                                  *args,
+                                                  **kwargs)
+        super(DeletableFloatingIp, self).update(**result['floatingip'])
+
+    def __repr__(self):
+        return '<%s addr="%s">' % (self.__class__.__name__,
+                                   self.floating_ip_address)
+
+    def __str__(self):
+        return '<"FloatingIP" addr="%s" id="%s">' % (self.floating_ip_address,
+                                                     self.id)
+
+    def delete(self):
+        self.client.delete_floatingip(self.id)
+
+
+class DeletablePort(DeletableResource):
+
+    def delete(self):
+        self.client.delete_port(self.id)
+
+
+class DeletableSecurityGroup(DeletableResource):
+
+    def delete(self):
+        self.client.delete_security_group(self.id)
+
+
+class DeletableSecurityGroupRule(DeletableResource):
+
+    def __repr__(self):
+        return '<%s id="%s">' % (self.__class__.__name__, self.id)
+
+    def delete(self):
+        self.client.delete_security_group_rule(self.id)
+
+
+class DeletablePool(DeletableResource):
+
+    def delete(self):
+        self.client.delete_pool(self.id)
+
+
+class DeletableMember(DeletableResource):
+
+    def delete(self):
+        self.client.delete_member(self.id)
+
+
+class DeletableVip(DeletableResource):
+
+    def delete(self):
+        self.client.delete_vip(self.id)
diff --git a/tempest/services/object_storage/account_client.py b/tempest/services/object_storage/account_client.py
index be0f888..eca57c0 100644
--- a/tempest/services/object_storage/account_client.py
+++ b/tempest/services/object_storage/account_client.py
@@ -32,11 +32,15 @@
 
     def create_account(self, data=None,
                        params=None,
-                       metadata={},
-                       remove_metadata={},
+                       metadata=None,
+                       remove_metadata=None,
                        metadata_prefix='X-Account-Meta-',
                        remove_metadata_prefix='X-Remove-Account-Meta-'):
         """Create an account."""
+        if metadata is None:
+            metadata = {}
+        if remove_metadata is None:
+            remove_metadata = {}
         url = ''
         if params:
             url += '?%s' % urllib.urlencode(params)
diff --git a/tempest/services/orchestration/json/orchestration_client.py b/tempest/services/orchestration/json/orchestration_client.py
index dd166dd..d3867cd 100644
--- a/tempest/services/orchestration/json/orchestration_client.py
+++ b/tempest/services/orchestration/json/orchestration_client.py
@@ -45,9 +45,11 @@
         body = json.loads(body)
         return resp, body['stacks']
 
-    def create_stack(self, name, disable_rollback=True, parameters={},
+    def create_stack(self, name, disable_rollback=True, parameters=None,
                      timeout_mins=60, template=None, template_url=None,
                      environment=None, files=None):
+        if parameters is None:
+            parameters = {}
         headers, body = self._prepare_update_create(
             name,
             disable_rollback,
@@ -63,8 +65,10 @@
         return resp, body
 
     def update_stack(self, stack_identifier, name, disable_rollback=True,
-                     parameters={}, timeout_mins=60, template=None,
+                     parameters=None, timeout_mins=60, template=None,
                      template_url=None, environment=None, files=None):
+        if parameters is None:
+            parameters = {}
         headers, body = self._prepare_update_create(
             name,
             disable_rollback,
@@ -80,9 +84,11 @@
         return resp, body
 
     def _prepare_update_create(self, name, disable_rollback=True,
-                               parameters={}, timeout_mins=60,
+                               parameters=None, timeout_mins=60,
                                template=None, template_url=None,
                                environment=None, files=None):
+        if parameters is None:
+            parameters = {}
         post_body = {
             "stack_name": name,
             "disable_rollback": disable_rollback,
@@ -264,22 +270,47 @@
         body = json.loads(body)
         return resp, body
 
-    def validate_template(self, template, parameters={}):
+    def validate_template(self, template, parameters=None):
         """Returns the validation result for a template with parameters."""
+        if parameters is None:
+            parameters = {}
         post_body = {
             'template': template,
             'parameters': parameters,
         }
         return self._validate_template(post_body)
 
-    def validate_template_url(self, template_url, parameters={}):
+    def validate_template_url(self, template_url, parameters=None):
         """Returns the validation result for a template with parameters."""
+        if parameters is None:
+            parameters = {}
         post_body = {
             'template_url': template_url,
             'parameters': parameters,
         }
         return self._validate_template(post_body)
 
+    def list_resource_types(self):
+        """List resource types."""
+        resp, body = self.get('resource_types')
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return body['resource_types']
+
+    def get_resource_type(self, resource_type_name):
+        """Return the schema of a resource type."""
+        url = 'resource_types/%s' % resource_type_name
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        return json.loads(body)
+
+    def get_resource_type_template(self, resource_type_name):
+        """Return the template of a resource type."""
+        url = 'resource_types/%s/template' % resource_type_name
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        return json.loads(body)
+
     def create_software_config(self, name=None, config=None, group=None,
                                inputs=None, outputs=None, options=None):
         headers, body = self._prep_software_config_create(
diff --git a/tempest/test.py b/tempest/test.py
index 68806c2..f34933e 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -70,7 +70,7 @@
     """A decorator used to wrap the setUpClass for cleaning up resources
        when setUpClass failed.
     """
-
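+    # functools.wraps copies the wrapped function's name and docstring onto
+    # the decorator, so introspection and test reporting still see the
+    # original setUpClass rather than 'decorator'.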
+    @functools.wraps(f)
     def decorator(cls):
             try:
                 f(cls)
diff --git a/tempest/tests/test_hacking.py b/tempest/tests/test_hacking.py
index 52fdf7e..9c13013 100644
--- a/tempest/tests/test_hacking.py
+++ b/tempest/tests/test_hacking.py
@@ -107,3 +107,16 @@
         self.assertFalse(checks.no_official_client_manager_in_api_tests(
             "cls.official_client = clients.OfficialClientManager(credentials)",
             "tempest/scenario/fake_test.py"))
+
+    def test_no_mutable_default_args(self):
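+        # The check should flag a mutable literal ({} or []) used as a default
+        # parameter value, but ignore assignments that merely build new lists
+        # or dicts.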
+        self.assertEqual(1, len(list(checks.no_mutable_default_args(
+            " def function1(para={}):"))))
+
+        self.assertEqual(1, len(list(checks.no_mutable_default_args(
+            "def function2(para1, para2, para3=[])"))))
+
+        self.assertEqual(0, len(list(checks.no_mutable_default_args(
+            "defined = []"))))
+
+        self.assertEqual(0, len(list(checks.no_mutable_default_args(
+            "defined, undefined = [], {}"))))
diff --git a/tools/check_logs.py b/tools/check_logs.py
index eab9f73..917aaaf 100755
--- a/tools/check_logs.py
+++ b/tools/check_logs.py
@@ -26,8 +26,9 @@
 import yaml
 
 
-is_grenade = (os.environ.get('DEVSTACK_GATE_GRENADE', "0") == "1" or
-              os.environ.get('DEVSTACK_GATE_GRENADE_FORWARD', "0") == "1")
+# DEVSTACK_GATE_GRENADE is unset when grenade is not running, or set to a
+# string describing the type of grenade run to perform.
+is_grenade = os.environ.get('DEVSTACK_GATE_GRENADE') is not None
 dump_all_errors = True
 
 # As logs are made clean, add to this set