Merge "Extend compute-manage cli tests"
diff --git a/cli/__init__.py b/cli/__init__.py
index bc967cc..6ffe229 100644
--- a/cli/__init__.py
+++ b/cli/__init__.py
@@ -19,9 +19,12 @@
 import shlex
 import subprocess
 
-from tempest.openstack.common import cfg
+from oslo.config import cfg
+
+import cli.output_parser
 import tempest.test
 
+
 LOG = logging.getLogger(__name__)
 
 cli_opts = [
@@ -49,6 +52,7 @@
         super(ClientTestBase, cls).setUpClass()
 
     def __init__(self, *args, **kwargs):
+        self.parser = cli.output_parser
         super(ClientTestBase, self).__init__(*args, **kwargs)
 
     def nova(self, action, flags='', params='', admin=True, fail_ok=False):
@@ -61,6 +65,11 @@
         return self.cmd(
             'nova-manage', action, flags, params, fail_ok)
 
+    def keystone(self, action, flags='', params='', admin=True, fail_ok=False):
+        """Executes keystone command for the given action."""
+        return self.cmd_with_auth(
+            'keystone', action, flags, params, admin, fail_ok)
+
     def cmd_with_auth(self, cmd, action, flags='', params='',
                       admin=True, fail_ok=False):
         """Executes given command with auth attributes appended."""
@@ -84,3 +93,9 @@
             LOG.error("command output:\n%s" % e.output)
             raise
         return result
+
+    def assertTableStruct(self, items, field_names):
+        """Verify that all items have the keys listed in field_names."""
+        for item in items:
+            for field in field_names:
+                self.assertIn(field, item)
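The pieces added to cli/__init__.py are meant to be used together: a test shells out through nova() or keystone(), parses the captured table with self.parser, and checks the column set with assertTableStruct(). A minimal usage sketch, not part of this change; the 'ID'/'Name'/'Status' field names are only illustrative assumptions about 'nova list' output:

import cli


class SimpleReadOnlyUsageSketch(cli.ClientTestBase):

    def test_nova_list_structure(self):
        # Run the CLI, parse its ascii table, then assert on the columns.
        out = self.nova('list')
        servers = self.parser.listing(out)
        # Column names are illustrative; adjust to the actual client output.
        self.assertTableStruct(servers, ['ID', 'Name', 'Status'])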
diff --git a/cli/output_parser.py b/cli/output_parser.py
new file mode 100644
index 0000000..840839b
--- /dev/null
+++ b/cli/output_parser.py
@@ -0,0 +1,168 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""Collection of utilities for parsing CLI clients output."""
+
+
+import logging
+import re
+
+
+LOG = logging.getLogger(__name__)
+
+
+delimiter_line = re.compile(r'^\+\-[\+\-]+\-\+$')
+
+
+def details_multiple(output_lines, with_label=False):
+    """Return list of dicts with item details from cli output tables.
+
+    If with_label is True, key '__label' is added to each item's dict.
+    For more about 'label' see tables().
+    """
+    items = []
+    tables_ = tables(output_lines)
+    for table_ in tables_:
+        if 'Property' not in table_['headers'] \
+           or 'Value' not in table_['headers']:
+            raise Exception('Invalid structure of table with details')
+        item = {}
+        for value in table_['values']:
+            item[value[0]] = value[1]
+        if with_label:
+            item['__label'] = table_['label']
+        items.append(item)
+    return items
+
+
+def details(output_lines, with_label=False):
+    """Return dict with details of first item (table) found in output."""
+    items = details_multiple(output_lines, with_label)
+    return items[0]
+
+
+def listing(output_lines):
+    """Return list of dicts with basic item info parsed from cli output.
+    """
+
+    items = []
+    table_ = table(output_lines)
+    for row in table_['values']:
+        item = {}
+        for col_idx, col_key in enumerate(table_['headers']):
+            item[col_key] = row[col_idx]
+        items.append(item)
+    return items
+
+
+def tables(output_lines):
+    """Find all ascii-tables in output and parse them.
+
+    Return list of tables parsed from cli output as dicts.
+    (see table())
+
+    If a label (a separate line preceding the table) is found,
+    it is added to each table's dict under the 'label' key.
+    """
+    tables_ = []
+
+    table_ = []
+    label = None
+
+    start = False
+    header = False
+
+    if not isinstance(output_lines, list):
+        output_lines = output_lines.split('\n')
+
+    for line in output_lines:
+        if delimiter_line.match(line):
+            if not start:
+                start = True
+            elif not header:
+                # we are after head area
+                header = True
+            else:
+                # table ends here
+                start = header = None
+                table_.append(line)
+
+                parsed = table(table_)
+                parsed['label'] = label
+                tables_.append(parsed)
+
+                table_ = []
+                label = None
+                continue
+        if start:
+            table_.append(line)
+        else:
+            if label is None:
+                label = line
+            else:
+                LOG.warn('Invalid line between tables: %s' % line)
+    if len(table_) > 0:
+        LOG.warn('Missing end of table')
+
+    return tables_
+
+
+def table(output_lines):
+    """Parse single table from cli output.
+
+    Return dict with list of column names in 'headers' key and
+    rows in 'values' key.
+    """
+    table_ = {'headers': [], 'values': []}
+    columns = None
+
+    if not isinstance(output_lines, list):
+        output_lines = output_lines.split('\n')
+
+    for line in output_lines:
+        if delimiter_line.match(line):
+            columns = _table_columns(line)
+            continue
+        if '|' not in line:
+            LOG.warn('skipping invalid table line: %s' % line)
+            continue
+        row = []
+        for col in columns:
+            row.append(line[col[0]:col[1]].strip())
+        if table_['headers']:
+            table_['values'].append(row)
+        else:
+            table_['headers'] = row
+
+    return table_
+
+
+def _table_columns(first_table_row):
+    """Find column ranges in output line.
+
+    Return a list of tuples (start, end) for each column
+    detected from the plus (+) characters in the delimiter line.
+    """
+    positions = []
+    start = 1  # there is '+' at 0
+    while start < len(first_table_row):
+        end = first_table_row.find('+', start)
+        if end == -1:
+            break
+        positions.append((start, end))
+        start = end + 1
+    return positions
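To make the parsing contract concrete, here is a small sketch (not part of the change) run against hand-written prettytable-style output: listing() returns one dict per data row keyed by the header line, and details() collapses a Property/Value table into a single dict.

from cli import output_parser

LIST_OUTPUT = """\
+----+-------+---------+
| id | name  | enabled |
+----+-------+---------+
| 1  | admin | True    |
| 2  | demo  | True    |
+----+-------+---------+"""

rows = output_parser.listing(LIST_OUTPUT)
# [{'id': '1', 'name': 'admin', 'enabled': 'True'},
#  {'id': '2', 'name': 'demo', 'enabled': 'True'}]

DETAILS_OUTPUT = """\
+----------+-------+
| Property | Value |
+----------+-------+
| id       | 42    |
| status   | ACTIVE|
+----------+-------+"""

item = output_parser.details(DETAILS_OUTPUT)
# {'id': '42', 'status': 'ACTIVE'}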
diff --git a/cli/simple_read_only/test_compute.py b/cli/simple_read_only/test_compute.py
index 38e8944..43c3c45 100644
--- a/cli/simple_read_only/test_compute.py
+++ b/cli/simple_read_only/test_compute.py
@@ -18,11 +18,11 @@
 import logging
 import subprocess
 
+from oslo.config import cfg
 import testtools
 
 import cli
-
-from tempest.openstack.common import cfg
+from tempest import config
 
 
 CONF = cfg.CONF
@@ -73,7 +73,7 @@
     def test_admin_dns_domains(self):
         self.nova('dns-domains')
 
-    @testtools.skip("needs parameters")
+    @testtools.skip("Test needs parameters, Bug: 1157349")
     def test_admin_dns_list(self):
         self.nova('dns-list')
 
@@ -111,7 +111,7 @@
     def test_admin_image_list(self):
         self.nova('image-list')
 
-    @testtools.skip("needs parameters")
+    @testtools.skip("Test needs parameters, Bug: 1157349")
     def test_admin_interface_list(self):
         self.nova('interface-list')
 
@@ -136,7 +136,7 @@
     def test_admin_secgroup_list(self):
         self.nova('secgroup-list')
 
-    @testtools.skip("needs parameters")
+    @testtools.skip("Test needs parameters, Bug: 1157349")
     def test_admin_secgroup_list_rules(self):
         self.nova('secgroup-list-rules')
 
diff --git a/cli/simple_read_only/test_keystone.py b/cli/simple_read_only/test_keystone.py
new file mode 100644
index 0000000..4b14c3c
--- /dev/null
+++ b/cli/simple_read_only/test_keystone.py
@@ -0,0 +1,109 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import logging
+import re
+import subprocess
+
+import cli
+
+
+LOG = logging.getLogger(__name__)
+
+
+class SimpleReadOnlyKeystoneClientTest(cli.ClientTestBase):
+    """Basic, read-only tests for Keystone CLI client.
+
+    Checks return values and output of read-only commands.
+    These tests do not presume any content, nor do they create
+    their own. They only verify the structure of output if present.
+    """
+
+    def test_admin_fake_action(self):
+        self.assertRaises(subprocess.CalledProcessError,
+                          self.keystone,
+                          'this-does-not-exist')
+
+    def test_admin_catalog_list(self):
+        out = self.keystone('catalog')
+        catalog = self.parser.details_multiple(out, with_label=True)
+        for svc in catalog:
+            self.assertTrue(svc['__label'].startswith('Service:'))
+
+    def test_admin_endpoint_list(self):
+        out = self.keystone('endpoint-list')
+        endpoints = self.parser.listing(out)
+        self.assertTableStruct(endpoints, [
+            'id', 'region', 'publicurl', 'internalurl',
+            'adminurl', 'service_id'])
+
+    def test_admin_endpoint_service_match(self):
+        endpoints = self.parser.listing(self.keystone('endpoint-list'))
+        services = self.parser.listing(self.keystone('service-list'))
+        svc_by_id = {}
+        for svc in services:
+            svc_by_id[svc['id']] = svc
+        for endpoint in endpoints:
+            self.assertIn(endpoint['service_id'], svc_by_id)
+
+    def test_admin_role_list(self):
+        roles = self.parser.listing(self.keystone('role-list'))
+        self.assertTableStruct(roles, ['id', 'name'])
+
+    def test_admin_service_list(self):
+        services = self.parser.listing(self.keystone('service-list'))
+        self.assertTableStruct(services, ['id', 'name', 'type', 'description'])
+
+    def test_admin_tenant_list(self):
+        tenants = self.parser.listing(self.keystone('tenant-list'))
+        self.assertTableStruct(tenants, ['id', 'name', 'enabled'])
+
+    def test_admin_user_list(self):
+        users = self.parser.listing(self.keystone('user-list'))
+        self.assertTableStruct(users, [
+            'id', 'name', 'enabled', 'email'])
+
+    def test_admin_user_role_list(self):
+        user_roles = self.parser.listing(self.keystone('user-role-list'))
+        self.assertTableStruct(user_roles, [
+            'id', 'name', 'user_id', 'tenant_id'])
+
+    def test_admin_discover(self):
+        discovered = self.keystone('discover')
+        self.assertIn('Keystone found at http', discovered)
+        self.assertIn('supports version', discovered)
+
+    def test_admin_help(self):
+        help_text = self.keystone('help')
+        lines = help_text.split('\n')
+        self.assertTrue(lines[0].startswith('usage: keystone'))
+
+        commands = []
+        cmds_start = lines.index('Positional arguments:')
+        cmds_end = lines.index('Optional arguments:')
+        command_pattern = re.compile(r'^ {4}([a-z0-9\-\_]+)')
+        for line in lines[cmds_start:cmds_end]:
+            match = command_pattern.match(line)
+            if match:
+                commands.append(match.group(1))
+        commands = set(commands)
+        wanted_commands = set(('catalog', 'endpoint-list', 'help',
+                               'token-get', 'discover', 'bootstrap'))
+        self.assertFalse(wanted_commands - commands)
+
+    def test_admin_bashcompletion(self):
+        self.keystone('bash-completion')
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index a70a7ab..02bfdcb 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -193,6 +193,9 @@
 # for each tenant to have their own router.
 public_router_id = {$PUBLIC_ROUTER_ID}
 
+# Whether or not quantum is expected to be available
+quantum_available = false
+
 [volume]
 # This section contains the configuration options used when executing tests
 # against the OpenStack Block Storage API service
diff --git a/openstack-common.conf b/openstack-common.conf
index 0c9e43e..501328c 100644
--- a/openstack-common.conf
+++ b/openstack-common.conf
@@ -1,7 +1,7 @@
 [DEFAULT]
 
 # The list of modules to copy from openstack-common
-modules=setup,cfg,iniparser,install_venv_common
+modules=setup,install_venv_common
 
 # The base module to hold the copy of openstack.common
 base=tempest
diff --git a/tempest/clients.py b/tempest/clients.py
index ef07d9c..b3b5906 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -52,7 +52,8 @@
 from tempest.services.identity.json.identity_client import TokenClientJSON
 from tempest.services.identity.xml.identity_client import IdentityClientXML
 from tempest.services.identity.xml.identity_client import TokenClientXML
-from tempest.services.image.json.image_client import ImageClientJSON
+from tempest.services.image.v1.json.image_client import ImageClientJSON
+from tempest.services.image.v2.json.image_client import ImageClientV2JSON
 from tempest.services.network.json.network_client import NetworkClient
 from tempest.services.object_storage.account_client import AccountClient
 from tempest.services.object_storage.account_client import \
@@ -69,6 +70,10 @@
     VolumeTypesClientXML
 from tempest.services.volume.xml.snapshots_client import SnapshotsClientXML
 from tempest.services.volume.xml.volumes_client import VolumesClientXML
+from tempest.services.compute.json.interfaces_client import \
+    InterfacesClientJSON
+from tempest.services.compute.xml.interfaces_client import \
+    InterfacesClientXML
 
 LOG = logging.getLogger(__name__)
 
@@ -147,6 +152,11 @@
     "xml": SecurityGroupsClientXML,
 }
 
+INTERFACES_CLIENT = {
+    "json": InterfacesClientJSON,
+    "xml": InterfacesClientXML,
+}
+
 
 class Manager(object):
 
@@ -208,6 +218,7 @@
             self.token_client = TOKEN_CLIENT[interface](self.config)
             self.security_groups_client = \
                 SECURITY_GROUPS_CLIENT[interface](*client_args)
+            self.interfaces_client = INTERFACES_CLIENT[interface](*client_args)
         except KeyError:
             msg = "Unsupported interface type `%s'" % interface
             raise exceptions.InvalidConfiguration(msg)
@@ -215,6 +226,7 @@
         self.hosts_client = HostsClientJSON(*client_args)
         self.account_client = AccountClient(*client_args)
         self.image_client = ImageClientJSON(*client_args)
+        self.image_client_v2 = ImageClientV2JSON(*client_args)
         self.container_client = ContainerClient(*client_args)
         self.object_client = ObjectClient(*client_args)
         self.ec2api_client = botoclients.APIClientEC2(*client_args)
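clients.Manager now exposes the interfaces client (JSON or XML, keyed on the configured interface) and a Glance v2 client next to the existing v1 client. A short sketch of reaching them from a Manager; the attribute names come from this change, while constructing Manager() with no arguments (credentials taken from tempest.conf) is an assumption about the existing constructor:

from tempest import clients

mgr = clients.Manager()              # assumed: default credentials from tempest.conf
interfaces = mgr.interfaces_client   # InterfacesClientJSON or ...XML, per 'interface'
images_v2 = mgr.image_client_v2      # Glance v2 client, alongside mgr.image_client (v1)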
diff --git a/tempest/common/glance_http.py b/tempest/common/glance_http.py
index 36a9abd..0902239 100644
--- a/tempest/common/glance_http.py
+++ b/tempest/common/glance_http.py
@@ -18,6 +18,7 @@
 import copy
 import hashlib
 import httplib
+import json
 import logging
 import posixpath
 import re
@@ -26,10 +27,6 @@
 import struct
 import urlparse
 
-try:
-    import json
-except ImportError:
-    import simplejson as json
 
 # Python 2.5 compat fix
 if not hasattr(urlparse, 'parse_qsl'):
@@ -129,11 +126,11 @@
             resp = conn.getresponse()
         except socket.gaierror as e:
             message = "Error finding address for %(url)s: %(e)s" % locals()
-            raise exc.EndpointNotFound
+            raise exc.EndpointNotFound(message)
         except (socket.error, socket.timeout) as e:
             endpoint = self.endpoint
             message = "Error communicating with %(endpoint)s %(e)s" % locals()
-            raise exc.TimeoutException
+            raise exc.TimeoutException(message)
 
         body_iter = ResponseBodyIterator(resp)
 
diff --git a/tempest/common/rest_client.py b/tempest/common/rest_client.py
index c582826..d68b9ed 100644
--- a/tempest/common/rest_client.py
+++ b/tempest/common/rest_client.py
@@ -15,6 +15,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import collections
 import hashlib
 import httplib2
 import json
@@ -60,6 +61,8 @@
                                        'location', 'proxy-authenticate',
                                        'retry-after', 'server',
                                        'vary', 'www-authenticate'))
+        dscv = self.config.identity.disable_ssl_certificate_validation
+        self.http_obj = httplib2.Http(disable_ssl_certificate_validation=dscv)
 
     def _set_auth(self):
         """
@@ -105,8 +108,6 @@
         params['headers'] = {'User-Agent': 'Test-Client', 'X-Auth-User': user,
                              'X-Auth-Key': password}
 
-        dscv = self.config.identity.disable_ssl_certificate_validation
-        self.http_obj = httplib2.Http(disable_ssl_certificate_validation=dscv)
         resp, body = self.http_obj.request(auth_url, 'GET', **params)
         try:
             return resp['x-auth-token'], resp['x-server-management-url']
@@ -132,8 +133,6 @@
             }
         }
 
-        dscv = self.config.identity.disable_ssl_certificate_validation
-        self.http_obj = httplib2.Http(disable_ssl_certificate_validation=dscv)
         headers = {'Content-Type': 'application/json'}
         body = json.dumps(creds)
         resp, body = self.http_obj.request(auth_url, 'POST',
@@ -177,8 +176,8 @@
     def post(self, url, body, headers):
         return self.request('POST', url, headers, body)
 
-    def get(self, url, headers=None, wait=None):
-        return self.request('GET', url, headers, wait=wait)
+    def get(self, url, headers=None):
+        return self.request('GET', url, headers)
 
     def delete(self, url, headers=None):
         return self.request('DELETE', url, headers)
@@ -192,6 +191,13 @@
     def copy(self, url, headers=None):
         return self.request('COPY', url, headers)
 
+    def get_versions(self):
+        resp, body = self.get('')
+        body = self._parse_resp(body)
+        body = body['versions']
+        versions = map(lambda x: x['id'], body)
+        return resp, versions
+
     def _log_request(self, method, req_url, headers, body):
         self.LOG.info('Request: ' + method + ' ' + req_url)
         if headers:
@@ -252,23 +258,14 @@
         # Usually RFC2616 says error responses SHOULD contain an explanation.
         # The warning is normal for SHOULD/SHOULD NOT case
 
-        # Likely it will cause error
-        if not body and resp.status >= 400:
+        # Likely it will cause an error
+        if not resp_body and resp.status >= 400:
             self.LOG.warning("status >= 400 response with empty body")
 
-    def request(self, method, url,
-                headers=None, body=None, depth=0, wait=None):
+    def _request(self, method, url,
+                 headers=None, body=None):
         """A simple HTTP request interface."""
 
-        if (self.token is None) or (self.base_url is None):
-            self._set_auth()
-
-        dscv = self.config.identity.disable_ssl_certificate_validation
-        self.http_obj = httplib2.Http(disable_ssl_certificate_validation=dscv)
-        if headers is None:
-            headers = {}
-        headers['X-Auth-Token'] = self.token
-
         req_url = "%s/%s" % (self.base_url, url)
         self._log_request(method, req_url, headers, body)
         resp, resp_body = self.http_obj.request(req_url, method,
@@ -276,13 +273,37 @@
         self._log_response(resp, resp_body)
         self.response_checker(method, url, headers, body, resp, resp_body)
 
-        self._error_checker(method, url, headers, body, resp, resp_body, depth,
-                            wait)
+        return resp, resp_body
 
+    def request(self, method, url,
+                headers=None, body=None):
+        retry = 0
+        if (self.token is None) or (self.base_url is None):
+            self._set_auth()
+
+        if headers is None:
+            headers = {}
+        headers['X-Auth-Token'] = self.token
+
+        resp, resp_body = self._request(method, url,
+                                        headers=headers, body=body)
+
+        while (resp.status == 413 and
+               'retry-after' in resp and
+                not self.is_absolute_limit(
+                    resp, self._parse_resp(resp_body)) and
+                retry < MAX_RECURSION_DEPTH):
+            retry += 1
+            delay = int(resp['retry-after'])
+            time.sleep(delay)
+            resp, resp_body = self._request(method, url,
+                                            headers=headers, body=body)
+        self._error_checker(method, url, headers, body,
+                            resp, resp_body)
         return resp, resp_body
 
     def _error_checker(self, method, url,
-                       headers, body, resp, resp_body, depth=0, wait=None):
+                       headers, body, resp, resp_body):
 
         # NOTE(mtreinish): Check for httplib response from glance_http. The
         # object can't be used here because importing httplib breaks httplib2.
@@ -334,9 +355,10 @@
         if resp.status == 413:
             if parse_resp:
                 resp_body = self._parse_resp(resp_body)
-            #Checking whether Absolute/Rate limit
-            return self.check_over_limit(resp_body, method, url, headers, body,
-                                         depth, wait)
+            if self.is_absolute_limit(resp, resp_body):
+                raise exceptions.OverLimit(resp_body)
+            else:
+                raise exceptions.RateLimitExceeded(resp_body)
 
         if resp.status == 422:
             if parse_resp:
@@ -366,35 +388,14 @@
                 resp_body = self._parse_resp(resp_body)
             raise exceptions.RestClientException(str(resp.status))
 
-    def check_over_limit(self, resp_body, method, url,
-                         headers, body, depth, wait):
-        self.is_absolute_limit(resp_body['overLimit'])
-        return self.is_rate_limit_retry_max_recursion_depth(
-            resp_body['overLimit'], method, url, headers,
-            body, depth, wait)
-
-    def is_absolute_limit(self, resp_body):
-        if 'exceeded' in resp_body['message']:
-            raise exceptions.OverLimit(resp_body['message'])
-        else:
-            return
-
-    def is_rate_limit_retry_max_recursion_depth(self, resp_body, method,
-                                                url, headers, body, depth,
-                                                wait):
-        if 'retryAfter' in resp_body:
-            if depth < MAX_RECURSION_DEPTH:
-                delay = resp_body['retryAfter']
-                time.sleep(int(delay))
-                return self.request(method, url, headers=headers,
-                                    body=body,
-                                    depth=depth + 1, wait=wait)
-            else:
-                raise exceptions.RateLimitExceeded(
-                    message=resp_body['overLimitFault']['message'],
-                    details=resp_body['overLimitFault']['details'])
-        else:
-            raise exceptions.OverLimit(resp_body['message'])
+    def is_absolute_limit(self, resp, resp_body):
+        if (not isinstance(resp_body, collections.Mapping) or
+            'retry-after' not in resp):
+            return True
+        over_limit = resp_body.get('overLimit', None)
+        if not over_limit:
+            return True
+        return 'exceed' in over_limit.get('message', '')
 
     def wait_for_resource_deletion(self, id):
         """Waits for a resource to be deleted."""
@@ -421,9 +422,8 @@
     def _parse_resp(self, body):
         return xml_to_json(etree.fromstring(body))
 
-    def check_over_limit(self, resp_body, method, url,
-                         headers, body, depth, wait):
-        self.is_absolute_limit(resp_body)
-        return self.is_rate_limit_retry_max_recursion_depth(
-            resp_body, method, url, headers,
-            body, depth, wait)
+    def is_absolute_limit(self, resp, resp_body):
+        if (not isinstance(resp_body, collections.Mapping) or
+            'retry-after' not in resp):
+            return True
+        return 'exceed' in resp_body.get('message', '')
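The rest_client rework replaces the recursive check_over_limit() path with a bounded retry loop in request(): on a 413 response carrying a retry-after header, the client sleeps for the advertised delay and retries up to MAX_RECURSION_DEPTH times, and _error_checker() then raises OverLimit for absolute limits or RateLimitExceeded otherwise. A standalone sketch of the new decision logic, using hypothetical response data and a plain dict check in place of collections.Mapping:

def is_absolute_limit(headers, resp_body):
    # Anything that is not a mapping with an overLimit section, or that lacks
    # a retry-after header, is treated as an absolute limit (no retry).
    if not isinstance(resp_body, dict) or 'retry-after' not in headers:
        return True
    over_limit = resp_body.get('overLimit')
    if not over_limit:
        return True
    return 'exceed' in over_limit.get('message', '')

# Rate limit: retry-after is present and the message does not say "exceeded",
# so request() would sleep for the advertised delay and retry.
print(is_absolute_limit({'retry-after': '2'},
                        {'overLimit': {'message': 'Too many requests'}}))  # False

# Absolute limit: the quota is exceeded, so retrying cannot help.
print(is_absolute_limit({'retry-after': '2'},
                        {'overLimit': {'message': 'Quota exceeded'}}))     # True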
diff --git a/tempest/common/ssh.py b/tempest/common/ssh.py
index 151060f..be6fe27 100644
--- a/tempest/common/ssh.py
+++ b/tempest/common/ssh.py
@@ -106,6 +106,7 @@
         ssh = self._get_ssh_connection()
         transport = ssh.get_transport()
         channel = transport.open_session()
+        channel.fileno()  # Register event pipe
         channel.exec_command(cmd)
         channel.shutdown_write()
         out_data = []
diff --git a/tempest/config.py b/tempest/config.py
index 856be16..3a4a8c9 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -19,8 +19,9 @@
 import os
 import sys
 
+from oslo.config import cfg
+
 from tempest.common.utils.misc import singleton
-from tempest.openstack.common import cfg
 
 LOG = logging.getLogger(__name__)
 
@@ -280,6 +281,9 @@
                default="",
                help="Id of the public router that provides external "
                     "connectivity"),
+    cfg.BoolOpt('quantum_available',
+                default=False,
+                help="Whether or not quantum is expected to be available"),
 ]
 
 
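The new quantum_available flag gives tests a way to skip cleanly on deployments without Quantum. A hedged sketch of the intended skip pattern; it assumes the option is registered in the [network] group (it sits next to public_router_id), that the existing TempestConfig singleton exposes it, and it reuses the testtools skip decorators already used elsewhere in this change:

import testtools

from tempest import config

CONF = config.TempestConfig()


class QuantumDependentSketch(testtools.TestCase):

    @testtools.skipUnless(CONF.network.quantum_available,
                          "quantum is not expected to be available")
    def test_something_needing_quantum(self):
        pass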
diff --git a/tempest/openstack/common/cfg.py b/tempest/openstack/common/cfg.py
deleted file mode 100644
index cae4ecc..0000000
--- a/tempest/openstack/common/cfg.py
+++ /dev/null
@@ -1,1749 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-r"""
-Configuration options which may be set on the command line or in config files.
-
-The schema for each option is defined using the Opt sub-classes, e.g.:
-
-::
-
-    common_opts = [
-        cfg.StrOpt('bind_host',
-                   default='0.0.0.0',
-                   help='IP address to listen on'),
-        cfg.IntOpt('bind_port',
-                   default=9292,
-                   help='Port number to listen on')
-    ]
-
-Options can be strings, integers, floats, booleans, lists or 'multi strings'::
-
-    enabled_apis_opt = cfg.ListOpt('enabled_apis',
-                                   default=['ec2', 'osapi_compute'],
-                                   help='List of APIs to enable by default')
-
-    DEFAULT_EXTENSIONS = [
-        'nova.api.openstack.compute.contrib.standard_extensions'
-    ]
-    osapi_compute_extension_opt = cfg.MultiStrOpt('osapi_compute_extension',
-                                                  default=DEFAULT_EXTENSIONS)
-
-Option schemas are registered with the config manager at runtime, but before
-the option is referenced::
-
-    class ExtensionManager(object):
-
-        enabled_apis_opt = cfg.ListOpt(...)
-
-        def __init__(self, conf):
-            self.conf = conf
-            self.conf.register_opt(enabled_apis_opt)
-            ...
-
-        def _load_extensions(self):
-            for ext_factory in self.conf.osapi_compute_extension:
-                ....
-
-A common usage pattern is for each option schema to be defined in the module or
-class which uses the option::
-
-    opts = ...
-
-    def add_common_opts(conf):
-        conf.register_opts(opts)
-
-    def get_bind_host(conf):
-        return conf.bind_host
-
-    def get_bind_port(conf):
-        return conf.bind_port
-
-An option may optionally be made available via the command line. Such options
-must registered with the config manager before the command line is parsed (for
-the purposes of --help and CLI arg validation)::
-
-    cli_opts = [
-        cfg.BoolOpt('verbose',
-                    short='v',
-                    default=False,
-                    help='Print more verbose output'),
-        cfg.BoolOpt('debug',
-                    short='d',
-                    default=False,
-                    help='Print debugging output'),
-    ]
-
-    def add_common_opts(conf):
-        conf.register_cli_opts(cli_opts)
-
-The config manager has two CLI options defined by default, --config-file
-and --config-dir::
-
-    class ConfigOpts(object):
-
-        def __call__(self, ...):
-
-            opts = [
-                MultiStrOpt('config-file',
-                        ...),
-                StrOpt('config-dir',
-                       ...),
-            ]
-
-            self.register_cli_opts(opts)
-
-Option values are parsed from any supplied config files using
-openstack.common.iniparser. If none are specified, a default set is used
-e.g. glance-api.conf and glance-common.conf::
-
-    glance-api.conf:
-      [DEFAULT]
-      bind_port = 9292
-
-    glance-common.conf:
-      [DEFAULT]
-      bind_host = 0.0.0.0
-
-Option values in config files override those on the command line. Config files
-are parsed in order, with values in later files overriding those in earlier
-files.
-
-The parsing of CLI args and config files is initiated by invoking the config
-manager e.g.::
-
-    conf = ConfigOpts()
-    conf.register_opt(BoolOpt('verbose', ...))
-    conf(sys.argv[1:])
-    if conf.verbose:
-        ...
-
-Options can be registered as belonging to a group::
-
-    rabbit_group = cfg.OptGroup(name='rabbit',
-                                title='RabbitMQ options')
-
-    rabbit_host_opt = cfg.StrOpt('host',
-                                 default='localhost',
-                                 help='IP/hostname to listen on'),
-    rabbit_port_opt = cfg.IntOpt('port',
-                                 default=5672,
-                                 help='Port number to listen on')
-
-    def register_rabbit_opts(conf):
-        conf.register_group(rabbit_group)
-        # options can be registered under a group in either of these ways:
-        conf.register_opt(rabbit_host_opt, group=rabbit_group)
-        conf.register_opt(rabbit_port_opt, group='rabbit')
-
-If it no group attributes are required other than the group name, the group
-need not be explicitly registered e.g.
-
-    def register_rabbit_opts(conf):
-        # The group will automatically be created, equivalent calling::
-        #   conf.register_group(OptGroup(name='rabbit'))
-        conf.register_opt(rabbit_port_opt, group='rabbit')
-
-If no group is specified, options belong to the 'DEFAULT' section of config
-files::
-
-    glance-api.conf:
-      [DEFAULT]
-      bind_port = 9292
-      ...
-
-      [rabbit]
-      host = localhost
-      port = 5672
-      use_ssl = False
-      userid = guest
-      password = guest
-      virtual_host = /
-
-Command-line options in a group are automatically prefixed with the
-group name::
-
-    --rabbit-host localhost --rabbit-port 9999
-
-Option values in the default group are referenced as attributes/properties on
-the config manager; groups are also attributes on the config manager, with
-attributes for each of the options associated with the group::
-
-    server.start(app, conf.bind_port, conf.bind_host, conf)
-
-    self.connection = kombu.connection.BrokerConnection(
-        hostname=conf.rabbit.host,
-        port=conf.rabbit.port,
-        ...)
-
-Option values may reference other values using PEP 292 string substitution::
-
-    opts = [
-        cfg.StrOpt('state_path',
-                   default=os.path.join(os.path.dirname(__file__), '../'),
-                   help='Top-level directory for maintaining nova state'),
-        cfg.StrOpt('sqlite_db',
-                   default='nova.sqlite',
-                   help='file name for sqlite'),
-        cfg.StrOpt('sql_connection',
-                   default='sqlite:///$state_path/$sqlite_db',
-                   help='connection string for sql database'),
-    ]
-
-Note that interpolation can be avoided by using '$$'.
-
-Options may be declared as required so that an error is raised if the user
-does not supply a value for the option.
-
-Options may be declared as secret so that their values are not leaked into
-log files::
-
-     opts = [
-        cfg.StrOpt('s3_store_access_key', secret=True),
-        cfg.StrOpt('s3_store_secret_key', secret=True),
-        ...
-     ]
-
-This module also contains a global instance of the ConfigOpts class
-in order to support a common usage pattern in OpenStack::
-
-    from tempest.openstack.common import cfg
-
-    opts = [
-        cfg.StrOpt('bind_host', default='0.0.0.0'),
-        cfg.IntOpt('bind_port', default=9292),
-    ]
-
-    CONF = cfg.CONF
-    CONF.register_opts(opts)
-
-    def start(server, app):
-        server.start(app, CONF.bind_port, CONF.bind_host)
-
-Positional command line arguments are supported via a 'positional' Opt
-constructor argument::
-
-    >>> conf = ConfigOpts()
-    >>> conf.register_cli_opt(MultiStrOpt('bar', positional=True))
-    True
-    >>> conf(['a', 'b'])
-    >>> conf.bar
-    ['a', 'b']
-
-It is also possible to use argparse "sub-parsers" to parse additional
-command line arguments using the SubCommandOpt class:
-
-    >>> def add_parsers(subparsers):
-    ...     list_action = subparsers.add_parser('list')
-    ...     list_action.add_argument('id')
-    ...
-    >>> conf = ConfigOpts()
-    >>> conf.register_cli_opt(SubCommandOpt('action', handler=add_parsers))
-    True
-    >>> conf(args=['list', '10'])
-    >>> conf.action.name, conf.action.id
-    ('list', '10')
-
-"""
-
-import argparse
-import collections
-import copy
-import functools
-import glob
-import os
-import string
-import sys
-
-from tempest.openstack.common import iniparser
-
-
-class Error(Exception):
-    """Base class for cfg exceptions."""
-
-    def __init__(self, msg=None):
-        self.msg = msg
-
-    def __str__(self):
-        return self.msg
-
-
-class ArgsAlreadyParsedError(Error):
-    """Raised if a CLI opt is registered after parsing."""
-
-    def __str__(self):
-        ret = "arguments already parsed"
-        if self.msg:
-            ret += ": " + self.msg
-        return ret
-
-
-class NoSuchOptError(Error, AttributeError):
-    """Raised if an opt which doesn't exist is referenced."""
-
-    def __init__(self, opt_name, group=None):
-        self.opt_name = opt_name
-        self.group = group
-
-    def __str__(self):
-        if self.group is None:
-            return "no such option: %s" % self.opt_name
-        else:
-            return "no such option in group %s: %s" % (self.group.name,
-                                                       self.opt_name)
-
-
-class NoSuchGroupError(Error):
-    """Raised if a group which doesn't exist is referenced."""
-
-    def __init__(self, group_name):
-        self.group_name = group_name
-
-    def __str__(self):
-        return "no such group: %s" % self.group_name
-
-
-class DuplicateOptError(Error):
-    """Raised if multiple opts with the same name are registered."""
-
-    def __init__(self, opt_name):
-        self.opt_name = opt_name
-
-    def __str__(self):
-        return "duplicate option: %s" % self.opt_name
-
-
-class RequiredOptError(Error):
-    """Raised if an option is required but no value is supplied by the user."""
-
-    def __init__(self, opt_name, group=None):
-        self.opt_name = opt_name
-        self.group = group
-
-    def __str__(self):
-        if self.group is None:
-            return "value required for option: %s" % self.opt_name
-        else:
-            return "value required for option: %s.%s" % (self.group.name,
-                                                         self.opt_name)
-
-
-class TemplateSubstitutionError(Error):
-    """Raised if an error occurs substituting a variable in an opt value."""
-
-    def __str__(self):
-        return "template substitution error: %s" % self.msg
-
-
-class ConfigFilesNotFoundError(Error):
-    """Raised if one or more config files are not found."""
-
-    def __init__(self, config_files):
-        self.config_files = config_files
-
-    def __str__(self):
-        return ('Failed to read some config files: %s' %
-                string.join(self.config_files, ','))
-
-
-class ConfigFileParseError(Error):
-    """Raised if there is an error parsing a config file."""
-
-    def __init__(self, config_file, msg):
-        self.config_file = config_file
-        self.msg = msg
-
-    def __str__(self):
-        return 'Failed to parse %s: %s' % (self.config_file, self.msg)
-
-
-class ConfigFileValueError(Error):
-    """Raised if a config file value does not match its opt type."""
-    pass
-
-
-def _fixpath(p):
-    """Apply tilde expansion and absolutization to a path."""
-    return os.path.abspath(os.path.expanduser(p))
-
-
-def _get_config_dirs(project=None):
-    """Return a list of directors where config files may be located.
-
-    :param project: an optional project name
-
-    If a project is specified, following directories are returned::
-
-      ~/.${project}/
-      ~/
-      /etc/${project}/
-      /etc/
-
-    Otherwise, these directories::
-
-      ~/
-      /etc/
-    """
-    cfg_dirs = [
-        _fixpath(os.path.join('~', '.' + project)) if project else None,
-        _fixpath('~'),
-        os.path.join('/etc', project) if project else None,
-        '/etc'
-    ]
-
-    return filter(bool, cfg_dirs)
-
-
-def _search_dirs(dirs, basename, extension=""):
-    """Search a list of directories for a given filename.
-
-    Iterator over the supplied directories, returning the first file
-    found with the supplied name and extension.
-
-    :param dirs: a list of directories
-    :param basename: the filename, e.g. 'glance-api'
-    :param extension: the file extension, e.g. '.conf'
-    :returns: the path to a matching file, or None
-    """
-    for d in dirs:
-        path = os.path.join(d, '%s%s' % (basename, extension))
-        if os.path.exists(path):
-            return path
-
-
-def find_config_files(project=None, prog=None, extension='.conf'):
-    """Return a list of default configuration files.
-
-    :param project: an optional project name
-    :param prog: the program name, defaulting to the basename of sys.argv[0]
-    :param extension: the type of the config file
-
-    We default to two config files: [${project}.conf, ${prog}.conf]
-
-    And we look for those config files in the following directories::
-
-      ~/.${project}/
-      ~/
-      /etc/${project}/
-      /etc/
-
-    We return an absolute path for (at most) one of each the default config
-    files, for the topmost directory it exists in.
-
-    For example, if project=foo, prog=bar and /etc/foo/foo.conf, /etc/bar.conf
-    and ~/.foo/bar.conf all exist, then we return ['/etc/foo/foo.conf',
-    '~/.foo/bar.conf']
-
-    If no project name is supplied, we only look for ${prog.conf}.
-    """
-    if prog is None:
-        prog = os.path.basename(sys.argv[0])
-
-    cfg_dirs = _get_config_dirs(project)
-
-    config_files = []
-    if project:
-        config_files.append(_search_dirs(cfg_dirs, project, extension))
-    config_files.append(_search_dirs(cfg_dirs, prog, extension))
-
-    return filter(bool, config_files)
-
-
-def _is_opt_registered(opts, opt):
-    """Check whether an opt with the same name is already registered.
-
-    The same opt may be registered multiple times, with only the first
-    registration having any effect. However, it is an error to attempt
-    to register a different opt with the same name.
-
-    :param opts: the set of opts already registered
-    :param opt: the opt to be registered
-    :returns: True if the opt was previously registered, False otherwise
-    :raises: DuplicateOptError if a naming conflict is detected
-    """
-    if opt.dest in opts:
-        if opts[opt.dest]['opt'] != opt:
-            raise DuplicateOptError(opt.name)
-        return True
-    else:
-        return False
-
-
-def set_defaults(opts, **kwargs):
-    for opt in opts:
-        if opt.dest in kwargs:
-            opt.default = kwargs[opt.dest]
-            break
-
-
-class Opt(object):
-
-    """Base class for all configuration options.
-
-    An Opt object has no public methods, but has a number of public string
-    properties:
-
-      name:
-        the name of the option, which may include hyphens
-      dest:
-        the (hyphen-less) ConfigOpts property which contains the option value
-      short:
-        a single character CLI option name
-      default:
-        the default value of the option
-      positional:
-        True if the option is a positional CLI argument
-      metavar:
-        the name shown as the argument to a CLI option in --help output
-      help:
-        an string explaining how the options value is used
-    """
-    multi = False
-
-    def __init__(self, name, dest=None, short=None, default=None,
-                 positional=False, metavar=None, help=None,
-                 secret=False, required=False, deprecated_name=None):
-        """Construct an Opt object.
-
-        The only required parameter is the option's name. However, it is
-        common to also supply a default and help string for all options.
-
-        :param name: the option's name
-        :param dest: the name of the corresponding ConfigOpts property
-        :param short: a single character CLI option name
-        :param default: the default value of the option
-        :param positional: True if the option is a positional CLI argument
-        :param metavar: the option argument to show in --help
-        :param help: an explanation of how the option is used
-        :param secret: true iff the value should be obfuscated in log output
-        :param required: true iff a value must be supplied for this option
-        :param deprecated_name: deprecated name option.  Acts like an alias
-        """
-        self.name = name
-        if dest is None:
-            self.dest = self.name.replace('-', '_')
-        else:
-            self.dest = dest
-        self.short = short
-        self.default = default
-        self.positional = positional
-        self.metavar = metavar
-        self.help = help
-        self.secret = secret
-        self.required = required
-        if deprecated_name is not None:
-            self.deprecated_name = deprecated_name.replace('-', '_')
-        else:
-            self.deprecated_name = None
-
-    def __ne__(self, another):
-        return vars(self) != vars(another)
-
-    def _get_from_config_parser(self, cparser, section):
-        """Retrieves the option value from a MultiConfigParser object.
-
-        This is the method ConfigOpts uses to look up the option value from
-        config files. Most opt types override this method in order to perform
-        type appropriate conversion of the returned value.
-
-        :param cparser: a ConfigParser object
-        :param section: a section name
-        """
-        return self._cparser_get_with_deprecated(cparser, section)
-
-    def _cparser_get_with_deprecated(self, cparser, section):
-        """If cannot find option as dest try deprecated_name alias."""
-        if self.deprecated_name is not None:
-            return cparser.get(section, [self.dest, self.deprecated_name])
-        return cparser.get(section, [self.dest])
-
-    def _add_to_cli(self, parser, group=None):
-        """Makes the option available in the command line interface.
-
-        This is the method ConfigOpts uses to add the opt to the CLI interface
-        as appropriate for the opt type. Some opt types may extend this method,
-        others may just extend the helper methods it uses.
-
-        :param parser: the CLI option parser
-        :param group: an optional OptGroup object
-        """
-        container = self._get_argparse_container(parser, group)
-        kwargs = self._get_argparse_kwargs(group)
-        prefix = self._get_argparse_prefix('', group)
-        self._add_to_argparse(container, self.name, self.short, kwargs, prefix,
-                              self.positional, self.deprecated_name)
-
-    def _add_to_argparse(self, container, name, short, kwargs, prefix='',
-                         positional=False, deprecated_name=None):
-        """Add an option to an argparse parser or group.
-
-        :param container: an argparse._ArgumentGroup object
-        :param name: the opt name
-        :param short: the short opt name
-        :param kwargs: the keyword arguments for add_argument()
-        :param prefix: an optional prefix to prepend to the opt name
-        :param position: whether the optional is a positional CLI argument
-        :raises: DuplicateOptError if a naming confict is detected
-        """
-        def hyphen(arg):
-            return arg if not positional else ''
-
-        args = [hyphen('--') + prefix + name]
-        if short:
-            args.append(hyphen('-') + short)
-        if deprecated_name:
-            args.append(hyphen('--') + prefix + deprecated_name)
-
-        try:
-            container.add_argument(*args, **kwargs)
-        except argparse.ArgumentError as e:
-            raise DuplicateOptError(e)
-
-    def _get_argparse_container(self, parser, group):
-        """Returns an argparse._ArgumentGroup.
-
-        :param parser: an argparse.ArgumentParser
-        :param group: an (optional) OptGroup object
-        :returns: an argparse._ArgumentGroup if group is given, else parser
-        """
-        if group is not None:
-            return group._get_argparse_group(parser)
-        else:
-            return parser
-
-    def _get_argparse_kwargs(self, group, **kwargs):
-        """Build a dict of keyword arguments for argparse's add_argument().
-
-        Most opt types extend this method to customize the behaviour of the
-        options added to argparse.
-
-        :param group: an optional group
-        :param kwargs: optional keyword arguments to add to
-        :returns: a dict of keyword arguments
-        """
-        if not self.positional:
-            dest = self.dest
-            if group is not None:
-                dest = group.name + '_' + dest
-            kwargs['dest'] = dest
-        else:
-            kwargs['nargs'] = '?'
-        kwargs.update({'default': None,
-                       'metavar': self.metavar,
-                       'help': self.help, })
-        return kwargs
-
-    def _get_argparse_prefix(self, prefix, group):
-        """Build a prefix for the CLI option name, if required.
-
-        CLI options in a group are prefixed with the group's name in order
-        to avoid conflicts between similarly named options in different
-        groups.
-
-        :param prefix: an existing prefix to append to (e.g. 'no' or '')
-        :param group: an optional OptGroup object
-        :returns: a CLI option prefix including the group name, if appropriate
-        """
-        if group is not None:
-            return group.name + '-' + prefix
-        else:
-            return prefix
-
-
-class StrOpt(Opt):
-    """
-    String opts do not have their values transformed and are returned as
-    str objects.
-    """
-    pass
-
-
-class BoolOpt(Opt):
-
-    """
-    Bool opts are set to True or False on the command line using --optname or
-    --noopttname respectively.
-
-    In config files, boolean values are case insensitive and can be set using
-    1/0, yes/no, true/false or on/off.
-    """
-
-    _boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
-                       '0': False, 'no': False, 'false': False, 'off': False}
-
-    def __init__(self, *args, **kwargs):
-        if 'positional' in kwargs:
-            raise ValueError('positional boolean args not supported')
-        super(BoolOpt, self).__init__(*args, **kwargs)
-
-    def _get_from_config_parser(self, cparser, section):
-        """Retrieve the opt value as a boolean from ConfigParser."""
-        def convert_bool(v):
-            value = self._boolean_states.get(v.lower())
-            if value is None:
-                raise ValueError('Unexpected boolean value %r' % v)
-
-            return value
-
-        return [convert_bool(v) for v in
-                self._cparser_get_with_deprecated(cparser, section)]
-
-    def _add_to_cli(self, parser, group=None):
-        """Extends the base class method to add the --nooptname option."""
-        super(BoolOpt, self)._add_to_cli(parser, group)
-        self._add_inverse_to_argparse(parser, group)
-
-    def _add_inverse_to_argparse(self, parser, group):
-        """Add the --nooptname option to the option parser."""
-        container = self._get_argparse_container(parser, group)
-        kwargs = self._get_argparse_kwargs(group, action='store_false')
-        prefix = self._get_argparse_prefix('no', group)
-        kwargs["help"] = "The inverse of --" + self.name
-        self._add_to_argparse(container, self.name, None, kwargs, prefix,
-                              self.positional, self.deprecated_name)
-
-    def _get_argparse_kwargs(self, group, action='store_true', **kwargs):
-        """Extends the base argparse keyword dict for boolean options."""
-
-        kwargs = super(BoolOpt, self)._get_argparse_kwargs(group, **kwargs)
-
-        # metavar has no effect for BoolOpt
-        if 'metavar' in kwargs:
-            del kwargs['metavar']
-
-        if action != 'store_true':
-            action = 'store_false'
-
-        kwargs['action'] = action
-
-        return kwargs
-
-
-class IntOpt(Opt):
-
-    """Int opt values are converted to integers using the int() builtin."""
-
-    def _get_from_config_parser(self, cparser, section):
-        """Retrieve the opt value as a integer from ConfigParser."""
-        return [int(v) for v in self._cparser_get_with_deprecated(cparser,
-                section)]
-
-    def _get_argparse_kwargs(self, group, **kwargs):
-        """Extends the base argparse keyword dict for integer options."""
-        return super(IntOpt,
-                     self)._get_argparse_kwargs(group, type=int, **kwargs)
-
-
-class FloatOpt(Opt):
-
-    """Float opt values are converted to floats using the float() builtin."""
-
-    def _get_from_config_parser(self, cparser, section):
-        """Retrieve the opt value as a float from ConfigParser."""
-        return [float(v) for v in
-                self._cparser_get_with_deprecated(cparser, section)]
-
-    def _get_argparse_kwargs(self, group, **kwargs):
-        """Extends the base argparse keyword dict for float options."""
-        return super(FloatOpt, self)._get_argparse_kwargs(group,
-                                                          type=float, **kwargs)
-
-
-class ListOpt(Opt):
-
-    """
-    List opt values are simple string values separated by commas. The opt value
-    is a list containing these strings.
-    """
-
-    class _StoreListAction(argparse.Action):
-        """
-        An argparse action for parsing an option value into a list.
-        """
-        def __call__(self, parser, namespace, values, option_string=None):
-            if values is not None:
-                values = [a.strip() for a in values.split(',')]
-            setattr(namespace, self.dest, values)
-
-    def _get_from_config_parser(self, cparser, section):
-        """Retrieve the opt value as a list from ConfigParser."""
-        return [[a.strip() for a in v.split(',')] for v in
-                self._cparser_get_with_deprecated(cparser, section)]
-
-    def _get_argparse_kwargs(self, group, **kwargs):
-        """Extends the base argparse keyword dict for list options."""
-        return Opt._get_argparse_kwargs(self,
-                                        group,
-                                        action=ListOpt._StoreListAction,
-                                        **kwargs)
-
-
-class MultiStrOpt(Opt):
-
-    """
-    Multistr opt values are string opts which may be specified multiple times.
-    The opt value is a list containing all the string values specified.
-    """
-    multi = True
-
-    def _get_argparse_kwargs(self, group, **kwargs):
-        """Extends the base argparse keyword dict for multi str options."""
-        kwargs = super(MultiStrOpt, self)._get_argparse_kwargs(group)
-        if not self.positional:
-            kwargs['action'] = 'append'
-        else:
-            kwargs['nargs'] = '*'
-        return kwargs
-
-    def _cparser_get_with_deprecated(self, cparser, section):
-        """If cannot find option as dest try deprecated_name alias."""
-        if self.deprecated_name is not None:
-            return cparser.get(section, [self.dest, self.deprecated_name],
-                               multi=True)
-        return cparser.get(section, [self.dest], multi=True)
-
-
-class SubCommandOpt(Opt):
-
-    """
-    Sub-command options allow argparse sub-parsers to be used to parse
-    additional command line arguments.
-
-    The handler argument to the SubCommandOpt contructor is a callable
-    which is supplied an argparse subparsers object. Use this handler
-    callable to add sub-parsers.
-
-    The opt value is SubCommandAttr object with the name of the chosen
-    sub-parser stored in the 'name' attribute and the values of other
-    sub-parser arguments available as additional attributes.
-    """
-
-    def __init__(self, name, dest=None, handler=None,
-                 title=None, description=None, help=None):
-        """Construct an sub-command parsing option.
-
-        This behaves similarly to other Opt sub-classes but adds a
-        'handler' argument. The handler is a callable which is supplied
-        an subparsers object when invoked. The add_parser() method on
-        this subparsers object can be used to register parsers for
-        sub-commands.
-
-        :param name: the option's name
-        :param dest: the name of the corresponding ConfigOpts property
-        :param title: title of the sub-commands group in help output
-        :param description: description of the group in help output
-        :param help: a help string giving an overview of available sub-commands
-        """
-        super(SubCommandOpt, self).__init__(name, dest=dest, help=help)
-        self.handler = handler
-        self.title = title
-        self.description = description
-
-    def _add_to_cli(self, parser, group=None):
-        """Add argparse sub-parsers and invoke the handler method."""
-        dest = self.dest
-        if group is not None:
-            dest = group.name + '_' + dest
-
-        subparsers = parser.add_subparsers(dest=dest,
-                                           title=self.title,
-                                           description=self.description,
-                                           help=self.help)
-
-        if self.handler is not None:
-            self.handler(subparsers)
-
-
-class OptGroup(object):
-
-    """
-    Represents a group of opts.
-
-    CLI opts in the group are automatically prefixed with the group name.
-
-    Each group corresponds to a section in config files.
-
-    An OptGroup object has no public methods, but has a number of public string
-    properties:
-
-      name:
-        the name of the group
-      title:
-        the group title as displayed in --help
-      help:
-        the group description as displayed in --help
-    """
-
-    def __init__(self, name, title=None, help=None):
-        """Constructs an OptGroup object.
-
-        :param name: the group name
-        :param title: the group title for --help
-        :param help: the group description for --help
-        """
-        self.name = name
-        if title is None:
-            self.title = "%s options" % title
-        else:
-            self.title = title
-        self.help = help
-
-        self._opts = {}  # dict of dicts of (opt:, override:, default:)
-        self._argparse_group = None
-
-    def _register_opt(self, opt, cli=False):
-        """Add an opt to this group.
-
-        :param opt: an Opt object
-        :param cli: whether this is a CLI option
-        :returns: False if previously registered, True otherwise
-        :raises: DuplicateOptError if a naming conflict is detected
-        """
-        if _is_opt_registered(self._opts, opt):
-            return False
-
-        self._opts[opt.dest] = {'opt': opt, 'cli': cli}
-
-        return True
-
-    def _unregister_opt(self, opt):
-        """Remove an opt from this group.
-
-        :param opt: an Opt object
-        """
-        if opt.dest in self._opts:
-            del self._opts[opt.dest]
-
-    def _get_argparse_group(self, parser):
-        """Build an argparse._ArgumentGroup for this group."""
-        if self._argparse_group is None:
-            self._argparse_group = parser.add_argument_group(self.title,
-                                                             self.help)
-        return self._argparse_group
-
-    def _clear(self):
-        """Clear this group's option parsing state."""
-        self._argparse_group = None
-
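A brief sketch (illustrative names) of how a group maps onto a config-file section and a prefixed attribute:

    conf = ConfigOpts()
    rabbit = OptGroup(name='rabbit', title='RabbitMQ options')
    conf.register_group(rabbit)
    conf.register_opt(StrOpt('host', default='localhost'), group=rabbit)
    conf([])
    # read from a [rabbit] section in config files, exposed as conf.rabbit.host
    assert conf.rabbit.host == 'localhost'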
-
-class ParseError(iniparser.ParseError):
-    def __init__(self, msg, lineno, line, filename):
-        super(ParseError, self).__init__(msg, lineno, line)
-        self.filename = filename
-
-    def __str__(self):
-        return 'at %s:%d, %s: %r' % (self.filename, self.lineno,
-                                     self.msg, self.line)
-
-
-class ConfigParser(iniparser.BaseParser):
-    def __init__(self, filename, sections):
-        super(ConfigParser, self).__init__()
-        self.filename = filename
-        self.sections = sections
-        self.section = None
-
-    def parse(self):
-        with open(self.filename) as f:
-            return super(ConfigParser, self).parse(f)
-
-    def new_section(self, section):
-        self.section = section
-        self.sections.setdefault(self.section, {})
-
-    def assignment(self, key, value):
-        if not self.section:
-            raise self.error_no_section()
-
-        self.sections[self.section].setdefault(key, [])
-        self.sections[self.section][key].append('\n'.join(value))
-
-    def parse_exc(self, msg, lineno, line=None):
-        return ParseError(msg, lineno, line, self.filename)
-
-    def error_no_section(self):
-        return self.parse_exc('Section must be started before assignment',
-                              self.lineno)
-
-
-class MultiConfigParser(object):
-    def __init__(self):
-        self.parsed = []
-
-    def read(self, config_files):
-        read_ok = []
-
-        for filename in config_files:
-            sections = {}
-            parser = ConfigParser(filename, sections)
-
-            try:
-                parser.parse()
-            except IOError:
-                continue
-            self.parsed.insert(0, sections)
-            read_ok.append(filename)
-
-        return read_ok
-
-    def get(self, section, names, multi=False):
-        rvalue = []
-        for sections in self.parsed:
-            if section not in sections:
-                continue
-            for name in names:
-                if name in sections[section]:
-                    if multi:
-                        rvalue = sections[section][name] + rvalue
-                    else:
-                        return sections[section][name]
-        if multi and rvalue != []:
-            return rvalue
-        raise KeyError
-
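A short sketch of the resulting precedence (file paths are hypothetical): files read later are inserted at the front of self.parsed, so their values are found first, and callers take the last element of the returned list for non-multi options.

    parser = MultiConfigParser()
    parser.read(['/etc/app/app.conf', '/etc/app/local.conf'])
    values = parser.get('DEFAULT', ['verbose'])  # values from local.conf win
    verbose = values[-1]                         # last assignment in that file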
-
-class ConfigOpts(collections.Mapping):
-
-    """
-    Config options which may be set on the command line or in config files.
-
-    ConfigOpts is a configuration option manager with APIs for registering
-    option schemas, grouping options, parsing option values and retrieving
-    the values of options.
-    """
-
-    def __init__(self):
-        """Construct a ConfigOpts object."""
-        self._opts = {}  # dict of dicts of (opt:, override:, default:)
-        self._groups = {}
-
-        self._args = None
-
-        self._oparser = None
-        self._cparser = None
-        self._cli_values = {}
-        self.__cache = {}
-        self._config_opts = []
-
-    def _pre_setup(self, project, prog, version, usage, default_config_files):
-        """Initialize a ConfigCliParser object for option parsing."""
-
-        if prog is None:
-            prog = os.path.basename(sys.argv[0])
-
-        if default_config_files is None:
-            default_config_files = find_config_files(project, prog)
-
-        self._oparser = argparse.ArgumentParser(prog=prog, usage=usage)
-        self._oparser.add_argument('--version',
-                                   action='version',
-                                   version=version)
-
-        return prog, default_config_files
-
-    def _setup(self, project, prog, version, usage, default_config_files):
-        """Initialize a ConfigOpts object for option parsing."""
-
-        self._config_opts = [
-            MultiStrOpt('config-file',
-                        default=default_config_files,
-                        metavar='PATH',
-                        help='Path to a config file to use. Multiple config '
-                             'files can be specified, with values in later '
-                             'files taking precedence. The default files '
-                             'used are: %s' % (default_config_files, )),
-            StrOpt('config-dir',
-                   metavar='DIR',
-                   help='Path to a config directory to pull *.conf '
-                        'files from. This file set is sorted, so as to '
-                        'provide a predictable parse order if individual '
-                        'options are over-ridden. The set is parsed after '
-                        'the file(s), if any, specified via --config-file, '
-                        'hence over-ridden options in the directory take '
-                        'precedence.'),
-        ]
-        self.register_cli_opts(self._config_opts)
-
-        self.project = project
-        self.prog = prog
-        self.version = version
-        self.usage = usage
-        self.default_config_files = default_config_files
-
-    def __clear_cache(f):
-        @functools.wraps(f)
-        def __inner(self, *args, **kwargs):
-            if kwargs.pop('clear_cache', True):
-                self.__cache.clear()
-            return f(self, *args, **kwargs)
-
-        return __inner
-
-    def __call__(self,
-                 args=None,
-                 project=None,
-                 prog=None,
-                 version=None,
-                 usage=None,
-                 default_config_files=None):
-        """Parse command line arguments and config files.
-
-        Calling a ConfigOpts object causes the supplied command line arguments
-        and config files to be parsed, causing opt values to be made available
-        as attributes of the object.
-
-        The object may be called multiple times, each time causing the previous
-        set of values to be overwritten.
-
-        Automatically registers the --config-file option with either a supplied
-        list of default config files, or a list from find_config_files().
-
-        If the --config-dir option is set, any *.conf files from this
-        directory are pulled in, after all the file(s) specified by the
-        --config-file option.
-
-        :param args: command line arguments (defaults to sys.argv[1:])
-        :param project: the toplevel project name, used to locate config files
-        :param prog: the name of the program (defaults to sys.argv[0] basename)
-        :param version: the program version (for --version)
-        :param usage: a usage string (%prog will be expanded)
-        :param default_config_files: config files to use by default
-        :returns: the list of arguments left over after parsing options
-        :raises: SystemExit, ConfigFilesNotFoundError, ConfigFileParseError,
-                 RequiredOptError, DuplicateOptError
-        """
-
-        self.clear()
-
-        prog, default_config_files = self._pre_setup(project,
-                                                     prog,
-                                                     version,
-                                                     usage,
-                                                     default_config_files)
-
-        self._setup(project, prog, version, usage, default_config_files)
-
-        self._cli_values = self._parse_cli_opts(args)
-
-        self._parse_config_files()
-
-        self._check_required_opts()
-
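A typical flow, as a sketch with illustrative option and project names: register the schema, call the object to parse, then read values as attributes.

    conf = ConfigOpts()
    conf.register_cli_opt(StrOpt('host', default='127.0.0.1'))
    conf(args=['--host', '10.0.0.5'], project='myapp')
    assert conf.host == '10.0.0.5'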
-    def __getattr__(self, name):
-        """Look up an option value and perform string substitution.
-
-        :param name: the opt name (or 'dest', more precisely)
-        :returns: the option value (after string substitution) or a GroupAttr
-        :raises: NoSuchOptError,ConfigFileValueError,TemplateSubstitutionError
-        """
-        return self._get(name)
-
-    def __getitem__(self, key):
-        """Look up an option value and perform string substitution."""
-        return self.__getattr__(key)
-
-    def __contains__(self, key):
-        """Return True if key is the name of a registered opt or group."""
-        return key in self._opts or key in self._groups
-
-    def __iter__(self):
-        """Iterate over all registered opt and group names."""
-        for key in self._opts.keys() + self._groups.keys():
-            yield key
-
-    def __len__(self):
-        """Return the number of options and option groups."""
-        return len(self._opts) + len(self._groups)
-
-    def reset(self):
-        """Clear the object state and unset overrides and defaults."""
-        self._unset_defaults_and_overrides()
-        self.clear()
-
-    @__clear_cache
-    def clear(self):
-        """Clear the state of the object to before it was called.
-
-        Any sub-parsers added through a SubCommandOpt handler will also be
-        removed as a side-effect of this method.
-        """
-        self._args = None
-        self._cli_values.clear()
-        self._oparser = argparse.ArgumentParser()
-        self._cparser = None
-        self.unregister_opts(self._config_opts)
-        for group in self._groups.values():
-            group._clear()
-
-    @__clear_cache
-    def register_opt(self, opt, group=None, cli=False):
-        """Register an option schema.
-
-        Registering an option schema makes any option value which is previously
-        or subsequently parsed from the command line or config files available
-        as an attribute of this object.
-
-        :param opt: an instance of an Opt sub-class
-        :param cli: whether this is a CLI option
-        :param group: an optional OptGroup object or group name
-        :return: False if the opt was already registered, True otherwise
-        :raises: DuplicateOptError
-        """
-        if group is not None:
-            group = self._get_group(group, autocreate=True)
-            return group._register_opt(opt, cli)
-
-        if _is_opt_registered(self._opts, opt):
-            return False
-
-        self._opts[opt.dest] = {'opt': opt, 'cli': cli}
-
-        return True
-
-    @__clear_cache
-    def register_opts(self, opts, group=None):
-        """Register multiple option schemas at once."""
-        for opt in opts:
-            self.register_opt(opt, group, clear_cache=False)
-
-    @__clear_cache
-    def register_cli_opt(self, opt, group=None):
-        """Register a CLI option schema.
-
-        CLI option schemas must be registered before the command line and
-        config files are parsed. This is to ensure that all CLI options are
-        shown in --help and option validation works as expected.
-
-        :param opt: an instance of an Opt sub-class
-        :param group: an optional OptGroup object or group name
-        :return: False if the opt was already registered, True otherwise
-        :raises: DuplicateOptError, ArgsAlreadyParsedError
-        """
-        if self._args is not None:
-            raise ArgsAlreadyParsedError("cannot register CLI option")
-
-        return self.register_opt(opt, group, cli=True, clear_cache=False)
-
-    @__clear_cache
-    def register_cli_opts(self, opts, group=None):
-        """Register multiple CLI option schemas at once."""
-        for opt in opts:
-            self.register_cli_opt(opt, group, clear_cache=False)
-
-    def register_group(self, group):
-        """Register an option group.
-
-        An option group must be registered before options can be registered
-        with the group.
-
-        :param group: an OptGroup object
-        """
-        if group.name in self._groups:
-            return
-
-        self._groups[group.name] = copy.copy(group)
-
-    @__clear_cache
-    def unregister_opt(self, opt, group=None):
-        """Unregister an option.
-
-        :param opt: an Opt object
-        :param group: an optional OptGroup object or group name
-        :raises: ArgsAlreadyParsedError, NoSuchGroupError
-        """
-        if self._args is not None:
-            raise ArgsAlreadyParsedError("reset before unregistering options")
-
-        if group is not None:
-            self._get_group(group)._unregister_opt(opt)
-        elif opt.dest in self._opts:
-            del self._opts[opt.dest]
-
-    @__clear_cache
-    def unregister_opts(self, opts, group=None):
-        """Unregister multiple CLI option schemas at once."""
-        for opt in opts:
-            self.unregister_opt(opt, group, clear_cache=False)
-
-    def import_opt(self, name, module_str, group=None):
-        """Import an option definition from a module.
-
-        Import a module and check that a given option is registered.
-
-        This is intended for use with global configuration objects
-        like cfg.CONF where modules commonly register options with
-        CONF at module load time. If one module requires an option
-        defined by another module it can use this method to explicitly
-        declare the dependency.
-
-        :param name: the name/dest of the opt
-        :param module_str: the name of a module to import
-        :param group: an optional OptGroup object or group name
-        :raises: NoSuchOptError, NoSuchGroupError
-        """
-        __import__(module_str)
-        self._get_opt_info(name, group)
-
-    def import_group(self, group, module_str):
-        """Import an option group from a module.
-
-        Import a module and check that a given option group is registered.
-
-        This is intended for use with global configuration objects
-        like cfg.CONF where modules commonly register options with
-        CONF at module load time. If one module requires an option group
-        defined by another module it can use this method to explicitly
-        declare the dependency.
-
-        :param group: an OptGroup object or group name
-        :param module_str: the name of a module to import
-        :raises: ImportError, NoSuchGroupError
-        """
-        __import__(module_str)
-        self._get_group(group)
-
-    @__clear_cache
-    def set_override(self, name, override, group=None):
-        """Override an opt value.
-
-        Override the command line, config file and default values of a
-        given option.
-
-        :param name: the name/dest of the opt
-        :param override: the override value
-        :param group: an optional OptGroup object or group name
-        :raises: NoSuchOptError, NoSuchGroupError
-        """
-        opt_info = self._get_opt_info(name, group)
-        opt_info['override'] = override
-
-    @__clear_cache
-    def set_default(self, name, default, group=None):
-        """Override an opt's default value.
-
-        Override the default value of given option. A command line or
-        config file value will still take precedence over this default.
-
-        :param name: the name/dest of the opt
-        :param default: the default value
-        :param group: an optional OptGroup object or group name
-        :raises: NoSuchOptError, NoSuchGroupError
-        """
-        opt_info = self._get_opt_info(name, group)
-        opt_info['default'] = default
-
-    @__clear_cache
-    def clear_override(self, name, group=None):
-        """Clear an override an opt value.
-
-        Clear a previously set override of the command line, config file
-        and default values of a given option.
-
-        :param name: the name/dest of the opt
-        :param group: an optional OptGroup object or group name
-        :raises: NoSuchOptError, NoSuchGroupError
-        """
-        opt_info = self._get_opt_info(name, group)
-        opt_info.pop('override', None)
-
-    @__clear_cache
-    def clear_default(self, name, group=None):
-        """Clear an override an opt's default value.
-
-        Clear a previously set override of the default value of given option.
-
-        :param name: the name/dest of the opt
-        :param group: an optional OptGroup object or group name
-        :raises: NoSuchOptError, NoSuchGroupError
-        """
-        opt_info = self._get_opt_info(name, group)
-        opt_info.pop('default', None)
-
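Continuing the 'host' sketch above (values illustrative), overrides beat parsed command line and config file values, which in turn beat a default set here, which beats the Opt's own default:

    conf.set_default('host', '0.0.0.0')      # replaces the schema default
    conf.set_override('host', '192.0.2.1')   # beats CLI and config files
    assert conf.host == '192.0.2.1'
    conf.clear_override('host')
    assert conf.host == '10.0.0.5'           # back to the parsed CLI value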
-    def _all_opt_infos(self):
-        """A generator function for iteration opt infos."""
-        for info in self._opts.values():
-            yield info, None
-        for group in self._groups.values():
-            for info in group._opts.values():
-                yield info, group
-
-    def _all_cli_opts(self):
-        """A generator function for iterating CLI opts."""
-        for info, group in self._all_opt_infos():
-            if info['cli']:
-                yield info['opt'], group
-
-    def _unset_defaults_and_overrides(self):
-        """Unset any default or override on all options."""
-        for info, group in self._all_opt_infos():
-            info.pop('default', None)
-            info.pop('override', None)
-
-    def find_file(self, name):
-        """Locate a file located alongside the config files.
-
-        Search for a file with the supplied basename in the directories
-        which we have already loaded config files from and other known
-        configuration directories.
-
-        The directory, if any, supplied by the config_dir option is
-        searched first. Then the config_file option is iterated over
-        and each of the base directories of the config_files values
-        are searched. Failing both of these, the standard directories
-        searched by the module level find_config_files() function are
-        used. The first matching file is returned.
-
-        :param name: the filename, e.g. 'policy.json'
-        :returns: the path to a matching file, or None
-        """
-        dirs = []
-        if self.config_dir:
-            dirs.append(_fixpath(self.config_dir))
-
-        for cf in reversed(self.config_file):
-            dirs.append(os.path.dirname(_fixpath(cf)))
-
-        dirs.extend(_get_config_dirs(self.project))
-
-        return _search_dirs(dirs, name)
-
-    def log_opt_values(self, logger, lvl):
-        """Log the value of all registered opts.
-
-        It's often useful for an app to log its configuration to a log file at
-        startup for debugging. This method dumps the entire config state to
-        the supplied logger at a given log level.
-
-        :param logger: a logging.Logger object
-        :param lvl: the log level (e.g. logging.DEBUG) arg to logger.log()
-        """
-        logger.log(lvl, "*" * 80)
-        logger.log(lvl, "Configuration options gathered from:")
-        logger.log(lvl, "command line args: %s", self._args)
-        logger.log(lvl, "config files: %s", self.config_file)
-        logger.log(lvl, "=" * 80)
-
-        def _sanitize(opt, value):
-            """Obfuscate values of options declared secret."""
-            return value if not opt.secret else '*' * len(str(value))
-
-        for opt_name in sorted(self._opts):
-            opt = self._get_opt_info(opt_name)['opt']
-            logger.log(lvl, "%-30s = %s", opt_name,
-                       _sanitize(opt, getattr(self, opt_name)))
-
-        for group_name in self._groups:
-            group_attr = self.GroupAttr(self, self._get_group(group_name))
-            for opt_name in sorted(self._groups[group_name]._opts):
-                opt = self._get_opt_info(opt_name, group_name)['opt']
-                logger.log(lvl, "%-30s = %s",
-                           "%s.%s" % (group_name, opt_name),
-                           _sanitize(opt, getattr(group_attr, opt_name)))
-
-        logger.log(lvl, "*" * 80)
-
-    def print_usage(self, file=None):
-        """Print the usage message for the current program."""
-        self._oparser.print_usage(file)
-
-    def print_help(self, file=None):
-        """Print the help message for the current program."""
-        self._oparser.print_help(file)
-
-    def _get(self, name, group=None):
-        if isinstance(group, OptGroup):
-            key = (group.name, name)
-        else:
-            key = (group, name)
-        try:
-            return self.__cache[key]
-        except KeyError:
-            value = self._substitute(self._do_get(name, group))
-            self.__cache[key] = value
-            return value
-
-    def _do_get(self, name, group=None):
-        """Look up an option value.
-
-        :param name: the opt name (or 'dest', more precisely)
-        :param group: an OptGroup
-        :returns: the option value, or a GroupAttr object
-        :raises: NoSuchOptError, NoSuchGroupError, ConfigFileValueError,
-                 TemplateSubstitutionError
-        """
-        if group is None and name in self._groups:
-            return self.GroupAttr(self, self._get_group(name))
-
-        info = self._get_opt_info(name, group)
-        opt = info['opt']
-
-        if isinstance(opt, SubCommandOpt):
-            return self.SubCommandAttr(self, group, opt.dest)
-
-        if 'override' in info:
-            return info['override']
-
-        values = []
-        if self._cparser is not None:
-            section = group.name if group is not None else 'DEFAULT'
-            try:
-                value = opt._get_from_config_parser(self._cparser, section)
-            except KeyError:
-                pass
-            except ValueError as ve:
-                raise ConfigFileValueError(str(ve))
-            else:
-                if not opt.multi:
-                    # No need to continue since the last value wins
-                    return value[-1]
-                values.extend(value)
-
-        name = name if group is None else group.name + '_' + name
-        value = self._cli_values.get(name)
-        if value is not None:
-            if not opt.multi:
-                return value
-
-            # argparse ignores default=None for nargs='*'
-            if opt.positional and not value:
-                value = opt.default
-
-            return value + values
-
-        if values:
-            return values
-
-        if 'default' in info:
-            return info['default']
-
-        return opt.default
-
-    def _substitute(self, value):
-        """Perform string template substitution.
-
-        Substitute any template variables (e.g. $foo, ${bar}) in the supplied
-        string value(s) with opt values.
-
-        :param value: the string value, or list of string values
-        :returns: the substituted string(s)
-        """
-        if isinstance(value, list):
-            return [self._substitute(i) for i in value]
-        elif isinstance(value, str):
-            tmpl = string.Template(value)
-            return tmpl.safe_substitute(self.StrSubWrapper(self))
-        else:
-            return value
-
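Continuing the sketch, a default referencing another opt (names illustrative) is expanded when the value is read:

    conf.register_opt(StrOpt('state_path', default='/var/lib/myapp'))
    conf.register_opt(StrOpt('lock_path', default='$state_path/locks'))
    assert conf.lock_path == '/var/lib/myapp/locks'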
-    def _get_group(self, group_or_name, autocreate=False):
-        """Looks up a OptGroup object.
-
-        Helper function to return an OptGroup given a parameter which can
-        either be the group's name or an OptGroup object.
-
-        The OptGroup object returned is from the internal dict of OptGroup
-        objects, which will be a copy of any OptGroup object that users of
-        the API have access to.
-
-        :param group_or_name: the group's name or the OptGroup object itself
-        :param autocreate: whether to auto-create the group if it's not found
-        :raises: NoSuchGroupError
-        """
-        group = group_or_name if isinstance(group_or_name, OptGroup) else None
-        group_name = group.name if group else group_or_name
-
-        if group_name not in self._groups:
-            if group is not None or not autocreate:
-                raise NoSuchGroupError(group_name)
-
-            self.register_group(OptGroup(name=group_name))
-
-        return self._groups[group_name]
-
-    def _get_opt_info(self, opt_name, group=None):
-        """Return the (opt, override, default) dict for an opt.
-
-        :param opt_name: an opt name/dest
-        :param group: an optional group name or OptGroup object
-        :raises: NoSuchOptError, NoSuchGroupError
-        """
-        if group is None:
-            opts = self._opts
-        else:
-            group = self._get_group(group)
-            opts = group._opts
-
-        if opt_name not in opts:
-            raise NoSuchOptError(opt_name, group)
-
-        return opts[opt_name]
-
-    def _parse_config_files(self):
-        """Parse the config files from --config-file and --config-dir.
-
-        :raises: ConfigFilesNotFoundError, ConfigFileParseError
-        """
-        config_files = list(self.config_file)
-
-        if self.config_dir:
-            config_dir_glob = os.path.join(self.config_dir, '*.conf')
-            config_files += sorted(glob.glob(config_dir_glob))
-
-        config_files = [_fixpath(p) for p in config_files]
-
-        self._cparser = MultiConfigParser()
-
-        try:
-            read_ok = self._cparser.read(config_files)
-        except iniparser.ParseError as pe:
-            raise ConfigFileParseError(pe.filename, str(pe))
-
-        if read_ok != config_files:
-            not_read_ok = filter(lambda f: f not in read_ok, config_files)
-            raise ConfigFilesNotFoundError(not_read_ok)
-
-    def _check_required_opts(self):
-        """Check that all opts marked as required have values specified.
-
-        :raises: RequiredOptError
-        """
-        for info, group in self._all_opt_infos():
-            opt = info['opt']
-
-            if opt.required:
-                if 'default' in info or 'override' in info:
-                    continue
-
-                if self._get(opt.dest, group) is None:
-                    raise RequiredOptError(opt.name, group)
-
-    def _parse_cli_opts(self, args):
-        """Parse command line options.
-
-        Initializes the command line option parser and parses the supplied
-        command line arguments.
-
-        :param args: the command line arguments
-        :returns: a dict of parsed option values
-        :raises: SystemExit, DuplicateOptError
-
-        """
-        self._args = args
-
-        for opt, group in sorted(self._all_cli_opts()):
-            opt._add_to_cli(self._oparser, group)
-
-        return vars(self._oparser.parse_args(args))
-
-    class GroupAttr(collections.Mapping):
-
-        """
-        A helper class representing the option values of a group as a mapping
-        and attributes.
-        """
-
-        def __init__(self, conf, group):
-            """Construct a GroupAttr object.
-
-            :param conf: a ConfigOpts object
-            :param group: an OptGroup object
-            """
-            self._conf = conf
-            self._group = group
-
-        def __getattr__(self, name):
-            """Look up an option value and perform template substitution."""
-            return self._conf._get(name, self._group)
-
-        def __getitem__(self, key):
-            """Look up an option value and perform string substitution."""
-            return self.__getattr__(key)
-
-        def __contains__(self, key):
-            """Return True if key is the name of an opt in the group."""
-            return key in self._group._opts
-
-        def __iter__(self):
-            """Iterate over all registered opt names in the group."""
-            for key in self._group._opts.keys():
-                yield key
-
-        def __len__(self):
-            """Return the number of options in the group."""
-            return len(self._group._opts)
-
-    class SubCommandAttr(object):
-
-        """
-        A helper class representing the name and arguments of an argparse
-        sub-parser.
-        """
-
-        def __init__(self, conf, group, dest):
-            """Construct a SubCommandAttr object.
-
-            :param conf: a ConfigOpts object
-            :param group: an OptGroup object
-            :param dest: the name of the sub-parser
-            """
-            self._conf = conf
-            self._group = group
-            self._dest = dest
-
-        def __getattr__(self, name):
-            """Look up a sub-parser name or argument value."""
-            if name == 'name':
-                name = self._dest
-                if self._group is not None:
-                    name = self._group.name + '_' + name
-                return self._conf._cli_values[name]
-
-            if name in self._conf:
-                raise DuplicateOptError(name)
-
-            try:
-                return self._conf._cli_values[name]
-            except KeyError:
-                raise NoSuchOptError(name)
-
-    class StrSubWrapper(object):
-
-        """
-        A helper class exposing opt values as a dict for string substitution.
-        """
-
-        def __init__(self, conf):
-            """Construct a StrSubWrapper object.
-
-            :param conf: a ConfigOpts object
-            """
-            self.conf = conf
-
-        def __getitem__(self, key):
-            """Look up an opt value from the ConfigOpts object.
-
-            :param key: an opt name
-            :returns: an opt value
-            :raises: TemplateSubstitutionError if attribute is a group
-            """
-            value = getattr(self.conf, key)
-            if isinstance(value, self.conf.GroupAttr):
-                raise TemplateSubstitutionError(
-                    'substituting group %s not supported' % key)
-            return value
-
-
-CONF = ConfigOpts()
diff --git a/tempest/openstack/common/iniparser.py b/tempest/openstack/common/iniparser.py
deleted file mode 100644
index 9a8762a..0000000
--- a/tempest/openstack/common/iniparser.py
+++ /dev/null
@@ -1,130 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-
-class ParseError(Exception):
-    def __init__(self, message, lineno, line):
-        self.msg = message
-        self.line = line
-        self.lineno = lineno
-
-    def __str__(self):
-        return 'at line %d, %s: %r' % (self.lineno, self.msg, self.line)
-
-
-class BaseParser(object):
-    lineno = 0
-    parse_exc = ParseError
-
-    def _assignment(self, key, value):
-        self.assignment(key, value)
-        return None, []
-
-    def _get_section(self, line):
-        if line[-1] != ']':
-            return self.error_no_section_end_bracket(line)
-        if len(line) <= 2:
-            return self.error_no_section_name(line)
-
-        return line[1:-1]
-
-    def _split_key_value(self, line):
-        colon = line.find(':')
-        equal = line.find('=')
-        if colon < 0 and equal < 0:
-            return self.error_invalid_assignment(line)
-
-        if colon < 0 or (equal >= 0 and equal < colon):
-            key, value = line[:equal], line[equal + 1:]
-        else:
-            key, value = line[:colon], line[colon + 1:]
-
-        value = value.strip()
-        if ((value and value[0] == value[-1]) and
-                (value[0] == "\"" or value[0] == "'")):
-            value = value[1:-1]
-        return key.strip(), [value]
-
-    def parse(self, lineiter):
-        key = None
-        value = []
-
-        for line in lineiter:
-            self.lineno += 1
-
-            line = line.rstrip()
-            if not line:
-                # Blank line, ends multi-line values
-                if key:
-                    key, value = self._assignment(key, value)
-                continue
-            elif line[0] in (' ', '\t'):
-                # Continuation of previous assignment
-                if key is None:
-                    self.error_unexpected_continuation(line)
-                else:
-                    value.append(line.lstrip())
-                continue
-
-            if key:
-                # Flush previous assignment, if any
-                key, value = self._assignment(key, value)
-
-            if line[0] == '[':
-                # Section start
-                section = self._get_section(line)
-                if section:
-                    self.new_section(section)
-            elif line[0] in '#;':
-                self.comment(line[1:].lstrip())
-            else:
-                key, value = self._split_key_value(line)
-                if not key:
-                    return self.error_empty_key(line)
-
-        if key:
-            # Flush previous assignment, if any
-            self._assignment(key, value)
-
-    def assignment(self, key, value):
-        """Called when a full assignment is parsed."""
-        raise NotImplementedError()
-
-    def new_section(self, section):
-        """Called when a new section is started."""
-        raise NotImplementedError()
-
-    def comment(self, comment):
-        """Called when a comment is parsed."""
-        pass
-
-    def error_invalid_assignment(self, line):
-        raise self.parse_exc("No ':' or '=' found in assignment",
-                             self.lineno, line)
-
-    def error_empty_key(self, line):
-        raise self.parse_exc('Key cannot be empty', self.lineno, line)
-
-    def error_unexpected_continuation(self, line):
-        raise self.parse_exc('Unexpected continuation line',
-                             self.lineno, line)
-
-    def error_no_section_end_bracket(self, line):
-        raise self.parse_exc('Invalid section (must end with ])',
-                             self.lineno, line)
-
-    def error_no_section_name(self, line):
-        raise self.parse_exc('Empty section name', self.lineno, line)
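A minimal subclass sketch (illustrative, mirroring how the ConfigParser above builds on this base class) showing the callbacks a concrete parser implements:

    class DictParser(BaseParser):
        def __init__(self):
            super(DictParser, self).__init__()
            self.sections = {}
            self.section = None

        def new_section(self, section):
            self.section = section
            self.sections.setdefault(section, {})

        def assignment(self, key, value):
            self.sections[self.section][key] = '\n'.join(value)

    p = DictParser()
    p.parse(['[DEFAULT]', 'verbose = true'])
    assert p.sections == {'DEFAULT': {'verbose': 'true'}}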
diff --git a/tempest/openstack/common/setup.py b/tempest/openstack/common/setup.py
index 2fb9cf2..80a0ece 100644
--- a/tempest/openstack/common/setup.py
+++ b/tempest/openstack/common/setup.py
@@ -43,6 +43,11 @@
     return mapping
 
 
+def _parse_git_mailmap(git_dir, mailmap='.mailmap'):
+    mailmap = os.path.join(os.path.dirname(git_dir), mailmap)
+    return parse_mailmap(mailmap)
+
+
 def canonicalize_emails(changelog, mapping):
     """Takes in a string and an email alias mapping and replaces all
        instances of the aliases in the string with their real email.
@@ -117,9 +122,9 @@
         output = subprocess.Popen(["/bin/sh", "-c", cmd],
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
+    out = output.communicate()
     if output.returncode and throw_on_error:
         raise Exception("%s returned %d" % (cmd, output.returncode))
-    out = output.communicate()
     if len(out) == 0:
         return None
     if len(out[0].strip()) == 0:
@@ -127,14 +132,26 @@
     return out[0].strip()
 
 
+def _get_git_directory():
+    parent_dir = os.path.dirname(__file__)
+    while True:
+        git_dir = os.path.join(parent_dir, '.git')
+        if os.path.exists(git_dir):
+            return git_dir
+        parent_dir, child = os.path.split(parent_dir)
+        if not child:   # reached root dir
+            return None
+
+
 def write_git_changelog():
     """Write a changelog based on the git changelog."""
     new_changelog = 'ChangeLog'
+    git_dir = _get_git_directory()
     if not os.getenv('SKIP_WRITE_GIT_CHANGELOG'):
-        if os.path.isdir('.git'):
-            git_log_cmd = 'git log --stat'
+        if git_dir:
+            git_log_cmd = 'git --git-dir=%s log --stat' % git_dir
             changelog = _run_shell_command(git_log_cmd)
-            mailmap = parse_mailmap()
+            mailmap = _parse_git_mailmap(git_dir)
             with open(new_changelog, "w") as changelog_file:
                 changelog_file.write(canonicalize_emails(changelog, mailmap))
     else:
@@ -146,13 +163,15 @@
     jenkins_email = 'jenkins@review.(openstack|stackforge).org'
     old_authors = 'AUTHORS.in'
     new_authors = 'AUTHORS'
+    git_dir = _get_git_directory()
     if not os.getenv('SKIP_GENERATE_AUTHORS'):
-        if os.path.isdir('.git'):
+        if git_dir:
             # don't include jenkins email address in AUTHORS file
-            git_log_cmd = ("git log --format='%aN <%aE>' | sort -u | "
+            git_log_cmd = ("git --git-dir=" + git_dir +
+                           " log --format='%aN <%aE>' | sort -u | "
                            "egrep -v '" + jenkins_email + "'")
             changelog = _run_shell_command(git_log_cmd)
-            mailmap = parse_mailmap()
+            mailmap = _parse_git_mailmap(git_dir)
             with open(new_authors, 'w') as new_authors_fh:
                 new_authors_fh.write(canonicalize_emails(changelog, mailmap))
                 if os.path.exists(old_authors):
@@ -258,40 +277,44 @@
     return cmdclass
 
 
-def _get_revno():
+def _get_revno(git_dir):
     """Return the number of commits since the most recent tag.
 
     We use git-describe to find this out, but if there are no
     tags then we fall back to counting commits since the beginning
     of time.
     """
-    describe = _run_shell_command("git describe --always")
+    describe = _run_shell_command(
+        "git --git-dir=%s describe --always" % git_dir)
     if "-" in describe:
         return describe.rsplit("-", 2)[-2]
 
     # no tags found
-    revlist = _run_shell_command("git rev-list --abbrev-commit HEAD")
+    revlist = _run_shell_command(
+        "git --git-dir=%s rev-list --abbrev-commit HEAD" % git_dir)
     return len(revlist.splitlines())
 
 
 def _get_version_from_git(pre_version):
     """Return a version which is equal to the tag that's on the current
     revision if there is one, or tag plus number of additional revisions
-    if the current revision has no tag.
-    """
+    if the current revision has no tag."""
 
-    if os.path.isdir('.git'):
+    git_dir = _get_git_directory()
+    if git_dir:
         if pre_version:
             try:
                 return _run_shell_command(
-                    "git describe --exact-match",
+                    "git --git-dir=" + git_dir + " describe --exact-match",
                     throw_on_error=True).replace('-', '.')
             except Exception:
-                sha = _run_shell_command("git log -n1 --pretty=format:%h")
-                return "%s.a%s.g%s" % (pre_version, _get_revno(), sha)
+                sha = _run_shell_command(
+                    "git --git-dir=" + git_dir + " log -n1 --pretty=format:%h")
+                return "%s.a%s.g%s" % (pre_version, _get_revno(git_dir), sha)
         else:
             return _run_shell_command(
-                "git describe --always").replace('-', '.')
+                "git --git-dir=" + git_dir + " describe --always").replace(
+                    '-', '.')
     return None
 
 
diff --git a/tempest/services/compute/json/interfaces_client.py b/tempest/services/compute/json/interfaces_client.py
new file mode 100644
index 0000000..468a5c2
--- /dev/null
+++ b/tempest/services/compute/json/interfaces_client.py
@@ -0,0 +1,57 @@
+# Copyright 2013 IBM Corp.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import json
+
+from tempest.common.rest_client import RestClient
+
+
+class InterfacesClientJSON(RestClient):
+
+    def __init__(self, config, username, password, auth_url, tenant_name=None):
+        super(InterfacesClientJSON, self).__init__(config, username, password,
+                                                   auth_url, tenant_name)
+        self.service = self.config.compute.catalog_type
+
+    def list_interfaces(self, server):
+        resp, body = self.get('servers/%s/os-interface' % server)
+        body = json.loads(body)
+        return resp, body['interfaceAttachments']
+
+    def create_interface(self, server, port_id=None, network_id=None,
+                         fixed_ip=None):
+        iface_body = {}
+        if port_id:
+            iface_body['port_id'] = port_id
+        if network_id:
+            iface_body['net_id'] = network_id
+        if fixed_ip:
+            iface_body['fixed_ips'] = [dict(ip_address=fixed_ip)]
+        post_body = dict(interfaceAttachment=iface_body)
+        post_body = json.dumps(post_body)
+        resp, body = self.post('servers/%s/os-interface' % server,
+                               headers=self.headers,
+                               body=post_body)
+        body = json.loads(body)
+        return resp, body['interfaceAttachment']
+
+    def show_interface(self, server, port_id):
+        resp, body = self.get('servers/%s/os-interface/%s' % (server, port_id))
+        body = json.loads(body)
+        return resp, body['interfaceAttachment']
+
+    def delete_interface(self, server, port_id):
+        resp, body = self.delete('servers/%s/os-interface/%s' % (server,
+                                                                 port_id))
+        return resp, body
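A usage sketch for a test (the authenticated client, server and network ids are assumed to come from the test's setup; names are illustrative):

    resp, iface = client.create_interface(server_id, network_id=net_id)
    # expected request body: {"interfaceAttachment": {"net_id": "..."}}
    resp, ifaces = client.list_interfaces(server_id)
    resp, shown = client.show_interface(server_id, iface['port_id'])
    client.delete_interface(server_id, iface['port_id'])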
diff --git a/tempest/services/compute/json/security_groups_client.py b/tempest/services/compute/json/security_groups_client.py
index 95f2831..7f430d8 100644
--- a/tempest/services/compute/json/security_groups_client.py
+++ b/tempest/services/compute/json/security_groups_client.py
@@ -95,3 +95,12 @@
     def delete_security_group_rule(self, group_rule_id):
         """Deletes the provided Security Group rule."""
         return self.delete('os-security-group-rules/%s' % str(group_rule_id))
+
+    def list_security_group_rules(self, security_group_id):
+        """List all rules for a security group."""
+        resp, body = self.get('os-security-groups')
+        body = json.loads(body)
+        for sg in body['security_groups']:
+            if sg['id'] == security_group_id:
+                return resp, sg['rules']
+        raise exceptions.NotFound('No such Security Group')
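A usage sketch (the client and security group id are assumed from the calling test; rule fields follow the nova security group rule format):

    resp, rules = client.list_security_group_rules(secgroup_id)
    protocols = [rule['ip_protocol'] for rule in rules]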
diff --git a/tempest/services/compute/json/servers_client.py b/tempest/services/compute/json/servers_client.py
index e8d1153..bc9d9bd 100644
--- a/tempest/services/compute/json/servers_client.py
+++ b/tempest/services/compute/json/servers_client.py
@@ -1,6 +1,7 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
 
 # Copyright 2012 OpenStack, LLC
+# Copyright 2013 Hewlett-Packard Development Company, L.P.
 # All Rights Reserved.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -375,3 +376,11 @@
         resp, body = self.get('/'.join(['servers', server_id,
                               'os-virtual-interfaces']))
         return resp, json.loads(body)
+
+    def rescue_server(self, server_id, adminPass=None):
+        """Rescue the provided server."""
+        return self.action(server_id, 'rescue', None, adminPass=adminPass)
+
+    def unrescue_server(self, server_id):
+        """Unrescue the provided server."""
+        return self.action(server_id, 'unrescue', None)
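A short sketch of how a test might drive these (the servers client and a status wait helper are assumed to be available on the test class):

    resp, body = self.client.rescue_server(server_id, adminPass='testpass')
    self.client.wait_for_server_status(server_id, 'RESCUE')
    resp, body = self.client.unrescue_server(server_id)
    self.client.wait_for_server_status(server_id, 'ACTIVE')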
diff --git a/tempest/services/compute/json/volumes_extensions_client.py b/tempest/services/compute/json/volumes_extensions_client.py
index e4271d9..d12b97b 100644
--- a/tempest/services/compute/json/volumes_extensions_client.py
+++ b/tempest/services/compute/json/volumes_extensions_client.py
@@ -53,10 +53,10 @@
         body = json.loads(body)
         return resp, body['volumes']
 
-    def get_volume(self, volume_id, wait=None):
+    def get_volume(self, volume_id):
         """Returns the details of a single volume."""
         url = "os-volumes/%s" % str(volume_id)
-        resp, body = self.get(url, wait=wait)
+        resp, body = self.get(url)
         body = json.loads(body)
         return resp, body['volume']
 
diff --git a/tempest/services/compute/xml/common.py b/tempest/services/compute/xml/common.py
index bbc4e38..4b1b11a 100644
--- a/tempest/services/compute/xml/common.py
+++ b/tempest/services/compute/xml/common.py
@@ -100,7 +100,8 @@
     """
     json = {}
     for attr in node.keys():
-        json[attr] = node.get(attr)
+        if not attr.startswith("xmlns"):
+            json[attr] = node.get(attr)
     if not node.getchildren():
         return node.text or json
     for child in node.getchildren():
diff --git a/tempest/services/compute/xml/interfaces_client.py b/tempest/services/compute/xml/interfaces_client.py
new file mode 100644
index 0000000..4a692a1
--- /dev/null
+++ b/tempest/services/compute/xml/interfaces_client.py
@@ -0,0 +1,82 @@
+# Copyright 2013 IBM Corp.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from lxml import etree
+
+from tempest.common.rest_client import RestClientXML
+from tempest.services.compute.xml.common import Document
+from tempest.services.compute.xml.common import Element
+from tempest.services.compute.xml.common import Text
+from tempest.services.compute.xml.common import xml_to_json
+
+
+class InterfacesClientXML(RestClientXML):
+
+    def __init__(self, config, username, password, auth_url, tenant_name=None):
+        super(InterfacesClientXML, self).__init__(config, username, password,
+                                                  auth_url, tenant_name)
+        self.service = self.config.compute.catalog_type
+
+    def _process_xml_interface(self, node):
+        iface = xml_to_json(node)
+        # NOTE(danms): if multiple addresses per interface is ever required,
+        # xml_to_json will need to be fixed or replaced in this case
+        iface['fixed_ips'] = [dict(iface['fixed_ips']['fixed_ip'].items())]
+        return iface
+
+    def list_interfaces(self, server):
+        resp, body = self.get('servers/%s/os-interface' % server, self.headers)
+        node = etree.fromstring(body)
+        interfaces = [self._process_xml_interface(x)
+                      for x in node.getchildren()]
+        return resp, interfaces
+
+    def create_interface(self, server, port_id=None, network_id=None,
+                         fixed_ip=None):
+        doc = Document()
+        iface = Element('interfaceAttachment')
+        if port_id:
+            _port_id = Element('port_id')
+            _port_id.append(Text(port_id))
+            iface.append(_port_id)
+        if network_id:
+            _network_id = Element('net_id')
+            _network_id.append(Text(network_id))
+            iface.append(_network_id)
+        if fixed_ip:
+            _fixed_ips = Element('fixed_ips')
+            _fixed_ip = Element('fixed_ip')
+            _ip_address = Element('ip_address')
+            _ip_address.append(Text(fixed_ip))
+            _fixed_ip.append(_ip_address)
+            _fixed_ips.append(_fixed_ip)
+            iface.append(_fixed_ips)
+        doc.append(iface)
+        resp, body = self.post('servers/%s/os-interface' % server,
+                               headers=self.headers,
+                               body=str(doc))
+        body = self._process_xml_interface(etree.fromstring(body))
+        return resp, body
+
+    def show_interface(self, server, port_id):
+        resp, body = self.get('servers/%s/os-interface/%s' % (server, port_id),
+                              self.headers)
+        body = self._process_xml_interface(etree.fromstring(body))
+        return resp, body
+
+    def delete_interface(self, server, port_id):
+        resp, body = self.delete('servers/%s/os-interface/%s' % (server,
+                                                                 port_id))
+        return resp, body
diff --git a/tempest/services/compute/xml/security_groups_client.py b/tempest/services/compute/xml/security_groups_client.py
index ac70f1b..7db60a1 100644
--- a/tempest/services/compute/xml/security_groups_client.py
+++ b/tempest/services/compute/xml/security_groups_client.py
@@ -23,6 +23,7 @@
 from tempest.services.compute.xml.common import Element
 from tempest.services.compute.xml.common import Text
 from tempest.services.compute.xml.common import xml_to_json
+from tempest.services.compute.xml.common import XMLNS_11
 
 
 class SecurityGroupsClientXML(RestClientXML):
@@ -128,3 +129,16 @@
         """Deletes the provided Security Group rule."""
         return self.delete('os-security-group-rules/%s' %
                            str(group_rule_id), self.headers)
+
+    def list_security_group_rules(self, security_group_id):
+        """List all rules for a security group."""
+        url = "os-security-groups"
+        resp, body = self.get(url, self.headers)
+        body = etree.fromstring(body)
+        secgroups = body.getchildren()
+        for secgroup in secgroups:
+            if secgroup.get('id') == security_group_id:
+                node = secgroup.find('{%s}rules' % XMLNS_11)
+                rules = [xml_to_json(x) for x in node.getchildren()]
+                return resp, rules
+        raise exceptions.NotFound('No such Security Group')
diff --git a/tempest/services/compute/xml/servers_client.py b/tempest/services/compute/xml/servers_client.py
index efb28e6..fceeb28 100644
--- a/tempest/services/compute/xml/servers_client.py
+++ b/tempest/services/compute/xml/servers_client.py
@@ -1,6 +1,7 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
 #
 # Copyright 2012 IBM
+# Copyright 2013 Hewlett-Packard Development Company, L.P.
 # All Rights Reserved.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -159,7 +160,7 @@
         return array
 
     def list_servers(self, params=None):
-        url = 'servers/detail'
+        url = 'servers'
         if params:
             url += '?%s' % urllib.urlencode(params)
 
@@ -182,13 +183,13 @@
         server = Element("server")
         doc.append(server)
 
-        if name:
+        if name is not None:
             server.add_attr("name", name)
-        if accessIPv4:
+        if accessIPv4 is not None:
             server.add_attr("accessIPv4", accessIPv4)
-        if accessIPv6:
+        if accessIPv6 is not None:
             server.add_attr("accessIPv6", accessIPv6)
-        if meta:
+        if meta is not None:
             metadata = Element("metadata")
             server.append(metadata)
             for k, v in meta:
@@ -228,10 +229,26 @@
                          flavorRef=flavor_ref,
                          name=name)
 
-        for attr in ["adminPass", "accessIPv4", "accessIPv6", "key_name"]:
+        for attr in ["adminPass", "accessIPv4", "accessIPv6", "key_name",
+                     "user_data", "availability_zone"]:
             if attr in kwargs:
                 server.add_attr(attr, kwargs[attr])
 
+        if 'security_groups' in kwargs:
+            secgroups = Element("security_groups")
+            server.append(secgroups)
+            for secgroup in kwargs['security_groups']:
+                s = Element("security_group", name=secgroup['name'])
+                secgroups.append(s)
+
+        if 'networks' in kwargs:
+            networks = Element("networks")
+            server.append(networks)
+            for network in kwargs['networks']:
+                s = Element("network", uuid=network['uuid'],
+                            fixed_ip=network['fixed_ip'])
+                networks.append(s)
+
         if 'meta' in kwargs:
             metadata = Element("metadata")
             server.append(metadata)
@@ -305,7 +322,8 @@
         resp, body = self.get("servers/%s/ips" % str(server_id), self.headers)
 
         networks = {}
-        for child in etree.fromstring(body.getchildren()):
+        xml_list = etree.fromstring(body)
+        for child in xml_list.getchildren():
             network = self._parse_network(child)
             networks.update(**network)
 
@@ -383,6 +401,58 @@
     def remove_security_group(self, server_id, name):
         return self.action(server_id, 'removeSecurityGroup', None, name=name)
 
+    def list_server_metadata(self, server_id):
+        resp, body = self.get("servers/%s/metadata" % str(server_id),
+                              self.headers)
+        body = self._parse_key_value(etree.fromstring(body))
+        return resp, body
+
+    def set_server_metadata(self, server_id, meta):
+        doc = Document()
+        metadata = Element("metadata")
+        doc.append(metadata)
+        for k, v in meta.items():
+            meta_element = Element("meta", key=k)
+            meta_element.append(Text(v))
+            metadata.append(meta_element)
+        resp, body = self.put('servers/%s/metadata' % str(server_id),
+                              str(doc), self.headers)
+        return resp, xml_to_json(etree.fromstring(body))
+
+    def update_server_metadata(self, server_id, meta):
+        doc = Document()
+        metadata = Element("metadata")
+        doc.append(metadata)
+        for k, v in meta.items():
+            meta_element = Element("meta", key=k)
+            meta_element.append(Text(v))
+            metadata.append(meta_element)
+        resp, body = self.post("/servers/%s/metadata" % str(server_id),
+                               str(doc), headers=self.headers)
+        body = xml_to_json(etree.fromstring(body))
+        return resp, body
+
+    def get_server_metadata_item(self, server_id, key):
+        resp, body = self.get("servers/%s/metadata/%s" % (str(server_id), key),
+                              headers=self.headers)
+        node = etree.fromstring(body)
+        return resp, dict([(node.attrib['key'], xml_to_json(node))])
+
+    def set_server_metadata_item(self, server_id, key, meta):
+        doc = Document()
+        for k, v in meta.items():
+            meta_element = Element("meta", key=k)
+            meta_element.append(Text(v))
+            doc.append(meta_element)
+        resp, body = self.put('servers/%s/metadata/%s' % (str(server_id), key),
+                              str(doc), self.headers)
+        return resp, xml_to_json(etree.fromstring(body))
+
+    def delete_server_metadata_item(self, server_id, key):
+        resp, body = self.delete("servers/%s/metadata/%s" %
+                                 (str(server_id), key))
+        return resp, body
+
     def get_console_output(self, server_id, length):
         return self.action(server_id, 'os-getConsoleOutput', 'output',
                            length=length)
@@ -395,3 +465,25 @@
                               'os-virtual-interfaces']), self.headers)
         virt_int = self._parse_xml_virtual_interfaces(etree.fromstring(body))
         return resp, virt_int
+
+    def rescue_server(self, server_id, adminPass=None):
+        """Rescue the provided server."""
+        return self.action(server_id, 'rescue', None, adminPass=adminPass)
+
+    def unrescue_server(self, server_id):
+        """Unrescue the provided server."""
+        return self.action(server_id, 'unrescue', None)
+
+    def attach_volume(self, server_id, volume_id, device='/dev/vdz'):
+        post_body = Element("volumeAttachment", volumeId=volume_id,
+                            device=device)
+        resp, body = self.post('servers/%s/os-volume_attachments' % server_id,
+                               str(Document(post_body)), self.headers)
+        return resp, body
+
+    def detach_volume(self, server_id, volume_id):
+        headers = {'Content-Type': 'application/xml',
+                   'Accept': 'application/xml'}
+        resp, body = self.delete('servers/%s/os-volume_attachments/%s' %
+                                 (server_id, volume_id), headers)
+        return resp, body
diff --git a/tempest/services/compute/xml/volumes_extensions_client.py b/tempest/services/compute/xml/volumes_extensions_client.py
index 69b9bac..06cfcfb 100644
--- a/tempest/services/compute/xml/volumes_extensions_client.py
+++ b/tempest/services/compute/xml/volumes_extensions_client.py
@@ -81,10 +81,10 @@
             volumes += [self._parse_volume(vol) for vol in list(body)]
         return resp, volumes
 
-    def get_volume(self, volume_id, wait=None):
+    def get_volume(self, volume_id):
         """Returns the details of a single volume."""
         url = "os-volumes/%s" % str(volume_id)
-        resp, body = self.get(url, self.headers, wait=wait)
+        resp, body = self.get(url, self.headers)
         body = etree.fromstring(body)
         return resp, self._parse_volume(body)
 
diff --git a/tempest/services/image/json/__init__.py b/tempest/services/image/v1/__init__.py
similarity index 100%
copy from tempest/services/image/json/__init__.py
copy to tempest/services/image/v1/__init__.py
diff --git a/tempest/services/image/json/__init__.py b/tempest/services/image/v1/json/__init__.py
similarity index 100%
rename from tempest/services/image/json/__init__.py
rename to tempest/services/image/v1/json/__init__.py
diff --git a/tempest/services/image/json/image_client.py b/tempest/services/image/v1/json/image_client.py
similarity index 81%
rename from tempest/services/image/json/image_client.py
rename to tempest/services/image/v1/json/image_client.py
index 277075e..77c9cd2 100644
--- a/tempest/services/image/json/image_client.py
+++ b/tempest/services/image/v1/json/image_client.py
@@ -19,7 +19,6 @@
 import errno
 import json
 import os
-import time
 import urllib
 
 from tempest.common import glance_http
@@ -122,28 +121,25 @@
         body = json.loads(''.join([c for c in body_iter]))
         return resp, body['image']
 
-    def create_image(self, name, container_format, disk_format, is_public=True,
-                     location=None, properties=None, data=None):
+    def create_image(self, name, container_format, disk_format, **kwargs):
         params = {
             "name": name,
             "container_format": container_format,
             "disk_format": disk_format,
-            "is_public": is_public,
         }
+
         headers = {}
 
-        if location is not None:
-            params['location'] = location
-
-        if properties is not None:
-            params['properties'] = properties
+        for option in ['is_public', 'location', 'properties']:
+            if option in kwargs:
+                params[option] = kwargs.get(option)
 
         headers.update(self._image_meta_to_headers(params))
 
-        if data is not None:
-            return self._create_with_data(headers, data)
+        if 'data' in kwargs:
+            return self._create_with_data(headers, kwargs.get('data'))
 
-        resp, body = self.post('v1/images', data, headers)
+        resp, body = self.post('v1/images', None, headers)
         body = json.loads(body)
         return resp, body['image']
 
@@ -191,15 +187,47 @@
         body = json.loads(body)
         return resp, body['images']
 
-    def get_image(self, image_id, wait=None):
+    def get_image(self, image_id):
         url = 'v1/images/%s' % image_id
-        resp, __ = self.get(url, wait=wait)
+        resp, __ = self.get(url)
         body = self._image_meta_from_headers(resp)
         return resp, body
 
     def is_resource_deleted(self, id):
         try:
-            self.get_image(id, wait=True)
+            self.get_image(id)
         except exceptions.NotFound:
             return True
         return False
+
+    def get_image_membership(self, image_id):
+        url = 'v1/images/%s/members' % image_id
+        resp, body = self.get(url)
+        body = json.loads(body)
+        return resp, body
+
+    def get_shared_images(self, member_id):
+        url = 'v1/shared-images/%s' % member_id
+        resp, body = self.get(url)
+        body = json.loads(body)
+        return resp, body
+
+    def add_member(self, member_id, image_id, can_share=False):
+        url = 'v1/images/%s/members/%s' % (image_id, member_id)
+        body = None
+        if can_share:
+            body = json.dumps({'member': {'can_share': True}})
+        resp, __ = self.put(url, body, self.headers)
+        return resp
+
+    def delete_member(self, member_id, image_id):
+        url = 'v1/images/%s/members/%s' % (image_id, member_id)
+        resp, __ = self.delete(url)
+        return resp
+
+    def replace_membership_list(self, image_id, member_list):
+        url = 'v1/images/%s/members' % image_id
+        body = json.dumps({'membership': member_list})
+        resp, data = self.put(url, body, self.headers)
+        data = json.loads(data)
+        return resp, data
diff --git a/tempest/services/image/json/__init__.py b/tempest/services/image/v2/__init__.py
similarity index 100%
copy from tempest/services/image/json/__init__.py
copy to tempest/services/image/v2/__init__.py
diff --git a/tempest/services/image/json/__init__.py b/tempest/services/image/v2/json/__init__.py
similarity index 100%
copy from tempest/services/image/json/__init__.py
copy to tempest/services/image/v2/json/__init__.py
diff --git a/tempest/services/image/v2/json/image_client.py b/tempest/services/image/v2/json/image_client.py
new file mode 100644
index 0000000..2c50a8d
--- /dev/null
+++ b/tempest/services/image/v2/json/image_client.py
@@ -0,0 +1,126 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2013 IBM
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import json
+import urllib
+
+import jsonschema
+
+from tempest.common import glance_http
+from tempest.common import rest_client
+from tempest import exceptions
+
+
+class ImageClientV2JSON(rest_client.RestClient):
+
+    def __init__(self, config, username, password, auth_url, tenant_name=None):
+        super(ImageClientV2JSON, self).__init__(config, username, password,
+                                                auth_url, tenant_name)
+        self.service = self.config.images.catalog_type
+        self.http = self._get_http()
+
+    def _get_http(self):
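+        # Authenticate via keystone, then build a raw glance HTTP client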
+        token, endpoint = self.keystone_auth(self.user, self.password,
+                                             self.auth_url, self.service,
+                                             self.tenant_name)
+        dscv = self.config.identity.disable_ssl_certificate_validation
+        return glance_http.HTTPClient(endpoint=endpoint, token=token,
+                                      insecure=dscv)
+
+    def get_images_schema(self):
+        url = 'v2/schemas/images'
+        resp, body = self.get(url)
+        body = json.loads(body)
+        return resp, body
+
+    def get_image_schema(self):
+        url = 'v2/schemas/image'
+        resp, body = self.get(url)
+        body = json.loads(body)
+        return resp, body
+
+    def _validate_schema(self, body, type='image'):
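+        # Fetch the matching JSON schema and validate the body against it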
+        if type == 'image':
+            resp, schema = self.get_image_schema()
+        elif type == 'images':
+            resp, schema = self.get_images_schema()
+        else:
+            raise ValueError("%s is not a valid schema type" % type)
+
+        jsonschema.validate(body, schema)
+
+    def create_image(self, name, container_format, disk_format, **kwargs):
+        params = {
+            "name": name,
+            "container_format": container_format,
+            "disk_format": disk_format,
+        }
+
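+        # Merge dict/tuple optional values; copy plain values as-is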
+        for option in ['visibility']:
+            if option in kwargs:
+                value = kwargs.get(option)
+                if isinstance(value, dict) or isinstance(value, tuple):
+                    params.update(value)
+                else:
+                    params[option] = value
+
+        data = json.dumps(params)
+        self._validate_schema(data)
+
+        resp, body = self.post('v2/images', data, self.headers)
+        body = json.loads(body)
+        return resp, body
+
+    def delete_image(self, image_id):
+        url = 'v2/images/%s' % image_id
+        self.delete(url)
+
+    def image_list(self, params=None):
+        url = 'v2/images'
+
+        if params:
+            url += '?%s' % urllib.urlencode(params)
+
+        resp, body = self.get(url)
+        body = json.loads(body)
+        self._validate_schema(body, type='images')
+        return resp, body['images']
+
+    def get_image_metadata(self, image_id):
+        url = 'v2/images/%s' % image_id
+        resp, body = self.get(url)
+        body = json.loads(body)
+        return resp, body
+
+    def is_resource_deleted(self, id):
+        try:
+            self.get_image_metadata(id)
+        except exceptions.NotFound:
+            return True
+        return False
+
+    def store_image(self, image_id, data):
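+        # Upload raw image data as an octet-stream via the HTTP client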
+        url = 'v2/images/%s/file' % image_id
+        headers = {'Content-Type': 'application/octet-stream'}
+        resp, body = self.http.raw_request('PUT', url, headers=headers,
+                                           body=data)
+        return resp, body
+
+    def get_image_file(self, image_id):
+        url = 'v2/images/%s/file' % image_id
+        resp, body = self.get(url)
+        return resp, body
diff --git a/tempest/services/object_storage/account_client.py b/tempest/services/object_storage/account_client.py
index fec273c..a71a287 100644
--- a/tempest/services/object_storage/account_client.py
+++ b/tempest/services/object_storage/account_client.py
@@ -103,7 +103,7 @@
         self.service = self.config.object_storage.catalog_type
         self.format = 'json'
 
-    def request(self, method, url, headers=None, body=None, wait=None):
+    def request(self, method, url, headers=None, body=None):
         """A simple HTTP request interface."""
         self.http_obj = httplib2.Http()
         if headers is None:
diff --git a/tempest/services/object_storage/container_client.py b/tempest/services/object_storage/container_client.py
index 7b5efff..93477fa 100644
--- a/tempest/services/object_storage/container_client.py
+++ b/tempest/services/object_storage/container_client.py
@@ -97,7 +97,6 @@
         #TODO(dwalleck):  Rewrite using json format to avoid newlines at end of
         #obj names. Set limit to API limit - 1 (max returned items = 9999)
         limit = 9999
-        marker = None
         if params is not None:
             if 'limit' in params:
                 limit = params['limit']
diff --git a/tempest/services/object_storage/object_client.py b/tempest/services/object_storage/object_client.py
index ac1859a..9626b6b 100644
--- a/tempest/services/object_storage/object_client.py
+++ b/tempest/services/object_storage/object_client.py
@@ -156,7 +156,7 @@
         self.service = self.config.object_storage.catalog_type
         self.format = 'json'
 
-    def request(self, method, url, headers=None, body=None, wait=None):
+    def request(self, method, url, headers=None, body=None):
         """A simple HTTP request interface."""
         dscv = self.config.identity.disable_ssl_certificate_validation
         self.http_obj = httplib2.Http(disable_ssl_certificate_validation=dscv)
diff --git a/tempest/services/volume/json/volumes_client.py b/tempest/services/volume/json/volumes_client.py
index ff1556f..6b0befd 100644
--- a/tempest/services/volume/json/volumes_client.py
+++ b/tempest/services/volume/json/volumes_client.py
@@ -56,10 +56,10 @@
         body = json.loads(body)
         return resp, body['volumes']
 
-    def get_volume(self, volume_id, wait=None):
+    def get_volume(self, volume_id):
         """Returns the details of a single volume."""
         url = "volumes/%s" % str(volume_id)
-        resp, body = self.get(url, wait=wait)
+        resp, body = self.get(url)
         body = json.loads(body)
         return resp, body['volume']
 
diff --git a/tempest/services/volume/xml/admin/volume_types_client.py b/tempest/services/volume/xml/admin/volume_types_client.py
index 74d4631..49cbadb 100644
--- a/tempest/services/volume/xml/admin/volume_types_client.py
+++ b/tempest/services/volume/xml/admin/volume_types_client.py
@@ -149,11 +149,16 @@
         url = "types/%s/extra_specs" % str(vol_type_id)
         extra_specs = Element("extra_specs", xmlns=XMLNS_11)
         if extra_spec:
-            for key, value in extra_spec.items():
-                spec = Element('extra_spec')
-                spec.add_attr('key', key)
-                spec.append(Text(value))
-                extra_specs.append(spec)
+            if isinstance(extra_spec, list):
+                extra_specs.append(extra_spec)
+            else:
+                for key, value in extra_spec.items():
+                    spec = Element('extra_spec')
+                    spec.add_attr('key', key)
+                    spec.append(Text(value))
+                    extra_specs.append(spec)
+        else:
+            extra_specs = None
 
         resp, body = self.post(url, str(Document(extra_specs)),
                                self.headers)
@@ -177,11 +182,14 @@
         url = "types/%s/extra_specs/%s" % (str(vol_type_id),
                                            str(extra_spec_name))
         extra_specs = Element("extra_specs", xmlns=XMLNS_11)
-        for key, value in extra_spec.items():
-            spec = Element('extra_spec')
-            spec.add_attr('key', key)
-            spec.append(Text(value))
-            extra_specs.append(spec)
+
+        if extra_spec is not None:
+            for key, value in extra_spec.items():
+                spec = Element('extra_spec')
+                spec.add_attr('key', key)
+                spec.append(Text(value))
+                extra_specs.append(spec)
+
         resp, body = self.put(url, str(Document(extra_specs)),
                               self.headers)
         body = xml_to_json(etree.fromstring(body))
diff --git a/tempest/services/volume/xml/volumes_client.py b/tempest/services/volume/xml/volumes_client.py
index 5041869..4c15256 100644
--- a/tempest/services/volume/xml/volumes_client.py
+++ b/tempest/services/volume/xml/volumes_client.py
@@ -84,10 +84,10 @@
             volumes += [self._parse_volume(vol) for vol in list(body)]
         return resp, volumes
 
-    def get_volume(self, volume_id, wait=None):
+    def get_volume(self, volume_id):
         """Returns the details of a single volume."""
         url = "volumes/%s" % str(volume_id)
-        resp, body = self.get(url, self.headers, wait=wait)
+        resp, body = self.get(url, self.headers)
         body = etree.fromstring(body)
         return resp, self._parse_volume(body)
 
diff --git a/tempest/tests/boto/test_ec2_instance_run.py b/tempest/tests/boto/test_ec2_instance_run.py
index 09bfc10..3293dea 100644
--- a/tempest/tests/boto/test_ec2_instance_run.py
+++ b/tempest/tests/boto/test_ec2_instance_run.py
@@ -142,7 +142,6 @@
     #NOTE(afazekas): doctored test case,
     # with normal validation it would fail
     @attr("slow", type='smoke')
-    @testtools.skip("Skipped until the Bug #1117555 is resolved")
     def test_integration_1(self):
         # EC2 1. integration test (not strict)
         image_ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
@@ -152,16 +151,18 @@
                                                                group_desc)
         self.addResourceCleanUp(self.destroy_security_group_wait,
                                 security_group)
-        self.ec2_client.authorize_security_group(sec_group_name,
-                                                 ip_protocol="icmp",
-                                                 cidr_ip="0.0.0.0/0",
-                                                 from_port=-1,
-                                                 to_port=-1)
-        self.ec2_client.authorize_security_group(sec_group_name,
-                                                 ip_protocol="tcp",
-                                                 cidr_ip="0.0.0.0/0",
-                                                 from_port=22,
-                                                 to_port=22)
+        self.assertTrue(self.ec2_client.authorize_security_group(
+                sec_group_name,
+                ip_protocol="icmp",
+                cidr_ip="0.0.0.0/0",
+                from_port=-1,
+                to_port=-1))
+        self.assertTrue(self.ec2_client.authorize_security_group(
+                sec_group_name,
+                ip_protocol="tcp",
+                cidr_ip="0.0.0.0/0",
+                from_port=22,
+                to_port=22))
         reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
                                     ramdisk_id=self.images["ari"]["image_id"],
                                     instance_type=self.instance_type,
@@ -178,7 +179,7 @@
 
         address = self.ec2_client.allocate_address()
         rcuk_a = self.addResourceCleanUp(address.delete)
-        address.associate(instance.id)
+        self.assertTrue(address.associate(instance.id))
 
         rcuk_da = self.addResourceCleanUp(address.disassociate)
         #TODO(afazekas): ping test. dependency/permission?
diff --git a/tempest/tests/boto/test_s3_objects.py b/tempest/tests/boto/test_s3_objects.py
index d50dc45..c735215 100644
--- a/tempest/tests/boto/test_s3_objects.py
+++ b/tempest/tests/boto/test_s3_objects.py
@@ -35,7 +35,6 @@
         cls.os = clients.Manager()
         cls.client = cls.os.s3_client
 
-    @testtools.skip("Skipped until the Bug #1076534 is resolved")
     @attr(type='smoke')
     def test_create_get_delete_object(self):
         # S3 Create, get and delete object
diff --git a/tempest/tests/compute/admin/test_flavors_extra_specs.py b/tempest/tests/compute/admin/test_flavors_extra_specs.py
index 711b73f..01bff98 100644
--- a/tempest/tests/compute/admin/test_flavors_extra_specs.py
+++ b/tempest/tests/compute/admin/test_flavors_extra_specs.py
@@ -16,9 +16,9 @@
 #    under the License.
 
 from tempest import exceptions
+from tempest.test import attr
 from tempest.tests import compute
 from tempest.tests.compute import base
-import testtools
 
 
 class FlavorsExtraSpecsTestJSON(base.BaseComputeAdminTest):
@@ -80,51 +80,35 @@
             self.client.unset_flavor_extra_spec(self.flavor['id'], "key1")
         self.assertEqual(unset_resp.status, 200)
 
-    @testtools.skip('Until bug 1094142 is resolved.')
-    def test_flavor_non_admin_set_get_unset_keys(self):
-        #Test to SET, GET UNSET flavor extra spec as a user
-        #with out admin privileges.
-        self.nonadmin_client = self.flavors_client
-        #Assigning extra specs values that are to be set
+    @attr('negative')
+    def test_flavor_non_admin_set_keys(self):
+        #Test to SET flavor extra spec as a user without admin privileges.
         specs = {"key1": "value1", "key2": "value2"}
-        msg = None
+        self.assertRaises(exceptions.Unauthorized,
+                          self.flavors_client.set_flavor_extra_spec,
+                          self.flavor['id'],
+                          specs)
 
-        #Verify if able to SET flavor extraspec with non-admin user
-        try:
-            set_resp, set_body = \
-                self.nonadmin_client.set_flavor_extra_spec(
-                    self.flavor['id'], specs)
-        except exceptions.Unauthorized:
-            pass
-        else:
-            msg = "Flavor extra specs is being SET"
-            msg += " by unauthorized non-admin user.\n"
-        #SET flavor extra specs with admin user
-        #so as to check GET/UNSET flavor extra specs with non-admin
-        set_resp, set_body =\
-            self.client.set_flavor_extra_spec(self.flavor['id'], specs)
-        #Verify if able to GET flavor extraspec with non-admin user
-        try:
-            get_resp, get_body = \
-                self.nonadmin_client.get_flavor_extra_spec('')
-            self.assertEqual(get_resp.status, 200)
-        except Exception as e:
-            msg += "Got an exception when GET Flavor extra specs"
-            msg += " by non-admin user. Exception is: %s\n" % e
-        #Verify if able to UNSET flavor extraspec with non-admin user
-        try:
-            unset_resp, _ = \
-                self.nonadmin_client.unset_flavor_extra_spec(self.flavor['id'],
-                                                             "key1")
-        except exceptions.Unauthorized:
-            pass
-        else:
-            msg += "Flavor extra specs is being UNSET"
-            msg += " by unauthorized non-admin user.\n"
-        #Verification to check if actions failed.
-        #msg variable  would contain the message according to the failures.
-        if msg is not None:
-            self.fail(msg)
+    def test_flavor_non_admin_get_keys(self):
+        specs = {"key1": "value1", "key2": "value2"}
+        set_resp, set_body = self.client.set_flavor_extra_spec(
+                                                self.flavor['id'], specs)
+        resp, body = self.flavors_client.get_flavor_extra_spec(
+                                                            self.flavor['id'])
+        self.assertEqual(resp.status, 200)
+        for key in specs:
+            self.assertEqual(body[key], specs[key])
+
+    @attr('negative')
+    def test_flavor_non_admin_unset_keys(self):
+        specs = {"key1": "value1", "key2": "value2"}
+        set_resp, set_body = self.client.set_flavor_extra_spec(
+                                                self.flavor['id'], specs)
+
+        self.assertRaises(exceptions.Unauthorized,
+                          self.flavors_client.unset_flavor_extra_spec,
+                          self.flavor['id'],
+                          'key1')
 
 
 class FlavorsExtraSpecsTestXML(FlavorsExtraSpecsTestJSON):
diff --git a/tempest/tests/compute/admin/test_quotas.py b/tempest/tests/compute/admin/test_quotas.py
index 7430a7c..befcad3 100644
--- a/tempest/tests/compute/admin/test_quotas.py
+++ b/tempest/tests/compute/admin/test_quotas.py
@@ -44,7 +44,7 @@
         cls.default_quota_set = {'injected_file_content_bytes': 10240,
                                  'metadata_items': 128, 'injected_files': 5,
                                  'ram': 51200, 'floating_ips': 10,
-                                 'key_pairs': 100,
+                                 'fixed_ips': 10, 'key_pairs': 100,
                                  'injected_file_path_bytes': 255,
                                  'instances': 10, 'security_group_rules': 20,
                                  'cores': 20, 'security_groups': 10}
@@ -60,6 +60,9 @@
 
     @attr(type='smoke')
     def test_get_default_quotas(self):
+        # Tempest two step
+        self.skipTest('Skipped until the Bug 1125468 is resolved')
+
         # Admin can get the default resource quota set for a tenant
         expected_quota_set = self.default_quota_set.copy()
         expected_quota_set['id'] = self.demo_tenant_id
@@ -71,13 +74,16 @@
             self.fail("Admin could not get the default quota set for a tenant")
 
     def test_update_all_quota_resources_for_tenant(self):
+        # Tempest two step
+        self.skipTest('Skipped until the Bug 1125468 is resolved')
+
         # Admin can update all the resource quota limits for a tenant
         new_quota_set = {'injected_file_content_bytes': 20480,
                          'metadata_items': 256, 'injected_files': 10,
-                         'ram': 10240, 'floating_ips': 20, 'key_pairs': 200,
-                         'injected_file_path_bytes': 512, 'instances': 20,
-                         'security_group_rules': 20, 'cores': 2,
-                         'security_groups': 20}
+                         'ram': 10240, 'floating_ips': 20, 'fixed_ips': 10,
+                         'key_pairs': 200, 'injected_file_path_bytes': 512,
+                         'instances': 20, 'security_group_rules': 20,
+                         'cores': 2, 'security_groups': 20}
         try:
             # Update limits for all quota resources
             resp, quota_set = self.adm_client.update_quota_set(
@@ -97,6 +103,9 @@
 
     #TODO(afazekas): merge these test cases
     def test_get_updated_quotas(self):
+        # Tempest two step
+        self.skipTest('Skipped until the Bug 1125468 is resolved')
+
         # Verify that GET shows the updated quota set
         self.adm_client.update_quota_set(self.demo_tenant_id,
                                          ram='5120')
@@ -122,15 +131,10 @@
 
         resp, quota_set = self.adm_client.update_quota_set(self.demo_tenant_id,
                                                            cores=vcpu_quota)
-        try:
-            self.create_server()
-        except exceptions.OverLimit:
-            pass
-        else:
-            self.fail("Could create servers over the VCPU quota limit")
-        finally:
-            self.adm_client.update_quota_set(self.demo_tenant_id,
-                                             cores=default_vcpu_quota)
+
+        self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id,
+                        cores=default_vcpu_quota)
+        self.assertRaises(exceptions.OverLimit, self.create_server)
 
     def test_create_server_when_memory_quota_is_full(self):
         # Disallow server creation when tenant's memory quota is full
@@ -140,15 +144,10 @@
 
         self.adm_client.update_quota_set(self.demo_tenant_id,
                                          ram=mem_quota)
-        try:
-            self.create_server()
-        except exceptions.OverLimit:
-            pass
-        else:
-            self.fail("Could create servers over the memory quota limit")
-        finally:
-            self.adm_client.update_quota_set(self.demo_tenant_id,
-                                             ram=default_mem_quota)
+
+        self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id,
+                        ram=default_mem_quota)
+        self.assertRaises(exceptions.OverLimit, self.create_server)
 
 #TODO(afazekas): Add test that tried to update the quota_set as a regular user
 
diff --git a/tempest/tests/compute/base.py b/tempest/tests/compute/base.py
index 94fff13..3b2026e 100644
--- a/tempest/tests/compute/base.py
+++ b/tempest/tests/compute/base.py
@@ -59,6 +59,7 @@
         cls.limits_client = os.limits_client
         cls.volumes_extensions_client = os.volumes_extensions_client
         cls.volumes_client = os.volumes_client
+        cls.interfaces_client = os.interfaces_client
         cls.build_interval = cls.config.compute.build_interval
         cls.build_timeout = cls.config.compute.build_timeout
         cls.ssh_user = cls.config.compute.ssh_user
diff --git a/tempest/tests/compute/floating_ips/test_floating_ips_actions.py b/tempest/tests/compute/floating_ips/test_floating_ips_actions.py
index 0ff81e1..888481a 100644
--- a/tempest/tests/compute/floating_ips/test_floating_ips_actions.py
+++ b/tempest/tests/compute/floating_ips/test_floating_ips_actions.py
@@ -33,10 +33,7 @@
         cls.servers_client = cls.servers_client
 
         #Server creation
-        resp, server = cls.servers_client.create_server('floating-server',
-                                                        cls.image_ref,
-                                                        cls.flavor_ref)
-        cls.servers_client.wait_for_server_status(server['id'], 'ACTIVE')
+        resp, server = cls.create_server(wait_until='ACTIVE')
         cls.server_id = server['id']
         resp, body = cls.servers_client.get_server(server['id'])
         #Floating IP creation
@@ -55,10 +52,8 @@
 
     @classmethod
     def tearDownClass(cls):
-        super(FloatingIPsTestJSON, cls).tearDownClass()
-        #Deleting the server which is created in this method
-        resp, body = cls.servers_client.delete_server(cls.server_id)
         #Deleting the floating IP which is created in this method
+        super(FloatingIPsTestJSON, cls).tearDownClass()
         resp, body = cls.client.delete_floating_ip(cls.floating_ip_id)
 
     @attr(type='positive')
diff --git a/tempest/tests/compute/images/test_image_metadata.py b/tempest/tests/compute/images/test_image_metadata.py
index 311ee8e..918075c 100644
--- a/tempest/tests/compute/images/test_image_metadata.py
+++ b/tempest/tests/compute/images/test_image_metadata.py
@@ -30,14 +30,9 @@
         cls.servers_client = cls.servers_client
         cls.client = cls.images_client
 
-        name = rand_name('server')
-        resp, server = cls.servers_client.create_server(name, cls.image_ref,
-                                                        cls.flavor_ref)
+        resp, server = cls.create_server(wait_until='ACTIVE')
         cls.server_id = server['id']
 
-        #Wait for the server to become active
-        cls.servers_client.wait_for_server_status(cls.server_id, 'ACTIVE')
-
         # Snapshot the server once to save time
         name = rand_name('image')
         resp, _ = cls.client.create_image(cls.server_id, name, {})
@@ -49,7 +44,6 @@
     @classmethod
     def tearDownClass(cls):
         cls.client.delete_image(cls.image_id)
-        cls.servers_client.delete_server(cls.server_id)
         super(ImagesMetadataTest, cls).tearDownClass()
 
     def setUp(self):
diff --git a/tempest/tests/compute/images/test_images.py b/tempest/tests/compute/images/test_images.py
index a61cef6..fb0364a 100644
--- a/tempest/tests/compute/images/test_images.py
+++ b/tempest/tests/compute/images/test_images.py
@@ -56,6 +56,14 @@
             self.image_ids.remove(image_id)
         super(ImagesTestJSON, self).tearDown()
 
+    def __create_image__(self, server_id, name, meta=None):
+        resp, body = self.client.create_image(server_id, name, meta)
+        image_id = parse_image_id(resp['location'])
+        self.client.wait_for_image_resp_code(image_id, 200)
+        self.client.wait_for_image_status(image_id, 'ACTIVE')
+        self.image_ids.append(image_id)
+        return resp, body
+
     @attr(type='negative')
     def test_create_image_from_deleted_server(self):
         # An image should not be created if the server instance is removed
@@ -63,43 +71,24 @@
 
         # Delete server before trying to create server
         self.servers_client.delete_server(server['id'])
-
-        try:
-            # Create a new image after server is deleted
-            name = rand_name('image')
-            meta = {'image_type': 'test'}
-            resp, body = self.client.create_image(server['id'], name, meta)
-
-        except Exception:
-            pass
-
-        else:
-            image_id = parse_image_id(resp['location'])
-            self.client.wait_for_image_resp_code(image_id, 200)
-            self.client.wait_for_image_status(image_id, 'ACTIVE')
-            self.client.delete_image(image_id)
-            self.fail("Should not create snapshot from deleted instance!")
+        self.servers_client.wait_for_server_termination(server['id'])
+        # Create a new image after server is deleted
+        name = rand_name('image')
+        meta = {'image_type': 'test'}
+        self.assertRaises(exceptions.NotFound,
+                          self.__create_image__,
+                          server['id'], name, meta)
 
     @attr(type='negative')
     def test_create_image_from_invalid_server(self):
         # An image should not be created with invalid server id
-        try:
-            # Create a new image with invalid server id
-            name = rand_name('image')
-            meta = {'image_type': 'test'}
-            resp = {}
-            resp['status'] = None
-            resp, body = self.client.create_image('!@#$%^&*()', name, meta)
-
-        except exceptions.NotFound:
-            pass
-
-        finally:
-            if (resp['status'] is not None):
-                image_id = parse_image_id(resp['location'])
-                resp, _ = self.client.delete_image(image_id)
-                self.fail("An image should not be created "
-                          "with invalid server id")
+        # Create a new image with invalid server id
+        name = rand_name('image')
+        meta = {'image_type': 'test'}
+        resp = {}
+        resp['status'] = None
+        self.assertRaises(exceptions.NotFound, self.__create_image__,
+                          '!@#$%^&*()', name, meta)
 
     @attr(type='negative')
     def test_create_image_when_server_is_terminating(self):
diff --git a/tempest/tests/compute/images/test_images_oneserver.py b/tempest/tests/compute/images/test_images_oneserver.py
index d89b6dd..f7008f0 100644
--- a/tempest/tests/compute/images/test_images_oneserver.py
+++ b/tempest/tests/compute/images/test_images_oneserver.py
@@ -154,7 +154,6 @@
         self.client.wait_for_image_status(image_id, 'ACTIVE')
 
     @attr(type='negative')
-    @testtools.skip("Until Bug 1004564 is fixed")
     def test_create_image_specify_name_over_256_chars(self):
         # Return an error if snapshot name over 256 characters is passed
 
diff --git a/tempest/tests/compute/images/test_images_whitebox.py b/tempest/tests/compute/images/test_images_whitebox.py
index 8af812c..105a38a 100644
--- a/tempest/tests/compute/images/test_images_whitebox.py
+++ b/tempest/tests/compute/images/test_images_whitebox.py
@@ -36,15 +36,8 @@
 
     @classmethod
     def tearDownClass(cls):
-        """Terminate test instances created after a test is executed."""
-
-        for server in cls.servers:
-            cls.update_state(server['id'], "active", None)
-            resp, body = cls.servers_client.delete_server(server['id'])
-            if resp['status'] == '204':
-                cls.servers.remove(server)
-                cls.servers_client.wait_for_server_termination(server['id'])
-
+        """Delete images after a test is executed."""
+        super(ImagesWhiteboxTest, cls).tearDownClass()
         for image_id in cls.image_ids:
             cls.client.delete_image(image_id)
             cls.image_ids.remove(image_id)
diff --git a/tempest/tests/compute/keypairs/test_keypairs.py b/tempest/tests/compute/keypairs/test_keypairs.py
index b48b439..87c71aa 100644
--- a/tempest/tests/compute/keypairs/test_keypairs.py
+++ b/tempest/tests/compute/keypairs/test_keypairs.py
@@ -15,8 +15,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import testtools
-
 from tempest.common.utils.data_utils import rand_name
 from tempest import exceptions
 from tempest.test import attr
@@ -82,7 +80,6 @@
         self.assertEqual(202, resp.status)
 
     @attr(type='positive')
-    @testtools.skip("Skipped until the Bug #980688 is resolved")
     def test_get_keypair_detail(self):
         # Keypair should be created, Got details by name and deleted
         k_name = rand_name('keypair-')
@@ -138,7 +135,6 @@
                           self.client.create_keypair, k_name, pub_key)
 
     @attr(type='negative')
-    @testtools.skip("Skipped until the Bug #1086980 is resolved")
     def test_keypair_delete_nonexistant_key(self):
         # Non-existant key deletion should throw a proper error
         k_name = rand_name("keypair-non-existant-")
diff --git a/tempest/tests/compute/limits/test_absolute_limits.py b/tempest/tests/compute/limits/test_absolute_limits.py
index 129339c..2b31680 100644
--- a/tempest/tests/compute/limits/test_absolute_limits.py
+++ b/tempest/tests/compute/limits/test_absolute_limits.py
@@ -15,8 +15,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import testtools
-
 from tempest.tests.compute import base
 
 
diff --git a/tempest/tests/compute/security_groups/test_security_group_rules.py b/tempest/tests/compute/security_groups/test_security_group_rules.py
index 32ac52b..99d9a5d 100644
--- a/tempest/tests/compute/security_groups/test_security_group_rules.py
+++ b/tempest/tests/compute/security_groups/test_security_group_rules.py
@@ -37,18 +37,19 @@
             #Creating a Security Group to add rules to it
             s_name = rand_name('securitygroup-')
             s_description = rand_name('description-')
-            resp, securitygroup =\
-            self.client.create_security_group(s_name, s_description)
+            resp, securitygroup = \
+                self.client.create_security_group(s_name, s_description)
             securitygroup_id = securitygroup['id']
             #Adding rules to the created Security Group
             parent_group_id = securitygroup['id']
             ip_protocol = 'tcp'
             from_port = 22
             to_port = 22
-            resp, rule =\
-            self.client.create_security_group_rule(parent_group_id,
-                                                   ip_protocol, from_port,
-                                                   to_port)
+            resp, rule = \
+                self.client.create_security_group_rule(parent_group_id,
+                                                       ip_protocol,
+                                                       from_port,
+                                                       to_port)
             self.assertEqual(200, resp.status)
         finally:
             #Deleting the Security Group rule, created in this method
@@ -70,14 +71,14 @@
             #Creating a Security Group to add rules to it
             s_name = rand_name('securitygroup-')
             s_description = rand_name('description-')
-            resp, securitygroup =\
-            self.client.create_security_group(s_name, s_description)
+            resp, securitygroup = \
+                self.client.create_security_group(s_name, s_description)
             secgroup1 = securitygroup['id']
             #Creating a Security Group so as to assign group_id to the rule
             s_name2 = rand_name('securitygroup-')
             s_description2 = rand_name('description-')
-            resp, securitygroup =\
-            self.client.create_security_group(s_name2, s_description2)
+            resp, securitygroup = \
+                self.client.create_security_group(s_name2, s_description2)
             secgroup2 = securitygroup['id']
             #Adding rules to the created Security Group with optional arguments
             parent_group_id = secgroup1
@@ -86,12 +87,13 @@
             to_port = 22
             cidr = '10.2.3.124/24'
             group_id = secgroup2
-            resp, rule =\
-            self.client.create_security_group_rule(parent_group_id,
-                                                   ip_protocol,
-                                                   from_port, to_port,
-                                                   cidr=cidr,
-                                                   group_id=group_id)
+            resp, rule = \
+                self.client.create_security_group_rule(parent_group_id,
+                                                       ip_protocol,
+                                                       from_port,
+                                                       to_port,
+                                                       cidr=cidr,
+                                                       group_id=group_id)
             rule_id = rule['id']
             self.assertEqual(200, resp.status)
         finally:
@@ -112,18 +114,19 @@
             #Creating a Security Group to add rule to it
             s_name = rand_name('securitygroup-')
             s_description = rand_name('description-')
-            resp, securitygroup =\
-            self.client.create_security_group(s_name, s_description)
+            resp, securitygroup = \
+                self.client.create_security_group(s_name, s_description)
             securitygroup_id = securitygroup['id']
             #Adding rules to the created Security Group
             parent_group_id = securitygroup['id']
             ip_protocol = 'tcp'
             from_port = 22
             to_port = 22
-            resp, rule =\
-            self.client.create_security_group_rule(parent_group_id,
-                                                   ip_protocol,
-                                                   from_port, to_port)
+            resp, rule = \
+                self.client.create_security_group_rule(parent_group_id,
+                                                       ip_protocol,
+                                                       from_port,
+                                                       to_port)
         finally:
             #Deleting the Security Group rule, created in this method
             group_rule_id = rule['id']
@@ -203,6 +206,25 @@
                           parent_group_id, ip_protocol, from_port, to_port)
 
     @attr(type='negative')
+    def test_security_group_rules_create_with_invalid_port_range(self):
+        # Negative test: Creation of Security Group rule should FAIL
+        # with invalid port range.
+        # Creating a Security Group to add rule to it.
+        s_name = rand_name('securitygroup-')
+        s_description = rand_name('description-')
+        resp, securitygroup = self.client.create_security_group(s_name,
+                                                                s_description)
+        # Adding a rule to the created Security Group
+        secgroup_id = securitygroup['id']
+        ip_protocol = 'tcp'
+        from_port = 22
+        to_port = 21
+        self.addCleanup(self.client.delete_security_group, securitygroup['id'])
+        self.assertRaises(exceptions.BadRequest,
+                          self.client.create_security_group_rule,
+                          secgroup_id, ip_protocol, from_port, to_port)
+
+    @attr(type='negative')
     def test_security_group_rules_delete_with_invalid_id(self):
        # Negative test: Deletion of Security Group rule should FAIL
         # with invalid rule id
@@ -210,6 +232,49 @@
                           self.client.delete_security_group_rule,
                           rand_name('999'))
 
+    @attr(type='positive')
+    def test_security_group_rules_list(self):
+        # Positive test: Created Security Group rules should be
+        # in the list of all rules
+        # Creating a Security Group to add rules to it
+        s_name = rand_name('securitygroup-')
+        s_description = rand_name('description-')
+        resp, securitygroup = \
+            self.client.create_security_group(s_name, s_description)
+        securitygroup_id = securitygroup['id']
+        # Delete the Security Group at the end of this method
+        self.addCleanup(self.client.delete_security_group, securitygroup_id)
+
+        # Add a first rule to the created Security Group
+        ip_protocol1 = 'tcp'
+        from_port1 = 22
+        to_port1 = 22
+        resp, rule = \
+            self.client.create_security_group_rule(securitygroup_id,
+                                                   ip_protocol1,
+                                                   from_port1, to_port1)
+        rule1_id = rule['id']
+        # Delete the Security Group rule1 at the end of this method
+        self.addCleanup(self.client.delete_security_group_rule, rule1_id)
+
+        # Add a second rule to the created Security Group
+        ip_protocol2 = 'icmp'
+        from_port2 = -1
+        to_port2 = -1
+        resp, rule = \
+            self.client.create_security_group_rule(securitygroup_id,
+                                                   ip_protocol2,
+                                                   from_port2, to_port2)
+        rule2_id = rule['id']
+        # Delete the Security Group rule2 at the end of this method
+        self.addCleanup(self.client.delete_security_group_rule, rule2_id)
+
+        # Get rules of the created Security Group
+        resp, rules = \
+            self.client.list_security_group_rules(securitygroup_id)
+        self.assertTrue(any([i for i in rules if i['id'] == rule1_id]))
+        self.assertTrue(any([i for i in rules if i['id'] == rule2_id]))
+
 
 class SecurityGroupRulesTestXML(SecurityGroupRulesTestJSON):
     _interface = 'xml'
diff --git a/tempest/tests/compute/security_groups/test_security_groups.py b/tempest/tests/compute/security_groups/test_security_groups.py
index e5b0380..70a01a0 100644
--- a/tempest/tests/compute/security_groups/test_security_groups.py
+++ b/tempest/tests/compute/security_groups/test_security_groups.py
@@ -38,8 +38,8 @@
             for i in range(3):
                 s_name = rand_name('securitygroup-')
                 s_description = rand_name('description-')
-                resp, securitygroup =\
-                self.client.create_security_group(s_name, s_description)
+                resp, securitygroup = \
+                    self.client.create_security_group(s_name, s_description)
                 self.assertEqual(200, resp.status)
                 security_group_list.append(securitygroup)
             #Fetch all Security Groups and verify the list
@@ -47,8 +47,8 @@
             resp, fetched_list = self.client.list_security_groups()
             self.assertEqual(200, resp.status)
             #Now check if all the created Security Groups are in fetched list
-            missing_sgs =\
-            [sg for sg in security_group_list if sg not in fetched_list]
+            missing_sgs = \
+                [sg for sg in security_group_list if sg not in fetched_list]
             self.assertFalse(missing_sgs,
                              "Failed to find Security Group %s in fetched "
                              "list" % ', '.join(m_group['name']
@@ -56,8 +56,8 @@
         finally:
             #Delete all the Security Groups created in this method
             for securitygroup in security_group_list:
-                resp, _ =\
-                self.client.delete_security_group(securitygroup['id'])
+                resp, _ = \
+                    self.client.delete_security_group(securitygroup['id'])
                 self.assertEqual(202, resp.status)
 
     @attr(type='positive')
@@ -67,7 +67,7 @@
             s_name = rand_name('securitygroup-')
             s_description = rand_name('description-')
             resp, securitygroup = \
-            self.client.create_security_group(s_name, s_description)
+                self.client.create_security_group(s_name, s_description)
             self.assertEqual(200, resp.status)
             self.assertTrue('id' in securitygroup)
             securitygroup_id = securitygroup['id']
@@ -88,12 +88,12 @@
         try:
             s_name = rand_name('securitygroup-')
             s_description = rand_name('description-')
-            resp, securitygroup =\
-            self.client.create_security_group(s_name, s_description)
+            resp, securitygroup = \
+                self.client.create_security_group(s_name, s_description)
             self.assertEqual(200, resp.status)
             #Now fetch the created Security Group by its 'id'
-            resp, fetched_group =\
-            self.client.get_security_group(securitygroup['id'])
+            resp, fetched_group = \
+                self.client.get_security_group(securitygroup['id'])
             self.assertEqual(200, resp.status)
             self.assertEqual(securitygroup, fetched_group,
                              "The fetched Security Group is different "
@@ -172,6 +172,20 @@
                           s_description)
 
     @attr(type='negative')
+    def test_delete_the_default_security_group(self):
+        # Negative test: Deletion of the "default" Security Group should Fail
+        default_security_group_id = None
+        resp, body = self.client.list_security_groups()
+        for i in range(len(body)):
+            if body[i]['name'] == 'default':
+                default_security_group_id = body[i]['id']
+                break
+        #Deleting the "default" Security Group
+        self.assertRaises(exceptions.BadRequest,
+                          self.client.delete_security_group,
+                          default_security_group_id)
+
+    @attr(type='negative')
     def test_delete_nonexistant_security_group(self):
        # Negative test: Deletion of a nonexistent Security Group should Fail
         security_group_id = []
diff --git a/tempest/tests/compute/servers/test_attach_interfaces.py b/tempest/tests/compute/servers/test_attach_interfaces.py
new file mode 100644
index 0000000..47c0575
--- /dev/null
+++ b/tempest/tests/compute/servers/test_attach_interfaces.py
@@ -0,0 +1,112 @@
+# Copyright 2013 IBM Corp.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest import clients
+from tempest.tests.compute import base
+
+import time
+
+
+class AttachInterfacesTestJSON(base.BaseComputeTest):
+    _interface = 'json'
+
+    @classmethod
+    def setUpClass(cls):
+        super(AttachInterfacesTestJSON, cls).setUpClass()
+        os = clients.Manager()
+        if not os.config.network.quantum_available:
+            raise cls.skipException("Quantum is required")
+        cls.client = os.interfaces_client
+
+    def _check_interface(self, iface, port_id=None, network_id=None,
+                         fixed_ip=None):
+        self.assertIn('port_state', iface)
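+        # Other fields are only checked when an expected value is given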
+        if port_id:
+            self.assertEqual(iface['port_id'], port_id)
+        if network_id:
+            self.assertEqual(iface['net_id'], network_id)
+        if fixed_ip:
+            self.assertEqual(iface['fixed_ips'][0]['ip_address'], fixed_ip)
+
+    def _create_server_get_interfaces(self):
+        server = self.create_server()
+        self.os.servers_client.wait_for_server_status(server['id'], 'ACTIVE')
+        resp, ifs = self.client.list_interfaces(server['id'])
+        return server, ifs
+
+    def _test_create_interface(self, server):
+        resp, iface = self.client.create_interface(server['id'])
+        self._check_interface(iface)
+        return iface
+
+    def _test_create_interface_by_network_id(self, server, ifs):
+        network_id = ifs[0]['net_id']
+        resp, iface = self.client.create_interface(server['id'],
+                                                   network_id=network_id)
+        self._check_interface(iface, network_id=network_id)
+        return iface
+
+    def _test_show_interface(self, server, ifs):
+        iface = ifs[0]
+        resp, _iface = self.client.show_interface(server['id'],
+                                                  iface['port_id'])
+        self.assertEqual(iface, _iface)
+
+    def _test_delete_interface(self, server, ifs):
+        # NOTE(danms): delete not the first or last, but one in the middle
+        iface = ifs[1]
+        self.client.delete_interface(server['id'], iface['port_id'])
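+        # Poll for up to five seconds until the interface list shrinks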
+        for i in range(0, 5):
+            _r, _ifs = self.client.list_interfaces(server['id'])
+            if len(ifs) != len(_ifs):
+                break
+            time.sleep(1)
+
+        self.assertEqual(len(_ifs), len(ifs) - 1)
+        for _iface in _ifs:
+            self.assertNotEqual(iface['port_id'], _iface['port_id'])
+        return _ifs
+
+    def _compare_iface_list(self, list1, list2):
+        # NOTE(danms): port_state will likely have changed, so just
+        # confirm the port_ids are the same at least
+        list1 = [x['port_id'] for x in list1]
+        list2 = [x['port_id'] for x in list2]
+
+        self.assertEqual(sorted(list1), sorted(list2))
+
+    def test_create_list_show_delete_interfaces(self):
+        server, ifs = self._create_server_get_interfaces()
+        interface_count = len(ifs)
+        self.assertTrue(interface_count > 0)
+        self._check_interface(ifs[0])
+
+        iface = self._test_create_interface(server)
+        ifs.append(iface)
+
+        iface = self._test_create_interface_by_network_id(server, ifs)
+        ifs.append(iface)
+
+        resp, _ifs = self.client.list_interfaces(server['id'])
+        self._compare_iface_list(ifs, _ifs)
+
+        self._test_show_interface(server, ifs)
+
+        _ifs = self._test_delete_interface(server, ifs)
+        self.assertEqual(len(ifs) - 1, len(_ifs))
+
+
+class AttachInterfacesTestXML(AttachInterfacesTestJSON):
+    _interface = 'xml'
diff --git a/tempest/tests/compute/servers/test_disk_config.py b/tempest/tests/compute/servers/test_disk_config.py
index 3a1ec20..2fbb876 100644
--- a/tempest/tests/compute/servers/test_disk_config.py
+++ b/tempest/tests/compute/servers/test_disk_config.py
@@ -17,7 +17,6 @@
 
 import testtools
 
-from tempest.common.utils.data_utils import rand_name
 from tempest.test import attr
 from tempest.tests import compute
 from tempest.tests.compute import base
diff --git a/tempest/tests/compute/servers/test_list_server_filters.py b/tempest/tests/compute/servers/test_list_server_filters.py
index 897ca34..ff599fe 100644
--- a/tempest/tests/compute/servers/test_list_server_filters.py
+++ b/tempest/tests/compute/servers/test_list_server_filters.py
@@ -73,12 +73,6 @@
         cls.client.wait_for_server_status(cls.s3['id'], 'ACTIVE')
         resp, cls.s3 = cls.client.get_server(cls.s3['id'])
 
-        # The list server call returns minimal results, so we need
-        # a less detailed version of each server also
-        cls.s1_min = cls._convert_to_min_details(cls.s1)
-        cls.s2_min = cls._convert_to_min_details(cls.s2)
-        cls.s3_min = cls._convert_to_min_details(cls.s3)
-
     @classmethod
     def tearDownClass(cls):
         cls.client.delete_server(cls.s1['id'])
@@ -86,10 +80,6 @@
         cls.client.delete_server(cls.s3['id'])
         super(ListServerFiltersTestJSON, cls).tearDownClass()
 
-    def _server_id_in_results(self, server_id, results):
-        ids = [row['id'] for row in results]
-        return server_id in ids
-
     @utils.skip_unless_attr('multiple_images', 'Only one image found')
     @attr(type='positive')
     def test_list_servers_filter_by_image(self):
@@ -98,9 +88,9 @@
         resp, body = self.client.list_servers(params)
         servers = body['servers']
 
-        self.assertTrue(self._server_id_in_results(self.s1['id'], servers))
-        self.assertFalse(self._server_id_in_results(self.s2['id'], servers))
-        self.assertTrue(self._server_id_in_results(self.s3['id'], servers))
+        self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
+        self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers))
+        self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
 
     @attr(type='positive')
     def test_list_servers_filter_by_flavor(self):
@@ -109,9 +99,9 @@
         resp, body = self.client.list_servers(params)
         servers = body['servers']
 
-        self.assertFalse(self._server_id_in_results(self.s1['id'], servers))
-        self.assertFalse(self._server_id_in_results(self.s2['id'], servers))
-        self.assertTrue(self._server_id_in_results(self.s3['id'], servers))
+        self.assertNotIn(self.s1['id'], map(lambda x: x['id'], servers))
+        self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers))
+        self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
 
     @attr(type='positive')
     def test_list_servers_filter_by_server_name(self):
@@ -120,9 +110,9 @@
         resp, body = self.client.list_servers(params)
         servers = body['servers']
 
-        self.assertTrue(self._server_id_in_results(self.s1['id'], servers))
-        self.assertFalse(self._server_id_in_results(self.s2['id'], servers))
-        self.assertFalse(self._server_id_in_results(self.s3['id'], servers))
+        self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
+        self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
+        self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
 
     @attr(type='positive')
     def test_list_servers_filter_by_server_status(self):
@@ -131,12 +121,12 @@
         resp, body = self.client.list_servers(params)
         servers = body['servers']
 
-        self.assertTrue(self._server_id_in_results(self.s1['id'], servers))
-        self.assertTrue(self._server_id_in_results(self.s2['id'], servers))
-        self.assertTrue(self._server_id_in_results(self.s3['id'], servers))
+        self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
+        self.assertIn(self.s2['id'], map(lambda x: x['id'], servers))
+        self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
 
     @attr(type='positive')
-    def test_list_servers_limit_results(self):
+    def test_list_servers_detailed_filter_by_limit(self):
         # Verify only the expected number of servers are returned
         params = {'limit': 1}
         resp, servers = self.client.list_servers_with_detail(params)
@@ -150,9 +140,9 @@
         resp, body = self.client.list_servers_with_detail(params)
         servers = body['servers']
 
-        self.assertTrue(self._server_id_in_results(self.s1['id'], servers))
-        self.assertFalse(self._server_id_in_results(self.s2['id'], servers))
-        self.assertTrue(self._server_id_in_results(self.s3['id'], servers))
+        self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
+        self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers))
+        self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
 
     @attr(type='positive')
     def test_list_servers_detailed_filter_by_flavor(self):
@@ -161,9 +151,9 @@
         resp, body = self.client.list_servers_with_detail(params)
         servers = body['servers']
 
-        self.assertFalse(self._server_id_in_results(self.s1['id'], servers))
-        self.assertFalse(self._server_id_in_results(self.s2['id'], servers))
-        self.assertTrue(self._server_id_in_results(self.s3['id'], servers))
+        self.assertNotIn(self.s1['id'], map(lambda x: x['id'], servers))
+        self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers))
+        self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
 
     @attr(type='positive')
     def test_list_servers_detailed_filter_by_server_name(self):
@@ -172,9 +162,9 @@
         resp, body = self.client.list_servers_with_detail(params)
         servers = body['servers']
 
-        self.assertTrue(self._server_id_in_results(self.s1['id'], servers))
-        self.assertFalse(self._server_id_in_results(self.s2['id'], servers))
-        self.assertFalse(self._server_id_in_results(self.s3['id'], servers))
+        self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
+        self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
+        self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
 
     @attr(type='positive')
     def test_list_servers_detailed_filter_by_server_status(self):
@@ -183,9 +173,10 @@
         resp, body = self.client.list_servers_with_detail(params)
         servers = body['servers']
 
-        self.assertTrue(self._server_id_in_results(self.s1['id'], servers))
-        self.assertTrue(self._server_id_in_results(self.s2['id'], servers))
-        self.assertTrue(self._server_id_in_results(self.s3['id'], servers))
+        self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
+        self.assertIn(self.s2['id'], map(lambda x: x['id'], servers))
+        self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
+        self.assertEqual(['ACTIVE'] * 3, [x['status'] for x in servers])
 
     @attr(type='positive')
     def test_list_servers_detailed_limit_results(self):
@@ -194,14 +185,6 @@
         resp, servers = self.client.list_servers_with_detail(params)
         self.assertEqual(1, len(servers['servers']))
 
-    @classmethod
-    def _convert_to_min_details(self, server):
-        min_detail = {}
-        min_detail['name'] = server['name']
-        min_detail['links'] = server['links']
-        min_detail['id'] = server['id']
-        return min_detail
-
 
 class ListServerFiltersTestXML(ListServerFiltersTestJSON):
     _interface = 'xml'
diff --git a/tempest/tests/compute/servers/test_server_actions.py b/tempest/tests/compute/servers/test_server_actions.py
index 5046ec2..d5b2650 100644
--- a/tempest/tests/compute/servers/test_server_actions.py
+++ b/tempest/tests/compute/servers/test_server_actions.py
@@ -41,7 +41,7 @@
         # Check if the server is in a clean state after test
         try:
             self.client.wait_for_server_status(self.server_id, 'ACTIVE')
-        except exceptions:
+        except Exception:
             # Rebuild server if something happened to it during a test
             self.rebuild_servers()
 
diff --git a/tempest/tests/compute/servers/test_server_addresses.py b/tempest/tests/compute/servers/test_server_addresses.py
index c69f68d..cb8e85e 100644
--- a/tempest/tests/compute/servers/test_server_addresses.py
+++ b/tempest/tests/compute/servers/test_server_addresses.py
@@ -29,16 +29,7 @@
         super(ServerAddressesTest, cls).setUpClass()
         cls.client = cls.servers_client
 
-        cls.name = rand_name('server')
-        resp, cls.server = cls.client.create_server(cls.name,
-                                                    cls.image_ref,
-                                                    cls.flavor_ref)
-        cls.client.wait_for_server_status(cls.server['id'], 'ACTIVE')
-
-    @classmethod
-    def tearDownClass(cls):
-        cls.client.delete_server(cls.server['id'])
-        super(ServerAddressesTest, cls).tearDownClass()
+        resp, cls.server = cls.create_server(wait_until='ACTIVE')
 
     @attr(type='negative', category='server-addresses')
     def test_list_server_addresses_invalid_server_id(self):
@@ -88,3 +79,7 @@
             addr = addr[addr_type]
             for address in addresses[addr_type]:
                 self.assertTrue(any([a for a in addr if a == address]))
+
+
+class ServerAddressesTestXML(ServerAddressesTest):
+    _interface = 'xml'
diff --git a/tempest/tests/compute/servers/test_server_metadata.py b/tempest/tests/compute/servers/test_server_metadata.py
index 4b17fa2..69c0ad9 100644
--- a/tempest/tests/compute/servers/test_server_metadata.py
+++ b/tempest/tests/compute/servers/test_server_metadata.py
@@ -15,40 +15,29 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from tempest.common.utils.data_utils import rand_name
 from tempest import exceptions
 from tempest.test import attr
 from tempest.tests.compute import base
 
 
-class ServerMetadataTest(base.BaseComputeTest):
+class ServerMetadataTestJSON(base.BaseComputeTest):
     _interface = 'json'
 
     @classmethod
     def setUpClass(cls):
-        super(ServerMetadataTest, cls).setUpClass()
+        super(ServerMetadataTestJSON, cls).setUpClass()
         cls.client = cls.servers_client
         cls.quotas = cls.quotas_client
         cls.admin_client = cls._get_identity_admin_client()
         resp, tenants = cls.admin_client.list_tenants()
         cls.tenant_id = [tnt['id'] for tnt in tenants if tnt['name'] ==
                          cls.client.tenant_name][0]
-        #Create a server to be used for all read only tests
-        name = rand_name('server')
-        resp, server = cls.client.create_server(name, cls.image_ref,
-                                                cls.flavor_ref, meta={})
+        resp, server = cls.create_server(meta={}, wait_until='ACTIVE')
+
         cls.server_id = server['id']
 
-        #Wait for the server to become active
-        cls.client.wait_for_server_status(cls.server_id, 'ACTIVE')
-
-    @classmethod
-    def tearDownClass(cls):
-        cls.client.delete_server(cls.server_id)
-        super(ServerMetadataTest, cls).tearDownClass()
-
     def setUp(self):
-        super(ServerMetadataTest, self).setUp()
+        super(ServerMetadataTestJSON, self).setUp()
         meta = {'key1': 'value1', 'key2': 'value2'}
         resp, _ = self.client.set_server_metadata(self.server_id, meta)
         self.assertEqual(resp.status, 200)
@@ -238,3 +227,7 @@
         self.assertRaises(exceptions.BadRequest,
                           self.client.set_server_metadata,
                           self.server_id, meta=meta)
+
+
+class ServerMetadataTestXML(ServerMetadataTestJSON):
+    _interface = 'xml'
diff --git a/tempest/tests/compute/servers/test_server_personality.py b/tempest/tests/compute/servers/test_server_personality.py
index 0bafc2c..0546859 100644
--- a/tempest/tests/compute/servers/test_server_personality.py
+++ b/tempest/tests/compute/servers/test_server_personality.py
@@ -17,7 +17,6 @@
 
 import base64
 
-from tempest.common.utils.data_utils import rand_name
 from tempest import exceptions
 from tempest.test import attr
 from tempest.tests.compute import base
@@ -43,36 +42,26 @@
             path = 'etc/test' + str(i) + '.txt'
             personality.append({'path': path,
                                 'contents': base64.b64encode(file_contents)})
-        try:
-            self.create_server(personality=personality)
-        except exceptions.OverLimit:
-            pass
-        else:
-            self.fail('This request did not fail as expected')
+        self.assertRaises(exceptions.OverLimit, self.create_server,
+                          personality=personality)
 
     @attr(type='positive')
     def test_can_create_server_with_max_number_personality_files(self):
         # Server should be created successfully if maximum allowed number of
         # files is injected into the server during creation.
-        try:
-            file_contents = 'This is a test file.'
-
-            max_file_limit = \
-                self.user_client.get_specific_absolute_limit("maxPersonality")
-
-            person = []
-            for i in range(0, int(max_file_limit)):
-                path = 'etc/test' + str(i) + '.txt'
-                person.append({
-                    'path': path,
-                    'contents': base64.b64encode(file_contents),
-                })
-            resp, server = self.create_server(personality=person)
-            self.assertEqual('202', resp['status'])
-
-        #Teardown
-        finally:
-            self.client.delete_server(server['id'])
+        file_contents = 'This is a test file.'
+        max_file_limit = \
+            self.user_client.get_specific_absolute_limit("maxPersonality")
+        person = []
+        for i in range(0, int(max_file_limit)):
+            path = 'etc/test' + str(i) + '.txt'
+            person.append({
+                'path': path,
+                'contents': base64.b64encode(file_contents),
+            })
+        resp, server = self.create_server(personality=person)
+        self.addCleanup(self.client.delete_server, server['id'])
+        self.assertEqual('202', resp['status'])
 
 
 class ServerPersonalityTestXML(ServerPersonalityTestJSON):
diff --git a/tempest/tests/compute/servers/test_server_rescue.py b/tempest/tests/compute/servers/test_server_rescue.py
new file mode 100644
index 0000000..9230305
--- /dev/null
+++ b/tempest/tests/compute/servers/test_server_rescue.py
@@ -0,0 +1,239 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import base64
+import time
+
+import testtools
+
+from tempest.common.utils.data_utils import rand_name
+from tempest.common.utils.linux.remote_client import RemoteClient
+import tempest.config
+from tempest import exceptions
+from tempest.test import attr
+from tempest.tests import compute
+from tempest.tests.compute import base
+
+
+class ServerRescueTestJSON(base.BaseComputeTest):
+    _interface = 'json'
+
+    run_ssh = tempest.config.TempestConfig().compute.run_ssh
+
+    @classmethod
+    def setUpClass(cls):
+        super(ServerRescueTestJSON, cls).setUpClass()
+        cls.device = 'vdf'
+
+        #Floating IP creation
+        resp, body = cls.floating_ips_client.create_floating_ip()
+        cls.floating_ip_id = str(body['id']).strip()
+        cls.floating_ip = str(body['ip']).strip()
+
+        #Security group creation
+        cls.sg_name = rand_name('sg')
+        cls.sg_desc = rand_name('sg-desc')
+        resp, cls.sg = cls.security_groups_client.create_security_group(
+            cls.sg_name, cls.sg_desc)
+        cls.sg_id = cls.sg['id']
+
+        # Create a volume and wait for it to become ready for attach
+        resp, cls.volume_to_attach = \
+            cls.volumes_extensions_client.create_volume(
+                1, display_name='test_attach')
+        cls.volumes_extensions_client.wait_for_volume_status(
+            cls.volume_to_attach['id'], 'available')
+
+        # Create a volume and wait for it to become ready for detach
+        resp, cls.volume_to_detach = \
+            cls.volumes_extensions_client.create_volume(
+                1, display_name='test_detach')
+        cls.volumes_extensions_client.wait_for_volume_status(
+            cls.volume_to_detach['id'], 'available')
+
+        # Server for positive tests
+        resp, server = cls.create_server(image_id=cls.image_ref,
+                                         flavor=cls.flavor_ref,
+                                         wait_until='BUILD')
+        resp, resc_server = cls.create_server(image_id=cls.image_ref,
+                                              flavor=cls.flavor_ref,
+                                              wait_until='ACTIVE')
+        cls.server_id = server['id']
+        cls.password = server['adminPass']
+        cls.servers_client.wait_for_server_status(cls.server_id, 'ACTIVE')
+
+        # Server for negative tests
+        cls.rescue_id = resc_server['id']
+        cls.rescue_password = resc_server['adminPass']
+
+        cls.servers_client.rescue_server(
+            cls.rescue_id, cls.rescue_password)
+        cls.servers_client.wait_for_server_status(cls.rescue_id, 'RESCUE')
+
+    def setUp(self):
+        super(ServerRescueTestJSON, self).setUp()
+
+    @classmethod
+    def tearDownClass(cls):
+        super(ServerRescueTestJSON, cls).tearDownClass()
+        #Deleting the floating IP which is created in this method
+        cls.floating_ips_client.delete_floating_ip(cls.floating_ip_id)
+        client = cls.volumes_extensions_client
+        client.delete_volume(str(cls.volume_to_attach['id']).strip())
+        client.delete_volume(str(cls.volume_to_detach['id']).strip())
+        resp, cls.sg = cls.security_groups_client.delete_security_group(
+            cls.sg_id)
+
+    def tearDown(self):
+        super(ServerRescueTestJSON, self).tearDown()
+
+    def _detach(self, server_id, volume_id):
+        self.servers_client.detach_volume(server_id, volume_id)
+        self.volumes_extensions_client.wait_for_volume_status(volume_id,
+                                                              'available')
+
+    def _delete(self, volume_id):
+        self.volumes_extensions_client.delete_volume(volume_id)
+
+    @attr(type='smoke')
+    def test_rescue_unrescue_instance(self):
+        resp, body = self.servers_client.rescue_server(
+            self.server_id, self.password)
+        self.assertEqual(200, resp.status)
+        self.servers_client.wait_for_server_status(self.server_id, 'RESCUE')
+        resp, body = self.servers_client.unrescue_server(self.server_id)
+        self.assertEqual(202, resp.status)
+        self.servers_client.wait_for_server_status(self.server_id, 'ACTIVE')
+
+    @attr(type='negative')
+    @testtools.skip("Skipped until Bug:1126163 is resolved")
+    def test_rescued_vm_reboot(self):
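+        # Rebooting a server that is in RESCUE state is expected to fail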
+        self.assertRaises(exceptions.BadRequest, self.servers_client.reboot,
+                          self.rescue_id, 'HARD')
+
+    @attr(type='negative')
+    def test_rescued_vm_rebuild(self):
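+        # Rebuilding a server that is in RESCUE state is expected to fail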
+        self.assertRaises(exceptions.Duplicate,
+                          self.servers_client.rebuild,
+                          self.rescue_id,
+                          self.image_ref_alt)
+
+    @attr(type='positive')
+    @testtools.skip("Skipped due to Bug:1126187")
+    def test_rescued_vm_attach_volume(self):
+        client = self.volumes_extensions_client
+
+        # Rescue the server
+        self.servers_client.rescue_server(self.server_id, self.password)
+        self.servers_client.wait_for_server_status(self.server_id, 'RESCUE')
+
+        # Attach the volume to the server
+        resp, body = self.servers_client.attach_volume(
+            self.server_id, self.volume_to_attach['id'],
+            device='/dev/%s' % self.device)
+        self.assertEqual(200, resp.status)
+        client.wait_for_volume_status(self.volume_to_attach['id'], 'in-use')
+
+        # Detach the volume from the server
+        resp, body = self.servers_client.detach_volume(
+            self.server_id, self.volume_to_attach['id'])
+        self.assertEqual(202, resp.status)
+        client.wait_for_volume_status(self.volume_to_attach['id'], 'available')
+
+        # Unrescue the server
+        resp, body = self.servers_client.unrescue_server(self.server_id)
+        self.assertEqual(202, resp.status)
+        self.servers_client.wait_for_server_status(self.server_id, 'ACTIVE')
+
+    @attr(type='positive')
+    @testtools.skip("Skipped until Bug:1126187 is resolved")
+    def test_rescued_vm_detach_volume(self):
+        # Attach the volume to the server
+        self.servers_client.attach_volume(self.server_id,
+                                          self.volume_to_detach['id'],
+                                          device='/dev/%s' % self.device)
+        self.volumes_extensions_client.wait_for_volume_status(
+            self.volume_to_detach['id'], 'in-use')
+
+        # Rescue the server
+        self.servers_client.rescue_server(self.server_id, self.password)
+        self.servers_client.wait_for_server_status(self.server_id, 'RESCUE')
+
+        # Detach the volume from the server
+        resp, body = self.servers_client.detach_volume(
+            self.server_id, self.volume_to_detach['id'])
+        self.assertEqual(202, resp.status)
+        client = self.volumes_extensions_client
+        client.wait_for_volume_status(self.volume_to_detach['id'], 'available')
+
+        # Unrescue the server
+        resp, body = self.servers_client.unrescue_server(self.server_id)
+        self.assertEqual(202, resp.status)
+        self.servers_client.wait_for_server_status(self.server_id, 'ACTIVE')
+
+    @attr(type='positive')
+    def test_rescued_vm_associate_dissociate_floating_ip(self):
+        # Rescue the server
+        self.servers_client.rescue_server(
+            self.server_id, self.password)
+        self.servers_client.wait_for_server_status(self.server_id, 'RESCUE')
+
+        #Association of floating IP to a rescued vm
+        client = self.floating_ips_client
+        resp, body = client.associate_floating_ip_to_server(
+            self.floating_ip, self.server_id)
+        self.assertEqual(202, resp.status)
+
+        #Disassociation of floating IP that was associated in this method
+        resp, body = \
+            client.disassociate_floating_ip_from_server(self.floating_ip,
+                                                        self.server_id)
+        self.assertEqual(202, resp.status)
+
+        # Unrescue the server
+        resp, body = self.servers_client.unrescue_server(self.server_id)
+        self.assertEqual(202, resp.status)
+        self.servers_client.wait_for_server_status(self.server_id, 'ACTIVE')
+
+    @attr(type='positive')
+    @testtools.skip("Skipped until Bug: 1126257 is resolved")
+    def test_rescued_vm_add_remove_security_group(self):
+        #Add Security group
+        resp, body = self.servers_client.add_security_group(self.server_id,
+                                                            self.sg_name)
+        self.assertEqual(202, resp.status)
+
+        #Delete Security group
+        resp, body = self.servers_client.remove_security_group(self.server_id,
+                                                               self.sg_id)
+        self.assertEqual(202, resp.status)
+
+        # Unrescue the server
+        resp, body = self.servers_client.unrescue_server(self.server_id)
+        self.assertEqual(202, resp.status)
+        self.servers_client.wait_for_server_status(self.server_id, 'ACTIVE')
+
+
+class ServerRescueTestXML(ServerRescueTestJSON):
+    _interface = 'xml'
diff --git a/tempest/tests/compute/servers/test_servers.py b/tempest/tests/compute/servers/test_servers.py
index a8d28df..4796e86 100644
--- a/tempest/tests/compute/servers/test_servers.py
+++ b/tempest/tests/compute/servers/test_servers.py
@@ -28,111 +28,81 @@
         super(ServersTestJSON, cls).setUpClass()
         cls.client = cls.servers_client
 
+    def tearDown(self):
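+        # Remove any servers created by the individual tests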
+        self.clear_servers()
+        super(ServersTestJSON, self).tearDown()
+
     @attr(type='positive')
     def test_create_server_with_admin_password(self):
         # If an admin password is provided on server creation, the server's
         # root password should be set to that password.
+        resp, server = self.create_server(adminPass='testpassword')
 
-        try:
-            server = None
-            resp, server = self.create_server(adminPass='testpassword')
-
-            #Verify the password is set correctly in the response
-            self.assertEqual('testpassword', server['adminPass'])
-
-        #Teardown
-        finally:
-            if server:
-                self.client.delete_server(server['id'])
+        # Verify the password is set correctly in the response
+        self.assertEqual('testpassword', server['adminPass'])
 
     def test_create_with_existing_server_name(self):
         # Creating a server with a name that already exists is allowed
 
         # TODO(sdague): clear out try, we do cleanup one layer up
-        try:
-            id1 = None
-            id2 = None
-            server_name = rand_name('server')
-            resp, server = self.create_server(name=server_name,
-                                              wait_until='ACTIVE')
-            id1 = server['id']
-            resp, server = self.create_server(name=server_name,
-                                              wait_until='ACTIVE')
-            id2 = server['id']
-            self.assertNotEqual(id1, id2, "Did not create a new server")
-            resp, server = self.client.get_server(id1)
-            name1 = server['name']
-            resp, server = self.client.get_server(id2)
-            name2 = server['name']
-            self.assertEqual(name1, name2)
-        finally:
-            for server_id in (id1, id2):
-                if server_id:
-                    self.client.delete_server(server_id)
+        server_name = rand_name('server')
+        resp, server = self.create_server(name=server_name,
+                                          wait_until='ACTIVE')
+        id1 = server['id']
+        resp, server = self.create_server(name=server_name,
+                                          wait_until='ACTIVE')
+        id2 = server['id']
+        self.assertNotEqual(id1, id2, "Did not create a new server")
+        resp, server = self.client.get_server(id1)
+        name1 = server['name']
+        resp, server = self.client.get_server(id2)
+        name2 = server['name']
+        self.assertEqual(name1, name2)
 
     @attr(type='positive')
     def test_create_specify_keypair(self):
         # Specify a keypair while creating a server
 
-        try:
-            server = None
-            key_name = rand_name('key')
-            resp, keypair = self.keypairs_client.create_keypair(key_name)
-            resp, body = self.keypairs_client.list_keypairs()
-            resp, server = self.create_server(key_name=key_name)
-            self.assertEqual('202', resp['status'])
-            self.client.wait_for_server_status(server['id'], 'ACTIVE')
-            resp, server = self.client.get_server(server['id'])
-            self.assertEqual(key_name, server['key_name'])
-        finally:
-            if server:
-                self.client.delete_server(server['id'])
+        key_name = rand_name('key')
+        resp, keypair = self.keypairs_client.create_keypair(key_name)
+        resp, body = self.keypairs_client.list_keypairs()
+        resp, server = self.create_server(key_name=key_name)
+        self.assertEqual('202', resp['status'])
+        self.client.wait_for_server_status(server['id'], 'ACTIVE')
+        resp, server = self.client.get_server(server['id'])
+        self.assertEqual(key_name, server['key_name'])
 
     @attr(type='positive')
     def test_update_server_name(self):
         # The server name should be changed to the the provided value
-        try:
-            server = None
-            resp, server = self.create_server(wait_until='ACTIVE')
+        resp, server = self.create_server(wait_until='ACTIVE')
 
-            #Update the server with a new name
-            resp, server = self.client.update_server(server['id'],
-                                                     name='newname')
-            self.assertEquals(200, resp.status)
-            self.client.wait_for_server_status(server['id'], 'ACTIVE')
+        # Update the server with a new name
+        resp, server = self.client.update_server(server['id'],
+                                                 name='newname')
+        self.assertEquals(200, resp.status)
+        self.client.wait_for_server_status(server['id'], 'ACTIVE')
 
-            #Verify the name of the server has changed
-            resp, server = self.client.get_server(server['id'])
-            self.assertEqual('newname', server['name'])
-
-        #Teardown
-        finally:
-            if server:
-                self.client.delete_server(server['id'])
+        # Verify the name of the server has changed
+        resp, server = self.client.get_server(server['id'])
+        self.assertEqual('newname', server['name'])
 
     @attr(type='positive')
     def test_update_access_server_address(self):
         # The server's access addresses should reflect the provided values
-        try:
-            server = None
-            resp, server = self.create_server(wait_until='ACTIVE')
+        resp, server = self.create_server(wait_until='ACTIVE')
 
-            #Update the IPv4 and IPv6 access addresses
-            resp, body = self.client.update_server(server['id'],
-                                                   accessIPv4='1.1.1.1',
-                                                   accessIPv6='::babe:202:202')
-            self.assertEqual(200, resp.status)
-            self.client.wait_for_server_status(server['id'], 'ACTIVE')
+        # Update the IPv4 and IPv6 access addresses
+        resp, body = self.client.update_server(server['id'],
+                                               accessIPv4='1.1.1.1',
+                                               accessIPv6='::babe:202:202')
+        self.assertEqual(200, resp.status)
+        self.client.wait_for_server_status(server['id'], 'ACTIVE')
 
-            #Verify the access addresses have been updated
-            resp, server = self.client.get_server(server['id'])
-            self.assertEqual('1.1.1.1', server['accessIPv4'])
-            self.assertEqual('::babe:202:202', server['accessIPv6'])
-
-        #Teardown
-        finally:
-            if server:
-                self.client.delete_server(server['id'])
+        # Verify the access addresses have been updated
+        resp, server = self.client.get_server(server['id'])
+        self.assertEqual('1.1.1.1', server['accessIPv4'])
+        self.assertEqual('::babe:202:202', server['accessIPv6'])
 
     def test_delete_server_while_in_building_state(self):
         # Delete a server while it's VM state is Building
diff --git a/tempest/tests/compute/servers/test_servers_negative.py b/tempest/tests/compute/servers/test_servers_negative.py
index 366b630..9013b36 100644
--- a/tempest/tests/compute/servers/test_servers_negative.py
+++ b/tempest/tests/compute/servers/test_servers_negative.py
@@ -29,7 +29,6 @@
 
     @classmethod
     def setUpClass(cls):
-        raise cls.skipException("Until Bug 1046870 is fixed")
         super(ServersNegativeTest, cls).setUpClass()
         cls.client = cls.servers_client
         cls.img_client = cls.images_client
@@ -115,6 +114,8 @@
     @attr(type='negative')
     def test_create_numeric_server_name(self):
         # Create a server with a numeric name
+        if self.__class__._interface == "xml":
+            raise self.skipException("Not testable in XML")
 
         server_name = 12345
         self.assertRaises(exceptions.BadRequest,
@@ -182,7 +183,7 @@
     def test_update_server_of_another_tenant(self):
         # Update name of a server that belongs to another tenant
 
-        server = self.create_server()
+        resp, server = self.create_server(wait_until='ACTIVE')
         new_name = server['id'] + '_new'
         self.assertRaises(exceptions.NotFound,
                           self.alt_client.update_server, server['id'],
@@ -192,7 +193,7 @@
     def test_update_server_name_length_exceeds_256(self):
         # Update name of server exceed the name length limit
 
-        server = self.create_server()
+        resp, server = self.create_server(wait_until='ACTIVE')
         new_name = 'a' * 256
         self.assertRaises(exceptions.BadRequest,
                           self.client.update_server,
@@ -210,7 +211,7 @@
     def test_delete_a_server_of_another_tenant(self):
         # Delete a server that belongs to another tenant
         try:
-            server = self.create_server()
+            resp, server = self.create_server(wait_until='ACTIVE')
             self.assertRaises(exceptions.NotFound,
                               self.alt_client.delete_server,
                               server['id'])
@@ -245,3 +246,7 @@
 
         self.assertRaises(exceptions.NotFound, self.client.get_server,
                           '999erra43')
+
+
+class ServersNegativeTestXML(ServersNegativeTest):
+    _interface = 'xml'
diff --git a/tempest/tests/compute/servers/test_virtual_interfaces.py b/tempest/tests/compute/servers/test_virtual_interfaces.py
index 4c48366..476a556 100644
--- a/tempest/tests/compute/servers/test_virtual_interfaces.py
+++ b/tempest/tests/compute/servers/test_virtual_interfaces.py
@@ -30,20 +30,10 @@
     @classmethod
     def setUpClass(cls):
         super(VirtualInterfacesTestJSON, cls).setUpClass()
-        cls.name = rand_name('server')
         cls.client = cls.servers_client
-        resp, server = cls.servers_client.create_server(cls.name,
-                                                        cls.image_ref,
-                                                        cls.flavor_ref)
+        resp, server = cls.create_server(wait_until='ACTIVE')
         cls.server_id = server['id']
 
-        cls.servers_client.wait_for_server_status(cls.server_id, 'ACTIVE')
-
-    @classmethod
-    def tearDownClass(cls):
-        cls.servers_client.delete_server(cls.server_id)
-        super(VirtualInterfacesTestJSON, cls).tearDownClass()
-
     @attr(type='positive')
     def test_list_virtual_interfaces(self):
         # Positive test:Should be able to GET the virtual interfaces list
diff --git a/tempest/tests/compute/test_authorization.py b/tempest/tests/compute/test_authorization.py
index 52e457d..4ca197a 100644
--- a/tempest/tests/compute/test_authorization.py
+++ b/tempest/tests/compute/test_authorization.py
@@ -57,10 +57,7 @@
         cls.alt_security_client = cls.alt_manager.security_groups_client
 
         cls.alt_security_client._set_auth()
-        name = rand_name('server')
-        resp, server = cls.client.create_server(name, cls.image_ref,
-                                                cls.flavor_ref)
-        cls.client.wait_for_server_status(server['id'], 'ACTIVE')
+        resp, server = cls.create_server(wait_until='ACTIVE')
         resp, cls.server = cls.client.get_server(server['id'])
 
         name = rand_name('image')
@@ -92,7 +89,6 @@
     @classmethod
     def tearDownClass(cls):
         if compute.MULTI_USER:
-            cls.client.delete_server(cls.server['id'])
             cls.images_client.delete_image(cls.image['id'])
             cls.keypairs_client.delete_keypair(cls.keypairname)
             cls.security_client.delete_security_group(cls.security_group['id'])
@@ -207,7 +203,6 @@
                           self.alt_keypairs_client.get_keypair,
                           self.keypairname)
 
-    @testtools.skip("Skipped until the Bug #1086980 is resolved")
     def test_delete_keypair_of_alt_account_fails(self):
         # A DELETE request for another user's keypair should fail
         self.assertRaises(exceptions.NotFound,
@@ -316,60 +311,43 @@
         # A get metadata for another user's server should fail
         req_metadata = {'meta1': 'data1'}
         self.client.set_server_metadata(self.server['id'], req_metadata)
-        try:
-            resp, meta = \
-            self.alt_client.get_server_metadata_item(self.server['id'],
-                                                     'meta1')
-        except exceptions.NotFound:
-            pass
-        finally:
-            resp, body = \
-            self.client.delete_server_metadata_item(self.server['id'], 'meta1')
+        self.addCleanup(self.client.delete_server_metadata_item,
+                        self.server['id'], 'meta1')
+        self.assertRaises(exceptions.NotFound,
+                          self.alt_client.get_server_metadata_item,
+                          self.server['id'], 'meta1')
 
     def test_get_metadata_of_alt_account_image_fails(self):
         # A get metadata for another user's image should fail
         req_metadata = {'meta1': 'value1'}
+        self.addCleanup(self.images_client.delete_image_metadata_item,
+                        self.image['id'], 'meta1')
         self.images_client.set_image_metadata(self.image['id'],
                                               req_metadata)
-        try:
-            resp, meta = \
-            self.alt_images_client.get_image_metadata_item(self.image['id'],
-                                                           'meta1')
-        except exceptions.NotFound:
-            pass
-        finally:
-            resp, body = self.images_client.delete_image_metadata_item(
-                                self.image['id'], 'meta1')
+        self.assertRaises(exceptions.NotFound,
+                          self.alt_images_client.get_image_metadata_item,
+                          self.image['id'], 'meta1')
 
     def test_delete_metadata_of_alt_account_server_fails(self):
         # A delete metadata for another user's server should fail
         req_metadata = {'meta1': 'data1'}
+        self.addCleanup(self.client.delete_server_metadata_item,
+                        self.server['id'], 'meta1')
         self.client.set_server_metadata(self.server['id'], req_metadata)
-        try:
-            resp, body = \
-            self.alt_client.delete_server_metadata_item(self.server['id'],
-                                                        'meta1')
-        except exceptions.NotFound:
-            pass
-        finally:
-            resp, body = \
-            self.client.delete_server_metadata_item(self.server['id'], 'meta1')
+        self.assertRaises(exceptions.NotFound,
+                          self.alt_client.delete_server_metadata_item,
+                          self.server['id'], 'meta1')
 
     def test_delete_metadata_of_alt_account_image_fails(self):
         # A delete metadata for another user's image should fail
         req_metadata = {'meta1': 'data1'}
+        self.addCleanup(self.images_client.delete_image_metadata_item,
+                        self.image['id'], 'meta1')
         self.images_client.set_image_metadata(self.image['id'],
                                               req_metadata)
-        try:
-            resp, body = \
-            self.alt_images_client.delete_image_metadata_item(self.image['id'],
-                                                              'meta1')
-        except exceptions.NotFound:
-            pass
-        finally:
-            resp, body = \
-            self.images_client.delete_image_metadata_item(self.image['id'],
-                                                          'meta1')
+        self.assertRaises(exceptions.NotFound,
+                          self.alt_images_client.delete_image_metadata_item,
+                          self.image['id'], 'meta1')
 
     def test_get_console_output_of_alt_account_server_fails(self):
         # A Get Console Output for another user's server should fail
diff --git a/tempest/tests/compute/test_live_block_migration.py b/tempest/tests/compute/test_live_block_migration.py
index f2ec753..abaaf85 100644
--- a/tempest/tests/compute/test_live_block_migration.py
+++ b/tempest/tests/compute/test_live_block_migration.py
@@ -108,7 +108,6 @@
         self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
         self.assertEquals(target_host, self._get_host_for_server(server_id))
 
-    @testtools.skip('Until bug 1051881 is dealt with.')
     @testtools.skipIf(not live_migration_available,
                       'Block Live migration not available')
     def test_invalid_host_for_migration(self):
diff --git a/tempest/tests/compute/test_quotas.py b/tempest/tests/compute/test_quotas.py
index dbff275..233d639 100644
--- a/tempest/tests/compute/test_quotas.py
+++ b/tempest/tests/compute/test_quotas.py
@@ -33,11 +33,14 @@
 
     @attr(type='smoke')
     def test_get_default_quotas(self):
+        # Skip this test until the referenced bug is fixed
+        self.skipTest('Skipped until Bug 1125468 is resolved')
+
         # User can get the default quota set for it's tenant
         expected_quota_set = {'injected_file_content_bytes': 10240,
                               'metadata_items': 128, 'injected_files': 5,
                               'ram': 51200, 'floating_ips': 10,
-                              'key_pairs': 100,
+                              'fixed_ips': 10, 'key_pairs': 100,
                               'injected_file_path_bytes': 255, 'instances': 10,
                               'security_group_rules': 20, 'cores': 20,
                               'id': self.tenant_id, 'security_groups': 10}
diff --git a/tempest/tests/compute/volumes/test_attach_volume.py b/tempest/tests/compute/volumes/test_attach_volume.py
index 2679312..d9abe41 100644
--- a/tempest/tests/compute/volumes/test_attach_volume.py
+++ b/tempest/tests/compute/volumes/test_attach_volume.py
@@ -28,6 +28,12 @@
     _interface = 'json'
     run_ssh = tempest.config.TempestConfig().compute.run_ssh
 
+    def __init__(self, *args, **kwargs):
+        super(AttachVolumeTestJSON, self).__init__(*args, **kwargs)
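+        # Track the resources created by a test so the cleanup logic in
+        # test_attach_detach_volume can tell what still needs to be removed.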
+        self.server = None
+        self.volume = None
+        self.attached = False
+
     @classmethod
     def setUpClass(cls):
         super(AttachVolumeTestJSON, cls).setUpClass()
@@ -37,19 +43,15 @@
         self.servers_client.detach_volume(server_id, volume_id)
         self.volumes_client.wait_for_volume_status(volume_id, 'available')
 
-    def _delete(self, server_id, volume_id):
-        self.volumes_client.delete_volume(volume_id)
-        self.servers_client.delete_server(server_id)
+    def _delete(self, volume):
+        if self.volume:
+            self.volumes_client.delete_volume(self.volume['id'])
+            self.volume = None
 
     def _create_and_attach(self):
-        name = rand_name('server')
-
         # Start a server and wait for it to become ready
-        resp, server = self.servers_client.create_server(name,
-                                                         self.image_ref,
-                                                         self.flavor_ref,
-                                                         adminPass='password')
-        self.servers_client.wait_for_server_status(server['id'], 'ACTIVE')
+        resp, server = self.create_server(wait_until='ACTIVE',
+                                          adminPass='password')
+        self.server = server
 
         # Record addresses so that we can ssh later
         resp, server['addresses'] = \
@@ -58,6 +60,7 @@
         # Create a volume and wait for it to become ready
         resp, volume = self.volumes_client.create_volume(1,
                                                          display_name='test')
+        self.volume = volume
         self.volumes_client.wait_for_volume_status(volume['id'], 'available')
 
         # Attach the volume to the server
@@ -65,18 +68,18 @@
                                           device='/dev/%s' % self.device)
         self.volumes_client.wait_for_volume_status(volume['id'], 'in-use')
 
-        return server, volume
+        self.attached = True
 
     @attr(type='positive')
     @testtools.skipIf(not run_ssh, 'SSH required for this test')
     def test_attach_detach_volume(self):
         # Stop and Start a server with an attached volume, ensuring that
         # the volume remains attached.
-        server, volume = self._create_and_attach()
-
-        attached = True
-
         try:
+            self._create_and_attach()
+            server = self.server
+            volume = self.volume
+
             self.servers_client.stop(server['id'])
             self.servers_client.wait_for_server_status(server['id'], 'SHUTOFF')
 
@@ -101,10 +104,14 @@
                                         self.ssh_user, server['adminPass'])
             partitions = linux_client.get_partitions()
             self.assertFalse(self.device in partitions)
+        except Exception as e:
+            self.fail("test_attach_detach_volume failed: %s" % e)
         finally:
-            if attached:
+            if self.attached:
                 self._detach(server['id'], volume['id'])
-            self._delete(server['id'], volume['id'])
+            # NOTE(maurosr): here we do the cleanup for the volume; servers
+            # are dealt with in BaseComputeTest.tearDownClass
+            self._delete(self.volume)
 
 
 class AttachVolumeTestXML(AttachVolumeTestJSON):
diff --git a/tempest/tests/identity/admin/test_services.py b/tempest/tests/identity/admin/test_services.py
index caf57bd..35b2463 100644
--- a/tempest/tests/identity/admin/test_services.py
+++ b/tempest/tests/identity/admin/test_services.py
@@ -77,20 +77,17 @@
             services.append(service)
         service_ids = map(lambda x: x['id'], services)
 
+        def delete_services():
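+            # Remove every service registered by this test; scheduled via
+            # addCleanup below so it runs however the test exits.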
+            for service_id in service_ids:
+                self.client.delete_service(service_id)
+
+        self.addCleanup(delete_services)
         # List and Verify Services
         resp, body = self.client.list_services()
         self.assertTrue(resp['status'].startswith('2'))
         found = [service for service in body if service['id'] in service_ids]
         self.assertEqual(len(found), len(services), 'Services not found')
 
-        # Delete Services
-        for service in services:
-            resp, body = self.client.delete_service(service['id'])
-            self.assertTrue(resp['status'].startswith('2'))
-        resp, body = self.client.list_services()
-        found = [service for service in body if service['id'] in service_ids]
-        self.assertFalse(any(found), 'Services failed to delete')
-
 
 class ServicesTestXML(ServicesTestJSON):
     _interface = 'xml'
diff --git a/tempest/tests/image/base.py b/tempest/tests/image/base.py
new file mode 100644
index 0000000..65d81b6
--- /dev/null
+++ b/tempest/tests/image/base.py
@@ -0,0 +1,94 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 IBM Corp.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import logging
+import time
+
+from tempest import clients
+from tempest.common.utils.data_utils import rand_name
+from tempest import exceptions
+import tempest.test
+
+LOG = logging.getLogger(__name__)
+
+
+class BaseImageTest(tempest.test.BaseTestCase):
+    """Base test class for Image API tests."""
+
+    @classmethod
+    def setUpClass(cls):
+        cls.os = clients.Manager()
+        cls.created_images = []
+
+    @classmethod
+    def tearDownClass(cls):
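+        # Remove every image registered by the tests, ignoring any that are
+        # already gone, then wait for the deletions to complete.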
+        for image_id in cls.created_images:
+            try:
+                cls.client.delete_image(image_id)
+            except exceptions.NotFound:
+                pass
+
+        for image_id in cls.created_images:
+            cls.client.wait_for_resource_deletion(image_id)
+
+    @classmethod
+    def create_image(cls, **kwargs):
+        """Wrapper that returns a test image."""
+        name = rand_name(cls.__name__ + "-instance")
+
+        if 'name' in kwargs:
+            name = kwargs.pop('name')
+
+        container_format = kwargs.pop('container_format')
+        disk_format = kwargs.pop('disk_format')
+
+        resp, image = cls.client.create_image(name, container_format,
+                                              disk_format, **kwargs)
+        cls.created_images.append(image['id'])
+        return resp, image
+
+    @classmethod
+    def _check_version(cls, version):
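+        """Return True if the requested image API version is available."""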
+        __, versions = cls.client.get_versions()
+        if version == 'v2.0':
+            if 'v2.0' in versions:
+                return True
+        elif version == 'v1.0':
+            if 'v1.1' in versions or 'v1.0' in versions:
+                return True
+        return False
+
+
+class BaseV1ImageTest(BaseImageTest):
+
+    @classmethod
+    def setUpClass(cls):
+        super(BaseV1ImageTest, cls).setUpClass()
+        cls.client = cls.os.image_client
+        if not cls._check_version('v1.0'):
+            msg = "Glance API v1 not supported"
+            raise cls.skipException(msg)
+
+
+class BaseV2ImageTest(BaseImageTest):
+
+    @classmethod
+    def setUpClass(cls):
+        super(BaseV2ImageTest, cls).setUpClass()
+        cls.client = cls.os.image_client_v2
+        if not cls._check_version('v2.0'):
+            msg = "Glance API v2 not supported"
+            raise cls.skipException(msg)
diff --git a/tempest/services/image/json/__init__.py b/tempest/tests/image/v1/__init__.py
similarity index 100%
copy from tempest/services/image/json/__init__.py
copy to tempest/tests/image/v1/__init__.py
diff --git a/tempest/tests/image/v1/test_image_members.py b/tempest/tests/image/v1/test_image_members.py
new file mode 100644
index 0000000..92052fc
--- /dev/null
+++ b/tempest/tests/image/v1/test_image_members.py
@@ -0,0 +1,81 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 IBM Corp.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import cStringIO as StringIO
+
+from tempest import clients
+from tempest.tests.image import base
+
+
+class ImageMembersTests(base.BaseV1ImageTest):
+
+    @classmethod
+    def setUpClass(cls):
+        super(ImageMembersTests, cls).setUpClass()
+        admin = clients.AdminManager(interface='json')
+        cls.admin_client = admin.identity_client
+        cls.tenants = cls._get_tenants()
+
+    @classmethod
+    def _get_tenants(cls):
+        resp, tenants = cls.admin_client.list_tenants()
+        tenants = map(lambda x: x['id'], tenants)
+        return tenants
+
+    def _create_image(self):
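+        # Register a small public raw image backed by in-memory data and
+        # return its id for the membership tests.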
+        image_file = StringIO.StringIO('*' * 1024)
+        resp, image = self.create_image(container_format='bare',
+                                        disk_format='raw',
+                                        is_public=True,
+                                        data=image_file)
+        self.assertEquals(201, resp.status)
+        image_id = image['id']
+        return image_id
+
+    def test_add_image_member(self):
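+        # Share an image with a tenant and verify the tenant appears in the
+        # image's membership list.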
+        image = self._create_image()
+        resp = self.client.add_member(self.tenants[0], image)
+        self.assertEquals(204, resp.status)
+        resp, body = self.client.get_image_membership(image)
+        self.assertEquals(200, resp.status)
+        members = body['members']
+        members = map(lambda x: x['member_id'], members)
+        self.assertIn(self.tenants[0], members)
+
+    def test_get_shared_images(self):
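+        # Share two images with the same tenant and verify both show up in
+        # that tenant's shared image list.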
+        image = self._create_image()
+        resp = self.client.add_member(self.tenants[0], image)
+        self.assertEquals(204, resp.status)
+        share_image = self._create_image()
+        resp = self.client.add_member(self.tenants[0], share_image)
+        self.assertEquals(204, resp.status)
+        resp, body = self.client.get_shared_images(self.tenants[0])
+        self.assertEquals(200, resp.status)
+        images = body['shared_images']
+        images = map(lambda x: x['image_id'], images)
+        self.assertIn(share_image, images)
+        self.assertIn(image, images)
+
+    def test_remove_member(self):
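+        # Add a member to an image, remove it again and verify the
+        # membership list ends up empty.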
+        image_id = self._create_image()
+        resp = self.client.add_member(self.tenants[0], image_id)
+        self.assertEquals(204, resp.status)
+        resp = self.client.delete_member(self.tenants[0], image_id)
+        self.assertEquals(204, resp.status)
+        resp, body = self.client.get_image_membership(image_id)
+        self.assertEquals(200, resp.status)
+        members = body['members']
+        self.assertEquals(0, len(members))
diff --git a/tempest/tests/image/test_images.py b/tempest/tests/image/v1/test_images.py
similarity index 78%
rename from tempest/tests/image/test_images.py
rename to tempest/tests/image/v1/test_images.py
index 84bb650..af09b79 100644
--- a/tempest/tests/image/test_images.py
+++ b/tempest/tests/image/v1/test_images.py
@@ -19,26 +19,12 @@
 
 from tempest import clients
 from tempest import exceptions
-import tempest.test
 from tempest.test import attr
+from tempest.tests.image import base
 
 
-class CreateRegisterImagesTest(tempest.test.BaseTestCase):
-
-    """
-    Here we test the registration and creation of images
-    """
-
-    @classmethod
-    def setUpClass(cls):
-        cls.os = clients.Manager()
-        cls.client = cls.os.image_client
-        cls.created_images = []
-
-    @classmethod
-    def tearDownClass(cls):
-        for image_id in cls.created_images:
-            cls.client.delete(image_id)
+class CreateRegisterImagesTest(base.BaseV1ImageTest):
+    """Here we test the registration and creation of images."""
 
     @attr(type='negative')
     def test_register_with_invalid_container_format(self):
@@ -55,19 +41,17 @@
     def test_register_then_upload(self):
         # Register, then upload an image
         properties = {'prop1': 'val1'}
-        resp, body = self.client.create_image('New Name', 'bare', 'raw',
-                                              is_public=True,
-                                              properties=properties)
+        resp, body = self.create_image(name='New Name',
+                                       container_format='bare',
+                                       disk_format='raw',
+                                       is_public=True,
+                                       properties=properties)
         self.assertTrue('id' in body)
         image_id = body.get('id')
         self.created_images.append(image_id)
-        self.assertTrue('name' in body)
         self.assertEqual('New Name', body.get('name'))
-        self.assertTrue('is_public' in body)
         self.assertTrue(body.get('is_public'))
-        self.assertTrue('status' in body)
         self.assertEqual('queued', body.get('status'))
-        self.assertTrue('properties' in body)
         for key, val in properties.items():
             self.assertEqual(val, body.get('properties')[key])
 
@@ -80,22 +64,20 @@
     @attr(type='image')
     def test_register_remote_image(self):
         # Register a new remote image
-        resp, body = self.client.create_image('New Remote Image', 'bare',
-                                              'raw', is_public=True,
-                                              location='http://example.com'
-                                                       '/someimage.iso')
+        resp, body = self.create_image(name='New Remote Image',
+                                       container_format='bare',
+                                       disk_format='raw', is_public=True,
+                                       location='http://example.com'
+                                                '/someimage.iso')
         self.assertTrue('id' in body)
         image_id = body.get('id')
         self.created_images.append(image_id)
-        self.assertTrue('name' in body)
         self.assertEqual('New Remote Image', body.get('name'))
-        self.assertTrue('is_public' in body)
         self.assertTrue(body.get('is_public'))
-        self.assertTrue('status' in body)
         self.assertEqual('active', body.get('status'))
 
 
-class ListImagesTest(tempest.test.BaseTestCase):
+class ListImagesTest(base.BaseV1ImageTest):
 
     """
     Here we test the listing of image information
@@ -103,9 +85,7 @@
 
     @classmethod
     def setUpClass(cls):
-        cls.os = clients.Manager()
-        cls.client = cls.os.image_client
-        cls.created_images = []
+        super(ListImagesTest, cls).setUpClass()
 
         # We add a few images here to test the listing functionality of
         # the images API
@@ -132,12 +112,6 @@
         cls.dup_set = set((img3, img4))
 
     @classmethod
-    def tearDownClass(cls):
-        for image_id in cls.created_images:
-            cls.client.delete_image(image_id)
-            cls.client.wait_for_resource_deletion(image_id)
-
-    @classmethod
     def _create_remote_image(cls, name, container_format, disk_format):
         """
         Create a new remote image and return the ID of the newly-registered
@@ -145,12 +119,12 @@
         """
         name = 'New Remote Image %s' % name
         location = 'http://example.com/someimage_%s.iso' % name
-        resp, image = cls.client.create_image(name,
-                                              container_format, disk_format,
-                                              is_public=True,
-                                              location=location)
+        resp, image = cls.create_image(name=name,
+                                       container_format=container_format,
+                                       disk_format=disk_format,
+                                       is_public=True,
+                                       location=location)
         image_id = image['id']
-        cls.created_images.append(image_id)
         return image_id
 
     @classmethod
@@ -163,11 +137,11 @@
         """
         image_file = StringIO.StringIO('*' * size)
         name = 'New Standard Image %s' % name
-        resp, image = cls.client.create_image(name,
-                                              container_format, disk_format,
-                                              is_public=True, data=image_file)
+        resp, image = cls.create_image(name=name,
+                                       container_format=container_format,
+                                       disk_format=disk_format,
+                                       is_public=True, data=image_file)
         image_id = image['id']
-        cls.created_images.append(image_id)
         return image_id
 
     @attr(type='image')
diff --git a/tempest/services/image/json/__init__.py b/tempest/tests/image/v2/__init__.py
similarity index 100%
copy from tempest/services/image/json/__init__.py
copy to tempest/tests/image/v2/__init__.py
diff --git a/tempest/tests/image/v2/test_images.py b/tempest/tests/image/v2/test_images.py
new file mode 100644
index 0000000..19a7a95
--- /dev/null
+++ b/tempest/tests/image/v2/test_images.py
@@ -0,0 +1,109 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack, LLC
+# All Rights Reserved.
+# Copyright 2013 IBM Corp
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import cStringIO as StringIO
+import random
+
+from tempest import clients
+from tempest import exceptions
+from tempest.test import attr
+from tempest.tests.image import base
+
+
+class CreateRegisterImagesTest(base.BaseV2ImageTest):
+
+    """
+    Here we test the registration and creation of images
+    """
+
+    @attr(type='negative')
+    def test_register_with_invalid_container_format(self):
+        # Negative tests for invalid data supplied to POST /images
+        self.assertRaises(exceptions.BadRequest, self.client.create_image,
+                          'test', 'wrong', 'vhd')
+
+    @attr(type='negative')
+    def test_register_with_invalid_disk_format(self):
+        self.assertRaises(exceptions.BadRequest, self.client.create_image,
+                          'test', 'bare', 'wrong')
+
+    @attr(type='image')
+    def test_register_then_upload(self):
+        # Register, then upload an image
+        resp, body = self.create_image(name='New Name',
+                                       container_format='bare',
+                                       disk_format='raw',
+                                       visibility='public')
+        self.assertTrue('id' in body)
+        image_id = body.get('id')
+        self.created_images.append(image_id)
+        self.assertTrue('name' in body)
+        self.assertEqual('New Name', body.get('name'))
+        self.assertTrue('visibility' in body)
+        self.assertEqual('public', body.get('visibility'))
+        self.assertTrue('status' in body)
+        self.assertEqual('queued', body.get('status'))
+
+        # Now try uploading an image file
+        image_file = StringIO.StringIO(('*' * 1024))
+        resp, body = self.client.store_image(image_id, image_file)
+        self.assertEqual(resp.status, 204)
+        resp, body = self.client.get_image_metadata(image_id)
+        self.assertTrue('size' in body)
+        self.assertEqual(1024, body.get('size'))
+
+
+class ListImagesTest(base.BaseV2ImageTest):
+
+    """
+    Here we test the listing of image information
+    """
+
+    @classmethod
+    def setUpClass(cls):
+        super(ListImagesTest, cls).setUpClass()
+        # We add a few images here to test the listing functionality of
+        # the images API
+        for x in xrange(0, 10):
+            cls.created_images.append(cls._create_standard_image(x))
+
+    @classmethod
+    def _create_standard_image(cls, number):
+        """
+        Create a new standard image and return the ID of the newly-registered
+        image. Note that the size of the new image is a random number between
+        1024 and 4096
+        """
+        image_file = StringIO.StringIO('*' * random.randint(1024, 4096))
+        name = 'New Standard Image %s' % number
+        resp, body = cls.create_image(name=name, container_format='bare',
+                                      disk_format='raw',
+                                      visibility='public')
+        image_id = body['id']
+        resp, body = cls.client.store_image(image_id, data=image_file)
+
+        return image_id
+
+    @attr(type='image')
+    def test_index_no_params(self):
+        # Simple test to see all fixture images returned
+        resp, images_list = self.client.image_list()
+        self.assertEqual(resp['status'], '200')
+        image_list = map(lambda x: x['id'], images_list)
+        for image in self.created_images:
+            self.assertTrue(image in image_list)
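The v2 tests above express image visibility through the Image API v2 `visibility` field instead of the v1 boolean `is_public` flag. A minimal side-by-side sketch of the two create_image calls, with the helper keyword signatures taken from the hunks above (condensed for illustration, not part of the patch):

    # Image API v1: public access is a boolean flag.
    resp, body = self.create_image(name='New Name',
                                   container_format='bare',
                                   disk_format='raw',
                                   is_public=True)

    # Image API v2: visibility is a string (e.g. 'public' or 'private').
    resp, body = self.create_image(name='New Name',
                                   container_format='bare',
                                   disk_format='raw',
                                   visibility='public')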
diff --git a/tempest/tests/network/base.py b/tempest/tests/network/base.py
index 4cc8b29..1b09513 100644
--- a/tempest/tests/network/base.py
+++ b/tempest/tests/network/base.py
@@ -18,7 +18,6 @@
 
 from tempest import clients
 from tempest.common.utils.data_utils import rand_name
-from tempest import exceptions
 import tempest.test
 
 
@@ -27,15 +26,9 @@
     @classmethod
     def setUpClass(cls):
         os = clients.Manager()
-        client = os.network_client
 
-        # Validate that there is even an endpoint configured
-        # for networks, and mark the attr for skipping if not
-        try:
-            client.list_networks()
-        except exceptions.EndpointNotFound:
-            skip_msg = "No OpenStack Network API endpoint"
-            raise cls.skipException(skip_msg)
+        if not os.config.network.quantum_available:
+            raise cls.skipException("Quantum support is required")
 
     @classmethod
     def tearDownClass(cls):
diff --git a/tempest/tests/object_storage/test_object_services.py b/tempest/tests/object_storage/test_object_services.py
index e0a2fbb..76fab0b 100644
--- a/tempest/tests/object_storage/test_object_services.py
+++ b/tempest/tests/object_storage/test_object_services.py
@@ -326,7 +326,6 @@
                 self.assertIn('x-container-read', resp)
                 self.assertEqual(resp['x-container-read'], 'x')
 
-    @testtools.skip('Until Bug 1091669  is resolved.')
     @attr(type='smoke')
     def test_access_public_object_with_another_user_creds(self):
         #Make container public-readable, and access the object
diff --git a/tempest/tests/volume/admin/test_volume_types_extra_specs_negative.py b/tempest/tests/volume/admin/test_volume_types_extra_specs_negative.py
index e201853..f528cec 100644
--- a/tempest/tests/volume/admin/test_volume_types_extra_specs_negative.py
+++ b/tempest/tests/volume/admin/test_volume_types_extra_specs_negative.py
@@ -40,7 +40,6 @@
         super(ExtraSpecsNegativeTest, cls).tearDownClass()
         cls.client.delete_volume_type(cls.volume_type['id'])
 
-    @testtools.skip('Until bug 1090320 is fixed')
     def test_update_no_body(self):
         # Should not update volume type extra specs with no body
         extra_spec = {"spec1": "val2"}
@@ -80,14 +79,12 @@
                           self.client.create_volume_type_extra_specs,
                           str(uuid.uuid4()), extra_specs)
 
-    @testtools.skip('Until bug 1090322 is fixed')
     def test_create_none_body(self):
         # Should not create volume type extra spec for none POST body.
         self.assertRaises(exceptions.BadRequest,
                           self.client.create_volume_type_extra_specs,
                           self.volume_type['id'], None)
 
-    @testtools.skip('Until bug 1090322 is fixed')
     def test_create_invalid_body(self):
         # Should not create volume type extra spec for invalid POST body.
         self.assertRaises(exceptions.BadRequest,
diff --git a/tempest/tests/volume/admin/test_volume_types_negative.py b/tempest/tests/volume/admin/test_volume_types_negative.py
index c706f3d..1b11d68 100644
--- a/tempest/tests/volume/admin/test_volume_types_negative.py
+++ b/tempest/tests/volume/admin/test_volume_types_negative.py
@@ -32,7 +32,6 @@
                           display_name=str(uuid.uuid4()),
                           volume_type=str(uuid.uuid4()))
 
-    @testtools.skip('Until bug 1090356 is fixed')
     def test_create_with_empty_name(self):
         # Should not be able to create volume type with an empty name.
         self.assertRaises(exceptions.BadRequest,
diff --git a/tools/find_stack_traces.py b/tools/find_stack_traces.py
new file mode 100755
index 0000000..e6c1990
--- /dev/null
+++ b/tools/find_stack_traces.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 IBM Corp.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import gzip
+import re
+import StringIO
+import sys
+import urllib2
+
+
+def hunt_for_stacktrace(url):
+    """Return TRACE or ERROR lines out of logs."""
+    page = urllib2.urlopen(url)
+    buf = StringIO.StringIO(page.read())
+    f = gzip.GzipFile(fileobj=buf)
+    content = f.read()
+    traces = re.findall('^(.*? (TRACE|ERROR) .*?)$', content, re.MULTILINE)
+    tracelist = map(lambda x: x[0], traces)
+    # filter out log definitions as false positives
+    return filter(lambda x: not re.search('logging_exception_prefix', x),
+                  tracelist)
+
+
+def log_url(url, log):
+    return "%s/%s" % (url, log)
+
+
+def collect_logs(url):
+    page = urllib2.urlopen(url)
+    content = page.read()
+    logs = re.findall('(screen-[\w-]+\.txt\.gz)</a>', content)
+    return logs
+
+
+def usage():
+    print """
+Usage: find_stack_traces.py <logurl>
+
+Hunts for stack traces in a devstack run. You must provide the base log url
+of a tempest devstack run; it should start with http and end with /logs/.
+
+Returns a report listing stack traces out of the various files where
+they are found.
+"""
+    sys.exit(0)
+
+
+def main():
+    if len(sys.argv) == 2:
+        url = sys.argv[1]
+        loglist = collect_logs(url)
+
+        # probably wrong base url
+        if not loglist:
+            usage()
+
+        for log in loglist:
+            logurl = log_url(url, log)
+            traces = hunt_for_stacktrace(logurl)
+            if traces:
+                print "\n\nTRACES found in %s\n" % log
+                for line in traces:
+                    print line
+    else:
+        usage()
+
+if __name__ == '__main__':
+    main()
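For illustration, the extraction done by hunt_for_stacktrace() can be run on a small made-up log snippet; the logging_exception_prefix line shows why the false-positive filter is needed (snippet and values below are invented, only the regexes come from the script above):

    import re

    content = ("2013-02-01 12:00:00 INFO nova.compute all good\n"
               "2013-02-01 12:00:01 ERROR nova.compute something broke\n"
               "2013-02-01 12:00:01 TRACE nova.compute Traceback (most recent call last):\n"
               "logging_exception_prefix = %(color)s TRACE %(name)s\n")

    traces = re.findall('^(.*? (TRACE|ERROR) .*?)$', content, re.MULTILINE)
    tracelist = map(lambda x: x[0], traces)
    # the config definition line is a false positive and gets dropped
    print filter(lambda x: not re.search('logging_exception_prefix', x),
                 tracelist)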
diff --git a/tools/install_venv_common.py b/tools/install_venv_common.py
index 8f91251..fd9076f 100644
--- a/tools/install_venv_common.py
+++ b/tools/install_venv_common.py
@@ -21,20 +21,12 @@
 Synced in from openstack-common
 """
 
+import argparse
 import os
 import subprocess
 import sys
 
 
-possible_topdir = os.getcwd()
-if os.path.exists(os.path.join(possible_topdir, "tempest",
-                               "__init__.py")):
-    sys.path.insert(0, possible_topdir)
-
-
-from tempest.openstack.common import cfg
-
-
 class InstallVenv(object):
 
     def __init__(self, root, venv, pip_requires, test_requires, py_version,
@@ -139,17 +131,12 @@
 
     def parse_args(self, argv):
         """Parses command-line arguments."""
-        cli_opts = [
-            cfg.BoolOpt('no-site-packages',
-                        default=False,
-                        short='n',
-                        help="Do not inherit packages from global Python"
-                             "install"),
-        ]
-        CLI = cfg.ConfigOpts()
-        CLI.register_cli_opts(cli_opts)
-        CLI(argv[1:])
-        return CLI
+        parser = argparse.ArgumentParser()
+        parser.add_argument('-n', '--no-site-packages',
+                            action='store_true',
+                            help="Do not inherit packages from global Python "
+                                 "install")
+        return parser.parse_args(argv[1:])
 
 
 class Distro(InstallVenv):
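With the oslo cfg dependency gone, parse_args() now returns a plain argparse Namespace, and argparse maps the dashed flag name to an underscore attribute. A small stand-alone sketch of the equivalent behaviour (not part of the patch; flag names copied from the hunk above):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('-n', '--no-site-packages',
                        action='store_true',
                        help="Do not inherit packages from global Python "
                             "install")

    options = parser.parse_args(['--no-site-packages'])
    print options.no_site_packages   # True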
diff --git a/tools/pip-requires b/tools/pip-requires
index 220f1a6..e85cced 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -13,3 +13,4 @@
 testresources
 keyring
 testrepository
+oslo.config>=1.1.0
diff --git a/tools/skip_tracker.py b/tools/skip_tracker.py
index e890e92..a4cf394 100755
--- a/tools/skip_tracker.py
+++ b/tools/skip_tracker.py
@@ -61,7 +61,7 @@
     """
     Return the skip tuples in a test file
     """
-    BUG_RE = re.compile(r'.*skip\(.*[bB]ug\s*(\d+)')
+    BUG_RE = re.compile(r'.*skip\(.*bug:*\s*\#*(\d+)', re.IGNORECASE)
     DEF_RE = re.compile(r'.*def (\w+)\(')
     bug_found = False
     results = []
@@ -89,6 +89,7 @@
     results = find_skips()
     unique_bugs = sorted(set([bug for (method, bug) in results]))
     unskips = []
+    duplicates = []
     info("Total bug skips found: %d", len(results))
     info("Total unique bugs causing skips: %d", len(unique_bugs))
     lp = launchpad.Launchpad.login_anonymously('grabbing bugs',
@@ -96,12 +97,26 @@
                                                LPCACHEDIR)
     for bug_no in unique_bugs:
         bug = lp.bugs[bug_no]
+        duplicate = bug.duplicate_of_link
+        if duplicate is not None:
+            dup_id = duplicate.split('/')[-1]
+            duplicates.append((bug_no, dup_id))
         for task in bug.bug_tasks:
             info("Bug #%7s (%12s - %12s)", bug_no,
                  task.importance, task.status)
             if task.status in ('Fix Released', 'Fix Committed'):
                 unskips.append(bug_no)
 
+    for bug_id, dup_id in duplicates:
+        if bug_id not in unskips:
+            dup_bug = lp.bugs[dup_id]
+            for task in dup_bug.bug_tasks:
+                info("Bug #%7s is a duplicate of Bug#%7s (%12s - %12s)",
+                     bug_id, dup_id, task.importance, task.status)
+                if task.status in ('Fix Released', 'Fix Committed'):
+                    unskips.append(bug_id)
+
+    unskips = sorted(set(unskips))
     if unskips:
         print "The following bugs have been fixed and the corresponding skips"
         print "should be removed from the test cases:"
diff --git a/tools/tempest_coverage.py b/tools/tempest_coverage.py
index 267eafa..a46d0fb 100755
--- a/tools/tempest_coverage.py
+++ b/tools/tempest_coverage.py
@@ -20,9 +20,10 @@
 import shutil
 import sys
 
+from oslo.config import cfg
+
 from tempest.common.rest_client import RestClient
 from tempest import config
-from tempest.openstack.common import cfg
 from tempest.tests.compute import base
 
 CONF = config.TempestConfig()
diff --git a/tox.ini b/tox.ini
index 1b18586..92ce6bc 100644
--- a/tox.ini
+++ b/tox.ini
@@ -19,5 +19,4 @@
            python -m tools/tempest_coverage -c report --html
 
 [testenv:pep8]
-deps = pep8==1.3.3
 commands = python tools/hacking.py --ignore=E122,E125,E126 --repeat --show-source --exclude=.venv,.tox,dist,doc,openstack,*egg .