Merge "Adapt negative testing documentation"
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index 13ee8fe..b70b446 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -467,9 +467,6 @@
 # [nova.vnc]->vnc_enabled in nova.conf (boolean value)
 #vnc_console = false
 
-# If false skip all v2 api tests with xml (boolean value)
-#xml_api_v2 = true
-
 
 [dashboard]
 
@@ -632,9 +629,6 @@
 # (boolean value)
 #trust = true
 
-# If false, skip all identity api tests with xml (boolean value)
-#xml_api = false
-
 
 [image]
 
@@ -810,9 +804,6 @@
 # attributes ipv6_ra_mode and ipv6_address_mode (boolean value)
 #ipv6_subnet_attributes = false
 
-# If false, skip all network api tests with xml (boolean value)
-#xml_api = false
-
 
 [object-storage]
 
diff --git a/requirements.txt b/requirements.txt
index e939c5c..ce5886e 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,7 +6,6 @@
 httplib2>=0.7.5
 jsonschema>=2.0.0,<3.0.0
 testtools>=0.9.36,!=1.2.0
-lxml>=2.3
 boto>=2.32.1
 paramiko>=1.13.0
 netaddr>=0.7.12
diff --git a/tempest/api/compute/admin/test_flavors_negative.py b/tempest/api/compute/admin/test_flavors_negative.py
index 5bc3d10..fb27360 100644
--- a/tempest/api/compute/admin/test_flavors_negative.py
+++ b/tempest/api/compute/admin/test_flavors_negative.py
@@ -18,9 +18,13 @@
 from tempest.api.compute import base
 from tempest.api_schema.request.compute.v2 import flavors
 from tempest.common.utils import data_utils
+from tempest import config
 from tempest import exceptions
 from tempest import test
 
+
+CONF = config.CONF
+
 load_tests = test.NegativeAutoTest.load_tests
 
 
@@ -106,5 +110,5 @@
 class FlavorCreateNegativeTestJSON(base.BaseV2ComputeAdminTest,
                                    test.NegativeAutoTest):
     _interface = 'json'
-    _service = 'compute'
+    _service = CONF.compute.catalog_type
     _schema = flavors.flavor_create
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 2f53a0b..6a3ee44 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -38,9 +38,6 @@
     def resource_setup(cls):
         cls.set_network_resources()
         super(BaseComputeTest, cls).resource_setup()
-        if getattr(cls, '_interface', None) == 'xml' and cls._api_version == 2:
-            if not CONF.compute_feature_enabled.xml_api_v2:
-                raise cls.skipException('XML API is not enabled')
 
         # TODO(andreaf) WE should care also for the alt_manager here
         # but only once client lazy load in the manager is done
diff --git a/tempest/api/compute/flavors/test_flavors_negative.py b/tempest/api/compute/flavors/test_flavors_negative.py
index cae1ac4..83f8e19 100644
--- a/tempest/api/compute/flavors/test_flavors_negative.py
+++ b/tempest/api/compute/flavors/test_flavors_negative.py
@@ -15,23 +15,26 @@
 
 from tempest.api.compute import base
 from tempest.api_schema.request.compute.v2 import flavors
+from tempest import config
 from tempest import test
 
 
+CONF = config.CONF
+
 load_tests = test.NegativeAutoTest.load_tests
 
 
 @test.SimpleNegativeAutoTest
 class FlavorsListWithDetailsNegativeTestJSON(base.BaseV2ComputeTest,
                                              test.NegativeAutoTest):
-    _service = 'compute'
+    _service = CONF.compute.catalog_type
     _schema = flavors.flavor_list
 
 
 @test.SimpleNegativeAutoTest
 class FlavorDetailsNegativeTestJSON(base.BaseV2ComputeTest,
                                     test.NegativeAutoTest):
-    _service = 'compute'
+    _service = CONF.compute.catalog_type
     _schema = flavors.flavors_details
 
     @classmethod
diff --git a/tempest/api/compute/servers/test_server_metadata_negative.py b/tempest/api/compute/servers/test_server_metadata_negative.py
index ad1ec70..8b074fd 100644
--- a/tempest/api/compute/servers/test_server_metadata_negative.py
+++ b/tempest/api/compute/servers/test_server_metadata_negative.py
@@ -40,7 +40,7 @@
         for sz in [256, 257, 511, 1023]:
             key = "k" * sz
             meta = {key: 'data1'}
-            self.assertRaises(exceptions.OverLimit,
+            self.assertRaises((exceptions.BadRequest, exceptions.OverLimit),
                               self.create_test_server,
                               meta=meta)
 
diff --git a/tempest/api/identity/admin/v3/test_policies.py b/tempest/api/identity/admin/v3/test_policies.py
index 9ea61df..ef7d22c 100644
--- a/tempest/api/identity/admin/v3/test_policies.py
+++ b/tempest/api/identity/admin/v3/test_policies.py
@@ -32,8 +32,8 @@
         for _ in range(3):
             blob = data_utils.rand_name('BlobName-')
             policy_type = data_utils.rand_name('PolicyType-')
-            resp, policy = self.policy_client.create_policy(blob,
-                                                            policy_type)
+            policy = self.policy_client.create_policy(blob,
+                                                      policy_type)
             # Delete the Policy at the end of this method
             self.addCleanup(self._delete_policy, policy['id'])
             policy_ids.append(policy['id'])
@@ -49,7 +49,7 @@
         # Test to update policy
         blob = data_utils.rand_name('BlobName-')
         policy_type = data_utils.rand_name('PolicyType-')
-        _, policy = self.policy_client.create_policy(blob, policy_type)
+        policy = self.policy_client.create_policy(blob, policy_type)
         self.addCleanup(self._delete_policy, policy['id'])
         self.assertIn('id', policy)
         self.assertIn('type', policy)
diff --git a/tempest/api/image/base.py b/tempest/api/image/base.py
index 74baba6..76b6f17 100644
--- a/tempest/api/image/base.py
+++ b/tempest/api/image/base.py
@@ -15,7 +15,7 @@
 import cStringIO as StringIO
 
 from tempest import clients
-from tempest.common import isolated_creds
+from tempest.common import credentials
 from tempest.common.utils import data_utils
 from tempest import config
 from tempest import exceptions
@@ -36,7 +36,7 @@
         super(BaseImageTest, cls).resource_setup()
         cls.created_images = []
         cls._interface = 'json'
-        cls.isolated_creds = isolated_creds.IsolatedCreds(
+        cls.isolated_creds = credentials.get_isolated_credentials(
             cls.__name__, network_resources=cls.network_resources)
         if not CONF.service_available.glance:
             skip_msg = ("%s skipped as glance is not available" % cls.__name__)
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index 4b5f107..b13dd22 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -62,9 +62,6 @@
         super(BaseNetworkTest, cls).resource_setup()
         if not CONF.service_available.neutron:
             raise cls.skipException("Neutron support is required")
-        if getattr(cls, '_interface', None) == 'xml':
-            if not CONF.network_feature_enabled.xml_api:
-                raise cls.skipException('XML API is not enabled')
         if cls._ip_version == 6 and not CONF.network_feature_enabled.ipv6:
             raise cls.skipException("IPv6 Tests are disabled.")
 
diff --git a/tempest/api/object_storage/base.py b/tempest/api/object_storage/base.py
index 2e39cf9..7fabb7d 100644
--- a/tempest/api/object_storage/base.py
+++ b/tempest/api/object_storage/base.py
@@ -16,8 +16,8 @@
 
 from tempest.api.identity import base
 from tempest import clients
+from tempest.common import credentials
 from tempest.common import custom_matchers
-from tempest.common import isolated_creds
 from tempest import config
 from tempest import exceptions
 import tempest.test
@@ -34,7 +34,7 @@
         if not CONF.service_available.swift:
             skip_msg = ("%s skipped as swift is not available" % cls.__name__)
             raise cls.skipException(skip_msg)
-        cls.isolated_creds = isolated_creds.IsolatedCreds(
+        cls.isolated_creds = credentials.get_isolated_credentials(
             cls.__name__, network_resources=cls.network_resources)
         # Get isolated creds for normal user
         cls.os = clients.Manager(cls.isolated_creds.get_primary_creds())
diff --git a/tempest/api/orchestration/stacks/templates/non_empty_stack.yaml b/tempest/api/orchestration/stacks/templates/non_empty_stack.yaml
index 8690941..4f9df91 100644
--- a/tempest/api/orchestration/stacks/templates/non_empty_stack.yaml
+++ b/tempest/api/orchestration/stacks/templates/non_empty_stack.yaml
@@ -7,6 +7,8 @@
     Default: not_yet
   image:
     Type: String
+  flavor:
+    Type: String
 Resources:
   fluffy:
     Type: AWS::AutoScaling::LaunchConfiguration
@@ -16,7 +18,7 @@
       - Stinky
     Properties:
       ImageId: {Ref: image}
-      InstanceType: not_used
+      InstanceType: {Ref: flavor}
       UserData:
         Fn::Replace:
         - variable_a: {Ref: trigger}
diff --git a/tempest/api/orchestration/stacks/test_non_empty_stack.py b/tempest/api/orchestration/stacks/test_non_empty_stack.py
index 759cbbe..bf6c79c 100644
--- a/tempest/api/orchestration/stacks/test_non_empty_stack.py
+++ b/tempest/api/orchestration/stacks/test_non_empty_stack.py
@@ -31,13 +31,15 @@
         template = cls.read_template('non_empty_stack')
         image_id = (CONF.orchestration.image_ref or
                     cls._create_image()['id'])
+        flavor = CONF.orchestration.instance_type
         # create the stack
         cls.stack_identifier = cls.create_stack(
             cls.stack_name,
             template,
             parameters={
                 'trigger': 'start',
-                'image': image_id
+                'image': image_id,
+                'flavor': flavor
             })
         cls.stack_id = cls.stack_identifier.split('/')[1]
         cls.resource_name = 'fluffy'
diff --git a/tempest/api/volume/admin/test_volume_quotas.py b/tempest/api/volume/admin/test_volume_quotas.py
index 0cc2471..50bab56 100644
--- a/tempest/api/volume/admin/test_volume_quotas.py
+++ b/tempest/api/volume/admin/test_volume_quotas.py
@@ -22,13 +22,13 @@
 QUOTA_USAGE_KEYS = ['reserved', 'limit', 'in_use']
 
 
-class VolumeQuotasAdminTestJSON(base.BaseVolumeV1AdminTest):
+class BaseVolumeQuotasAdminV2TestJSON(base.BaseVolumeAdminTest):
     _interface = "json"
     force_tenant_isolation = True
 
     @classmethod
     def resource_setup(cls):
-        super(VolumeQuotasAdminTestJSON, cls).resource_setup()
+        super(BaseVolumeQuotasAdminV2TestJSON, cls).resource_setup()
         cls.demo_tenant_id = cls.isolated_creds.get_primary_creds().tenant_id
 
     @test.attr(type='gate')
@@ -113,3 +113,7 @@
         self.quotas_client.delete_quota_set(tenant_id)
         _, quota_set_new = self.quotas_client.get_quota_set(tenant_id)
         self.assertEqual(volume_default, quota_set_new['volumes'])
+
+
+class VolumeQuotasAdminV1TestJSON(BaseVolumeQuotasAdminV2TestJSON):
+    _api_version = 1
diff --git a/tempest/api/volume/admin/test_volume_quotas_negative.py b/tempest/api/volume/admin/test_volume_quotas_negative.py
index 0fff561..c367ebb 100644
--- a/tempest/api/volume/admin/test_volume_quotas_negative.py
+++ b/tempest/api/volume/admin/test_volume_quotas_negative.py
@@ -18,13 +18,13 @@
 from tempest import test
 
 
-class VolumeQuotasNegativeTestJSON(base.BaseVolumeV1AdminTest):
+class BaseVolumeQuotasNegativeV2TestJSON(base.BaseVolumeAdminTest):
     _interface = "json"
     force_tenant_isolation = True
 
     @classmethod
     def resource_setup(cls):
-        super(VolumeQuotasNegativeTestJSON, cls).resource_setup()
+        super(BaseVolumeQuotasNegativeV2TestJSON, cls).resource_setup()
         demo_user = cls.isolated_creds.get_primary_creds()
         cls.demo_tenant_id = demo_user.tenant_id
         cls.shared_quota_set = {'gigabytes': 3, 'volumes': 1, 'snapshots': 1}
@@ -76,3 +76,7 @@
         self.assertRaises(exceptions.OverLimit,
                           self.snapshots_client.create_snapshot,
                           self.volume['id'])
+
+
+class VolumeQuotasNegativeV1TestJSON(BaseVolumeQuotasNegativeV2TestJSON):
+    _api_version = 1
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 5d99123..2a52e55 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -165,8 +165,6 @@
 
         cls.qos_specs = []
 
-        cls.quotas_client = cls.os_adm.volume_quotas_client
-
         if cls._api_version == 1:
             if not CONF.volume_feature_enabled.api_v1:
                 msg = "Volume API v1 is disabled"
@@ -177,6 +175,7 @@
             cls.hosts_client = cls.os_adm.volume_hosts_client
             cls.admin_snapshots_client = cls.os_adm.snapshots_client
             cls.backups_adm_client = cls.os_adm.backups_client
+            cls.quotas_client = cls.os_adm.volume_quotas_client
         elif cls._api_version == 2:
             if not CONF.volume_feature_enabled.api_v2:
                 msg = "Volume API v2 is disabled"
@@ -187,6 +186,7 @@
             cls.hosts_client = cls.os_adm.volume_hosts_v2_client
             cls.admin_snapshots_client = cls.os_adm.snapshots_v2_client
             cls.backups_adm_client = cls.os_adm.backups_v2_client
+            cls.quotas_client = cls.os_adm.volume_quotas_v2_client
 
     @classmethod
     def resource_cleanup(cls):
diff --git a/tempest/api_schema/response/compute/services.py b/tempest/api_schema/response/compute/services.py
index fc42b89..6f361ef 100644
--- a/tempest/api_schema/response/compute/services.py
+++ b/tempest/api_schema/response/compute/services.py
@@ -22,7 +22,8 @@
                 'items': {
                     'type': 'object',
                     'properties': {
-                        'id': {'type': 'integer'},
+                        'id': {'type': ['integer', 'string'],
+                               'pattern': '^[a-zA-Z!]*@[0-9]+$'},
                         'zone': {'type': 'string'},
                         'host': {'type': 'string'},
                         'state': {'type': 'string'},
diff --git a/tempest/cli/simple_read_only/network/test_neutron.py b/tempest/cli/simple_read_only/network/test_neutron.py
index 6090882..6cf0640 100644
--- a/tempest/cli/simple_read_only/network/test_neutron.py
+++ b/tempest/cli/simple_read_only/network/test_neutron.py
@@ -198,6 +198,31 @@
                                             'auth_mode', 'status'])
 
     @test.attr(type='smoke')
+    @test.requires_ext(extension='fwaas', service='network')
+    def test_neutron_firewall_list(self):
+        firewall_list = self.parser.listing(self.neutron
+                                            ('firewall-list'))
+        self.assertTableStruct(firewall_list, ['id', 'name',
+                                               'firewall_policy_id'])
+
+    @test.attr(type='smoke')
+    @test.requires_ext(extension='fwaas', service='network')
+    def test_neutron_firewall_policy_list(self):
+        firewall_policy = self.parser.listing(self.neutron
+                                              ('firewall-policy-list'))
+        self.assertTableStruct(firewall_policy, ['id', 'name',
+                                                 'firewall_rules'])
+
+    @test.attr(type='smoke')
+    @test.requires_ext(extension='fwaas', service='network')
+    def test_neutron_firewall_rule_list(self):
+        firewall_rule = self.parser.listing(self.neutron
+                                            ('firewall-rule-list'))
+        self.assertTableStruct(firewall_rule, ['id', 'name',
+                                               'firewall_policy_id',
+                                               'summary', 'enabled'])
+
+    @test.attr(type='smoke')
     def test_neutron_help(self):
         help_text = self.neutron('help')
         lines = help_text.split('\n')
diff --git a/tempest/clients.py b/tempest/clients.py
index 486eef2..5873a85 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -138,6 +138,8 @@
 from tempest.services.volume.json.volumes_client import VolumesClientJSON
 from tempest.services.volume.v2.json.admin.volume_hosts_client import \
     VolumeHostsV2ClientJSON
+from tempest.services.volume.v2.json.admin.volume_quotas_client import \
+    VolumeQuotasV2Client
 from tempest.services.volume.v2.json.admin.volume_types_client import \
     VolumeTypesV2ClientJSON
 from tempest.services.volume.v2.json.availability_zone_client import \
@@ -302,6 +304,7 @@
         self.volume_hosts_v2_client = VolumeHostsV2ClientJSON(
             self.auth_provider)
         self.volume_quotas_client = VolumeQuotasClientJSON(self.auth_provider)
+        self.volume_quotas_v2_client = VolumeQuotasV2Client(self.auth_provider)
         self.volumes_extension_client = VolumeExtensionClientJSON(
             self.auth_provider)
         self.volumes_v2_extension_client = VolumeV2ExtensionClientJSON(
diff --git a/tempest/cmd/cleanup.py b/tempest/cmd/cleanup.py
index a305e42..f36ef56 100755
--- a/tempest/cmd/cleanup.py
+++ b/tempest/cmd/cleanup.py
@@ -9,7 +9,7 @@
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
 # License for the specific language governing permissions and limitations
 # under the License.
 
@@ -36,14 +36,14 @@
 **NOTE**: The _tenants_to_clean array in dry-run.json lists the
 tenants that cleanup will loop through and delete child objects, not
 delete the tenant itself. This may differ from the tenants array as you
-can clean the tempest and alternate tempest tenants but not delete the
-tenants themselves.  This is actually the default behavior.
+can clean the tempest and alternate tempest tenants without deleting
+them: by default, cleanup deletes the objects in those tenants but
+does not delete the tenants themselves unless the
+--delete-tempest-conf-objects flag is used to force their deletion.
 
 **Normal mode**: running with no arguments, will query your deployment and
-build a list of objects to delete after filtering out out the objects
-found in saved_state.json and based on the
---preserve-tempest-conf-objects and
---delete-tempest-conf-objects flags.
+build a list of objects to delete after filtering out the objects found in
+saved_state.json and based on the --delete-tempest-conf-objects flag.
 
 By default the tempest and alternate tempest users and tenants are not
 deleted and the admin user specified in tempest.conf is never deleted.
@@ -84,7 +84,6 @@
         # available services
         self.tenant_services = cleanup_service.get_tenant_cleanup_services()
         self.global_services = cleanup_service.get_global_cleanup_services()
-        cleanup_service.init_conf()
 
     def run(self):
         opts = self.options
@@ -98,7 +97,7 @@
     def _cleanup(self):
         LOG.debug("Begin cleanup")
         is_dry_run = self.options.dry_run
-        is_preserve = self.options.preserve_tempest_conf_objects
+        is_preserve = not self.options.delete_tempest_conf_objects
         is_save_state = False
 
         if is_dry_run:
@@ -149,7 +148,7 @@
         LOG.debug("Cleaning tenant:  %s " % tenant['name'])
         is_dry_run = self.options.dry_run
         dry_run_data = self.dry_run_data
-        is_preserve = self.options.preserve_tempest_conf_objects
+        is_preserve = not self.options.delete_tempest_conf_objects
         tenant_id = tenant['id']
         tenant_name = tenant['name']
         tenant_data = None
@@ -194,23 +193,16 @@
                             dest='init_saved_state', default=False,
                             help="Creates JSON file: " + SAVED_STATE_JSON +
                             ", representing the current state of your "
-                            "deployment,  specifically objects types "
-                            "Tempest creates and destroys during a run. "
+                            "deployment,  specifically object types "
+                            "tempest creates and destroys during a run. "
                             "You must run with this flag prior to "
-                            "executing cleanup.")
-        parser.add_argument('--preserve-tempest-conf-objects',
-                            action="store_true",
-                            dest='preserve_tempest_conf_objects',
-                            default=True, help="Do not delete the "
-                            "tempest and alternate tempest users and "
-                            "tenants, so they may be used for future "
-                            "tempest runs. By default this is argument "
-                            "is true.")
+                            "executing cleanup in normal mode, which is with "
+                            "no arguments.")
         parser.add_argument('--delete-tempest-conf-objects',
-                            action="store_false",
-                            dest='preserve_tempest_conf_objects',
+                            action="store_true",
+                            dest='delete_tempest_conf_objects',
                             default=False,
-                            help="Delete the tempest and "
+                            help="Force deletion of the tempest and "
                             "alternate tempest users and tenants.")
         parser.add_argument('--dry-run', action="store_true",
                             dest='dry_run', default=False,
@@ -291,6 +283,7 @@
 
 
 def main():
+    cleanup_service.init_conf()
     cleanup = Cleanup()
     cleanup.run()
     LOG.info('Cleanup finished!')
diff --git a/tempest/cmd/cleanup_service.py b/tempest/cmd/cleanup_service.py
index 8adfbef..67843e6 100644
--- a/tempest/cmd/cleanup_service.py
+++ b/tempest/cmd/cleanup_service.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python
+
 # Copyright 2014 Dell Inc.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -12,6 +14,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from tempest import clients
 from tempest import config
 from tempest.openstack.common import log as logging
 from tempest import test
@@ -19,13 +22,14 @@
 LOG = logging.getLogger(__name__)
 CONF = config.CONF
 
-CONF_USERS = None
-CONF_TENANTS = None
-CONF_PUB_NETWORK = None
-CONF_PRIV_NETWORK_NAME = None
-CONF_PUB_ROUTER = None
 CONF_FLAVORS = None
 CONF_IMAGES = None
+CONF_NETWORKS = []
+CONF_PRIV_NETWORK_NAME = None
+CONF_PUB_NETWORK = None
+CONF_PUB_ROUTER = None
+CONF_TENANTS = None
+CONF_USERS = None
 
 IS_CEILOMETER = None
 IS_CINDER = None
@@ -36,14 +40,15 @@
 
 
 def init_conf():
-    global CONF_USERS
-    global CONF_TENANTS
-    global CONF_PUB_NETWORK
-    global CONF_PRIV_NETWORK_NAME
-    global CONF_PUB_ROUTER
     global CONF_FLAVORS
     global CONF_IMAGES
-
+    global CONF_NETWORKS
+    global CONF_PRIV_NETWORK
+    global CONF_PRIV_NETWORK_NAME
+    global CONF_PUB_NETWORK
+    global CONF_PUB_ROUTER
+    global CONF_TENANTS
+    global CONF_USERS
     global IS_CEILOMETER
     global IS_CINDER
     global IS_GLANCE
@@ -51,17 +56,6 @@
     global IS_NEUTRON
     global IS_NOVA
 
-    CONF_USERS = [CONF.identity.admin_username, CONF.identity.username,
-                  CONF.identity.alt_username]
-    CONF_TENANTS = [CONF.identity.admin_tenant_name,
-                    CONF.identity.tenant_name,
-                    CONF.identity.alt_tenant_name]
-    CONF_PUB_NETWORK = CONF.network.public_network_id
-    CONF_PRIV_NETWORK_NAME = CONF.compute.fixed_network_name
-    CONF_PUB_ROUTER = CONF.network.public_router_id
-    CONF_FLAVORS = [CONF.compute.flavor_ref, CONF.compute.flavor_ref_alt]
-    CONF_IMAGES = [CONF.compute.image_ref, CONF.compute.image_ref_alt]
-
     IS_CEILOMETER = CONF.service_available.ceilometer
     IS_CINDER = CONF.service_available.cinder
     IS_GLANCE = CONF.service_available.glance
@@ -69,6 +63,38 @@
     IS_NEUTRON = CONF.service_available.neutron
     IS_NOVA = CONF.service_available.nova
 
+    CONF_FLAVORS = [CONF.compute.flavor_ref, CONF.compute.flavor_ref_alt]
+    CONF_IMAGES = [CONF.compute.image_ref, CONF.compute.image_ref_alt]
+    CONF_PRIV_NETWORK_NAME = CONF.compute.fixed_network_name
+    CONF_PUB_NETWORK = CONF.network.public_network_id
+    CONF_PUB_ROUTER = CONF.network.public_router_id
+    CONF_TENANTS = [CONF.identity.admin_tenant_name,
+                    CONF.identity.tenant_name,
+                    CONF.identity.alt_tenant_name]
+    CONF_USERS = [CONF.identity.admin_username, CONF.identity.username,
+                  CONF.identity.alt_username]
+
+    if IS_NEUTRON:
+        CONF_PRIV_NETWORK = _get_priv_net_id(CONF.compute.fixed_network_name,
+                                             CONF.identity.tenant_name)
+        CONF_NETWORKS = [CONF_PUB_NETWORK, CONF_PRIV_NETWORK]
+
+
+def _get_priv_net_id(prv_net_name, tenant_name):
+    am = clients.AdminManager()
+    net_cl = am.network_client
+    id_cl = am.identity_client
+
+    _, networks = net_cl.list_networks()
+    tenant = id_cl.get_tenant_by_name(tenant_name)
+    t_id = tenant['id']
+    n_id = None
+    for net in networks['networks']:
+        if (net['tenant_id'] == t_id and net['name'] == prv_net_name):
+            n_id = net['id']
+            break
+    return n_id
+
 
 class BaseService(object):
     def __init__(self, kwargs):
@@ -84,11 +110,8 @@
                 or 'tenant_id' not in item_list[0]):
             return item_list
 
-        _filtered_list = []
-        for item in item_list:
-            if item['tenant_id'] == self.tenant_id:
-                _filtered_list.append(item)
-        return _filtered_list
+        return [item for item in item_list
+                if item['tenant_id'] == self.tenant_id]
 
     def list(self):
         pass
@@ -325,6 +348,13 @@
         super(NetworkService, self).__init__(kwargs)
         self.client = manager.network_client
 
+    def _filter_by_conf_networks(self, item_list):
+        if not item_list or not all(('network_id' in i for i in item_list)):
+            return item_list
+
+        return [item for item in item_list if item['network_id']
+                not in CONF_NETWORKS]
+
     def list(self):
         client = self.client
         _, networks = client.list_networks()
@@ -332,8 +362,7 @@
         # filter out networks declared in tempest.conf
         if self.is_preserve:
             networks = [network for network in networks
-                        if (network['name'] != CONF_PRIV_NETWORK_NAME
-                            and network['id'] != CONF_PUB_NETWORK)]
+                        if network['id'] not in CONF_NETWORKS]
         LOG.debug("List count, %s Networks" % networks)
         return networks
 
@@ -527,7 +556,7 @@
                 for port in ports:
                     subid = port['fixed_ips'][0]['subnet_id']
                     client.remove_router_interface_with_subnet_id(rid, subid)
-                    client.delete_router(rid)
+                client.delete_router(rid)
             except Exception as e:
                 LOG.exception("Delete Router exception: %s" % e)
                 pass
@@ -694,6 +723,8 @@
         _, ports = client.list_ports()
         ports = ports['ports']
         ports = self._filter_by_tenant_id(ports)
+        if self.is_preserve:
+            ports = self._filter_by_conf_networks(ports)
         LOG.debug("List count, %s Ports" % len(ports))
         return ports
 
@@ -719,6 +750,8 @@
         _, subnets = client.list_subnets()
         subnets = subnets['subnets']
         subnets = self._filter_by_tenant_id(subnets)
+        if self.is_preserve:
+            subnets = self._filter_by_conf_networks(subnets)
         LOG.debug("List count, %s Subnets" % len(subnets))
         return subnets
 
diff --git a/tempest/common/isolated_creds.py b/tempest/common/isolated_creds.py
index 228e47c..1ce1e39 100644
--- a/tempest/common/isolated_creds.py
+++ b/tempest/common/isolated_creds.py
@@ -203,9 +203,8 @@
                 if 'overlaps with another subnet' not in str(e):
                     raise
         else:
-            e = exceptions.BuildErrorException()
-            e.message = 'Available CIDR for subnet creation could not be found'
-            raise e
+            message = 'Available CIDR for subnet creation could not be found'
+            raise Exception(message)
         return resp_body['subnet']
 
     def _create_router(self, router_name, tenant_id):
diff --git a/tempest/common/rest_client.py b/tempest/common/rest_client.py
index c9448a7..f4fe92b 100644
--- a/tempest/common/rest_client.py
+++ b/tempest/common/rest_client.py
@@ -52,6 +52,23 @@
         return text
 
 
+class ResponseBody(dict):
+    """Class that wraps an http response and body into a single value.
+
+    Callers that receive this object will normally use it as a dict but
+    can extract the response if needed.
+    """
+
+    def __init__(self, response, body=None):
+        body_data = body or {}
+        self.update(body_data)
+        self.response = response
+
+    def __str__(self):
+        body = super(ResponseBody, self).__str__()
+        return "response: %s\nBody: %s" % (self.response, body)
+
+
 class RestClient(object):
 
     TYPE = "json"
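For context on the ResponseBody wrapper introduced above: callers keep treating
the returned value as a plain dict, while the raw HTTP response stays reachable
through the .response attribute. A minimal sketch, illustration only (the
literal values are invented and tempest is assumed to be importable):

    from tempest.common import rest_client

    # Hypothetical response/body pair; real clients receive these from httplib2.
    fake_response = {'status': '201'}
    policy = rest_client.ResponseBody(fake_response,
                                      {'id': 'abc', 'type': 'text'})

    policy['id']               # dict-style access, as the identity tests use it
    policy.response['status']  # the wrapped response is still available

This is what lets the policy client change further down return a single value
instead of a (resp, body) tuple without hiding the response from callers.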
diff --git a/tempest/config.py b/tempest/config.py
index 616a476..b467f83 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -143,9 +143,6 @@
     cfg.BoolOpt('api_v3',
                 default=True,
                 help='Is the v3 identity API enabled'),
-    cfg.BoolOpt('xml_api',
-                default=False,
-                help='If false, skip all identity api tests with xml'),
 ]
 
 compute_group = cfg.OptGroup(name='compute',
@@ -280,9 +277,6 @@
     cfg.BoolOpt('api_v3',
                 default=False,
                 help="If false, skip all nova v3 tests."),
-    cfg.BoolOpt('xml_api_v2',
-                default=True,
-                help="If false skip all v2 api tests with xml"),
     cfg.BoolOpt('disk_config',
                 default=True,
                 help="If false, skip disk config tests"),
@@ -491,9 +485,6 @@
                      "the extended IPv6 attributes ipv6_ra_mode "
                      "and ipv6_address_mode"
                 ),
-    cfg.BoolOpt('xml_api',
-                default=False,
-                help='If false, skip all network api tests with xml')
 ]
 
 messaging_group = cfg.OptGroup(name='messaging',
diff --git a/tempest/scenario/test_dashboard_basic_ops.py b/tempest/scenario/test_dashboard_basic_ops.py
index 875a1d9..2014293 100644
--- a/tempest/scenario/test_dashboard_basic_ops.py
+++ b/tempest/scenario/test_dashboard_basic_ops.py
@@ -12,11 +12,10 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import HTMLParser
 import urllib
 import urllib2
 
-from lxml import html
-
 from tempest import config
 from tempest.scenario import manager
 from tempest import test
@@ -24,6 +23,30 @@
 CONF = config.CONF
 
 
+class HorizonHTMLParser(HTMLParser.HTMLParser):
+    csrf_token = None
+    region = None
+
+    def _find_name(self, attrs, name):
+        for attrpair in attrs:
+            if attrpair[0] == 'name' and attrpair[1] == name:
+                return True
+        return False
+
+    def _find_value(self, attrs):
+        for attrpair in attrs:
+            if attrpair[0] == 'value':
+                return attrpair[1]
+        return None
+
+    def handle_starttag(self, tag, attrs):
+        if tag == 'input':
+            if self._find_name(attrs, 'csrfmiddlewaretoken'):
+                self.csrf_token = self._find_value(attrs)
+            if self._find_name(attrs, 'region'):
+                self.region = self._find_value(attrs)
+
+
 class TestDashboardBasicOps(manager.ScenarioTest):
 
     """
@@ -42,17 +65,15 @@
 
     def check_login_page(self):
         response = urllib2.urlopen(CONF.dashboard.dashboard_url)
-        self.assertIn("<h3>Log In</h3>", response.read())
+        self.assertIn("Log In", response.read())
 
     def user_login(self):
         self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor())
         response = self.opener.open(CONF.dashboard.dashboard_url).read()
 
         # Grab the CSRF token and default region
-        csrf_token = html.fromstring(response).xpath(
-            '//input[@name="csrfmiddlewaretoken"]/@value')[0]
-        region = html.fromstring(response).xpath(
-            '//input[@name="region"]/@value')[0]
+        parser = HorizonHTMLParser()
+        parser.feed(response)
 
         # Prepare login form request
         req = urllib2.Request(CONF.dashboard.login_url)
@@ -60,8 +81,8 @@
         req.add_header('Referer', CONF.dashboard.dashboard_url)
         params = {'username': CONF.identity.username,
                   'password': CONF.identity.password,
-                  'region': region,
-                  'csrfmiddlewaretoken': csrf_token}
+                  'region': parser.region,
+                  'csrfmiddlewaretoken': parser.csrf_token}
         self.opener.open(req, urllib.urlencode(params))
 
     def check_home_page(self):
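To illustrate why the stdlib HTMLParser is enough to replace lxml here: the
HorizonHTMLParser added above only has to pull two hidden-input values out of
the Horizon login form. A hedged sketch with made-up markup, assuming the class
as defined in the hunk above:

    sample = ('<form><input name="csrfmiddlewaretoken" value="tok123"/>'
              '<input name="region" value="RegionOne"/></form>')
    parser = HorizonHTMLParser()
    parser.feed(sample)
    # handle_starttag() fires for each <input>, capturing both values.
    assert parser.csrf_token == 'tok123'
    assert parser.region == 'RegionOne'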
diff --git a/tempest/scenario/test_shelve_instance.py b/tempest/scenario/test_shelve_instance.py
new file mode 100644
index 0000000..3ee71dd
--- /dev/null
+++ b/tempest/scenario/test_shelve_instance.py
@@ -0,0 +1,96 @@
+# Copyright 2014 Scality
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import testtools
+
+from tempest import config
+from tempest.openstack.common import log
+from tempest.scenario import manager
+from tempest import test
+
+CONF = config.CONF
+
+LOG = log.getLogger(__name__)
+
+
+class TestShelveInstance(manager.ScenarioTest):
+    """
+    This test shelves then unshelves a Nova instance
+    The following is the scenario outline:
+     * boot an instance and create a timestamp file in it
+     * shelve the instance
+     * unshelve the instance
+     * check the existence of the timestamp file in the unshelved instance
+
+    """
+
+    def _write_timestamp(self, server_or_ip):
+        ssh_client = self.get_remote_client(server_or_ip)
+        ssh_client.exec_command('date > /tmp/timestamp; sync')
+        self.timestamp = ssh_client.exec_command('cat /tmp/timestamp')
+
+    def _check_timestamp(self, server_or_ip):
+        ssh_client = self.get_remote_client(server_or_ip)
+        got_timestamp = ssh_client.exec_command('cat /tmp/timestamp')
+        self.assertEqual(self.timestamp, got_timestamp)
+
+    def _shelve_then_unshelve_server(self, server):
+        self.servers_client.shelve_server(server['id'])
+        offload_time = CONF.compute.shelved_offload_time
+        if offload_time >= 0:
+            self.servers_client.wait_for_server_status(
+                server['id'], 'SHELVED_OFFLOADED', extra_timeout=offload_time)
+        else:
+            self.servers_client.wait_for_server_status(server['id'], 'SHELVED')
+            self.servers_client.shelve_offload_server(server['id'])
+            self.servers_client.wait_for_server_status(server['id'],
+                                                       'SHELVED_OFFLOADED')
+        self.servers_client.unshelve_server(server['id'])
+        self.servers_client.wait_for_server_status(server['id'], 'ACTIVE')
+
+    @testtools.skipUnless(CONF.compute_feature_enabled.shelve,
+                          'Shelve is not available.')
+    @test.services('compute', 'network', 'image')
+    def test_shelve_instance(self):
+        self.keypair = self.create_keypair()
+
+        self.security_group = self._create_security_group()
+
+        create_kwargs = {
+            'key_name': self.keypair['name'],
+            'security_groups': [self.security_group]
+        }
+        server = self.create_server(image=CONF.compute.image_ref,
+                                    create_kwargs=create_kwargs)
+
+        if CONF.compute.use_floatingip_for_ssh:
+            _, floating_ip = self.floating_ips_client.create_floating_ip()
+            self.addCleanup(self.delete_wrapper,
+                            self.floating_ips_client.delete_floating_ip,
+                            floating_ip['id'])
+            self.floating_ips_client.associate_floating_ip_to_server(
+                floating_ip['ip'], server['id'])
+            self._write_timestamp(floating_ip['ip'])
+        else:
+            self._write_timestamp(server)
+
+        # Prevent bug #1257594 from coming back
+        # Unshelve used to boot the instance with the original image, not
+        # with the instance snapshot
+        self._shelve_then_unshelve_server(server)
+        if CONF.compute.use_floatingip_for_ssh:
+            self._check_timestamp(floating_ip['ip'])
+        else:
+            self._check_timestamp(server)
diff --git a/tempest/services/compute/json/volumes_extensions_client.py b/tempest/services/compute/json/volumes_extensions_client.py
index 309dc5b..b23b20b 100644
--- a/tempest/services/compute/json/volumes_extensions_client.py
+++ b/tempest/services/compute/json/volumes_extensions_client.py
@@ -73,10 +73,9 @@
         metadata: A dictionary of values to be used as metadata.
         """
         post_body = {
-            'size': size,
-            'display_name': kwargs.get('display_name'),
-            'metadata': kwargs.get('metadata'),
+            'size': size
         }
+        post_body.update(kwargs)
 
         post_body = json.dumps({'volume': post_body})
         resp, body = self.post('os-volumes', post_body)
@@ -93,7 +92,6 @@
     def wait_for_volume_status(self, volume_id, status):
         """Waits for a Volume to reach a given status."""
         resp, body = self.get_volume(volume_id)
-        volume_name = body['displayName']
         volume_status = body['status']
         start = int(time.time())
 
@@ -107,7 +105,7 @@
             if int(time.time()) - start >= self.build_timeout:
                 message = ('Volume %s failed to reach %s status within '
                            'the required time (%s s).' %
-                           (volume_name, status, self.build_timeout))
+                           (volume_id, status, self.build_timeout))
                 raise exceptions.TimeoutException(message)
 
     def is_resource_deleted(self, id):
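With create_volume now merging arbitrary keyword arguments into the request
body (first hunk above), optional fields are only sent when callers actually
pass them. A hedged usage sketch; volumes_client stands in for the compute
volumes extension client, and the (resp, volume) return convention of this era
is assumed:

    # Only 'size' is mandatory; anything else is passed through via kwargs.
    resp, volume = volumes_client.create_volume(
        size=1,
        display_name='scratch-vol',       # optional
        metadata={'purpose': 'testing'})  # optional
    volumes_client.wait_for_volume_status(volume['id'], 'available')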
diff --git a/tempest/services/identity/v3/json/policy_client.py b/tempest/services/identity/v3/json/policy_client.py
index e093260..41b0b59 100644
--- a/tempest/services/identity/v3/json/policy_client.py
+++ b/tempest/services/identity/v3/json/policy_client.py
@@ -39,7 +39,7 @@
         resp, body = self.post('policies', post_body)
         self.expected_success(201, resp.status)
         body = json.loads(body)
-        return resp, body['policy']
+        return rest_client.ResponseBody(resp, body['policy'])
 
     def list_policies(self):
         """Lists the policies."""
diff --git a/tempest/services/volume/json/admin/volume_quotas_client.py b/tempest/services/volume/json/admin/volume_quotas_client.py
index 90790e3..5b49040 100644
--- a/tempest/services/volume/json/admin/volume_quotas_client.py
+++ b/tempest/services/volume/json/admin/volume_quotas_client.py
@@ -23,7 +23,7 @@
 CONF = config.CONF
 
 
-class VolumeQuotasClientJSON(rest_client.RestClient):
+class BaseVolumeQuotasClientJSON(rest_client.RestClient):
     """
     Client class to send CRUD Volume Quotas API requests to a Cinder endpoint
     """
@@ -31,7 +31,7 @@
     TYPE = "json"
 
     def __init__(self, auth_provider):
-        super(VolumeQuotasClientJSON, self).__init__(auth_provider)
+        super(BaseVolumeQuotasClientJSON, self).__init__(auth_provider)
 
         self.service = CONF.volume.catalog_type
         self.build_interval = CONF.volume.build_interval
@@ -85,3 +85,9 @@
         """Delete the tenant's quota set."""
         resp, body = self.delete('os-quota-sets/%s' % tenant_id)
         self.expected_success(200, resp.status)
+
+
+class VolumeQuotasClientJSON(BaseVolumeQuotasClientJSON):
+    """
+    Client class to send CRUD Volume Quotas API V1 requests to a Cinder endpoint
+    """
diff --git a/tempest/services/volume/v2/json/admin/volume_quotas_client.py b/tempest/services/volume/v2/json/admin/volume_quotas_client.py
new file mode 100644
index 0000000..64f4f33
--- /dev/null
+++ b/tempest/services/volume/v2/json/admin/volume_quotas_client.py
@@ -0,0 +1,27 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.services.volume.json.admin import volume_quotas_client
+
+
+class VolumeQuotasV2Client(volume_quotas_client.BaseVolumeQuotasClientJSON):
+    """
+    Client class to send CRUD Volume Quotas V2 API requests to a Cinder endpoint
+    """
+
+    def __init__(self, auth_provider):
+        super(VolumeQuotasV2Client, self).__init__(auth_provider)
+
+        self.api_version = "v2"
diff --git a/tempest/test.py b/tempest/test.py
index 14cf3bb..7db0376 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -414,12 +414,8 @@
         else:
             standard_tests, module, loader = args
         for test in testtools.iterate_tests(standard_tests):
-            schema_file = getattr(test, '_schema_file', None)
             schema = getattr(test, '_schema', None)
-            if schema_file is not None:
-                setattr(test, 'scenarios',
-                        NegativeAutoTest.generate_scenario(schema_file))
-            elif schema is not None:
+            if schema is not None:
                 setattr(test, 'scenarios',
                         NegativeAutoTest.generate_scenario(schema))
         return testscenarios.load_tests_apply_scenarios(*args)
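
One practical consequence of the simplification above: only the _schema class
attribute is consulted when scenarios are generated, so a test class that still
sets the old _schema_file attribute will silently get no scenarios. A hedged
reminder of the expected shape, mirroring the flavors negative tests earlier in
this change (imports as in that module; ExampleNegativeTestJSON is illustrative
only):

    load_tests = test.NegativeAutoTest.load_tests

    @test.SimpleNegativeAutoTest
    class ExampleNegativeTestJSON(base.BaseV2ComputeTest,
                                  test.NegativeAutoTest):
        _service = CONF.compute.catalog_type  # catalog type, not literal 'compute'
        _schema = flavors.flavor_list         # scenarios are generated from this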