Merge "Avoid errors in log when neutron tests are skipped"
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index 8a7ad9c..f80fc1b 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -391,12 +391,14 @@
 
 # A list of enabled compute extensions with a special entry
 # all which indicates every extension is enabled. Each
-# extension should be specified with alias name (list value)
+# extension should be specified with alias name. Empty list
+# indicates all extensions are disabled (list value)
 #api_extensions=all
 
 # A list of enabled v3 extensions with a special entry all
 # which indicates every extension is enabled. Each extension
-# should be specified with alias name (list value)
+# should be specified with alias name. Empty list indicates
+# all extensions are disabled (list value)
 #api_v3_extensions=all
 
 # Does the test environment support changing the admin
@@ -441,6 +443,11 @@
 # (boolean value)
 #rescue=true
 
+# Enables returning of the instance password by the relevant
+# server API calls such as create, rebuild or rescue. (boolean
+# value)
+#enable_instance_password=true
+
 
 [dashboard]
 
@@ -750,7 +757,8 @@
 #ipv6=true
 
 # A list of enabled network extensions with a special entry
-# all which indicates every extension is enabled (list value)
+# all which indicates every extension is enabled. Empty list
+# indicates all extensions are disabled (list value)
 #api_extensions=all
 
 # Allow the execution of IPv6 subnet tests that use the
@@ -1105,7 +1113,8 @@
 #snapshot=true
 
 # A list of enabled volume extensions with a special entry all
-# which indicates every extension is enabled (list value)
+# which indicates every extension is enabled. Empty list
+# indicates all extensions are disabled (list value)
 #api_extensions=all
 
 # Is the v1 volume API enabled (boolean value)
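
The clarified help text above reduces to a simple rule for the list-valued
extension options: 'all' enables every extension, an empty list disables them
all, and otherwise an extension must be listed by its alias name. A minimal
Python sketch of that interpretation (illustrative only, not Tempest's actual
helper):

    def extension_enabled(extension, configured_extensions):
        # Empty list: every extension is considered disabled.
        if not configured_extensions:
            return False
        # The special entry 'all' enables every extension.
        if 'all' in configured_extensions:
            return True
        # Otherwise the extension must be listed by its alias name.
        return extension in configured_extensions

    # extension_enabled('os-agents', ['all'])        -> True
    # extension_enabled('os-agents', [])             -> False
    # extension_enabled('os-agents', ['os-agents'])  -> True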
diff --git a/tempest/README.rst b/tempest/README.rst
index 18c7cf3..fb25151 100644
--- a/tempest/README.rst
+++ b/tempest/README.rst
@@ -23,9 +23,8 @@
 belongs in each directory, the rules and examples for good tests, are
 documented in a README.rst file in the directory.
 
-
-api
----
+:ref:`api_field_guide`
+----------------------
 
 API tests are validation tests for the OpenStack API. They should not
 use the existing python clients for OpenStack, but should instead use
@@ -39,8 +38,8 @@
 frameworks.
 
 
-cli
----
+:ref:`cli_field_guide`
+----------------------
 
 CLI tests use the openstack CLI to interact with the OpenStack
 cloud. CLI testing in unit tests is somewhat difficult because unlike
@@ -49,8 +48,8 @@
 prereqs having a running OpenStack cloud.
 
 
-scenario
---------
+:ref:`scenario_field_guide`
+---------------------------
 
 Scenario tests are complex "through path" tests for OpenStack
 functionality. They are typically a series of steps where complicated
@@ -59,18 +58,26 @@
 Scenario tests can and should use the OpenStack python clients.
 
 
-stress
-------
+:ref:`stress_field_guide`
+-------------------------
 
 Stress tests are designed to stress an OpenStack environment by running a high
 workload against it and seeing what breaks. The stress test framework runs
 several test jobs in parallel and can run any existing test in Tempest as a
 stress job.
 
-thirdparty
-----------
+:ref:`third_party_field_guide`
+------------------------------
 
 Many openstack components include 3rdparty API support. It is
 completely legitimate for Tempest to include tests of 3rdparty APIs,
 but those should be kept separate from the normal OpenStack
 validation.
+
+:ref:`unit_tests_field_guide`
+-----------------------------
+
+Unit tests are the self-checks for Tempest. They provide functional
+verification and regression checking for the internal components of Tempest.
+They should be used just to verify that the individual pieces of Tempest are
+working as expected.
diff --git a/tempest/api/README.rst b/tempest/api/README.rst
index 9eac19d..91e6ad6 100644
--- a/tempest/api/README.rst
+++ b/tempest/api/README.rst
@@ -1,3 +1,5 @@
+.. _api_field_guide:
+
 Tempest Field Guide to API tests
 ================================
 
diff --git a/tempest/api/compute/admin/test_quotas_negative.py b/tempest/api/compute/admin/test_quotas_negative.py
index f147b9c..599b058 100644
--- a/tempest/api/compute/admin/test_quotas_negative.py
+++ b/tempest/api/compute/admin/test_quotas_negative.py
@@ -111,7 +111,9 @@
                         security_groups=default_sg_quota)
 
         # Check we cannot create anymore
-        self.assertRaises(exceptions.OverLimit,
+        # A 403 Forbidden or 413 Overlimit (old behaviour) exception
+        # will be raised when out of quota
+        self.assertRaises((exceptions.Unauthorized, exceptions.OverLimit),
                           self.sg_client.create_security_group,
                           "sg-overlimit", "sg-desc")
 
@@ -147,7 +149,9 @@
         ip_protocol = 'tcp'
 
         # Check we cannot create SG rule anymore
-        self.assertRaises(exceptions.OverLimit,
+        # A 403 Forbidden or 413 Overlimit (old behaviour) exception
+        # will be raised when out of quota
+        self.assertRaises((exceptions.OverLimit, exceptions.Unauthorized),
                           self.sg_client.create_security_group_rule,
                           secgroup_id, ip_protocol, 1025, 1025)
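
The comments added in this file describe behaviour that differs across Nova
releases: newer code answers an exhausted quota with 403 Forbidden (paired
here with exceptions.Unauthorized), while older code answered with 413 Over
Limit (exceptions.OverLimit). A small helper that captures this tolerant
check, reusing the exception classes this module already imports (a sketch,
not existing Tempest API):

    from tempest import exceptions

    def assert_over_quota(test, call, *args, **kwargs):
        # Accept either status code until the old behaviour disappears.
        test.assertRaises((exceptions.Unauthorized, exceptions.OverLimit),
                          call, *args, **kwargs)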
 
diff --git a/tempest/api/compute/admin/test_security_group_default_rules.py b/tempest/api/compute/admin/test_security_group_default_rules.py
new file mode 100644
index 0000000..07408a8
--- /dev/null
+++ b/tempest/api/compute/admin/test_security_group_default_rules.py
@@ -0,0 +1,127 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import testtools
+
+from tempest.api.compute import base
+from tempest import config
+from tempest import exceptions
+from tempest import test
+
+CONF = config.CONF
+
+
+class SecurityGroupDefaultRulesTest(base.BaseV2ComputeAdminTest):
+
+    @classmethod
+    # TODO(GMann): Once Bug# 1311500 is fixed, these tests can run
+    # for Neutron also.
+    @testtools.skipIf(CONF.service_available.neutron,
+                      "Skip as this functionality is not yet "
+                      "implemented in Neutron. Related Bug#1311500")
+    @test.safe_setup
+    def setUpClass(cls):
+        # A network and a subnet will be created for these tests
+        cls.set_network_resources(network=True, subnet=True)
+        super(SecurityGroupDefaultRulesTest, cls).setUpClass()
+        cls.adm_client = cls.os_adm.security_group_default_rules_client
+
+    def _create_security_group_default_rules(self, ip_protocol='tcp',
+                                             from_port=22, to_port=22,
+                                             cidr='10.10.0.0/24'):
+        # Create Security Group default rule
+        _, rule = self.adm_client.create_security_default_group_rule(
+            ip_protocol,
+            from_port,
+            to_port,
+            cidr=cidr)
+        self.assertEqual(ip_protocol, rule['ip_protocol'])
+        self.assertEqual(from_port, rule['from_port'])
+        self.assertEqual(to_port, rule['to_port'])
+        self.assertEqual(cidr, rule['ip_range']['cidr'])
+        return rule
+
+    @test.attr(type='smoke')
+    def test_create_delete_security_group_default_rules(self):
+        # Create and delete Security Group default rule
+        ip_protocols = {'tcp', 'udp', 'icmp'}
+        for ip_protocol in ip_protocols:
+            rule = self._create_security_group_default_rules(ip_protocol)
+            # Delete Security Group default rule
+            self.adm_client.delete_security_group_default_rule(rule['id'])
+            self.assertRaises(exceptions.NotFound,
+                              self.adm_client.get_security_group_default_rule,
+                              rule['id'])
+
+    @test.attr(type='smoke')
+    def test_create_security_group_default_rule_without_cidr(self):
+        ip_protocol = 'udp'
+        from_port = 80
+        to_port = 80
+        _, rule = self.adm_client.create_security_default_group_rule(
+            ip_protocol,
+            from_port,
+            to_port)
+        self.addCleanup(self.adm_client.delete_security_group_default_rule,
+                        rule['id'])
+        self.assertNotEqual(0, rule['id'])
+        self.assertEqual('0.0.0.0/0', rule['ip_range']['cidr'])
+
+    @test.attr(type='smoke')
+    def test_create_security_group_default_rule_with_blank_cidr(self):
+        ip_protocol = 'icmp'
+        from_port = 10
+        to_port = 10
+        cidr = ''
+        _, rule = self.adm_client.create_security_default_group_rule(
+            ip_protocol,
+            from_port,
+            to_port,
+            cidr=cidr)
+        self.addCleanup(self.adm_client.delete_security_group_default_rule,
+                        rule['id'])
+        self.assertNotEqual(0, rule['id'])
+        self.assertEqual('0.0.0.0/0', rule['ip_range']['cidr'])
+
+    @test.attr(type='smoke')
+    def test_security_group_default_rules_list(self):
+        ip_protocol = 'tcp'
+        from_port = 22
+        to_port = 22
+        cidr = '10.10.0.0/24'
+        rule = self._create_security_group_default_rules(ip_protocol,
+                                                         from_port,
+                                                         to_port,
+                                                         cidr)
+        self.addCleanup(self.adm_client.delete_security_group_default_rule,
+                        rule['id'])
+        _, rules = self.adm_client.list_security_group_default_rules()
+        self.assertNotEqual(0, len(rules))
+        self.assertIn(rule, rules)
+
+    @test.attr(type='smoke')
+    def test_default_security_group_default_rule_show(self):
+        ip_protocol = 'tcp'
+        from_port = 22
+        to_port = 22
+        cidr = '10.10.0.0/24'
+        rule = self._create_security_group_default_rules(ip_protocol,
+                                                         from_port,
+                                                         to_port,
+                                                         cidr)
+        self.addCleanup(self.adm_client.delete_security_group_default_rule,
+                        rule['id'])
+        _, fetched_rule = self.adm_client.get_security_group_default_rule(
+            rule['id'])
+        self.assertEqual(rule, fetched_rule)
diff --git a/tempest/api/compute/admin/test_servers_negative.py b/tempest/api/compute/admin/test_servers_negative.py
index cccaf13..f4d010e 100644
--- a/tempest/api/compute/admin/test_servers_negative.py
+++ b/tempest/api/compute/admin/test_servers_negative.py
@@ -127,6 +127,8 @@
                           self.client.migrate_server,
                           str(uuid.uuid4()))
 
+    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+                          'Resize not available.')
     @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
                           'Suspend is not available.')
     @test.attr(type=['negative', 'gate'])
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 70a9604..a3295eb 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -83,6 +83,8 @@
             cls.hypervisor_client = cls.os.hypervisor_client
             cls.certificates_client = cls.os.certificates_client
             cls.migrations_client = cls.os.migrations_client
+            cls.security_group_default_rules_client = (
+                cls.os.security_group_default_rules_client)
 
         elif cls._api_version == 3:
             if not CONF.compute_feature_enabled.api_v3:
diff --git a/tempest/api/compute/servers/test_server_metadata_negative.py b/tempest/api/compute/servers/test_server_metadata_negative.py
index b55833c..fbda401 100644
--- a/tempest/api/compute/servers/test_server_metadata_negative.py
+++ b/tempest/api/compute/servers/test_server_metadata_negative.py
@@ -119,20 +119,22 @@
 
     @test.attr(type=['negative', 'gate'])
     def test_metadata_items_limit(self):
-        # Raise a 413 OverLimit exception while exceeding metadata items limit
-        # for tenant.
+        # A 403 Forbidden or 413 Overlimit (old behaviour) exception
+        # will be raised while exceeding metadata items limit for
+        # tenant.
         _, quota_set = self.quotas.get_quota_set(self.tenant_id)
         quota_metadata = quota_set['metadata_items']
         req_metadata = {}
         for num in range(1, quota_metadata + 2):
             req_metadata['key' + str(num)] = 'val' + str(num)
-        self.assertRaises(exceptions.OverLimit,
+        self.assertRaises((exceptions.OverLimit, exceptions.Unauthorized),
                           self.client.set_server_metadata,
                           self.server_id, req_metadata)
 
-        # Raise a 413 OverLimit exception while exceeding metadata items limit
-        # for tenant (update).
-        self.assertRaises(exceptions.OverLimit,
+        # A 403 Forbidden or 413 Overlimit (old behaviour) exception
+        # will be raised while exceeding metadata items limit for
+        # tenant (update).
+        self.assertRaises((exceptions.Unauthorized, exceptions.OverLimit),
                           self.client.update_server_metadata,
                           self.server_id, req_metadata)
 
diff --git a/tempest/api/data_processing/base.py b/tempest/api/data_processing/base.py
index ab0e83a..cfb5a3d 100644
--- a/tempest/api/data_processing/base.py
+++ b/tempest/api/data_processing/base.py
@@ -75,16 +75,16 @@
         object. All resources created in this method will be automatically
         removed in tearDownClass method.
         """
-        resp, body = cls.client.create_node_group_template(name, plugin_name,
-                                                           hadoop_version,
-                                                           node_processes,
-                                                           flavor_id,
-                                                           node_configs,
-                                                           **kwargs)
+        _, resp_body = cls.client.create_node_group_template(name, plugin_name,
+                                                             hadoop_version,
+                                                             node_processes,
+                                                             flavor_id,
+                                                             node_configs,
+                                                             **kwargs)
         # store id of created node group template
-        cls._node_group_templates.append(body['id'])
+        cls._node_group_templates.append(resp_body['id'])
 
-        return resp, body
+        return resp_body
 
     @classmethod
     def create_cluster_template(cls, name, plugin_name, hadoop_version,
@@ -95,15 +95,15 @@
         object. All resources created in this method will be automatically
         removed in tearDownClass method.
         """
-        resp, body = cls.client.create_cluster_template(name, plugin_name,
-                                                        hadoop_version,
-                                                        node_groups,
-                                                        cluster_configs,
-                                                        **kwargs)
+        _, resp_body = cls.client.create_cluster_template(name, plugin_name,
+                                                          hadoop_version,
+                                                          node_groups,
+                                                          cluster_configs,
+                                                          **kwargs)
         # store id of created cluster template
-        cls._cluster_templates.append(body['id'])
+        cls._cluster_templates.append(resp_body['id'])
 
-        return resp, body
+        return resp_body
 
     @classmethod
     def create_data_source(cls, name, type, url, **kwargs):
@@ -113,11 +113,11 @@
         object. All resources created in this method will be automatically
         removed in tearDownClass method.
         """
-        resp, body = cls.client.create_data_source(name, type, url, **kwargs)
+        _, resp_body = cls.client.create_data_source(name, type, url, **kwargs)
         # store id of created data source
-        cls._data_sources.append(body['id'])
+        cls._data_sources.append(resp_body['id'])
 
-        return resp, body
+        return resp_body
 
     @classmethod
     def create_job_binary_internal(cls, name, data):
@@ -126,11 +126,11 @@
         It returns created object. All resources created in this method will
         be automatically removed in tearDownClass method.
         """
-        resp, body = cls.client.create_job_binary_internal(name, data)
+        _, resp_body = cls.client.create_job_binary_internal(name, data)
         # store id of created job binary internal
-        cls._job_binary_internals.append(body['id'])
+        cls._job_binary_internals.append(resp_body['id'])
 
-        return resp, body
+        return resp_body
 
     def create_job_binary(cls, name, url, extra=None, **kwargs):
         """Creates watched job binary with specified params.
@@ -139,8 +139,8 @@
         object. All resources created in this method will be automatically
         removed in tearDownClass method.
         """
-        resp, body = cls.client.create_job_binary(name, url, extra, **kwargs)
+        _, resp_body = cls.client.create_job_binary(name, url, extra, **kwargs)
         # store id of created job binary
-        cls._job_binaries.append(body['id'])
+        cls._job_binaries.append(resp_body['id'])
 
-        return resp, body
+        return resp_body
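
The docstrings above promise that everything created through these helpers is
removed automatically in tearDownClass; after this change the helpers also
return only the response body rather than the (response, body) pair. A
minimal sketch of that convention (hypothetical helper, not part of the
Sahara test base):

    def _create_and_track(create_call, tracked_ids, *args, **kwargs):
        # Drop the raw HTTP response, remember the id so tearDownClass can
        # delete the resource later, and hand only the body back to callers.
        _, resp_body = create_call(*args, **kwargs)
        tracked_ids.append(resp_body['id'])
        return resp_body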
diff --git a/tempest/api/data_processing/test_cluster_templates.py b/tempest/api/data_processing/test_cluster_templates.py
index ad9ed2a..ff67c1c 100644
--- a/tempest/api/data_processing/test_cluster_templates.py
+++ b/tempest/api/data_processing/test_cluster_templates.py
@@ -39,7 +39,7 @@
                 }
             }
         }
-        resp_body = cls.create_node_group_template(**node_group_template)[1]
+        resp_body = cls.create_node_group_template(**node_group_template)
         node_group_template_id = resp_body['id']
 
         cls.full_cluster_template = {
@@ -95,23 +95,22 @@
     def _create_cluster_template(self, template_name=None):
         """Creates Cluster Template with optional name specified.
 
-        It creates template and ensures response status, template name and
-        response body. Returns id and name of created template.
+        It creates template, ensures template name and response body.
+        Returns id and name of created template.
         """
         if not template_name:
             # generate random name if it's not specified
             template_name = data_utils.rand_name('sahara-cluster-template')
 
         # create cluster template
-        resp, body = self.create_cluster_template(template_name,
-                                                  **self.full_cluster_template)
+        resp_body = self.create_cluster_template(template_name,
+                                                 **self.full_cluster_template)
 
         # ensure that template created successfully
-        self.assertEqual(202, resp.status)
-        self.assertEqual(template_name, body['name'])
-        self.assertDictContainsSubset(self.cluster_template, body)
+        self.assertEqual(template_name, resp_body['name'])
+        self.assertDictContainsSubset(self.cluster_template, resp_body)
 
-        return body['id'], template_name
+        return resp_body['id'], template_name
 
     @test.attr(type='smoke')
     def test_cluster_template_create(self):
@@ -122,8 +121,7 @@
         template_info = self._create_cluster_template()
 
         # check for cluster template in list
-        resp, templates = self.client.list_cluster_templates()
-        self.assertEqual(200, resp.status)
+        _, templates = self.client.list_cluster_templates()
         templates_info = [(template['id'], template['name'])
                           for template in templates]
         self.assertIn(template_info, templates_info)
@@ -133,16 +131,14 @@
         template_id, template_name = self._create_cluster_template()
 
         # check cluster template fetch by id
-        resp, template = self.client.get_cluster_template(template_id)
-        self.assertEqual(200, resp.status)
+        _, template = self.client.get_cluster_template(template_id)
         self.assertEqual(template_name, template['name'])
         self.assertDictContainsSubset(self.cluster_template, template)
 
     @test.attr(type='smoke')
     def test_cluster_template_delete(self):
-        template_id = self._create_cluster_template()[0]
+        template_id, _ = self._create_cluster_template()
 
         # delete the cluster template by id
-        resp = self.client.delete_cluster_template(template_id)[0]
-        self.assertEqual(204, resp.status)
+        self.client.delete_cluster_template(template_id)
         # TODO(ylobankov): check that cluster template is really deleted
diff --git a/tempest/api/data_processing/test_data_sources.py b/tempest/api/data_processing/test_data_sources.py
index 345153b..aae56c4 100644
--- a/tempest/api/data_processing/test_data_sources.py
+++ b/tempest/api/data_processing/test_data_sources.py
@@ -48,65 +48,59 @@
     def _create_data_source(self, source_body, source_name=None):
         """Creates Data Source with optional name specified.
 
-        It creates a link to input-source file (it may not exist) and ensures
-        response status and source name. Returns id and name of created source.
+        It creates a link to input-source file (it may not exist), ensures
+        source name and response body. Returns id and name of created source.
         """
         if not source_name:
             # generate random name if it's not specified
             source_name = data_utils.rand_name('sahara-data-source')
 
         # create data source
-        resp, body = self.create_data_source(source_name, **source_body)
+        resp_body = self.create_data_source(source_name, **source_body)
 
         # ensure that source created successfully
-        self.assertEqual(202, resp.status)
-        self.assertEqual(source_name, body['name'])
+        self.assertEqual(source_name, resp_body['name'])
         if source_body['type'] == 'swift':
             source_body = self.swift_data_source
-        self.assertDictContainsSubset(source_body, body)
+        self.assertDictContainsSubset(source_body, resp_body)
 
-        return body['id'], source_name
+        return resp_body['id'], source_name
 
     def _list_data_sources(self, source_info):
         # check for data source in list
-        resp, sources = self.client.list_data_sources()
-        self.assertEqual(200, resp.status)
+        _, sources = self.client.list_data_sources()
         sources_info = [(source['id'], source['name']) for source in sources]
         self.assertIn(source_info, sources_info)
 
     def _get_data_source(self, source_id, source_name, source_body):
         # check data source fetch by id
-        resp, source = self.client.get_data_source(source_id)
-        self.assertEqual(200, resp.status)
+        _, source = self.client.get_data_source(source_id)
         self.assertEqual(source_name, source['name'])
         self.assertDictContainsSubset(source_body, source)
 
-    def _delete_data_source(self, source_id):
-        # delete the data source by id
-        resp = self.client.delete_data_source(source_id)[0]
-        self.assertEqual(204, resp.status)
-
     @test.attr(type='smoke')
     def test_swift_data_source_create(self):
         self._create_data_source(self.swift_data_source_with_creds)
 
     @test.attr(type='smoke')
     def test_swift_data_source_list(self):
-        source_info = self._create_data_source(
-            self.swift_data_source_with_creds)
+        source_info = (
+            self._create_data_source(self.swift_data_source_with_creds))
         self._list_data_sources(source_info)
 
     @test.attr(type='smoke')
     def test_swift_data_source_get(self):
-        source_id, source_name = self._create_data_source(
-            self.swift_data_source_with_creds)
+        source_id, source_name = (
+            self._create_data_source(self.swift_data_source_with_creds))
         self._get_data_source(source_id, source_name, self.swift_data_source)
 
     @test.attr(type='smoke')
     def test_swift_data_source_delete(self):
-        source_id = self._create_data_source(
-            self.swift_data_source_with_creds)[0]
-        self._delete_data_source(source_id)
+        source_id, _ = (
+            self._create_data_source(self.swift_data_source_with_creds))
+
+        # delete the data source by id
+        self.client.delete_data_source(source_id)
 
     @test.attr(type='smoke')
     def test_local_hdfs_data_source_create(self):
@@ -119,15 +113,17 @@
 
     @test.attr(type='smoke')
     def test_local_hdfs_data_source_get(self):
-        source_id, source_name = self._create_data_source(
-            self.local_hdfs_data_source)
+        source_id, source_name = (
+            self._create_data_source(self.local_hdfs_data_source))
         self._get_data_source(
             source_id, source_name, self.local_hdfs_data_source)
 
     @test.attr(type='smoke')
     def test_local_hdfs_data_source_delete(self):
-        source_id = self._create_data_source(self.local_hdfs_data_source)[0]
-        self._delete_data_source(source_id)
+        source_id, _ = self._create_data_source(self.local_hdfs_data_source)
+
+        # delete the data source by id
+        self.client.delete_data_source(source_id)
 
     @test.attr(type='smoke')
     def test_external_hdfs_data_source_create(self):
@@ -140,12 +136,14 @@
 
     @test.attr(type='smoke')
     def test_external_hdfs_data_source_get(self):
-        source_id, source_name = self._create_data_source(
-            self.external_hdfs_data_source)
+        source_id, source_name = (
+            self._create_data_source(self.external_hdfs_data_source))
         self._get_data_source(
             source_id, source_name, self.external_hdfs_data_source)
 
     @test.attr(type='smoke')
     def test_external_hdfs_data_source_delete(self):
-        source_id = self._create_data_source(self.external_hdfs_data_source)[0]
-        self._delete_data_source(source_id)
+        source_id, _ = self._create_data_source(self.external_hdfs_data_source)
+
+        # delete the data source by id
+        self.client.delete_data_source(source_id)
diff --git a/tempest/api/data_processing/test_job_binaries.py b/tempest/api/data_processing/test_job_binaries.py
index 689c1fe..15ee145 100644
--- a/tempest/api/data_processing/test_job_binaries.py
+++ b/tempest/api/data_processing/test_job_binaries.py
@@ -40,8 +40,8 @@
 
         name = data_utils.rand_name('sahara-internal-job-binary')
         cls.job_binary_data = 'Some script may be data'
-        job_binary_internal = cls.create_job_binary_internal(
-            name, cls.job_binary_data)[1]
+        job_binary_internal = (
+            cls.create_job_binary_internal(name, cls.job_binary_data))
         cls.internal_db_job_binary = {
             'url': 'internal-db://%s' % job_binary_internal['id'],
             'description': 'Test job binary',
@@ -50,26 +50,25 @@
     def _create_job_binary(self, binary_body, binary_name=None):
         """Creates Job Binary with optional name specified.
 
-        It creates a link to data (jar, pig files, etc.) and ensures response
-        status, job binary name and response body. Returns id and name of
-        created job binary. Data may not exist when using Swift
-        as data storage. In other cases data must exist in storage.
+        It creates a link to data (jar, pig files, etc.), ensures job binary
+        name and response body. Returns id and name of created job binary.
+        Data may not exist when using Swift as data storage.
+        In other cases data must exist in storage.
         """
         if not binary_name:
             # generate random name if it's not specified
             binary_name = data_utils.rand_name('sahara-job-binary')
 
         # create job binary
-        resp, body = self.create_job_binary(binary_name, **binary_body)
+        resp_body = self.create_job_binary(binary_name, **binary_body)
 
         # ensure that binary created successfully
-        self.assertEqual(202, resp.status)
-        self.assertEqual(binary_name, body['name'])
+        self.assertEqual(binary_name, resp_body['name'])
         if 'swift' in binary_body['url']:
             binary_body = self.swift_job_binary
-        self.assertDictContainsSubset(binary_body, body)
+        self.assertDictContainsSubset(binary_body, resp_body)
 
-        return body['id'], binary_name
+        return resp_body['id'], binary_name
 
     @test.attr(type='smoke')
     def test_swift_job_binary_create(self):
@@ -80,30 +79,27 @@
         binary_info = self._create_job_binary(self.swift_job_binary_with_extra)
 
         # check for job binary in list
-        resp, binaries = self.client.list_job_binaries()
-        self.assertEqual(200, resp.status)
+        _, binaries = self.client.list_job_binaries()
         binaries_info = [(binary['id'], binary['name']) for binary in binaries]
         self.assertIn(binary_info, binaries_info)
 
     @test.attr(type='smoke')
     def test_swift_job_binary_get(self):
-        binary_id, binary_name = self._create_job_binary(
-            self.swift_job_binary_with_extra)
+        binary_id, binary_name = (
+            self._create_job_binary(self.swift_job_binary_with_extra))
 
         # check job binary fetch by id
-        resp, binary = self.client.get_job_binary(binary_id)
-        self.assertEqual(200, resp.status)
+        _, binary = self.client.get_job_binary(binary_id)
         self.assertEqual(binary_name, binary['name'])
         self.assertDictContainsSubset(self.swift_job_binary, binary)
 
     @test.attr(type='smoke')
     def test_swift_job_binary_delete(self):
-        binary_id = self._create_job_binary(
-            self.swift_job_binary_with_extra)[0]
+        binary_id, _ = (
+            self._create_job_binary(self.swift_job_binary_with_extra))
 
         # delete the job binary by id
-        resp = self.client.delete_job_binary(binary_id)[0]
-        self.assertEqual(204, resp.status)
+        self.client.delete_job_binary(binary_id)
 
     @test.attr(type='smoke')
     def test_internal_db_job_binary_create(self):
@@ -114,35 +110,31 @@
         binary_info = self._create_job_binary(self.internal_db_job_binary)
 
         # check for job binary in list
-        resp, binaries = self.client.list_job_binaries()
-        self.assertEqual(200, resp.status)
+        _, binaries = self.client.list_job_binaries()
         binaries_info = [(binary['id'], binary['name']) for binary in binaries]
         self.assertIn(binary_info, binaries_info)
 
     @test.attr(type='smoke')
     def test_internal_db_job_binary_get(self):
-        binary_id, binary_name = self._create_job_binary(
-            self.internal_db_job_binary)
+        binary_id, binary_name = (
+            self._create_job_binary(self.internal_db_job_binary))
 
         # check job binary fetch by id
-        resp, binary = self.client.get_job_binary(binary_id)
-        self.assertEqual(200, resp.status)
+        _, binary = self.client.get_job_binary(binary_id)
         self.assertEqual(binary_name, binary['name'])
         self.assertDictContainsSubset(self.internal_db_job_binary, binary)
 
     @test.attr(type='smoke')
     def test_internal_db_job_binary_delete(self):
-        binary_id = self._create_job_binary(self.internal_db_job_binary)[0]
+        binary_id, _ = self._create_job_binary(self.internal_db_job_binary)
 
         # delete the job binary by id
-        resp = self.client.delete_job_binary(binary_id)[0]
-        self.assertEqual(204, resp.status)
+        self.client.delete_job_binary(binary_id)
 
     @test.attr(type='smoke')
     def test_job_binary_get_data(self):
-        binary_id = self._create_job_binary(self.internal_db_job_binary)[0]
+        binary_id, _ = self._create_job_binary(self.internal_db_job_binary)
 
         # get data of job binary by id
-        resp, data = self.client.get_job_binary_data(binary_id)
-        self.assertEqual(200, resp.status)
+        _, data = self.client.get_job_binary_data(binary_id)
         self.assertEqual(data, self.job_binary_data)
diff --git a/tempest/api/data_processing/test_job_binary_internals.py b/tempest/api/data_processing/test_job_binary_internals.py
index 6d59177..45e1140 100644
--- a/tempest/api/data_processing/test_job_binary_internals.py
+++ b/tempest/api/data_processing/test_job_binary_internals.py
@@ -29,23 +29,22 @@
     def _create_job_binary_internal(self, binary_name=None):
         """Creates Job Binary Internal with optional name specified.
 
-        It puts data into Sahara database and ensures response status and
-        job binary internal name. Returns id and name of created job binary
-        internal.
+        It puts data into Sahara database and ensures job binary internal name.
+        Returns id and name of created job binary internal.
         """
         if not binary_name:
             # generate random name if it's not specified
             binary_name = data_utils.rand_name('sahara-job-binary-internal')
 
         # create job binary internal
-        resp, body = self.create_job_binary_internal(
-            binary_name, self.job_binary_internal_data)
+        resp_body = (
+            self.create_job_binary_internal(binary_name,
+                                            self.job_binary_internal_data))
 
         # ensure that job binary internal created successfully
-        self.assertEqual(202, resp.status)
-        self.assertEqual(binary_name, body['name'])
+        self.assertEqual(binary_name, resp_body['name'])
 
-        return body['id'], binary_name
+        return resp_body['id'], binary_name
 
     @test.attr(type='smoke')
     def test_job_binary_internal_create(self):
@@ -56,8 +55,7 @@
         binary_info = self._create_job_binary_internal()
 
         # check for job binary internal in list
-        resp, binaries = self.client.list_job_binary_internals()
-        self.assertEqual(200, resp.status)
+        _, binaries = self.client.list_job_binary_internals()
         binaries_info = [(binary['id'], binary['name']) for binary in binaries]
         self.assertIn(binary_info, binaries_info)
 
@@ -66,23 +64,20 @@
         binary_id, binary_name = self._create_job_binary_internal()
 
         # check job binary internal fetch by id
-        resp, binary = self.client.get_job_binary_internal(binary_id)
-        self.assertEqual(200, resp.status)
+        _, binary = self.client.get_job_binary_internal(binary_id)
         self.assertEqual(binary_name, binary['name'])
 
     @test.attr(type='smoke')
     def test_job_binary_internal_delete(self):
-        binary_id = self._create_job_binary_internal()[0]
+        binary_id, _ = self._create_job_binary_internal()
 
         # delete the job binary internal by id
-        resp = self.client.delete_job_binary_internal(binary_id)[0]
-        self.assertEqual(204, resp.status)
+        self.client.delete_job_binary_internal(binary_id)
 
     @test.attr(type='smoke')
     def test_job_binary_internal_get_data(self):
-        binary_id = self._create_job_binary_internal()[0]
+        binary_id, _ = self._create_job_binary_internal()
 
         # get data of job binary internal by id
-        resp, data = self.client.get_job_binary_internal_data(binary_id)
-        self.assertEqual(200, resp.status)
+        _, data = self.client.get_job_binary_internal_data(binary_id)
         self.assertEqual(data, self.job_binary_internal_data)
diff --git a/tempest/api/data_processing/test_node_group_templates.py b/tempest/api/data_processing/test_node_group_templates.py
index 04f98b4..c2c0075 100644
--- a/tempest/api/data_processing/test_node_group_templates.py
+++ b/tempest/api/data_processing/test_node_group_templates.py
@@ -43,7 +43,7 @@
     def _create_node_group_template(self, template_name=None):
         """Creates Node Group Template with optional name specified.
 
-        It creates template and ensures response status and template name.
+        It creates template, ensures template name and response body.
         Returns id and name of created template.
         """
         if not template_name:
@@ -51,15 +51,14 @@
             template_name = data_utils.rand_name('sahara-ng-template')
 
         # create node group template
-        resp, body = self.create_node_group_template(
-            template_name, **self.node_group_template)
+        resp_body = self.create_node_group_template(template_name,
+                                                    **self.node_group_template)
 
         # ensure that template created successfully
-        self.assertEqual(202, resp.status)
-        self.assertEqual(template_name, body['name'])
-        self.assertDictContainsSubset(self.node_group_template, body)
+        self.assertEqual(template_name, resp_body['name'])
+        self.assertDictContainsSubset(self.node_group_template, resp_body)
 
-        return body['id'], template_name
+        return resp_body['id'], template_name
 
     @test.attr(type='smoke')
     def test_node_group_template_create(self):
@@ -70,8 +69,7 @@
         template_info = self._create_node_group_template()
 
         # check for node group template in list
-        resp, templates = self.client.list_node_group_templates()
-        self.assertEqual(200, resp.status)
+        _, templates = self.client.list_node_group_templates()
         templates_info = [(template['id'], template['name'])
                           for template in templates]
         self.assertIn(template_info, templates_info)
@@ -81,15 +79,13 @@
         template_id, template_name = self._create_node_group_template()
 
         # check node group template fetch by id
-        resp, template = self.client.get_node_group_template(template_id)
-        self.assertEqual(200, resp.status)
+        _, template = self.client.get_node_group_template(template_id)
         self.assertEqual(template_name, template['name'])
         self.assertDictContainsSubset(self.node_group_template, template)
 
     @test.attr(type='smoke')
     def test_node_group_template_delete(self):
-        template_id = self._create_node_group_template()[0]
+        template_id, _ = self._create_node_group_template()
 
         # delete the node group template by id
-        resp = self.client.delete_node_group_template(template_id)[0]
-        self.assertEqual(204, resp.status)
+        self.client.delete_node_group_template(template_id)
diff --git a/tempest/api/data_processing/test_plugins.py b/tempest/api/data_processing/test_plugins.py
index d643f23..9fd7a17 100644
--- a/tempest/api/data_processing/test_plugins.py
+++ b/tempest/api/data_processing/test_plugins.py
@@ -20,10 +20,9 @@
     def _list_all_plugin_names(self):
         """Returns all enabled plugin names.
 
-        It ensures response status and main plugins availability.
+        It ensures main plugins availability.
         """
-        resp, plugins = self.client.list_plugins()
-        self.assertEqual(200, resp.status)
+        _, plugins = self.client.list_plugins()
         plugins_names = [plugin['name'] for plugin in plugins]
         self.assertIn('vanilla', plugins_names)
         self.assertIn('hdp', plugins_names)
@@ -37,14 +36,12 @@
     @test.attr(type='smoke')
     def test_plugin_get(self):
         for plugin_name in self._list_all_plugin_names():
-            resp, plugin = self.client.get_plugin(plugin_name)
-            self.assertEqual(200, resp.status)
+            _, plugin = self.client.get_plugin(plugin_name)
             self.assertEqual(plugin_name, plugin['name'])
 
             for plugin_version in plugin['versions']:
-                resp, detailed_plugin = self.client.get_plugin(plugin_name,
-                                                               plugin_version)
-                self.assertEqual(200, resp.status)
+                _, detailed_plugin = self.client.get_plugin(plugin_name,
+                                                            plugin_version)
                 self.assertEqual(plugin_name, detailed_plugin['name'])
 
                 # check that required image tags contains name and version
diff --git a/tempest/api/identity/admin/v3/test_users.py b/tempest/api/identity/admin/v3/test_users.py
index 558575e..3c25819 100644
--- a/tempest/api/identity/admin/v3/test_users.py
+++ b/tempest/api/identity/admin/v3/test_users.py
@@ -65,6 +65,28 @@
         self.assertEqual('false', str(new_user_get['enabled']).lower())
 
     @test.attr(type='gate')
+    def test_update_user_password(self):
+        # Create a user to check password update
+        u_name = data_utils.rand_name('user')
+        original_password = data_utils.rand_name('pass')
+        _, user = self.client.create_user(
+            u_name, password=original_password)
+        # Delete the user at the end of all test methods
+        self.addCleanup(self.client.delete_user, user['id'])
+        # Update user with new password
+        new_password = data_utils.rand_name('pass1')
+        self.client.update_user_password(user['id'], new_password,
+                                         original_password)
+        resp, body = self.token.auth(user['id'], new_password)
+        self.assertEqual(201, resp.status)
+        subject_token = resp['x-subject-token']
+        # Perform GET Token to verify and confirm password is updated
+        _, token_details = self.client.get_token(subject_token)
+        self.assertEqual(resp['x-subject-token'], subject_token)
+        self.assertEqual(token_details['user']['id'], user['id'])
+        self.assertEqual(token_details['user']['name'], u_name)
+
+    @test.attr(type='gate')
     def test_list_user_projects(self):
         # List the projects that a user has access upon
         assigned_project_ids = list()
diff --git a/tempest/api/image/base.py b/tempest/api/image/base.py
index 02d391b..c875b2f 100644
--- a/tempest/api/image/base.py
+++ b/tempest/api/image/base.py
@@ -100,7 +100,7 @@
         cls.alt_tenant_id = cls.alt_img_cli.tenant_id
 
     def _create_image(self):
-        image_file = StringIO.StringIO('*' * 1024)
+        image_file = StringIO.StringIO(data_utils.random_bytes())
         resp, image = self.create_image(container_format='bare',
                                         disk_format='raw',
                                         is_public=False,
diff --git a/tempest/api/image/v1/test_images.py b/tempest/api/image/v1/test_images.py
index 8528e42..bf55b89 100644
--- a/tempest/api/image/v1/test_images.py
+++ b/tempest/api/image/v1/test_images.py
@@ -44,7 +44,7 @@
             self.assertEqual(val, body.get('properties')[key])
 
         # Now try uploading an image file
-        image_file = StringIO.StringIO(('*' * 1024))
+        image_file = StringIO.StringIO(data_utils.random_bytes())
         _, body = self.client.update_image(image_id, data=image_file)
         self.assertIn('size', body)
         self.assertEqual(1024, body.get('size'))
@@ -157,7 +157,7 @@
         image. Note that the size of the new image is a random number between
         1024 and 4096
         """
-        image_file = StringIO.StringIO('*' * size)
+        image_file = StringIO.StringIO(data_utils.random_bytes(size))
         name = 'New Standard Image %s' % name
         _, image = cls.create_image(name=name,
                                     container_format=container_format,
@@ -338,10 +338,9 @@
                                disk_format, size):
         """
         Create a new standard image and return the ID of the newly-registered
-        image. Note that the size of the new image is a random number between
-        1024 and 4096
+        image.
         """
-        image_file = StringIO.StringIO('*' * size)
+        image_file = StringIO.StringIO(data_utils.random_bytes(size))
         name = 'New Standard Image %s' % name
         _, image = cls.create_image(name=name,
                                     container_format=container_format,
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index ae777eb..a974ebb 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -52,7 +52,7 @@
         self.assertEqual('queued', body['status'])
 
         # Now try uploading an image file
-        file_content = '*' * 1024
+        file_content = data_utils.random_bytes()
         image_file = StringIO.StringIO(file_content)
         self.client.store_image(image_id, image_file)
 
@@ -86,7 +86,8 @@
 
         # Verifying deletion
         _, images = self.client.image_list()
-        self.assertNotIn(image_id, images)
+        images_id = [item['id'] for item in images]
+        self.assertNotIn(image_id, images_id)
 
     @test.attr(type='gate')
     def test_update_image(self):
@@ -103,8 +104,7 @@
         image_id = body['id']
 
         # Now try uploading an image file
-        file_content = '*' * 1024
-        image_file = StringIO.StringIO(file_content)
+        image_file = StringIO.StringIO(data_utils.random_bytes())
         self.client.store_image(image_id, image_file)
 
         # Update Image
@@ -145,7 +145,8 @@
         image. Note that the size of the new image is a random number between
         1024 and 4096
         """
-        image_file = StringIO.StringIO('*' * random.randint(1024, 4096))
+        size = random.randint(1024, 4096)
+        image_file = StringIO.StringIO(data_utils.random_bytes(size))
         name = data_utils.rand_name('image-')
         _, body = cls.create_image(name=name,
                                    container_format=container_format,
diff --git a/tempest/api/network/admin/test_quotas.py b/tempest/api/network/admin/test_quotas.py
index d1a8faf..9fa54b1 100644
--- a/tempest/api/network/admin/test_quotas.py
+++ b/tempest/api/network/admin/test_quotas.py
@@ -46,8 +46,7 @@
             raise cls.skipException(msg)
         cls.identity_admin_client = cls.os_adm.identity_client
 
-    @test.attr(type='gate')
-    def test_quotas(self):
+    def _check_quotas(self, new_quotas):
         # Add a tenant to conduct the test
         test_tenant = data_utils.rand_name('test_tenant_')
         test_description = data_utils.rand_name('desc_')
@@ -56,14 +55,15 @@
             description=test_description)
         tenant_id = tenant['id']
         self.addCleanup(self.identity_admin_client.delete_tenant, tenant_id)
+
         # Change quotas for tenant
-        new_quotas = {'network': 0, 'security_group': 0}
         resp, quota_set = self.admin_client.update_quotas(tenant_id,
                                                           **new_quotas)
         self.assertEqual('200', resp['status'])
         self.addCleanup(self.admin_client.reset_quotas, tenant_id)
-        self.assertEqual(0, quota_set['network'])
-        self.assertEqual(0, quota_set['security_group'])
+        for key, value in new_quotas.iteritems():
+            self.assertEqual(value, quota_set[key])
+
         # Confirm our tenant is listed among tenants with non default quotas
         resp, non_default_quotas = self.admin_client.list_quotas()
         self.assertEqual('200', resp['status'])
@@ -72,12 +72,14 @@
             if qs['tenant_id'] == tenant_id:
                 found = True
         self.assertTrue(found)
-        # Confirm from APi quotas were changed as requested for tenant
+
+        # Confirm from API quotas were changed as requested for tenant
         resp, quota_set = self.admin_client.show_quotas(tenant_id)
         quota_set = quota_set['quota']
         self.assertEqual('200', resp['status'])
-        self.assertEqual(0, quota_set['network'])
-        self.assertEqual(0, quota_set['security_group'])
+        for key, value in new_quotas.iteritems():
+            self.assertEqual(value, quota_set[key])
+
         # Reset quotas to default and confirm
         resp, body = self.admin_client.reset_quotas(tenant_id)
         self.assertEqual('204', resp['status'])
@@ -86,49 +88,14 @@
         for q in non_default_quotas['quotas']:
             self.assertNotEqual(tenant_id, q['tenant_id'])
 
+    @test.attr(type='gate')
+    def test_quotas(self):
+        new_quotas = {'network': 0, 'security_group': 0}
+        self._check_quotas(new_quotas)
+
     @test.requires_ext(extension='lbaas', service='network')
     @test.attr(type='gate')
     def test_lbaas_quotas(self):
-        # Add a tenant to conduct the test
-        test_tenant = data_utils.rand_name('test_tenant_')
-        test_description = data_utils.rand_name('desc_')
-        _, tenant = self.identity_admin_client.create_tenant(
-            name=test_tenant,
-            description=test_description)
-        tenant_id = tenant['id']
-        self.addCleanup(self.identity_admin_client.delete_tenant, tenant_id)
-        # Change lbaas quotas for tenant
         new_quotas = {'vip': 1, 'pool': 2,
                       'member': 3, 'health_monitor': 4}
-
-        resp, quota_set = self.admin_client.update_quotas(tenant_id,
-                                                          **new_quotas)
-        self.assertEqual('200', resp['status'])
-        self.addCleanup(self.admin_client.reset_quotas, tenant_id)
-        self.assertEqual(1, quota_set['vip'])
-        self.assertEqual(2, quota_set['pool'])
-        self.assertEqual(3, quota_set['member'])
-        self.assertEqual(4, quota_set['health_monitor'])
-        # Confirm our tenant is listed among tenants with non default quotas
-        resp, non_default_quotas = self.admin_client.list_quotas()
-        self.assertEqual('200', resp['status'])
-        found = False
-        for qs in non_default_quotas['quotas']:
-            if qs['tenant_id'] == tenant_id:
-                found = True
-        self.assertTrue(found)
-        # Confirm from APi quotas were changed as requested for tenant
-        resp, quota_set = self.admin_client.show_quotas(tenant_id)
-        quota_set = quota_set['quota']
-        self.assertEqual('200', resp['status'])
-        self.assertEqual(1, quota_set['vip'])
-        self.assertEqual(2, quota_set['pool'])
-        self.assertEqual(3, quota_set['member'])
-        self.assertEqual(4, quota_set['health_monitor'])
-        # Reset quotas to default and confirm
-        resp, body = self.admin_client.reset_quotas(tenant_id)
-        self.assertEqual('204', resp['status'])
-        resp, non_default_quotas = self.admin_client.list_quotas()
-        self.assertEqual('200', resp['status'])
-        for q in non_default_quotas['quotas']:
-            self.assertNotEqual(tenant_id, q['tenant_id'])
+        self._check_quotas(new_quotas)
diff --git a/tempest/api/orchestration/base.py b/tempest/api/orchestration/base.py
index f83169f..531df2d 100644
--- a/tempest/api/orchestration/base.py
+++ b/tempest/api/orchestration/base.py
@@ -85,11 +85,8 @@
                 pass
 
         for stack_identifier in cls.stacks:
-            try:
-                cls.client.wait_for_stack_status(
-                    stack_identifier, 'DELETE_COMPLETE')
-            except exceptions.NotFound:
-                pass
+            cls.client.wait_for_stack_status(
+                stack_identifier, 'DELETE_COMPLETE')
 
     @classmethod
     def _create_keypair(cls, name_start='keypair-heat-'):
diff --git a/tempest/api/orchestration/stacks/test_environment.py b/tempest/api/orchestration/stacks/test_environment.py
index bc46901..96e1c50 100644
--- a/tempest/api/orchestration/stacks/test_environment.py
+++ b/tempest/api/orchestration/stacks/test_environment.py
@@ -65,7 +65,10 @@
 
         # random_string.yaml specifies a length of 10
         random_value = self.get_stack_output(stack_identifier, 'random_value')
-        self.assertEqual(10, len(random_value))
+        random_string_template = self.load_template('random_string')
+        expected_length = random_string_template['parameters'][
+            'random_length']['default']
+        self.assertEqual(expected_length, len(random_value))
 
     @test.attr(type='gate')
     def test_files_provider_resource(self):
@@ -90,4 +93,7 @@
 
         # random_string.yaml specifies a length of 10
         random_value = self.get_stack_output(stack_identifier, 'random_value')
-        self.assertEqual(10, len(random_value))
+        random_string_template = self.load_template('random_string')
+        expected_length = random_string_template['parameters'][
+            'random_length']['default']
+        self.assertEqual(expected_length, len(random_value))
diff --git a/tempest/api/orchestration/stacks/test_nova_keypair_resources.py b/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
index 336fc99..e22a08b 100644
--- a/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
+++ b/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
@@ -23,6 +23,8 @@
 
 class NovaKeyPairResourcesYAMLTest(base.BaseOrchestrationTest):
     _tpl_type = 'yaml'
+    _resource = 'resources'
+    _type = 'type'
 
     @classmethod
     def setUpClass(cls):
@@ -49,8 +51,15 @@
     @test.attr(type='slow')
     def test_created_resources(self):
         """Verifies created keypair resource."""
-        resources = [('KeyPairSavePrivate', 'OS::Nova::KeyPair'),
-                     ('KeyPairDontSavePrivate', 'OS::Nova::KeyPair')]
+
+        nova_keypair_template = self.load_template('nova_keypair',
+                                                   ext=self._tpl_type)
+        resources = [('KeyPairSavePrivate',
+                      nova_keypair_template[self._resource][
+                      'KeyPairSavePrivate'][self._type]),
+                     ('KeyPairDontSavePrivate',
+                      nova_keypair_template[self._resource][
+                      'KeyPairDontSavePrivate'][self._type])]
 
         for resource_name, resource_type in resources:
             resource = self.test_resources.get(resource_name, None)
@@ -85,3 +94,5 @@
 
 class NovaKeyPairResourcesAWSTest(NovaKeyPairResourcesYAMLTest):
     _tpl_type = 'json'
+    _resource = 'Resources'
+    _type = 'Type'
diff --git a/tempest/api/orchestration/stacks/test_stacks.py b/tempest/api/orchestration/stacks/test_stacks.py
index 5b45d82..d5e66e8 100644
--- a/tempest/api/orchestration/stacks/test_stacks.py
+++ b/tempest/api/orchestration/stacks/test_stacks.py
@@ -64,3 +64,4 @@
         # delete the stack
         resp = self.client.delete_stack(stack_identifier)
         self.assertEqual('204', resp[0]['status'])
+        self.client.wait_for_stack_status(stack_identifier, 'DELETE_COMPLETE')
diff --git a/tempest/api/orchestration/stacks/test_swift_resources.py b/tempest/api/orchestration/stacks/test_swift_resources.py
index 2ba2811..adab8c3 100644
--- a/tempest/api/orchestration/stacks/test_swift_resources.py
+++ b/tempest/api/orchestration/stacks/test_swift_resources.py
@@ -49,8 +49,11 @@
 
     def test_created_resources(self):
         """Created stack should be in the list of existing stacks."""
-        resources = [('SwiftContainer', 'OS::Swift::Container'),
-                     ('SwiftContainerWebsite', 'OS::Swift::Container')]
+        swift_basic_template = self.load_template('swift_basic')
+        resources = [('SwiftContainer', swift_basic_template['resources'][
+                      'SwiftContainer']['type']),
+                     ('SwiftContainerWebsite', swift_basic_template[
+                      'resources']['SwiftContainerWebsite']['type'])]
         for resource_name, resource_type in resources:
             resource = self.test_resources.get(resource_name)
             self.assertIsInstance(resource, dict)
@@ -84,10 +87,9 @@
             self.assertIn(h, headers)
 
     def test_metadata(self):
-        metadatas = {
-            "web-index": "index.html",
-            "web-error": "error.html"
-        }
+        swift_basic_template = self.load_template('swift_basic')
+        metadatas = swift_basic_template['resources']['SwiftContainerWebsite'][
+            'properties']['X-Container-Meta']
         swcont_website = self.test_resources.get(
             'SwiftContainerWebsite')['physical_resource_id']
         headers, _ = self.container_client.list_container_metadata(
diff --git a/tempest/api/telemetry/base.py b/tempest/api/telemetry/base.py
index 2b422fd..b5b2bb1 100644
--- a/tempest/api/telemetry/base.py
+++ b/tempest/api/telemetry/base.py
@@ -34,19 +34,27 @@
         cls.telemetry_client = os.telemetry_client
         cls.servers_client = os.servers_client
         cls.flavors_client = os.flavors_client
+        cls.image_client = os.image_client
+        cls.image_client_v2 = os.image_client_v2
 
         cls.nova_notifications = ['memory', 'vcpus', 'disk.root.size',
                                   'disk.ephemeral.size']
+
+        cls.glance_notifications = ['image.update', 'image.upload',
+                                    'image.delete']
+
+        cls.glance_v2_notifications = ['image.download', 'image.serve']
+
         cls.server_ids = []
         cls.alarm_ids = []
+        cls.image_ids = []
 
     @classmethod
     def create_alarm(cls, **kwargs):
         resp, body = cls.telemetry_client.create_alarm(
             name=data_utils.rand_name('telemetry_alarm'),
             type='threshold', **kwargs)
-        if resp['status'] == '201':
-            cls.alarm_ids.append(body['alarm_id'])
+        cls.alarm_ids.append(body['alarm_id'])
         return resp, body
 
     @classmethod
@@ -55,8 +63,15 @@
             data_utils.rand_name('ceilometer-instance'),
             CONF.compute.image_ref, CONF.compute.flavor_ref,
             wait_until='ACTIVE')
-        if resp['status'] == '202':
-            cls.server_ids.append(body['id'])
+        cls.server_ids.append(body['id'])
+        return resp, body
+
+    @classmethod
+    def create_image(cls, client):
+        resp, body = client.create_image(
+            data_utils.rand_name('image'), container_format='bare',
+            disk_format='raw', visibility='private')
+        cls.image_ids.append(body['id'])
         return resp, body
 
     @staticmethod
@@ -71,6 +86,7 @@
     def tearDownClass(cls):
         cls.cleanup_resources(cls.telemetry_client.delete_alarm, cls.alarm_ids)
         cls.cleanup_resources(cls.servers_client.delete_server, cls.server_ids)
+        cls.cleanup_resources(cls.image_client.delete_image, cls.image_ids)
         cls.clear_isolated_creds()
         super(BaseTelemetryTest, cls).tearDownClass()
 
diff --git a/tempest/api/telemetry/test_telemetry_notification_api.py b/tempest/api/telemetry/test_telemetry_notification_api.py
index 148f5a3..2a170c7 100644
--- a/tempest/api/telemetry/test_telemetry_notification_api.py
+++ b/tempest/api/telemetry/test_telemetry_notification_api.py
@@ -32,6 +32,7 @@
     @test.attr(type="gate")
     @testtools.skipIf(not CONF.service_available.nova,
                       "Nova is not available.")
+    @test.skip_because(bug="1336755")
     def test_check_nova_notification(self):
 
         resp, body = self.create_server()
@@ -42,6 +43,36 @@
         for metric in self.nova_notifications:
             self.await_samples(metric, query)
 
+    @test.attr(type="smoke")
+    @test.services("image")
+    @testtools.skipIf(not CONF.image_feature_enabled.api_v1,
+                      "Glance api v1 is disabled")
+    def test_check_glance_v1_notifications(self):
+        _, body = self.create_image(self.image_client)
+        self.image_client.update_image(body['id'], data='data')
+
+        query = 'resource', 'eq', body['id']
+
+        self.image_client.delete_image(body['id'])
+
+        for metric in self.glance_notifications:
+            self.await_samples(metric, query)
+
+    @test.attr(type="smoke")
+    @test.services("image")
+    @testtools.skipIf(not CONF.image_feature_enabled.api_v2,
+                      "Glance api v2 is disabled")
+    def test_check_glance_v2_notifications(self):
+        _, body = self.create_image(self.image_client_v2)
+
+        self.image_client_v2.store_image(body['id'], "file")
+        self.image_client_v2.get_image_file(body['id'])
+
+        query = 'resource', 'eq', body['id']
+
+        for metric in self.glance_v2_notifications:
+            self.await_samples(metric, query)
+
 
 class TelemetryNotificationAPITestXML(TelemetryNotificationAPITestJSON):
     _interface = 'xml'
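
Both new Glance tests build the Ceilometer query as a plain (field, operator, value) tuple and then poll await_samples for every expected meter. A rough sketch of that polling pattern, with list_samples standing in for the real telemetry client call (names here are illustrative, not Tempest's API):

    import time


    def await_samples(list_samples, metric, query, timeout=60, interval=5):
        # Poll until at least one sample matching `query` exists for `metric`,
        # e.g. query = ('resource', 'eq', image_id).
        deadline = time.time() + timeout
        while time.time() < deadline:
            _, samples = list_samples(metric, query)
            if samples:
                return samples
            time.sleep(interval)
        raise AssertionError('no %s samples for query %r' % (metric, query))
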
diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py
index e79d23c..d451517 100644
--- a/tempest/api/volume/admin/test_multi_backend.py
+++ b/tempest/api/volume/admin/test_multi_backend.py
@@ -36,42 +36,55 @@
 
         cls.volume_client = cls.os_adm.volumes_client
         cls.volume_type_id_list = []
-        cls.volume_id_list = []
+        cls.volume_id_list_with_prefix = []
+        cls.volume_id_list_without_prefix = []
 
-        # Volume/Type creation (uses backend1_name)
-        type1_name = data_utils.rand_name('Type-')
-        vol1_name = data_utils.rand_name('Volume-')
-        extra_specs1 = {"volume_backend_name": cls.backend1_name}
-        resp, cls.type1 = cls.client.create_volume_type(
-            type1_name, extra_specs=extra_specs1)
-        cls.volume_type_id_list.append(cls.type1['id'])
-
-        resp, cls.volume1 = cls.volume_client.create_volume(
-            size=1, display_name=vol1_name, volume_type=type1_name)
-        cls.volume_id_list.append(cls.volume1['id'])
-        cls.volume_client.wait_for_volume_status(cls.volume1['id'],
-                                                 'available')
+        # Volume/Type creation (uses volume_backend_name)
+        cls._create_type_and_volume(cls.backend1_name, False)
+        # Volume/Type creation (uses capabilities:volume_backend_name)
+        cls._create_type_and_volume(cls.backend1_name, True)
 
         if cls.backend1_name != cls.backend2_name:
             # Volume/Type creation (uses backend2_name)
-            type2_name = data_utils.rand_name('Type-')
-            vol2_name = data_utils.rand_name('Volume-')
-            extra_specs2 = {"volume_backend_name": cls.backend2_name}
-            resp, cls.type2 = cls.client.create_volume_type(
-                type2_name, extra_specs=extra_specs2)
-            cls.volume_type_id_list.append(cls.type2['id'])
+            cls._create_type_and_volume(cls.backend2_name, False)
+            # Volume/Type creation (uses capabilities:volume_backend_name)
+            cls._create_type_and_volume(cls.backend2_name, True)
 
-            resp, cls.volume2 = cls.volume_client.create_volume(
-                size=1, display_name=vol2_name, volume_type=type2_name)
-            cls.volume_id_list.append(cls.volume2['id'])
-            cls.volume_client.wait_for_volume_status(cls.volume2['id'],
-                                                     'available')
+    @classmethod
+    def _create_type_and_volume(cls, backend_name_key, with_prefix):
+        # Volume/Type creation
+        type_name = data_utils.rand_name('Type')
+        vol_name = data_utils.rand_name('Volume')
+        spec_key_with_prefix = "capabilities:volume_backend_name"
+        spec_key_without_prefix = "volume_backend_name"
+        if with_prefix:
+            extra_specs = {spec_key_with_prefix: backend_name_key}
+        else:
+            extra_specs = {spec_key_without_prefix: backend_name_key}
+        resp, cls.type = cls.client.create_volume_type(
+            type_name, extra_specs=extra_specs)
+        cls.volume_type_id_list.append(cls.type['id'])
+
+        resp, cls.volume = cls.volume_client.create_volume(
+            size=1, display_name=vol_name, volume_type=type_name)
+        cls.volume_client.wait_for_volume_status(
+            cls.volume['id'], 'available')
+        if with_prefix:
+            cls.volume_id_list_with_prefix.append(cls.volume['id'])
+        else:
+            cls.volume_id_list_without_prefix.append(
+                cls.volume['id'])
 
     @classmethod
     def tearDownClass(cls):
         # volumes deletion
-        volume_id_list = getattr(cls, 'volume_id_list', [])
-        for volume_id in volume_id_list:
+        vid_prefix = getattr(cls, 'volume_id_list_with_prefix', [])
+        for volume_id in vid_prefix:
+            cls.volume_client.delete_volume(volume_id)
+            cls.volume_client.wait_for_resource_deletion(volume_id)
+
+        vid_no_pre = getattr(cls, 'volume_id_list_without_prefix', [])
+        for volume_id in vid_no_pre:
             cls.volume_client.delete_volume(volume_id)
             cls.volume_client.wait_for_resource_deletion(volume_id)
 
@@ -84,32 +97,57 @@
 
     @test.attr(type='smoke')
     def test_backend_name_reporting(self):
+        # get a volume id created from the type without the prefix
+        volume_id = self.volume_id_list_without_prefix[0]
+        self._test_backend_name_reporting_by_volume_id(volume_id)
+
+    @test.attr(type='smoke')
+    def test_backend_name_reporting_with_prefix(self):
+        # get a volume id created from the type with the prefix
+        volume_id = self.volume_id_list_with_prefix[0]
+        self._test_backend_name_reporting_by_volume_id(volume_id)
+
+    @test.attr(type='gate')
+    def test_backend_name_distinction(self):
+        if self.backend1_name == self.backend2_name:
+            raise self.skipException("backends configured with same name")
+        # get volume ids created from the type without the prefix
+        volume1_id = self.volume_id_list_without_prefix[0]
+        volume2_id = self.volume_id_list_without_prefix[1]
+        self._test_backend_name_distinction(volume1_id, volume2_id)
+
+    @test.attr(type='gate')
+    def test_backend_name_distinction_with_prefix(self):
+        if self.backend1_name == self.backend2_name:
+            raise self.skipException("backends configured with same name")
+        # get volume ids created from the type with the prefix
+        volume1_id = self.volume_id_list_with_prefix[0]
+        volume2_id = self.volume_id_list_with_prefix[1]
+        self._test_backend_name_distinction(volume1_id, volume2_id)
+
+    def _test_backend_name_reporting_by_volume_id(self, volume_id):
         # this test checks if os-vol-host-attr:host is populated correctly
         # after the multi backend feature has been enabled
         # if multi-backend is enabled, os-vol-host-attr:host should look like
         # host@backend_name
-        resp, volume = self.volume_client.get_volume(self.volume1['id'])
+        resp, volume = self.volume_client.get_volume(volume_id)
         self.assertEqual(200, resp.status)
 
         volume1_host = volume['os-vol-host-attr:host']
         msg = ("multi-backend reporting incorrect values for volume %s" %
-               self.volume1['id'])
+               volume_id)
         self.assertTrue(len(volume1_host.split("@")) > 1, msg)
 
-    @test.attr(type='gate')
-    def test_backend_name_distinction(self):
+    def _test_backend_name_distinction(self, volume1_id, volume2_id):
         # this test checks that the two volumes created in setUpClass don't
         # belong to the same backend (if they do, then the
         # volume backend distinction is not working properly)
-        if self.backend1_name == self.backend2_name:
-            raise self.skipException("backends configured with same name")
-
-        resp, volume = self.volume_client.get_volume(self.volume1['id'])
+        resp, volume = self.volume_client.get_volume(volume1_id)
         volume1_host = volume['os-vol-host-attr:host']
 
-        resp, volume = self.volume_client.get_volume(self.volume2['id'])
+        resp, volume = self.volume_client.get_volume(volume2_id)
         volume2_host = volume['os-vol-host-attr:host']
 
         msg = ("volumes %s and %s were created in the same backend" %
-               (self.volume1['id'], self.volume2['id']))
+               (volume1_id, volume2_id))
         self.assertNotEqual(volume1_host, volume2_host, msg)
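
The two spellings of the extra-spec key should schedule volumes onto the same backend; only the capabilities: scoping prefix differs. The reporting checks then inspect the host attribute, which with multi-backend enabled typically has the form host@backend_name. A small helper sketch of that parsing (illustrative only, assuming the volume dict carries the attribute used in the test):

    def backend_of(volume):
        # 'os-vol-host-attr:host' typically looks like 'host@backend_name'
        # when multi-backend is enabled; return the part after '@'.
        host = volume['os-vol-host-attr:host']
        assert '@' in host, 'multi-backend host attribute not populated'
        return host.split('@', 1)[1]

The distinction tests then reduce to asserting that backend_of(volume1) != backend_of(volume2).
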
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index c5be1f3..abf3c6b 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -61,12 +61,20 @@
             cls.volumes_extension_client = cls.os.volumes_extension_client
             cls.availability_zone_client = (
                 cls.os.volume_availability_zone_client)
+            # Special fields and resp code for cinder v1
+            cls.special_fields = {'name_field': 'display_name',
+                                  'descrip_field': 'display_description',
+                                  'create_resp': 200}
 
         elif cls._api_version == 2:
             if not CONF.volume_feature_enabled.api_v2:
                 msg = "Volume API v2 is disabled"
                 raise cls.skipException(msg)
             cls.volumes_client = cls.os.volumes_v2_client
+            # Special fields and resp code for cinder v2
+            cls.special_fields = {'name_field': 'name',
+                                  'descrip_field': 'description',
+                                  'create_resp': 202}
 
         else:
             msg = ("Invalid Cinder API version (%s)" % cls._api_version)
@@ -82,15 +90,15 @@
     @classmethod
     def create_volume(cls, size=1, **kwargs):
         """Wrapper utility that returns a test volume."""
-        vol_name = data_utils.rand_name('Volume')
-        if cls._api_version == 1:
-            resp, volume = cls.volumes_client.create_volume(
-                size, display_name=vol_name, **kwargs)
-            assert 200 == resp.status
-        elif cls._api_version == 2:
-            resp, volume = cls.volumes_client.create_volume(
-                size, name=vol_name, **kwargs)
-            assert 202 == resp.status
+        name = data_utils.rand_name('Volume')
+
+        name_field = cls.special_fields['name_field']
+        expect_status = cls.special_fields['create_resp']
+
+        kwargs[name_field] = name
+        resp, volume = cls.volumes_client.create_volume(size, **kwargs)
+        assert expect_status == resp.status
+
         cls.volumes.append(volume)
         cls.volumes_client.wait_for_volume_status(volume['id'], 'available')
         return volume
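
With special_fields, the v1/v2 differences in create_volume collapse into data: which field carries the name and which status code the create call returns. A standalone sketch of the same idea (the dict mirrors the values set in setUpClass above; it is not imported from Tempest):

    SPECIAL_FIELDS = {
        1: {'name_field': 'display_name',
            'descrip_field': 'display_description',
            'create_resp': 200},
        2: {'name_field': 'name',
            'descrip_field': 'description',
            'create_resp': 202},
    }


    def build_create_kwargs(api_version, name, **kwargs):
        # Put the volume name under whichever field the API version expects.
        kwargs[SPECIAL_FIELDS[api_version]['name_field']] = name
        return kwargs

    assert build_create_kwargs(1, 'vol') == {'display_name': 'vol'}
    assert build_create_kwargs(2, 'vol') == {'name': 'vol'}
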
diff --git a/tempest/api/volume/test_volume_metadata.py b/tempest/api/volume/test_volume_metadata.py
index 0d57d47..0505f19 100644
--- a/tempest/api/volume/test_volume_metadata.py
+++ b/tempest/api/volume/test_volume_metadata.py
@@ -19,13 +19,12 @@
 from tempest import test
 
 
-class VolumeMetadataTest(base.BaseVolumeV1Test):
-    _interface = "json"
+class VolumesV2MetadataTest(base.BaseVolumeTest):
 
     @classmethod
     @test.safe_setup
     def setUpClass(cls):
-        super(VolumeMetadataTest, cls).setUpClass()
+        super(VolumesV2MetadataTest, cls).setUpClass()
         # Create a volume
         cls.volume = cls.create_volume()
         cls.volume_id = cls.volume['id']
@@ -33,7 +32,7 @@
     def tearDown(self):
         # Update the metadata to {}
         self.volumes_client.update_volume_metadata(self.volume_id, {})
-        super(VolumeMetadataTest, self).tearDown()
+        super(VolumesV2MetadataTest, self).tearDown()
 
     @test.attr(type='gate')
     def test_create_get_delete_volume_metadata(self):
@@ -117,5 +116,13 @@
         self.assertThat(body.items(), matchers.ContainsAll(expect.items()))
 
 
-class VolumeMetadataTestXML(VolumeMetadataTest):
+class VolumesV2MetadataTestXML(VolumesV2MetadataTest):
+    _interface = "xml"
+
+
+class VolumesV1MetadataTest(VolumesV2MetadataTest):
+    _api_version = 1
+
+
+class VolumesV1MetadataTestXML(VolumesV1MetadataTest):
     _interface = "xml"
diff --git a/tempest/api/volume/test_volume_transfers.py b/tempest/api/volume/test_volume_transfers.py
index 82d1364..bf61222 100644
--- a/tempest/api/volume/test_volume_transfers.py
+++ b/tempest/api/volume/test_volume_transfers.py
@@ -23,12 +23,11 @@
 CONF = config.CONF
 
 
-class VolumesTransfersTest(base.BaseVolumeV1Test):
-    _interface = "json"
+class VolumesV2TransfersTest(base.BaseVolumeTest):
 
     @classmethod
     def setUpClass(cls):
-        super(VolumesTransfersTest, cls).setUpClass()
+        super(VolumesV2TransfersTest, cls).setUpClass()
 
         # Add another tenant to test volume-transfer
         if CONF.compute.allow_tenant_isolation:
@@ -110,5 +109,13 @@
         self.client.wait_for_volume_status(volume['id'], 'available')
 
 
-class VolumesTransfersTestXML(VolumesTransfersTest):
+class VolumesV2TransfersTestXML(VolumesV2TransfersTest):
+    _interface = "xml"
+
+
+class VolumesV1TransfersTest(VolumesV2TransfersTest):
+    _api_version = 1
+
+
+class VolumesV1TransfersTestXML(VolumesV1TransfersTest):
     _interface = "xml"
diff --git a/tempest/api/volume/test_volumes_actions.py b/tempest/api/volume/test_volumes_actions.py
index cfab0bd..6fef564 100644
--- a/tempest/api/volume/test_volumes_actions.py
+++ b/tempest/api/volume/test_volumes_actions.py
@@ -21,13 +21,12 @@
 CONF = config.CONF
 
 
-class VolumesActionsTest(base.BaseVolumeV1Test):
-    _interface = "json"
+class VolumesV2ActionsTest(base.BaseVolumeTest):
 
     @classmethod
     @test.safe_setup
     def setUpClass(cls):
-        super(VolumesActionsTest, cls).setUpClass()
+        super(VolumesV2ActionsTest, cls).setUpClass()
         cls.client = cls.volumes_client
         cls.image_client = cls.os.image_client
 
@@ -47,7 +46,7 @@
         cls.servers_client.delete_server(cls.server['id'])
         cls.servers_client.wait_for_server_termination(cls.server['id'])
 
-        super(VolumesActionsTest, cls).tearDownClass()
+        super(VolumesV2ActionsTest, cls).tearDownClass()
 
     @test.stresstest(class_setup_per='process')
     @test.attr(type='smoke')
@@ -165,5 +164,13 @@
         self.assertEqual(False, bool_flag)
 
 
-class VolumesActionsTestXML(VolumesActionsTest):
+class VolumesV2ActionsTestXML(VolumesV2ActionsTest):
+    _interface = "xml"
+
+
+class VolumesV1ActionsTest(VolumesV2ActionsTest):
+    _api_version = 1
+
+
+class VolumesV1ActionsTestXML(VolumesV1ActionsTest):
     _interface = "xml"
diff --git a/tempest/api/volume/test_volumes_get.py b/tempest/api/volume/test_volumes_get.py
index 2745b95..82208aa 100644
--- a/tempest/api/volume/test_volumes_get.py
+++ b/tempest/api/volume/test_volumes_get.py
@@ -23,14 +23,17 @@
 CONF = config.CONF
 
 
-class VolumesGetTest(base.BaseVolumeV1Test):
-    _interface = "json"
+class VolumesV2GetTest(base.BaseVolumeTest):
 
     @classmethod
     def setUpClass(cls):
-        super(VolumesGetTest, cls).setUpClass()
+        super(VolumesV2GetTest, cls).setUpClass()
         cls.client = cls.volumes_client
 
+        cls.name_field = cls.special_fields['name_field']
+        cls.descrip_field = cls.special_fields['descrip_field']
+        cls.create_resp = cls.special_fields['create_resp']
+
     def _delete_volume(self, volume_id):
         resp, _ = self.client.delete_volume(volume_id)
         self.assertEqual(202, resp.status)
@@ -51,24 +54,24 @@
         v_name = data_utils.rand_name('Volume')
         metadata = {'Type': 'Test'}
         # Create a volume
-        resp, volume = self.client.create_volume(display_name=v_name,
-                                                 metadata=metadata,
-                                                 **kwargs)
-        self.assertEqual(200, resp.status)
+        kwargs[self.name_field] = v_name
+        kwargs['metadata'] = metadata
+        resp, volume = self.client.create_volume(**kwargs)
+        self.assertEqual(self.create_resp, resp.status)
         self.assertIn('id', volume)
         self.addCleanup(self._delete_volume, volume['id'])
-        self.assertIn('display_name', volume)
-        self.assertEqual(volume['display_name'], v_name,
+        self.client.wait_for_volume_status(volume['id'], 'available')
+        self.assertIn(self.name_field, volume)
+        self.assertEqual(volume[self.name_field], v_name,
                          "The created volume name is not equal "
                          "to the requested name")
         self.assertTrue(volume['id'] is not None,
                         "Field volume id is empty or not found.")
-        self.client.wait_for_volume_status(volume['id'], 'available')
         # Get Volume information
         resp, fetched_volume = self.client.get_volume(volume['id'])
         self.assertEqual(200, resp.status)
         self.assertEqual(v_name,
-                         fetched_volume['display_name'],
+                         fetched_volume[self.name_field],
                          'The fetched Volume name is different '
                          'from the created Volume')
         self.assertEqual(volume['id'],
@@ -90,27 +93,25 @@
 
         # Update Volume
         # Test volume update when display_name is same with original value
-        resp, update_volume = \
-            self.client.update_volume(volume['id'],
-                                      display_name=v_name)
+        params = {self.name_field: v_name}
+        resp, update_volume = self.client.update_volume(volume['id'], **params)
         self.assertEqual(200, resp.status)
         # Test volume update when display_name is new
         new_v_name = data_utils.rand_name('new-Volume')
         new_desc = 'This is the new description of volume'
-        resp, update_volume = \
-            self.client.update_volume(volume['id'],
-                                      display_name=new_v_name,
-                                      display_description=new_desc)
+        params = {self.name_field: new_v_name,
+                  self.descrip_field: new_desc}
+        resp, update_volume = self.client.update_volume(volume['id'], **params)
         # Assert response body for update_volume method
         self.assertEqual(200, resp.status)
-        self.assertEqual(new_v_name, update_volume['display_name'])
-        self.assertEqual(new_desc, update_volume['display_description'])
+        self.assertEqual(new_v_name, update_volume[self.name_field])
+        self.assertEqual(new_desc, update_volume[self.descrip_field])
         # Assert response body for get_volume method
         resp, updated_volume = self.client.get_volume(volume['id'])
         self.assertEqual(200, resp.status)
         self.assertEqual(volume['id'], updated_volume['id'])
-        self.assertEqual(new_v_name, updated_volume['display_name'])
-        self.assertEqual(new_desc, updated_volume['display_description'])
+        self.assertEqual(new_v_name, updated_volume[self.name_field])
+        self.assertEqual(new_desc, updated_volume[self.descrip_field])
         self.assertThat(updated_volume['metadata'].items(),
                         matchers.ContainsAll(metadata.items()),
                         'The fetched Volume metadata misses data '
@@ -120,20 +121,18 @@
         # then test volume update if display_name is duplicated
         new_volume = {}
         new_v_desc = data_utils.rand_name('@#$%^* description')
-        resp, new_volume = \
-            self.client.create_volume(
-                size=1,
-                display_description=new_v_desc,
-                availability_zone=volume['availability_zone'])
-        self.assertEqual(200, resp.status)
+        params = {self.descrip_field: new_v_desc,
+                  'availability_zone': volume['availability_zone']}
+        resp, new_volume = self.client.create_volume(size=1, **params)
+        self.assertEqual(self.create_resp, resp.status)
         self.assertIn('id', new_volume)
         self.addCleanup(self._delete_volume, new_volume['id'])
         self.client.wait_for_volume_status(new_volume['id'], 'available')
-        resp, update_volume = \
-            self.client.update_volume(
-                new_volume['id'],
-                display_name=volume['display_name'],
-                display_description=volume['display_description'])
+
+        params = {self.name_field: volume[self.name_field],
+                  self.descrip_field: volume[self.descrip_field]}
+        resp, update_volume = self.client.update_volume(new_volume['id'],
+                                                        **params)
         self.assertEqual(200, resp.status)
 
         # NOTE(jdg): Revert back to strict true/false checking
@@ -159,5 +158,13 @@
         self._volume_create_get_update_delete(source_volid=origin['id'])
 
 
-class VolumesGetTestXML(VolumesGetTest):
+class VolumesV2GetTestXML(VolumesV2GetTest):
+    _interface = "xml"
+
+
+class VolumesV1GetTest(VolumesV2GetTest):
+    _api_version = 1
+
+
+class VolumesV1GetTestXML(VolumesV1GetTest):
     _interface = "xml"
diff --git a/tempest/api/volume/test_volumes_negative.py b/tempest/api/volume/test_volumes_negative.py
index bc5b1dc..8bd4c88 100644
--- a/tempest/api/volume/test_volumes_negative.py
+++ b/tempest/api/volume/test_volumes_negative.py
@@ -21,15 +21,16 @@
 from tempest import test
 
 
-class VolumesNegativeTest(base.BaseVolumeV1Test):
-    _interface = 'json'
+class VolumesV2NegativeTest(base.BaseVolumeTest):
 
     @classmethod
     @test.safe_setup
     def setUpClass(cls):
-        super(VolumesNegativeTest, cls).setUpClass()
+        super(VolumesV2NegativeTest, cls).setUpClass()
         cls.client = cls.volumes_client
 
+        cls.name_field = cls.special_fields['name_field']
+
         # Create a test shared instance and volume for attach/detach tests
         cls.volume = cls.create_volume()
         cls.mountpoint = "/dev/vdc"
@@ -237,7 +238,7 @@
     @test.attr(type=['negative', 'gate'])
     def test_list_volumes_with_nonexistent_name(self):
         v_name = data_utils.rand_name('Volume-')
-        params = {'display_name': v_name}
+        params = {self.name_field: v_name}
         resp, fetched_volume = self.client.list_volumes(params)
         self.assertEqual(200, resp.status)
         self.assertEqual(0, len(fetched_volume))
@@ -245,7 +246,7 @@
     @test.attr(type=['negative', 'gate'])
     def test_list_volumes_detail_with_nonexistent_name(self):
         v_name = data_utils.rand_name('Volume-')
-        params = {'display_name': v_name}
+        params = {self.name_field: v_name}
         resp, fetched_volume = self.client.list_volumes_with_detail(params)
         self.assertEqual(200, resp.status)
         self.assertEqual(0, len(fetched_volume))
@@ -265,5 +266,14 @@
         self.assertEqual(0, len(fetched_volume))
 
 
-class VolumesNegativeTestXML(VolumesNegativeTest):
+class VolumesV2NegativeTestXML(VolumesV2NegativeTest):
+    _interface = 'xml'
+
+
+class VolumesV1NegativeTest(VolumesV2NegativeTest):
+    _api_version = 1
+    _name = 'display_name'
+
+
+class VolumesV1NegativeTestXML(VolumesV1NegativeTest):
     _interface = 'xml'
diff --git a/tempest/api_schema/compute/v2/servers.py b/tempest/api_schema/compute/v2/servers.py
index 95c5760..551924a 100644
--- a/tempest/api_schema/compute/v2/servers.py
+++ b/tempest/api_schema/compute/v2/servers.py
@@ -270,3 +270,14 @@
 # without these attributes. So they are not 'required'.
 list_servers_detail['response_body']['properties']['servers']['items'][
     'required'].append('hostId')
+
+rebuild_server = copy.deepcopy(update_server)
+rebuild_server['status_code'] = [202]
+del rebuild_server['response_body']['properties']['server'][
+    'properties']['OS-DCF:diskConfig']
+
+rebuild_server_with_admin_pass = copy.deepcopy(rebuild_server)
+rebuild_server_with_admin_pass['response_body']['properties']['server'][
+    'properties'].update({'adminPass': {'type': 'string'}})
+rebuild_server_with_admin_pass['response_body']['properties']['server'][
+    'required'].append('adminPass')
diff --git a/tempest/api_schema/compute/v3/servers.py b/tempest/api_schema/compute/v3/servers.py
index dc800cd..cebb2d7 100644
--- a/tempest/api_schema/compute/v3/servers.py
+++ b/tempest/api_schema/compute/v3/servers.py
@@ -181,3 +181,12 @@
 # attributes. So they are not 'required'.
 list_servers_detail['response_body']['properties']['servers']['items'][
     'required'].append('host_id')
+
+rebuild_server = copy.deepcopy(update_server)
+rebuild_server['status_code'] = [202]
+
+rebuild_server_with_admin_pass = copy.deepcopy(rebuild_server)
+rebuild_server_with_admin_pass['response_body']['properties']['server'][
+    'properties'].update({'admin_password': {'type': 'string'}})
+rebuild_server_with_admin_pass['response_body']['properties']['server'][
+    'required'].append('admin_password')
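
Both rebuild schemas are copies of update_server with a 202 status code, plus a required admin password property when instance passwords are enabled (the v2 variant additionally drops OS-DCF:diskConfig). A simplified check of the copy-and-extend pattern using a stand-in dict rather than the real schema module:

    import copy

    # Stand-in with the same nesting as the tempest schemas; contents trimmed.
    update_server = {
        'status_code': [200],
        'response_body': {'properties': {'server': {'properties': {},
                                                    'required': []}}},
    }

    rebuild_server = copy.deepcopy(update_server)
    rebuild_server['status_code'] = [202]

    rebuild_with_pass = copy.deepcopy(rebuild_server)
    server = rebuild_with_pass['response_body']['properties']['server']
    server['properties']['adminPass'] = {'type': 'string'}
    server['required'].append('adminPass')

    # deepcopy keeps the base schema untouched by the password variant.
    assert 'adminPass' not in rebuild_server[
        'response_body']['properties']['server']['properties']
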
diff --git a/tempest/auth.py b/tempest/auth.py
index 830dca9..6dad3a4 100644
--- a/tempest/auth.py
+++ b/tempest/auth.py
@@ -13,10 +13,12 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import abc
 import copy
 import datetime
 import exceptions
 import re
+import six
 import urlparse
 
 from tempest import config
@@ -31,6 +33,7 @@
 LOG = logging.getLogger(__name__)
 
 
+@six.add_metaclass(abc.ABCMeta)
 class AuthProvider(object):
     """
     Provide authentication
@@ -70,18 +73,21 @@
                    interface=self.interface, cache=self.cache
                )
 
+    @abc.abstractmethod
     def _decorate_request(self, filters, method, url, headers=None, body=None,
                           auth_data=None):
         """
         Decorate request with authentication data
         """
-        raise NotImplementedError
+        return
 
+    @abc.abstractmethod
     def _get_auth(self):
-        raise NotImplementedError
+        return
 
+    @abc.abstractmethod
     def _fill_credentials(self, auth_data_body):
-        raise NotImplementedError
+        return
 
     def fill_credentials(self):
         """
@@ -130,8 +136,9 @@
         self.cache = None
         self.credentials.reset()
 
+    @abc.abstractmethod
     def is_expired(self, auth_data):
-        raise NotImplementedError
+        return
 
     def auth_request(self, method, url, headers=None, body=None, filters=None):
         """
@@ -188,11 +195,12 @@
         self.alt_part = request_part
         self.alt_auth_data = auth_data
 
+    @abc.abstractmethod
     def base_url(self, filters, auth_data=None):
         """
         Extracts the base_url based on provided filters
         """
-        raise NotImplementedError
+        return
 
 
 class KeystoneAuthProvider(AuthProvider):
@@ -225,11 +233,13 @@
         # no change to method or body
         return str(_url), _headers, body
 
+    @abc.abstractmethod
     def _auth_client(self):
-        raise NotImplementedError
+        return
 
+    @abc.abstractmethod
     def _auth_params(self):
-        raise NotImplementedError
+        return
 
     def _get_auth(self):
         # Bypasses the cache
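
Replacing the raise NotImplementedError bodies with abc.abstractmethod moves the failure from call time to construction time: a provider subclass that forgets one of these hooks cannot be instantiated at all. A toy illustration of that behaviour (the classes below are examples, not the Tempest ones):

    import abc

    import six


    @six.add_metaclass(abc.ABCMeta)
    class Provider(object):
        @abc.abstractmethod
        def _get_auth(self):
            return


    class Incomplete(Provider):
        pass

    try:
        Incomplete()
    except TypeError as exc:
        # e.g. "Can't instantiate abstract class Incomplete with abstract
        # methods _get_auth"
        print(exc)
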
diff --git a/tempest/cli/README.rst b/tempest/cli/README.rst
index dcd940b..bc18084 100644
--- a/tempest/cli/README.rst
+++ b/tempest/cli/README.rst
@@ -1,3 +1,5 @@
+.. _cli_field_guide:
+
 Tempest Field Guide to CLI tests
 ================================
 
diff --git a/tempest/cli/__init__.py b/tempest/cli/__init__.py
index d7b4a16..02f8c05 100644
--- a/tempest/cli/__init__.py
+++ b/tempest/cli/__init__.py
@@ -19,6 +19,7 @@
 
 import tempest.cli.output_parser
 from tempest import config
+from tempest import exceptions
 from tempest.openstack.common import log as logging
 import tempest.test
 
@@ -130,10 +131,10 @@
             cmd, stdout=stdout, stderr=stderr)
         result, result_err = proc.communicate()
         if not fail_ok and proc.returncode != 0:
-            raise CommandFailed(proc.returncode,
-                                cmd,
-                                result,
-                                result_err)
+            raise exceptions.CommandFailed(proc.returncode,
+                                           cmd,
+                                           result,
+                                           result_err)
         return result
 
     def assertTableStruct(self, items, field_names):
@@ -146,17 +147,3 @@
         self.assertTrue(lines[0].startswith(beginning),
                         msg=('Beginning of first line has invalid content: %s'
                              % lines[:3]))
-
-
-class CommandFailed(Exception):
-    def __init__(self, returncode, cmd, output, stderr):
-        super(CommandFailed, self).__init__()
-        self.returncode = returncode
-        self.cmd = cmd
-        self.stdout = output
-        self.stderr = stderr
-
-    def __str__(self):
-        return ("Command '%s' returned non-zero exit status %d.\n"
-        "stdout:\n%s\n"
-        "stderr:\n%s" % (self.cmd, self.returncode, self.stdout, self.stderr))
diff --git a/tempest/cli/simple_read_only/test_cinder.py b/tempest/cli/simple_read_only/test_cinder.py
index 9a6b159..04971c1 100644
--- a/tempest/cli/simple_read_only/test_cinder.py
+++ b/tempest/cli/simple_read_only/test_cinder.py
@@ -19,6 +19,7 @@
 
 from tempest import cli
 from tempest import config
+from tempest import exceptions
 
 CONF = config.CONF
 LOG = logging.getLogger(__name__)
@@ -40,7 +41,7 @@
         super(SimpleReadOnlyCinderClientTest, cls).setUpClass()
 
     def test_cinder_fake_action(self):
-        self.assertRaises(cli.CommandFailed,
+        self.assertRaises(exceptions.CommandFailed,
                           self.cinder,
                           'this-does-not-exist')
 
@@ -65,7 +66,7 @@
                                       'Attached to'])
         self.cinder('list', params='--all-tenants 1')
         self.cinder('list', params='--all-tenants 0')
-        self.assertRaises(cli.CommandFailed,
+        self.assertRaises(exceptions.CommandFailed,
                           self.cinder,
                           'list',
                           params='--all-tenants bad')
diff --git a/tempest/cli/simple_read_only/test_glance.py b/tempest/cli/simple_read_only/test_glance.py
index 3fb1120..90cdc55 100644
--- a/tempest/cli/simple_read_only/test_glance.py
+++ b/tempest/cli/simple_read_only/test_glance.py
@@ -17,6 +17,7 @@
 
 from tempest import cli
 from tempest import config
+from tempest import exceptions
 from tempest.openstack.common import log as logging
 
 CONF = config.CONF
@@ -40,7 +41,7 @@
         super(SimpleReadOnlyGlanceClientTest, cls).setUpClass()
 
     def test_glance_fake_action(self):
-        self.assertRaises(cli.CommandFailed,
+        self.assertRaises(exceptions.CommandFailed,
                           self.glance,
                           'this-does-not-exist')
 
diff --git a/tempest/cli/simple_read_only/test_keystone.py b/tempest/cli/simple_read_only/test_keystone.py
index f8dcdba..9218fcd 100644
--- a/tempest/cli/simple_read_only/test_keystone.py
+++ b/tempest/cli/simple_read_only/test_keystone.py
@@ -17,6 +17,7 @@
 
 from tempest import cli
 from tempest import config
+from tempest import exceptions
 from tempest.openstack.common import log as logging
 
 CONF = config.CONF
@@ -34,7 +35,7 @@
     """
 
     def test_admin_fake_action(self):
-        self.assertRaises(cli.CommandFailed,
+        self.assertRaises(exceptions.CommandFailed,
                           self.keystone,
                           'this-does-not-exist')
 
diff --git a/tempest/cli/simple_read_only/test_neutron.py b/tempest/cli/simple_read_only/test_neutron.py
index 2643596..87f6b67 100644
--- a/tempest/cli/simple_read_only/test_neutron.py
+++ b/tempest/cli/simple_read_only/test_neutron.py
@@ -17,6 +17,7 @@
 
 from tempest import cli
 from tempest import config
+from tempest import exceptions
 from tempest.openstack.common import log as logging
 from tempest import test
 
@@ -42,7 +43,7 @@
 
     @test.attr(type='smoke')
     def test_neutron_fake_action(self):
-        self.assertRaises(cli.CommandFailed,
+        self.assertRaises(exceptions.CommandFailed,
                           self.neutron,
                           'this-does-not-exist')
 
@@ -88,7 +89,7 @@
     def _test_neutron_lbaas_command(self, command):
         try:
             self.neutron(command)
-        except cli.CommandFailed as e:
+        except exceptions.CommandFailed as e:
             if '404 Not Found' not in e.stderr:
                 self.fail('%s: Unexpected failure.' % command)
 
diff --git a/tempest/cli/simple_read_only/test_nova.py b/tempest/cli/simple_read_only/test_nova.py
index 70eb9ef..7085cc9 100644
--- a/tempest/cli/simple_read_only/test_nova.py
+++ b/tempest/cli/simple_read_only/test_nova.py
@@ -17,6 +17,7 @@
 
 from tempest import cli
 from tempest import config
+from tempest import exceptions
 from tempest.openstack.common import log as logging
 import tempest.test
 
@@ -47,7 +48,7 @@
         super(SimpleReadOnlyNovaClientTest, cls).setUpClass()
 
     def test_admin_fake_action(self):
-        self.assertRaises(cli.CommandFailed,
+        self.assertRaises(exceptions.CommandFailed,
                           self.nova,
                           'this-does-nova-exist')
 
@@ -84,11 +85,11 @@
         self.nova('endpoints')
 
     def test_admin_flavor_acces_list(self):
-        self.assertRaises(cli.CommandFailed,
+        self.assertRaises(exceptions.CommandFailed,
                           self.nova,
                           'flavor-access-list')
         # Failed to get access list for public flavor type
-        self.assertRaises(cli.CommandFailed,
+        self.assertRaises(exceptions.CommandFailed,
                           self.nova,
                           'flavor-access-list',
                           params='--flavor m1.tiny')
@@ -125,7 +126,7 @@
         self.nova('list')
         self.nova('list', params='--all-tenants 1')
         self.nova('list', params='--all-tenants 0')
-        self.assertRaises(cli.CommandFailed,
+        self.assertRaises(exceptions.CommandFailed,
                           self.nova,
                           'list',
                           params='--all-tenants bad')
diff --git a/tempest/cli/simple_read_only/test_nova_manage.py b/tempest/cli/simple_read_only/test_nova_manage.py
index 67c19d8..dae0cf8 100644
--- a/tempest/cli/simple_read_only/test_nova_manage.py
+++ b/tempest/cli/simple_read_only/test_nova_manage.py
@@ -15,6 +15,7 @@
 
 from tempest import cli
 from tempest import config
+from tempest import exceptions
 from tempest.openstack.common import log as logging
 
 
@@ -46,7 +47,7 @@
         super(SimpleReadOnlyNovaManageTest, cls).setUpClass()
 
     def test_admin_fake_action(self):
-        self.assertRaises(cli.CommandFailed,
+        self.assertRaises(exceptions.CommandFailed,
                           self.nova_manage,
                           'this-does-nova-exist')
 
diff --git a/tempest/cli/simple_read_only/test_sahara.py b/tempest/cli/simple_read_only/test_sahara.py
index 773921a..2c6e0e2 100644
--- a/tempest/cli/simple_read_only/test_sahara.py
+++ b/tempest/cli/simple_read_only/test_sahara.py
@@ -17,6 +17,7 @@
 
 from tempest import cli
 from tempest import config
+from tempest import exceptions
 from tempest import test
 
 CONF = config.CONF
@@ -41,7 +42,7 @@
 
     @test.attr(type='negative')
     def test_sahara_fake_action(self):
-        self.assertRaises(cli.CommandFailed,
+        self.assertRaises(exceptions.CommandFailed,
                           self.sahara,
                           'this-does-not-exist')
 
diff --git a/tempest/cli/simple_read_only/test_swift.py b/tempest/cli/simple_read_only/test_swift.py
index c778542..069a384 100644
--- a/tempest/cli/simple_read_only/test_swift.py
+++ b/tempest/cli/simple_read_only/test_swift.py
@@ -17,6 +17,7 @@
 
 from tempest import cli
 from tempest import config
+from tempest import exceptions
 
 CONF = config.CONF
 
@@ -37,7 +38,7 @@
         super(SimpleReadOnlySwiftClientTest, cls).setUpClass()
 
     def test_swift_fake_action(self):
-        self.assertRaises(cli.CommandFailed,
+        self.assertRaises(exceptions.CommandFailed,
                           self.swift,
                           'this-does-not-exist')
 
diff --git a/tempest/clients.py b/tempest/clients.py
index 4e2205e..519e686 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -52,6 +52,8 @@
     MigrationsClientJSON
 from tempest.services.compute.json.quotas_client import QuotaClassesClientJSON
 from tempest.services.compute.json.quotas_client import QuotasClientJSON
+from tempest.services.compute.json.security_group_default_rules_client import \
+    SecurityGroupDefaultRulesClientJSON
 from tempest.services.compute.json.security_groups_client import \
     SecurityGroupsClientJSON
 from tempest.services.compute.json.servers_client import ServersClientJSON
@@ -406,6 +408,8 @@
         self.data_processing_client = DataProcessingClient(
             self.auth_provider)
         self.migrations_client = MigrationsClientJSON(self.auth_provider)
+        self.security_group_default_rules_client = (
+            SecurityGroupDefaultRulesClientJSON(self.auth_provider))
 
 
 class AltManager(Manager):
diff --git a/tempest/cmd/javelin.py b/tempest/cmd/javelin.py
index 96bbd03..67b92b0 100755
--- a/tempest/cmd/javelin.py
+++ b/tempest/cmd/javelin.py
@@ -219,7 +219,7 @@
 
     def check_objects(self):
         """Check that the objects created are still there."""
-        if 'objects' not in self.res:
+        if not self.res.get('objects'):
             return
         LOG.info("checking objects")
         for obj in self.res['objects']:
@@ -231,7 +231,7 @@
 
     def check_servers(self):
         """Check that the servers are still up and running."""
-        if 'servers' not in self.res:
+        if not self.res.get('servers'):
             return
         LOG.info("checking servers")
         for server in self.res['servers']:
@@ -244,13 +244,17 @@
             r, found = client.servers.get_server(found['id'])
             # get the ipv4 address
             addr = found['addresses']['private'][0]['addr']
-            self.assertEqual(os.system("ping -c 1 " + addr), 0,
-                             "Server %s is not pingable at %s" % (
-                                 server['name'], addr))
+            for count in range(60):
+                return_code = os.system("ping -c1 " + addr)
+                if return_code == 0:
+                    break
+            self.assertNotEqual(count, 59,
+                                "Server %s is not pingable at %s" % (
+                                    server['name'], addr))
 
     def check_volumes(self):
         """Check that the volumes are still there and attached."""
-        if 'volumes' not in self.res:
+        if not self.res.get('volumes'):
             return
         LOG.info("checking volumes")
         for volume in self.res['volumes']:
@@ -308,6 +312,7 @@
 def create_images(images):
     if not images:
         return
+    LOG.info("Creating images")
     for image in images:
         client = client_for_user(image['owner'])
 
@@ -315,6 +320,7 @@
         r, body = client.images.image_list()
         names = [x['name'] for x in body]
         if image['name'] in names:
+            LOG.info("Image '%s' already exists" % image['name'])
             continue
 
         # special handling for 3 part image
@@ -372,15 +378,37 @@
 def create_servers(servers):
     if not servers:
         return
+    LOG.info("Creating servers")
     for server in servers:
         client = client_for_user(server['owner'])
 
         if _get_server_by_name(client, server['name']):
+            LOG.info("Server '%s' already exists" % server['name'])
             continue
 
         image_id = _get_image_by_name(client, server['image'])['id']
         flavor_id = _get_flavor_by_name(client, server['flavor'])['id']
-        client.servers.create_server(server['name'], image_id, flavor_id)
+        resp, body = client.servers.create_server(server['name'], image_id,
+                                                  flavor_id)
+        server_id = body['id']
+        client.servers.wait_for_server_status(server_id, 'ACTIVE')
+
+
+def destroy_servers(servers):
+    if not servers:
+        return
+    LOG.info("Destroying servers")
+    for server in servers:
+        client = client_for_user(server['owner'])
+
+        response = _get_server_by_name(client, server['name'])
+        if not response:
+            LOG.info("Server '%s' does not exist" % server['name'])
+            continue
+
+        client.servers.delete_server(response['id'])
+        client.servers.wait_for_server_termination(response['id'],
+                                                   ignore_error=True)
 
 
 #######################
@@ -441,6 +469,23 @@
     # attach_volumes(RES['volumes'])
 
 
+def destroy_resources():
+    LOG.info("Destroying Resources")
+    # Destroy in inverse order of create
+
+    # Future
+    # detach_volumes
+    # destroy_volumes
+
+    destroy_servers(RES['servers'])
+    LOG.warn("Destroy mode incomplete")
+    # destroy_images
+    # destroy_objects
+
+    # destroy_users
+    # destroy_tenants
+
+
 def get_options():
     global OPTS
     parser = argparse.ArgumentParser(
@@ -512,12 +557,16 @@
 
     if OPTS.mode == 'create':
         create_resources()
+        # Make sure the resources we just created actually work
+        checker = JavelinCheck(USERS, RES)
+        checker.check()
     elif OPTS.mode == 'check':
         collect_users(RES['users'])
         checker = JavelinCheck(USERS, RES)
         checker.check()
     elif OPTS.mode == 'destroy':
-        LOG.warn("Destroy mode not yet implemented")
+        collect_users(RES['users'])
+        destroy_resources()
     else:
         LOG.error('Unknown mode %s' % OPTS.mode)
         return 1
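
The ping loop above retries up to 60 times and then asserts on the loop counter, so a reply that only arrives on the 60th attempt is indistinguishable from a timeout. A hedged alternative sketch that tracks success explicitly (purely illustrative, not part of this change):

    import os
    import time


    def ping_until_reachable(addr, attempts=60, interval=1):
        # Return True as soon as one ping succeeds, False after `attempts`
        # tries; `interval` just paces the retries.
        for _ in range(attempts):
            if os.system('ping -c1 ' + addr) == 0:
                return True
            time.sleep(interval)
        return False

A caller would then assert ping_until_reachable(addr) with the same error message.
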
diff --git a/tempest/common/utils/data_utils.py b/tempest/common/utils/data_utils.py
index 174e557..5a29ea0 100644
--- a/tempest/common/utils/data_utils.py
+++ b/tempest/common/utils/data_utils.py
@@ -71,3 +71,11 @@
     if not base_text:
         base_text = 'test'
     return ''.join(itertools.islice(itertools.cycle(base_text), size))
+
+
+def random_bytes(size=1024):
+    """
+    Return a string of `size` randomly selected bytes.
+    """
+    return ''.join([chr(random.randint(0, 255))
+                    for i in range(size)])
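
random_bytes produces a string of arbitrary byte values, useful for object-store payloads where pattern-free data matters; on Python 2 the result is a plain str of raw characters. A quick usage sketch, assuming a tree that contains this change:

    from tempest.common.utils import data_utils

    payload = data_utils.random_bytes(4096)
    assert len(payload) == 4096
    # Every character is a raw byte value in the 0-255 range.
    assert all(0 <= ord(ch) <= 255 for ch in payload)
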
diff --git a/tempest/config.py b/tempest/config.py
index c83f500..01bc243 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -268,12 +268,14 @@
                 default=['all'],
                 help='A list of enabled compute extensions with a special '
                      'entry all which indicates every extension is enabled. '
-                     'Each extension should be specified with alias name'),
+                     'Each extension should be specified with alias name. '
+                     'Empty list indicates all extensions are disabled'),
     cfg.ListOpt('api_v3_extensions',
                 default=['all'],
                 help='A list of enabled v3 extensions with a special entry all'
                      ' which indicates every extension is enabled. '
-                     'Each extension should be specified with alias name'),
+                     'Each extension should be specified with alias name. '
+                     'Empty list indicates all extensions are disabled'),
     cfg.BoolOpt('change_password',
                 default=False,
                 help="Does the test environment support changing the admin "
@@ -314,7 +316,12 @@
     cfg.BoolOpt('rescue',
                 default=True,
                 help='Does the test environment support instance rescue '
-                     'mode?')
+                     'mode?'),
+    cfg.BoolOpt('enable_instance_password',
+                default=True,
+                help='Enables returning of the instance password by the '
+                     'relevant server API calls such as create, rebuild '
+                     'or rescue.')
 ]
 
 
@@ -441,7 +448,8 @@
     cfg.ListOpt('api_extensions',
                 default=['all'],
                 help='A list of enabled network extensions with a special '
-                     'entry all which indicates every extension is enabled'),
+                     'entry all which indicates every extension is enabled. '
+                     'Empty list indicates all extensions are disabled'),
     cfg.BoolOpt('ipv6_subnet_attributes',
                 default=False,
                 help="Allow the execution of IPv6 subnet tests that use "
@@ -546,7 +554,8 @@
     cfg.ListOpt('api_extensions',
                 default=['all'],
                 help='A list of enabled volume extensions with a special '
-                     'entry all which indicates every extension is enabled'),
+                     'entry all which indicates every extension is enabled. '
+                     'Empty list indicates all extensions are disabled'),
     cfg.BoolOpt('api_v1',
                 default=True,
                 help="Is the v1 volume API enabled"),
diff --git a/tempest/exceptions.py b/tempest/exceptions.py
index 4eb1cea..9d443cc 100644
--- a/tempest/exceptions.py
+++ b/tempest/exceptions.py
@@ -211,3 +211,17 @@
 
 class InvalidStructure(TempestException):
     message = "Invalid structure of table with details"
+
+
+class CommandFailed(Exception):
+    def __init__(self, returncode, cmd, output, stderr):
+        super(CommandFailed, self).__init__()
+        self.returncode = returncode
+        self.cmd = cmd
+        self.stdout = output
+        self.stderr = stderr
+
+    def __str__(self):
+        return ("Command '%s' returned non-zero exit status %d.\n"
+        "stdout:\n%s\n"
+        "stderr:\n%s" % (self.cmd, self.returncode, self.stdout, self.stderr))
diff --git a/tempest/scenario/README.rst b/tempest/scenario/README.rst
index 835ba99..5a287d6 100644
--- a/tempest/scenario/README.rst
+++ b/tempest/scenario/README.rst
@@ -1,3 +1,5 @@
+.. _scenario_field_guide:
+
 Tempest Field Guide to Scenario tests
 =====================================
 
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index aa24c31..3cfc698 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -113,6 +113,11 @@
         cls.ceilometer_client = cls.manager.ceilometer_client
 
     @classmethod
+    def tearDownClass(cls):
+        cls.isolated_creds.clear_isolated_creds()
+        super(OfficialClientTest, cls).tearDownClass()
+
+    @classmethod
     def _get_credentials(cls, get_creds, ctype):
         if CONF.compute.allow_tenant_isolation:
             creds = get_creds()
diff --git a/tempest/services/compute/json/security_group_default_rules_client.py b/tempest/services/compute/json/security_group_default_rules_client.py
new file mode 100644
index 0000000..6d29837
--- /dev/null
+++ b/tempest/services/compute/json/security_group_default_rules_client.py
@@ -0,0 +1,74 @@
+# Copyright 2014 NEC Corporation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import json
+
+from tempest.common import rest_client
+from tempest import config
+
+CONF = config.CONF
+
+
+class SecurityGroupDefaultRulesClientJSON(rest_client.RestClient):
+
+    def __init__(self, auth_provider):
+        super(SecurityGroupDefaultRulesClientJSON,
+              self).__init__(auth_provider)
+        self.service = CONF.compute.catalog_type
+
+    def create_security_default_group_rule(self, ip_protocol, from_port,
+                                           to_port, **kwargs):
+        """
+        Create a security group default rule.
+        ip_protocol : ip_protocol (icmp, tcp, udp).
+        from_port: Port at start of range.
+        to_port  : Port at end of range.
+        cidr     : CIDR for address range.
+        """
+        post_body = {
+            'ip_protocol': ip_protocol,
+            'from_port': from_port,
+            'to_port': to_port,
+            'cidr': kwargs.get('cidr'),
+        }
+        post_body = json.dumps({'security_group_default_rule': post_body})
+        url = 'os-security-group-default-rules'
+        resp, body = self.post(url, post_body)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return resp, body['security_group_default_rule']
+
+    def delete_security_group_default_rule(self,
+                                           security_group_default_rule_id):
+        """Deletes the provided Security Group default rule."""
+        resp, body = self.delete('os-security-group-default-rules/%s' % str(
+            security_group_default_rule_id))
+        self.expected_success(204, resp.status)
+        return resp, body
+
+    def list_security_group_default_rules(self):
+        """List all Security Group default rules."""
+        resp, body = self.get('os-security-group-default-rules')
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return resp, body['security_group_default_rules']
+
+    def get_security_group_default_rule(self, security_group_default_rule_id):
+        """Return the details of provided Security Group default rule."""
+        resp, body = self.get('os-security-group-default-rules/%s' % str(
+            security_group_default_rule_id))
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return resp, body['security_group_default_rule']
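
A rough usage sketch of the new client as wired into tempest.clients.Manager earlier in this change (security_group_default_rules_client); the manager variable, the rule values and the presence of an 'id' field in the returned body are assumptions for illustration:

    # 'manager' is assumed to be an admin tempest.clients.Manager instance.
    client = manager.security_group_default_rules_client

    resp, rule = client.create_security_default_group_rule(
        'tcp', 22, 22, cidr='10.10.0.0/24')
    try:
        resp, fetched = client.get_security_group_default_rule(rule['id'])
        resp, rules = client.list_security_group_default_rules()
    finally:
        client.delete_security_group_default_rule(rule['id'])
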
diff --git a/tempest/services/compute/json/servers_client.py b/tempest/services/compute/json/servers_client.py
index 80bb711..05f74cd 100644
--- a/tempest/services/compute/json/servers_client.py
+++ b/tempest/services/compute/json/servers_client.py
@@ -269,7 +269,12 @@
         if 'disk_config' in kwargs:
             kwargs['OS-DCF:diskConfig'] = kwargs['disk_config']
             del kwargs['disk_config']
-        return self.action(server_id, 'rebuild', 'server', None, **kwargs)
+        if CONF.compute_feature_enabled.enable_instance_password:
+            rebuild_schema = schema.rebuild_server_with_admin_pass
+        else:
+            rebuild_schema = schema.rebuild_server
+        return self.action(server_id, 'rebuild', 'server',
+                           rebuild_schema, **kwargs)
 
     def resize(self, server_id, flavor_ref, **kwargs):
         """Changes the flavor of a server."""
diff --git a/tempest/services/compute/v3/json/servers_client.py b/tempest/services/compute/v3/json/servers_client.py
index a5b31d3..27e95e8 100644
--- a/tempest/services/compute/v3/json/servers_client.py
+++ b/tempest/services/compute/v3/json/servers_client.py
@@ -272,7 +272,12 @@
         if 'disk_config' in kwargs:
             kwargs['os-disk-config:disk_config'] = kwargs['disk_config']
             del kwargs['disk_config']
-        return self.action(server_id, 'rebuild', 'server', None, **kwargs)
+        if CONF.compute_feature_enabled.enable_instance_password:
+            rebuild_schema = schema.rebuild_server_with_admin_pass
+        else:
+            rebuild_schema = schema.rebuild_server
+        return self.action(server_id, 'rebuild', 'server',
+                           rebuild_schema, **kwargs)
 
     def resize(self, server_id, flavor_ref, **kwargs):
         """Changes the flavor of a server."""
diff --git a/tempest/services/data_processing/v1_1/client.py b/tempest/services/data_processing/v1_1/client.py
index c2c7fd1..1fe0cf1 100644
--- a/tempest/services/data_processing/v1_1/client.py
+++ b/tempest/services/data_processing/v1_1/client.py
@@ -25,29 +25,42 @@
         super(DataProcessingClient, self).__init__(auth_provider)
         self.service = CONF.data_processing.catalog_type
 
-    @classmethod
-    def _request_and_parse(cls, req_fun, uri, res_name, *args, **kwargs):
-        """Make a request using specified req_fun and parse response.
+    def _request_and_check_resp(self, request_func, uri, resp_status):
+        """Make a request using specified request_func and check response
+        status code.
+
+        It returns pair: resp and response body.
+        """
+        resp, body = request_func(uri)
+        self.expected_success(resp_status, resp.status)
+        return resp, body
+
+    def _request_check_and_parse_resp(self, request_func, uri, resp_status,
+                                      resource_name, *args, **kwargs):
+        """Make a request using specified request_func, check response status
+        code and parse response body.
 
         It returns pair: resp and parsed resource(s) body.
         """
-        resp, body = req_fun(uri, headers={
-            'Content-Type': 'application/json'
-        }, *args, **kwargs)
+        headers = {'Content-Type': 'application/json'}
+        resp, body = request_func(uri, headers=headers, *args, **kwargs)
+        self.expected_success(resp_status, resp.status)
         body = json.loads(body)
-        return resp, body[res_name]
+        return resp, body[resource_name]
 
     def list_node_group_templates(self):
         """List all node group templates for a user."""
 
         uri = 'node-group-templates'
-        return self._request_and_parse(self.get, uri, 'node_group_templates')
+        return self._request_check_and_parse_resp(self.get, uri,
+                                                  200, 'node_group_templates')
 
     def get_node_group_template(self, tmpl_id):
         """Returns the details of a single node group template."""
 
         uri = 'node-group-templates/%s' % tmpl_id
-        return self._request_and_parse(self.get, uri, 'node_group_template')
+        return self._request_check_and_parse_resp(self.get, uri,
+                                                  200, 'node_group_template')
 
     def create_node_group_template(self, name, plugin_name, hadoop_version,
                                    node_processes, flavor_id,
@@ -67,20 +80,22 @@
             'flavor_id': flavor_id,
             'node_configs': node_configs or dict(),
         })
-        return self._request_and_parse(self.post, uri, 'node_group_template',
-                                       body=json.dumps(body))
+        return self._request_check_and_parse_resp(self.post, uri, 202,
+                                                  'node_group_template',
+                                                  body=json.dumps(body))
 
     def delete_node_group_template(self, tmpl_id):
         """Deletes the specified node group template by id."""
 
         uri = 'node-group-templates/%s' % tmpl_id
-        return self.delete(uri)
+        return self._request_and_check_resp(self.delete, uri, 204)
 
     def list_plugins(self):
         """List all enabled plugins."""
 
         uri = 'plugins'
-        return self._request_and_parse(self.get, uri, 'plugins')
+        return self._request_check_and_parse_resp(self.get,
+                                                  uri, 200, 'plugins')
 
     def get_plugin(self, plugin_name, plugin_version=None):
         """Returns the details of a single plugin."""
@@ -88,19 +103,21 @@
         uri = 'plugins/%s' % plugin_name
         if plugin_version:
             uri += '/%s' % plugin_version
-        return self._request_and_parse(self.get, uri, 'plugin')
+        return self._request_check_and_parse_resp(self.get, uri, 200, 'plugin')
 
     def list_cluster_templates(self):
         """List all cluster templates for a user."""
 
         uri = 'cluster-templates'
-        return self._request_and_parse(self.get, uri, 'cluster_templates')
+        return self._request_check_and_parse_resp(self.get, uri,
+                                                  200, 'cluster_templates')
 
     def get_cluster_template(self, tmpl_id):
         """Returns the details of a single cluster template."""
 
         uri = 'cluster-templates/%s' % tmpl_id
-        return self._request_and_parse(self.get, uri, 'cluster_template')
+        return self._request_check_and_parse_resp(self.get,
+                                                  uri, 200, 'cluster_template')
 
     def create_cluster_template(self, name, plugin_name, hadoop_version,
                                 node_groups, cluster_configs=None,
@@ -119,26 +136,29 @@
             'node_groups': node_groups,
             'cluster_configs': cluster_configs or dict(),
         })
-        return self._request_and_parse(self.post, uri, 'cluster_template',
-                                       body=json.dumps(body))
+        return self._request_check_and_parse_resp(self.post, uri, 202,
+                                                  'cluster_template',
+                                                  body=json.dumps(body))
 
     def delete_cluster_template(self, tmpl_id):
         """Deletes the specified cluster template by id."""
 
         uri = 'cluster-templates/%s' % tmpl_id
-        return self.delete(uri)
+        return self._request_and_check_resp(self.delete, uri, 204)
 
     def list_data_sources(self):
         """List all data sources for a user."""
 
         uri = 'data-sources'
-        return self._request_and_parse(self.get, uri, 'data_sources')
+        return self._request_check_and_parse_resp(self.get,
+                                                  uri, 200, 'data_sources')
 
     def get_data_source(self, source_id):
         """Returns the details of a single data source."""
 
         uri = 'data-sources/%s' % source_id
-        return self._request_and_parse(self.get, uri, 'data_source')
+        return self._request_check_and_parse_resp(self.get,
+                                                  uri, 200, 'data_source')
 
     def create_data_source(self, name, data_source_type, url, **kwargs):
         """Creates data source with specified params.
@@ -153,57 +173,62 @@
             'type': data_source_type,
             'url': url
         })
-        return self._request_and_parse(self.post, uri, 'data_source',
-                                       body=json.dumps(body))
+        return self._request_check_and_parse_resp(self.post, uri,
+                                                  202, 'data_source',
+                                                  body=json.dumps(body))
 
     def delete_data_source(self, source_id):
         """Deletes the specified data source by id."""
 
         uri = 'data-sources/%s' % source_id
-        return self.delete(uri)
+        return self._request_and_check_resp(self.delete, uri, 204)
 
     def list_job_binary_internals(self):
         """List all job binary internals for a user."""
 
         uri = 'job-binary-internals'
-        return self._request_and_parse(self.get, uri, 'binaries')
+        return self._request_check_and_parse_resp(self.get,
+                                                  uri, 200, 'binaries')
 
     def get_job_binary_internal(self, job_binary_id):
         """Returns the details of a single job binary internal."""
 
         uri = 'job-binary-internals/%s' % job_binary_id
-        return self._request_and_parse(self.get, uri, 'job_binary_internal')
+        return self._request_check_and_parse_resp(self.get, uri,
+                                                  200, 'job_binary_internal')
 
     def create_job_binary_internal(self, name, data):
         """Creates job binary internal with specified params."""
 
         uri = 'job-binary-internals/%s' % name
-        return self._request_and_parse(self.put, uri, 'job_binary_internal',
-                                       data)
+        return self._request_check_and_parse_resp(self.put, uri, 202,
+                                                  'job_binary_internal', data)
 
     def delete_job_binary_internal(self, job_binary_id):
         """Deletes the specified job binary internal by id."""
 
         uri = 'job-binary-internals/%s' % job_binary_id
-        return self.delete(uri)
+        return self._request_and_check_resp(self.delete, uri, 204)
 
     def get_job_binary_internal_data(self, job_binary_id):
         """Returns data of a single job binary internal."""
 
         uri = 'job-binary-internals/%s/data' % job_binary_id
-        return self.get(uri)
+        return self._request_and_check_resp(self.get, uri, 200)
 
     def list_job_binaries(self):
         """List all job binaries for a user."""
 
         uri = 'job-binaries'
-        return self._request_and_parse(self.get, uri, 'binaries')
+        return self._request_check_and_parse_resp(self.get,
+                                                  uri, 200, 'binaries')
 
     def get_job_binary(self, job_binary_id):
         """Returns the details of a single job binary."""
 
         uri = 'job-binaries/%s' % job_binary_id
-        return self._request_and_parse(self.get, uri, 'job_binary')
+        return self._request_check_and_parse_resp(self.get,
+                                                  uri, 200, 'job_binary')
 
     def create_job_binary(self, name, url, extra=None, **kwargs):
         """Creates job binary with specified params.
@@ -218,17 +243,18 @@
             'url': url,
             'extra': extra or dict(),
         })
-        return self._request_and_parse(self.post, uri, 'job_binary',
-                                       body=json.dumps(body))
+        return self._request_check_and_parse_resp(self.post, uri,
+                                                  202, 'job_binary',
+                                                  body=json.dumps(body))
 
     def delete_job_binary(self, job_binary_id):
         """Deletes the specified job binary by id."""
 
         uri = 'job-binaries/%s' % job_binary_id
-        return self.delete(uri)
+        return self._request_and_check_resp(self.delete, uri, 204)
 
     def get_job_binary_data(self, job_binary_id):
         """Returns data of a single job binary."""
 
         uri = 'job-binaries/%s/data' % job_binary_id
-        return self.get(uri)
+        return self._request_and_check_resp(self.get, uri, 200)
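
A short sketch of the two helper behaviours introduced above, with an illustrative client handle and source id; GET-style calls now check the status code before parsing, while DELETE-style calls return the unparsed pair after the check:

    def sanity_check_data_processing(client, source_id):
        # Parsing helpers assert the expected status (here 200) first.
        _, plugins = client.list_plugins()
        # Non-parsing helpers assert the status (here 204) and return the
        # raw (resp, body) pair instead of skipping validation entirely.
        resp, _ = client.delete_data_source(source_id)
        return plugins, resp.status
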
diff --git a/tempest/services/identity/v3/json/identity_client.py b/tempest/services/identity/v3/json/identity_client.py
index 593bd15..0188c2a 100644
--- a/tempest/services/identity/v3/json/identity_client.py
+++ b/tempest/services/identity/v3/json/identity_client.py
@@ -77,6 +77,17 @@
         body = json.loads(body)
         return resp, body['user']
 
+    def update_user_password(self, user_id, password, original_password):
+        """Updates a user password."""
+        update_user = {
+            'password': password,
+            'original_password': original_password
+        }
+        update_user = json.dumps({'user': update_user})
+        resp, _ = self.post('users/%s/password' % user_id, update_user)
+        self.expected_success(204, resp.status)
+        return resp
+
     def list_user_projects(self, user_id):
         """Lists the projects on which a user has roles assigned."""
         resp, body = self.get('users/%s/projects' % user_id)
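
A minimal sketch of the new password-change call; the surrounding function and variable names are illustrative, and supplying the current password mirrors the original_password argument required above:

    def rotate_password(identity_v3_client, user_id, old_password,
                        new_password):
        # Returns only the response object; the client asserts a 204 itself.
        return identity_v3_client.update_user_password(
            user_id,
            password=new_password,
            original_password=old_password)
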
diff --git a/tempest/services/identity/v3/xml/identity_client.py b/tempest/services/identity/v3/xml/identity_client.py
index 3790f13..f3e084e 100644
--- a/tempest/services/identity/v3/xml/identity_client.py
+++ b/tempest/services/identity/v3/xml/identity_client.py
@@ -139,6 +139,17 @@
         body = self._parse_body(etree.fromstring(body))
         return resp, body
 
+    def update_user_password(self, user_id, password, original_password):
+        """Updates a user password."""
+        update_user = common.Element("user",
+                                     xmlns=XMLNS,
+                                     password=password,
+                                     original_password=original_password)
+        resp, _ = self.post('users/%s/password' % user_id,
+                            str(common.Document(update_user)))
+        self.expected_success(204, resp.status)
+        return resp
+
     def list_user_projects(self, user_id):
         """Lists the projects on which a user has roles assigned."""
         resp, body = self.get('users/%s/projects' % user_id)
diff --git a/tempest/services/orchestration/json/orchestration_client.py b/tempest/services/orchestration/json/orchestration_client.py
index d325eb5..46b0ec4 100644
--- a/tempest/services/orchestration/json/orchestration_client.py
+++ b/tempest/services/orchestration/json/orchestration_client.py
@@ -181,7 +181,13 @@
         fail_regexp = re.compile(failure_pattern)
 
         while True:
-            resp, body = self.get_stack(stack_identifier)
+            try:
+                resp, body = self.get_stack(stack_identifier)
+            except exceptions.NotFound:
+                if status == 'DELETE_COMPLETE':
+                    return
+                # Unexpected NotFound; re-raise instead of looping on it.
+                raise
             stack_name = body['stack_name']
             stack_status = body['stack_status']
             if stack_status == status:
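
A hedged sketch of the delete-then-wait pattern the NotFound handling above is meant to support; the method names delete_stack and wait_for_stack_status are assumptions about the enclosing client, since the hunk does not show the wait helper's signature:

    def delete_stack_and_wait(orchestration_client, stack_identifier):
        # While waiting, a NotFound from get_stack is now treated as success
        # when the target status is DELETE_COMPLETE.
        orchestration_client.delete_stack(stack_identifier)
        orchestration_client.wait_for_stack_status(stack_identifier,
                                                   'DELETE_COMPLETE')
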
diff --git a/tempest/stress/README.rst b/tempest/stress/README.rst
index 0a63679..4f1f56c 100644
--- a/tempest/stress/README.rst
+++ b/tempest/stress/README.rst
@@ -1,3 +1,5 @@
+.. _stress_field_guide:
+
 Tempest Field Guide to Stress Tests
 ===================================
 
diff --git a/tempest/test.py b/tempest/test.py
index afe7a96..5b7330b 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -215,6 +215,8 @@
         'network': CONF.network_feature_enabled.api_extensions,
         'object': CONF.object_storage_feature_enabled.discoverable_apis,
     }
+    if len(config_dict[service]) == 0:
+        return False
     if config_dict[service][0] == 'all':
         return True
     if extension_name in config_dict[service]:
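
A standalone, hypothetical restatement of the decision order above (the real helper reads the enabled lists from CONF); it shows why the empty-list guard must run before the 'all' check, which would otherwise index into an empty list. The extension names are illustrative only:

    def _extension_enabled(enabled_extensions, name):
        if len(enabled_extensions) == 0:
            return False                  # nothing enabled for this service
        if enabled_extensions[0] == 'all':
            return True                   # wildcard entry enables everything
        return name in enabled_extensions

    assert _extension_enabled([], 'os-fixed-ips') is False
    assert _extension_enabled(['all'], 'os-fixed-ips') is True
    assert _extension_enabled(['os-fixed-ips'], 'os-flavor-extra') is False
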
diff --git a/tempest/tests/README.rst b/tempest/tests/README.rst
index 33d321f..e54d4c0 100644
--- a/tempest/tests/README.rst
+++ b/tempest/tests/README.rst
@@ -1,3 +1,5 @@
+.. _unit_tests_field_guide:
+
 Tempest Field Guide to Unit tests
 =================================
 
diff --git a/tempest/tests/cli/test_command_failed.py b/tempest/tests/cli/test_command_failed.py
index c539ac6..36a4fc8 100644
--- a/tempest/tests/cli/test_command_failed.py
+++ b/tempest/tests/cli/test_command_failed.py
@@ -10,7 +10,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from tempest import cli
+from tempest import exceptions
 from tempest.tests import base
 
 
@@ -22,8 +22,8 @@
         stdout = "output"
         stderr = "error"
         try:
-            raise cli.CommandFailed(returncode, cmd, stdout, stderr)
-        except cli.CommandFailed as e:
+            raise exceptions.CommandFailed(returncode, cmd, stdout, stderr)
+        except exceptions.CommandFailed as e:
             self.assertIn(str(returncode), str(e))
             self.assertIn(cmd, str(e))
             self.assertIn(stdout, str(e))
diff --git a/tempest/tests/test_auth.py b/tempest/tests/test_auth.py
index 1dcddad..6a2e335 100644
--- a/tempest/tests/test_auth.py
+++ b/tempest/tests/test_auth.py
@@ -59,12 +59,24 @@
     obviously don't test not implemented method or the ones which strongly
     depends on them.
     """
-    _auth_provider_class = auth.AuthProvider
 
-    def test_check_credentials_class(self):
-        self.assertRaises(NotImplementedError,
-                          self.auth_provider.check_credentials,
-                          auth.Credentials())
+    class FakeAuthProviderImpl(auth.AuthProvider):
+        def _decorate_request(self):
+            pass
+
+        def _fill_credentials(self):
+            pass
+
+        def _get_auth(self):
+            pass
+
+        def base_url(self):
+            pass
+
+        def is_expired(self):
+            pass
+
+    _auth_provider_class = FakeAuthProviderImpl
 
     def test_check_credentials_bad_type(self):
         self.assertFalse(self.auth_provider.check_credentials([]))
@@ -74,16 +86,6 @@
         auth_provider = self._auth(credentials={})
         self.assertIsInstance(auth_provider.credentials, auth.Credentials)
 
-    def test_instantiate_with_bad_credentials_type(self):
-        """
-        Assure that credentials with bad type fail with TypeError
-        """
-        self.assertRaises(TypeError, self._auth, [])
-
-    def test_auth_data_property(self):
-        self.assertRaises(NotImplementedError, getattr, self.auth_provider,
-                          'auth_data')
-
     def test_auth_data_property_when_cache_exists(self):
         self.auth_provider.cache = 'foo'
         self.useFixture(mockpatch.PatchObject(self.auth_provider,
@@ -110,9 +112,10 @@
         self.assertIsNone(self.auth_provider.alt_part)
         self.assertIsNone(self.auth_provider.alt_auth_data)
 
-    def test_fill_credentials(self):
-        self.assertRaises(NotImplementedError,
-                          self.auth_provider.fill_credentials)
+    def test_auth_class(self):
+        self.assertRaises(TypeError,
+                          auth.AuthProvider,
+                          fake_credentials.FakeCredentials)
 
 
 class TestKeystoneV2AuthProvider(BaseAuthTestsSetUp):
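
A speculative sketch of the abstract-base pattern that the rewritten tests imply: test_auth_class only makes sense if auth.AuthProvider now declares abstract methods, so the abc/six usage below is an assumption for illustration (and assumes six is available), not code from this change:

    import abc

    import six


    @six.add_metaclass(abc.ABCMeta)
    class ProviderSketch(object):
        """Illustrative stand-in for an abstract auth provider."""

        @abc.abstractmethod
        def _decorate_request(self):
            pass


    class ConcreteSketch(ProviderSketch):
        def _decorate_request(self):
            pass


    # ProviderSketch() raises TypeError, mirroring test_auth_class, while a
    # concrete subclass (like FakeAuthProviderImpl above) instantiates fine.
    ConcreteSketch()
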
diff --git a/tempest/thirdparty/README.rst b/tempest/thirdparty/README.rst
index 53cb54b..b0bfdf7 100644
--- a/tempest/thirdparty/README.rst
+++ b/tempest/thirdparty/README.rst
@@ -1,3 +1,5 @@
+.. _third_party_field_guide:
+
 Tempest Field Guide to Third Party API tests
 ============================================