Merge "Add python-cinderclient to requirements.txt"
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index 282e455..12f7cc3 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -226,12 +226,12 @@
 # Number of seconds to time out on waiting for a volume
 # to be available or reach an expected status
 build_timeout = 300
-# Runs Cinder multi-backend tests (requires 2 backend declared in cinder.conf)
+# Runs Cinder multi-backend tests (requires 2 backends declared in cinder.conf)
 # They must have different volume_backend_name (backend1_name and backend2_name
 # have to be different)
 multi_backend_enabled = false
-backend1_name = LVM_iSCSI
-backend2_name = LVM_iSCSI_1
+backend1_name = BACKEND_1
+backend2_name = BACKEND_2
 
 [object-storage]
 # This section contains configuration options used when executing tests
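A note on the multi_backend options above: "declared in cinder.conf" means two
backend sections whose volume_backend_name values differ. A hypothetical
Grizzly-era cinder.conf arrangement (section names and driver path are
illustrative only, not prescribed by this change):

    [DEFAULT]
    enabled_backends = lvm-1,lvm-2

    [lvm-1]
    volume_driver = cinder.volume.drivers.lvm.LVMISCSIDriver
    volume_backend_name = BACKEND_1

    [lvm-2]
    volume_driver = cinder.volume.drivers.lvm.LVMISCSIDriver
    volume_backend_name = BACKEND_2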
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 9883c00..48ef296 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -186,15 +186,24 @@
         flavor = kwargs.get('flavor', cls.flavor_ref)
         image_id = kwargs.get('image_id', cls.image_ref)
 
-        resp, server = cls.servers_client.create_server(
+        resp, body = cls.servers_client.create_server(
             name, image_id, flavor, **kwargs)
-        cls.servers.append(server)
+
+        # handle the case of multiple servers
+        servers = [body]
+        if 'min_count' in kwargs or 'max_count' in kwargs:
+            # Get the created servers whose names match the name param.
+            r, b = cls.servers_client.list_servers()
+            servers = [s for s in b['servers'] if s['name'].startswith(name)]
+
+        cls.servers.extend(servers)
 
         if 'wait_until' in kwargs:
-            cls.servers_client.wait_for_server_status(
-                server['id'], kwargs['wait_until'])
+            for server in servers:
+                cls.servers_client.wait_for_server_status(
+                    server['id'], kwargs['wait_until'])
 
-        return resp, server
+        return resp, body
 
     def wait_for(self, condition):
         """Repeatedly calls condition() until a timeout."""
diff --git a/tempest/api/compute/images/test_images_oneserver.py b/tempest/api/compute/images/test_images_oneserver.py
index c7f0b23..4163245 100644
--- a/tempest/api/compute/images/test_images_oneserver.py
+++ b/tempest/api/compute/images/test_images_oneserver.py
@@ -131,7 +131,7 @@
         # Verify the image was deleted correctly
         resp, body = self.client.delete_image(image_id)
         self.assertEqual('204', resp['status'])
-        self.assertRaises(exceptions.NotFound, self.client.get_image, image_id)
+        self.client.wait_for_resource_deletion(image_id)
 
     @testtools.skipUnless(compute.MULTI_USER,
                           'Need multiple users for this test.')
diff --git a/tempest/api/compute/servers/test_list_servers_negative.py b/tempest/api/compute/servers/test_list_servers_negative.py
index 0f35ee5..db9bdc1 100644
--- a/tempest/api/compute/servers/test_list_servers_negative.py
+++ b/tempest/api/compute/servers/test_list_servers_negative.py
@@ -15,6 +15,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import datetime
 
 from tempest.api import compute
 from tempest.api.compute import base
@@ -170,7 +171,8 @@
     @attr(type='gate')
     def test_list_servers_by_changes_since(self):
         # Servers are listed by specifying changes-since date
-        changes_since = {'changes-since': '2011-01-01T12:34:00Z'}
+        since = datetime.datetime.utcnow() - datetime.timedelta(minutes=2)
+        changes_since = {'changes-since': since.isoformat()}
         resp, body = self.client.list_servers(changes_since)
         self.assertEqual('200', resp['status'])
         # changes-since returns all instances, including deleted.
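The hard-coded 2011 timestamp matched every instance ever created; deriving
the value from the current time keeps the query meaningful. The value sent on
the wire, reproduced standalone (note that utcnow() carries no tzinfo, so
isoformat() emits no trailing 'Z'):

    import datetime

    # Build a changes-since filter for "two minutes ago", as the test does.
    since = datetime.datetime.utcnow() - datetime.timedelta(minutes=2)
    changes_since = {'changes-since': since.isoformat()}
    print(changes_since)  # e.g. {'changes-since': '2013-06-05T12:32:00.123456'}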
diff --git a/tempest/api/compute/servers/test_multiple_create.py b/tempest/api/compute/servers/test_multiple_create.py
index 63bb86d..9fde618 100644
--- a/tempest/api/compute/servers/test_multiple_create.py
+++ b/tempest/api/compute/servers/test_multiple_create.py
@@ -25,16 +25,6 @@
     _interface = 'json'
     _name = 'multiple-create-test'
 
-    def _get_created_servers(self, name):
-        """Get servers created which name match with name param."""
-        resp, body = self.servers_client.list_servers()
-        servers = body['servers']
-        servers_created = []
-        for server in servers:
-            if server['name'].startswith(name):
-                servers_created.append(server)
-        return servers_created
-
     def _generate_name(self):
         return rand_name(self._name)
 
@@ -45,18 +35,6 @@
         """
         kwargs['name'] = kwargs.get('name', self._generate_name())
         resp, body = self.create_server(**kwargs)
-        created_servers = self._get_created_servers(kwargs['name'])
-        # NOTE(maurosr): append it to cls.servers list from base.BaseCompute
-        # class.
-        self.servers.extend(created_servers)
-        # NOTE(maurosr): get a server list, check status of the ones with names
-        # that match and wait for them become active. At a first look, since
-        # they are building in parallel, wait inside the for doesn't seem be
-        # harmful to the performance
-        if wait_until is not None:
-            for server in created_servers:
-                self.servers_client.wait_for_server_status(server['id'],
-                                                           wait_until)
 
         return resp, body
 
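After this removal, _create_multiple_servers reduces to a thin wrapper: the
base class now lists the freshly created servers, registers them for cleanup,
and waits on each one. Roughly the resulting helper (a reconstructed sketch;
forwarding wait_until through kwargs is an assumption, not shown in the hunk):

    def _create_multiple_servers(self, wait_until=None, **kwargs):
        # Fill in a generated name and delegate everything else --
        # dedup, cleanup tracking, status waits -- to create_server().
        kwargs['name'] = kwargs.get('name', self._generate_name())
        if wait_until is not None:
            kwargs['wait_until'] = wait_until
        resp, body = self.create_server(**kwargs)
        return resp, body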
diff --git a/tempest/api/compute/servers/test_servers_negative.py b/tempest/api/compute/servers/test_servers_negative.py
index 5f53080..bbe489c 100644
--- a/tempest/api/compute/servers/test_servers_negative.py
+++ b/tempest/api/compute/servers/test_servers_negative.py
@@ -236,7 +236,11 @@
         # Create a server with a nonexistent security group
 
         security_groups = [{'name': 'does_not_exist'}]
-        self.assertRaises(exceptions.BadRequest,
+        if self.config.network.quantum_available:
+            expected_exception = exceptions.NotFound
+        else:
+            expected_exception = exceptions.BadRequest
+        self.assertRaises(expected_exception,
                           self.create_server,
                           security_groups=security_groups)
 
diff --git a/tempest/api/image/v1/test_images.py b/tempest/api/image/v1/test_images.py
index c5d3f93..640daa5 100644
--- a/tempest/api/image/v1/test_images.py
+++ b/tempest/api/image/v1/test_images.py
@@ -47,7 +47,6 @@
                                        properties=properties)
         self.assertTrue('id' in body)
         image_id = body.get('id')
-        self.created_images.append(image_id)
         self.assertEqual('New Name', body.get('name'))
         self.assertTrue(body.get('is_public'))
         self.assertEqual('queued', body.get('status'))
@@ -71,8 +70,6 @@
                                        properties={'key1': 'value1',
                                                    'key2': 'value2'})
         self.assertTrue('id' in body)
-        image_id = body.get('id')
-        self.created_images.append(image_id)
         self.assertEqual('New Remote Image', body.get('name'))
         self.assertTrue(body.get('is_public'))
         self.assertEqual('active', body.get('status'))
@@ -88,7 +85,6 @@
                                        copy_from=self.config.images.http_image)
         self.assertTrue('id' in body)
         image_id = body.get('id')
-        self.created_images.append(image_id)
         self.assertEqual('New Http Image', body.get('name'))
         self.assertTrue(body.get('is_public'))
         self.client.wait_for_image_status(image_id, 'active')
@@ -106,8 +102,6 @@
                                        min_ram=40,
                                        properties=properties)
         self.assertTrue('id' in body)
-        image_id = body.get('id')
-        self.created_images.append(image_id)
         self.assertEqual('New_image_with_min_ram', body.get('name'))
         self.assertTrue(body.get('is_public'))
         self.assertEqual('queued', body.get('status'))
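The dropped created_images.append(...) calls imply the image base class now
records every image it creates. A minimal sketch of that pattern (names and
structure are assumptions, not the verbatim Tempest base class):

    class BaseImageTest(object):
        """Sketch: central cleanup tracking for created images."""
        created_images = []

        @classmethod
        def create_image(cls, **kwargs):
            resp, body = cls.client.create_image(**kwargs)
            # Record the id once, centrally, so individual tests no
            # longer append to created_images themselves.
            cls.created_images.append(body['id'])
            return resp, body

        @classmethod
        def tearDownClass(cls):
            for image_id in cls.created_images:
                cls.client.delete_image(image_id)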
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index 966adc3..34db6e3 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -50,7 +50,6 @@
                                        visibility='public')
         self.assertTrue('id' in body)
         image_id = body.get('id')
-        self.created_images.append(image_id)
         self.assertTrue('name' in body)
         self.assertEqual('New Name', body.get('name'))
         self.assertTrue('visibility' in body)
@@ -79,7 +78,7 @@
         # We add a few images here to test the listing functionality of
         # the images API
         for x in xrange(0, 10):
-            cls.created_images.append(cls._create_standard_image(x))
+            cls._create_standard_image(x)
 
     @classmethod
     def _create_standard_image(cls, number):
diff --git a/tempest/api/object_storage/test_object_services.py b/tempest/api/object_storage/test_object_services.py
index 2b8feef..31e7eb9 100644
--- a/tempest/api/object_storage/test_object_services.py
+++ b/tempest/api/object_storage/test_object_services.py
@@ -17,8 +17,6 @@
 
 import time
 
-import testtools
-
 from tempest.api.object_storage import base
 from tempest.common.utils.data_utils import arbitrary_string
 from tempest.common.utils.data_utils import rand_name
@@ -443,71 +441,3 @@
         except Exception as e:
             self.fail("Failed to get public readable object with another"
                       " user creds raised exception is %s" % e)
-
-    @testtools.skip('Until Bug #1020722 is resolved.')
-    @attr(type='smoke')
-    def test_write_public_object_without_using_creds(self):
-        # make container public-writable, and create object anonymously, e.g.
-        # without using credentials
-        try:
-            # update container metadata to make publicly writable
-            cont_headers = {'X-Container-Write': '-*'}
-            resp_meta, body = self.container_client.update_container_metadata(
-                self.container_name, metadata=cont_headers, metadata_prefix='')
-            self.assertEqual(resp_meta['status'], '204')
-            # list container metadata
-            resp, _ = self.container_client.list_container_metadata(
-                self.container_name)
-            self.assertEqual(resp['status'], '204')
-            self.assertIn('x-container-write', resp)
-            self.assertEqual(resp['x-container-write'], '-*')
-
-            object_name = rand_name(name='Object')
-            data = arbitrary_string(size=len(object_name),
-                                    base_text=object_name)
-            headers = {'Content-Type': 'application/json',
-                       'Accept': 'application/json'}
-            # create object as anonymous user
-            resp, body = self.custom_object_client.create_object(
-                self.container_name, object_name, data, metadata=headers)
-            self.assertEqual(resp['status'], '201')
-
-        except Exception as e:
-            self.fail("Failed to create public writable object without using"
-                      " creds raised exception is %s" % e)
-
-    @testtools.skip('Until Bug #1020722 is resolved.')
-    @attr(type='smoke')
-    def test_write_public_with_another_user_creds(self):
-        # make container public-writable, and create object with another user's
-        # credentials
-        try:
-            # update container metadata to make it publicly writable
-            cont_headers = {'X-Container-Write': '-*'}
-            resp_meta, body = self.container_client.update_container_metadata(
-                self.container_name, metadata=cont_headers,
-                metadata_prefix='')
-            self.assertEqual(resp_meta['status'], '204')
-            # list container metadata
-            resp, _ = self.container_client.list_container_metadata(
-                self.container_name)
-            self.assertEqual(resp['status'], '204')
-            self.assertIn('x-container-write', resp)
-            self.assertEqual(resp['x-container-write'], '-*')
-
-            # trying to get auth token of alternative user
-            token = self.identity_client_alt.get_auth()
-            headers = {'Content-Type': 'application/json',
-                       'Accept': 'application/json',
-                       'X-Auth-Token': token}
-
-            # trying to create an object with another user's creds
-            object_name = rand_name(name='Object')
-            data = arbitrary_string(size=len(object_name),
-                                    base_text=object_name)
-            resp, body = self.custom_object_client.create_object(
-                self.container_name, object_name, data, metadata=headers)
-            self.assertEqual(resp['status'], '201')
-        except Exception as e:
-            self.fail("Failed to create public writable object with another"
-                      " user creds raised exception is %s" % e)
diff --git a/tempest/api/orchestration/stacks/test_instance_cfn_init.py b/tempest/api/orchestration/stacks/test_instance_cfn_init.py
new file mode 100644
index 0000000..2349830
--- /dev/null
+++ b/tempest/api/orchestration/stacks/test_instance_cfn_init.py
@@ -0,0 +1,148 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import json
+import logging
+
+from tempest.api.orchestration import base
+from tempest.common.utils.data_utils import rand_name
+from tempest.test import attr
+
+
+LOG = logging.getLogger(__name__)
+
+
+class InstanceCfnInitTestJSON(base.BaseOrchestrationTest):
+    _interface = 'json'
+
+    template = """
+HeatTemplateFormatVersion: '2012-12-12'
+Description: |
+  Template which uses a wait condition to confirm that a minimal
+  cfn-init and cfn-signal has worked
+Parameters:
+  KeyName:
+    Type: String
+  InstanceType:
+    Type: String
+  ImageId:
+    Type: String
+Resources:
+  CfnUser:
+    Type: AWS::IAM::User
+  SmokeKeys:
+    Type: AWS::IAM::AccessKey
+    Properties:
+      UserName: {Ref: CfnUser}
+  SmokeServer:
+    Type: AWS::EC2::Instance
+    Metadata:
+      AWS::CloudFormation::Init:
+        config:
+          files:
+            /tmp/smoke-status:
+              content: smoke test complete
+            /etc/cfn/cfn-credentials:
+              content:
+                Fn::Join:
+                - ''
+                - - AWSAccessKeyId=
+                  - {Ref: SmokeKeys}
+                  - '
+
+                    '
+                  - AWSSecretKey=
+                  - Fn::GetAtt: [SmokeKeys, SecretAccessKey]
+                  - '
+
+                    '
+              mode: '000400'
+              owner: root
+              group: root
+    Properties:
+      ImageId: {Ref: ImageId}
+      InstanceType: {Ref: InstanceType}
+      KeyName: {Ref: KeyName}
+      UserData:
+        Fn::Base64:
+          Fn::Join:
+          - ''
+          - - |-
+                #!/bin/bash -v
+                /opt/aws/bin/cfn-init
+            - |-
+                || error_exit ''Failed to run cfn-init''
+                /opt/aws/bin/cfn-signal -e 0 --data "`cat /tmp/smoke-status`" '
+            - {Ref: WaitHandle}
+            - '''
+
+              '
+  WaitHandle:
+    Type: AWS::CloudFormation::WaitConditionHandle
+  WaitCondition:
+    Type: AWS::CloudFormation::WaitCondition
+    DependsOn: SmokeServer
+    Properties:
+      Handle: {Ref: WaitHandle}
+      Timeout: '600'
+Outputs:
+  WaitConditionStatus:
+    Description: Contents of /tmp/smoke-status on SmokeServer
+    Value:
+      Fn::GetAtt: [WaitCondition, Data]
+"""
+
+    @classmethod
+    def setUpClass(cls):
+        super(InstanceCfnInitTestJSON, cls).setUpClass()
+        if not cls.orchestration_cfg.image_ref:
+            raise cls.skipException("No image available to test")
+        cls.client = cls.orchestration_client
+
+    def setUp(self):
+        super(InstanceCfnInitTestJSON, self).setUp()
+        stack_name = rand_name('heat')
+        keypair_name = (self.orchestration_cfg.keypair_name or
+                        self._create_keypair()['name'])
+
+        # create the stack
+        self.stack_identifier = self.create_stack(
+            stack_name,
+            self.template,
+            parameters={
+                'KeyName': keypair_name,
+                'InstanceType': self.orchestration_cfg.instance_type,
+                'ImageId': self.orchestration_cfg.image_ref
+            })
+
+    @attr(type='gate')
+    def test_stack_wait_condition_data(self):
+
+        sid = self.stack_identifier
+
+        # wait for create to complete.
+        self.client.wait_for_stack_status(sid, 'CREATE_COMPLETE')
+
+        # fetch the stack
+        resp, body = self.client.get_stack(sid)
+        self.assertEqual('CREATE_COMPLETE', body['stack_status'])
+
+        # This is an assert of great significance, as it means the following
+        # has happened:
+        # - cfn-init read the provided metadata and wrote out a file
+        # - a user was created and credentials written to the instance
+        # - a cfn-signal was built which was signed with provided credentials
+        # - the wait condition was fulfilled and the stack has changed state
+        wait_status = json.loads(body['outputs'][0]['output_value'])
+        self.assertEqual('smoke test complete', wait_status['00000'])
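The final assert unpacks the stack output; the value it parses looks like the
following (illustrative payload, '00000' being the id of the single
cfn-signal in this template):

    import json

    # WaitConditionStatus output: a JSON map of signal id -> signalled data.
    output_value = '{"00000": "smoke test complete"}'
    wait_status = json.loads(output_value)
    assert wait_status['00000'] == 'smoke test complete'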
diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py
index e278f59..086b981 100644
--- a/tempest/api/volume/admin/test_multi_backend.py
+++ b/tempest/api/volume/admin/test_multi_backend.py
@@ -1,8 +1,5 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
 
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
 #    not use this file except in compliance with the License. You may obtain
 #    a copy of the License at
@@ -15,12 +12,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import testtools
-
 from tempest.api.volume import base
 from tempest.common import log as logging
 from tempest.common.utils.data_utils import rand_name
-from tempest import config
 from tempest.services.volume.json.admin import volume_types_client
 from tempest.services.volume.json import volumes_client
 from tempest.test import attr
@@ -31,66 +25,62 @@
 class VolumeMultiBackendTest(base.BaseVolumeAdminTest):
     _interface = "json"
 
-    multi_backend_enabled = config.TempestConfig().volume.multi_backend_enabled
-    backend1_name = config.TempestConfig().volume.backend1_name
-    backend2_name = config.TempestConfig().volume.backend2_name
-    backend_names_equal = False
-    if (backend1_name == backend2_name):
-        backend_names_equal = True
-
     @classmethod
-    @testtools.skipIf(not multi_backend_enabled,
-                      "Cinder multi-backend feature is not available")
     def setUpClass(cls):
         super(VolumeMultiBackendTest, cls).setUpClass()
+        if not cls.config.volume.multi_backend_enabled:
+            raise cls.skipException("Cinder multi-backend feature disabled")
+
+        cls.backend1_name = cls.config.volume.backend1_name
+        cls.backend2_name = cls.config.volume.backend2_name
 
         adm_user = cls.config.identity.admin_username
         adm_pass = cls.config.identity.admin_password
         adm_tenant = cls.config.identity.admin_tenant_name
         auth_url = cls.config.identity.uri
 
-        cls.client = volumes_client.VolumesClientJSON(cls.config,
-                                                      adm_user,
-                                                      adm_pass,
-                                                      auth_url,
-                                                      adm_tenant)
-        cls.client2 = volume_types_client.VolumeTypesClientJSON(cls.config,
-                                                                adm_user,
-                                                                adm_pass,
-                                                                auth_url,
-                                                                adm_tenant)
+        cls.volume_client = volumes_client.VolumesClientJSON(cls.config,
+                                                             adm_user,
+                                                             adm_pass,
+                                                             auth_url,
+                                                             adm_tenant)
+        cls.type_client = volume_types_client.VolumeTypesClientJSON(cls.config,
+                                                                    adm_user,
+                                                                    adm_pass,
+                                                                    auth_url,
+                                                                    adm_tenant)
 
-        ## variables initialization
-        type_name1 = rand_name('type-')
-        type_name2 = rand_name('type-')
-        cls.volume_type_list = []
-
-        vol_name1 = rand_name('Volume-')
-        vol_name2 = rand_name('Volume-')
+        cls.volume_type_id_list = []
         cls.volume_id_list = []
-
         try:
-            ## Volume types creation
+            # Volume/Type creation (uses backend1_name)
+            type1_name = rand_name('Type-')
+            vol1_name = rand_name('Volume-')
             extra_specs1 = {"volume_backend_name": cls.backend1_name}
-            resp, cls.body1 = cls.client2.create_volume_type(
-                type_name1, extra_specs=extra_specs1)
-            cls.volume_type_list.append(cls.body1)
+            resp, cls.type1 = cls.type_client.create_volume_type(
+                type1_name, extra_specs=extra_specs1)
+            cls.volume_type_id_list.append(cls.type1['id'])
 
-            extra_specs2 = {"volume_backend_name": cls.backend2_name}
-            resp, cls.body2 = cls.client2.create_volume_type(
-                type_name2, extra_specs=extra_specs2)
-            cls.volume_type_list.append(cls.body2)
-
-            ## Volumes creation
-            resp, cls.volume1 = cls.client.create_volume(
-                size=1, display_name=vol_name1, volume_type=type_name1)
-            cls.client.wait_for_volume_status(cls.volume1['id'], 'available')
+            resp, cls.volume1 = cls.volume_client.create_volume(
+                size=1, display_name=vol1_name, volume_type=type1_name)
             cls.volume_id_list.append(cls.volume1['id'])
+            cls.volume_client.wait_for_volume_status(cls.volume1['id'],
+                                                     'available')
 
-            resp, cls.volume2 = cls.client.create_volume(
-                size=1, display_name=vol_name2, volume_type=type_name2)
-            cls.client.wait_for_volume_status(cls.volume2['id'], 'available')
-            cls.volume_id_list.append(cls.volume2['id'])
+            if cls.backend1_name != cls.backend2_name:
+                # Volume/Type creation (uses backend2_name)
+                type2_name = rand_name('Type-')
+                vol2_name = rand_name('Volume-')
+                extra_specs2 = {"volume_backend_name": cls.backend2_name}
+                resp, cls.type2 = cls.type_client.create_volume_type(
+                    type2_name, extra_specs=extra_specs2)
+                cls.volume_type_id_list.append(cls.type2['id'])
+
+                resp, cls.volume2 = cls.volume_client.create_volume(
+                    size=1, display_name=vol2_name, volume_type=type2_name)
+                cls.volume_id_list.append(cls.volume2['id'])
+                cls.volume_client.wait_for_volume_status(cls.volume2['id'],
+                                                         'available')
         except Exception:
             LOG.exception("setup failed")
             cls.tearDownClass()
@@ -100,60 +90,43 @@
     def tearDownClass(cls):
         ## volumes deletion
         for volume_id in cls.volume_id_list:
-            cls.client.delete_volume(volume_id)
-            cls.client.wait_for_resource_deletion(volume_id)
+            cls.volume_client.delete_volume(volume_id)
+            cls.volume_client.wait_for_resource_deletion(volume_id)
 
         ## volume types deletion
-        for volume_type in cls.volume_type_list:
-            cls.client2.delete_volume_type(volume_type)
+        for volume_type_id in cls.volume_type_id_list:
+            cls.type_client.delete_volume_type(volume_type_id)
 
         super(VolumeMultiBackendTest, cls).tearDownClass()
 
     @attr(type='smoke')
-    def test_multi_backend_enabled(self):
-        # this test checks that multi backend is enabled for at least the
-        # computes where the volumes created in setUp were made
+    def test_backend_name_reporting(self):
+        # this test checks that os-vol-host-attr:host is populated
+        # correctly after the multi-backend feature has been enabled
         # if multi-backend is enabled: os-vol-attr:host should be like:
         # host@backend_name
-        # this test fails if:
-        # - multi backend is not enabled
-        resp, fetched_volume = self.client.get_volume(self.volume1['id'])
+        resp, volume = self.volume_client.get_volume(self.volume1['id'])
         self.assertEqual(200, resp.status)
 
-        volume_host1 = fetched_volume['os-vol-host-attr:host']
-        msg = ("Multi-backend is not available for at least host "
-               "%(volume_host1)s") % locals()
-        self.assertTrue(len(volume_host1.split("@")) > 1, msg)
-
-        resp, fetched_volume = self.client.get_volume(self.volume2['id'])
-        self.assertEqual(200, resp.status)
-
-        volume_host2 = fetched_volume['os-vol-host-attr:host']
-        msg = ("Multi-backend is not available for at least host "
-               "%(volume_host2)s") % locals()
-        self.assertTrue(len(volume_host2.split("@")) > 1, msg)
+        volume1_host = volume['os-vol-host-attr:host']
+        msg = ("multi-backend reporting incorrect values for volume %s" %
+               self.volume1['id'])
+        self.assertTrue(len(volume1_host.split("@")) > 1, msg)
 
     @attr(type='gate')
     def test_backend_name_distinction(self):
-        # this test checks that the two volumes created at setUp doesn't
-        # belong to the same backend (if they are in the same backend, that
-        # means, volume_backend_name distinction is not working properly)
-        # this test fails if:
-        # - tempest.conf is not well configured
-        # - the two volumes belongs to the same backend
+        # this test checks that the two volumes created at setUp don't
+        # belong to the same backend (if they do, then the
+        # volume backend distinction is not working properly)
+        if self.backend1_name == self.backend2_name:
+            raise self.skipException("backends configured with same name")
 
-        # checks tempest.conf
-        msg = ("tempest.conf is not well configured, "
-               "backend1_name and backend2_name are equal")
-        self.assertEqual(self.backend_names_equal, False, msg)
+        resp, volume = self.volume_client.get_volume(self.volume1['id'])
+        volume1_host = volume['os-vol-host-attr:host']
 
-        # checks the two volumes belongs to different backend
-        resp, fetched_volume = self.client.get_volume(self.volume1['id'])
-        volume_host1 = fetched_volume['os-vol-host-attr:host']
+        resp, volume = self.volume_client.get_volume(self.volume2['id'])
+        volume2_host = volume['os-vol-host-attr:host']
 
-        resp, fetched_volume = self.client.get_volume(self.volume2['id'])
-        volume_host2 = fetched_volume['os-vol-host-attr:host']
-
-        msg = ("volume2 was created in the same backend as volume1: "
-               "%(volume_host2)s.") % locals()
-        self.assertNotEqual(volume_host2, volume_host1, msg)
+        msg = ("volumes %s and %s were created in the same backend" %
+               (self.volume1['id'], self.volume2['id']))
+        self.assertNotEqual(volume1_host, volume2_host, msg)
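Both tests key off the host@backend_name shape of the host attribute; in
isolation (illustrative value only):

    # With multi-backend enabled, Cinder reports 'host@backend_name'.
    volume_host = 'cinder-host-1@BACKEND_1'
    host, _, backend = volume_host.partition('@')
    assert backend == 'BACKEND_1'  # empty here would mean no backend suffix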
diff --git a/tempest/cli/simple_read_only/test_keystone.py b/tempest/cli/simple_read_only/test_keystone.py
index 067f58c..45d519b 100644
--- a/tempest/cli/simple_read_only/test_keystone.py
+++ b/tempest/cli/simple_read_only/test_keystone.py
@@ -107,3 +107,14 @@
 
     def test_admin_bashcompletion(self):
         self.keystone('bash-completion')
+
+    # Optional arguments:
+
+    def test_admin_version(self):
+        self.keystone('', flags='--version')
+
+    def test_admin_debug_list(self):
+        self.keystone('catalog', flags='--debug')
+
+    def test_admin_timeout(self):
+        self.keystone('catalog', flags='--timeout 15')
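These read-only checks shell out to the keystone CLI with extra flags. A rough
standalone equivalent of what a call like self.keystone('catalog',
flags='--timeout 15') executes (credentials and flag ordering are assumptions;
Tempest takes them from tempest.conf):

    import shlex
    import subprocess

    def run_keystone(action, flags=''):
        # Compose and run the CLI roughly the way the test helper does.
        cmd = ('keystone --os-username admin --os-password secret '
               '--os-auth-url http://127.0.0.1:5000/v2.0 '
               '%s %s' % (flags, action))
        return subprocess.check_output(shlex.split(cmd))

    run_keystone('catalog', flags='--timeout 15')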
diff --git a/tempest/config.py b/tempest/config.py
index 89a3614..150d561 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -325,12 +325,12 @@
                help="Catalog type of the Volume Service"),
     cfg.BoolOpt('multi_backend_enabled',
                 default=False,
-                help="Runs Cinder multi-backend test (requires 2 backend)"),
+                help="Runs Cinder multi-backend test (requires 2 backends)"),
     cfg.StrOpt('backend1_name',
-               default='LVM_iSCSI',
+               default='BACKEND_1',
                help="Name of the backend1 (must be declared in cinder.conf)"),
     cfg.StrOpt('backend2_name',
-               default='LVM_iSCSI_1',
+               default='BACKEND_2',
                help="Name of the backend2 (must be declared in cinder.conf)"),
 ]
 
diff --git a/tempest/scenario/test_server_advanced_ops.py b/tempest/scenario/test_server_advanced_ops.py
index 9ac0cc0..6202e91 100644
--- a/tempest/scenario/test_server_advanced_ops.py
+++ b/tempest/scenario/test_server_advanced_ops.py
@@ -28,6 +28,7 @@
     This test case stresses some advanced server instance operations:
 
      * Resizing an instance
+     * Sequenced suspend/resume of an instance
     """
 
     @classmethod
@@ -44,11 +45,6 @@
             msg = "Skipping test - flavor_ref and flavor_ref_alt are identical"
             raise cls.skipException(msg)
 
-    @classmethod
-    def tearDownClass(cls):
-        for thing in cls.resources:
-            thing.delete()
-
     def test_resize_server_confirm(self):
         # We create an instance for use in this test
         i_name = rand_name('instance')
@@ -56,12 +52,8 @@
         base_image_id = self.config.compute.image_ref
         self.instance = self.compute_client.servers.create(
             i_name, base_image_id, flavor_id)
-        try:
-            self.assertEqual(self.instance.name, i_name)
-            self.set_resource('instance', self.instance)
-        except AttributeError:
-            self.fail("Instance not successfully created.")
-
+        self.assertEqual(self.instance.name, i_name)
+        self.set_resource('instance', self.instance)
         self.assertEqual(self.instance.status, 'BUILD')
         instance_id = self.get_resource('instance').id
         self.status_timeout(
@@ -77,5 +69,42 @@
 
         LOG.debug("Confirming resize of instance %s", instance_id)
         instance.confirm_resize()
+
         self.status_timeout(
             self.compute_client.servers, instance_id, 'ACTIVE')
+
+    def test_server_sequence_suspend_resume(self):
+        # We create an instance for use in this test
+        i_name = rand_name('instance')
+        flavor_id = self.config.compute.flavor_ref
+        base_image_id = self.config.compute.image_ref
+        self.instance = self.compute_client.servers.create(
+            i_name, base_image_id, flavor_id)
+        self.assertEqual(self.instance.name, i_name)
+        self.set_resource('instance', self.instance)
+        self.assertEqual(self.instance.status, 'BUILD')
+        instance_id = self.get_resource('instance').id
+        self.status_timeout(
+            self.compute_client.servers, instance_id, 'ACTIVE')
+        instance = self.get_resource('instance')
+        instance_id = instance.id
+        LOG.debug("Suspending instance %s. Current status: %s",
+                  instance_id, instance.status)
+        instance.suspend()
+        self.status_timeout(self.compute_client.servers, instance_id,
+                            'SUSPENDED')
+        LOG.debug("Resuming instance %s. Current status: %s",
+                  instance_id, instance.status)
+        instance.resume()
+        self.status_timeout(self.compute_client.servers, instance_id,
+                            'ACTIVE')
+        LOG.debug("Suspending instance %s. Current status: %s",
+                  instance_id, instance.status)
+        instance.suspend()
+        self.status_timeout(self.compute_client.servers, instance_id,
+                            'SUSPENDED')
+        LOG.debug("Resuming instance %s. Current status: %s",
+                  instance_id, instance.status)
+        instance.resume()
+        self.status_timeout(self.compute_client.servers, instance_id,
+                            'ACTIVE')
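status_timeout drives all of the state transitions above; simplified, the
helper amounts to a poll loop like this (the idea only, not the Tempest
implementation):

    import time

    def status_timeout(manager, resource_id, target_status,
                       timeout=400, interval=3):
        # Poll the resource until it reaches target_status or time runs out.
        start = time.time()
        while time.time() - start < timeout:
            if manager.get(resource_id).status == target_status:
                return
            time.sleep(interval)
        raise AssertionError('%s never reached %s'
                             % (resource_id, target_status))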
diff --git a/tempest/services/compute/json/images_client.py b/tempest/services/compute/json/images_client.py
index 376dafc..b13d0f1 100644
--- a/tempest/services/compute/json/images_client.py
+++ b/tempest/services/compute/json/images_client.py
@@ -150,3 +150,10 @@
         resp, body = self.delete("images/%s/metadata/%s" %
                                  (str(image_id), key))
         return resp, body
+
+    def is_resource_deleted(self, id):
+        try:
+            self.get_image(id)
+        except exceptions.NotFound:
+            return True
+        return False
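is_resource_deleted is the per-client hook behind the generic
wait_for_resource_deletion poller that test_images_oneserver now calls; in
outline the poller does (simplified sketch, not the verbatim REST client):

    import time

    def wait_for_resource_deletion(client, resource_id,
                                   timeout=300, interval=3):
        # Keep calling is_resource_deleted() -- True once get_image()
        # raises NotFound -- until it succeeds or the timeout expires.
        start = time.time()
        while time.time() - start < timeout:
            if client.is_resource_deleted(resource_id):
                return
            time.sleep(interval)
        raise AssertionError('%s failed to delete within %ss'
                             % (resource_id, timeout))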
diff --git a/tempest/services/compute/xml/images_client.py b/tempest/services/compute/xml/images_client.py
index c7e337b..cc13aa1 100644
--- a/tempest/services/compute/xml/images_client.py
+++ b/tempest/services/compute/xml/images_client.py
@@ -226,3 +226,10 @@
         """Deletes a single image metadata key/value pair."""
         return self.delete("images/%s/metadata/%s" % (str(image_id), key),
                            self.headers)
+
+    def is_resource_deleted(self, id):
+        try:
+            self.get_image(id)
+        except exceptions.NotFound:
+            return True
+        return False
diff --git a/tempest/services/identity/v3/json/endpoints_client.py b/tempest/services/identity/v3/json/endpoints_client.py
old mode 100755
new mode 100644
diff --git a/tempest/services/identity/v3/xml/endpoints_client.py b/tempest/services/identity/v3/xml/endpoints_client.py
old mode 100755
new mode 100644
diff --git a/tempest/thirdparty/boto/test_s3_ec2_images.py b/tempest/thirdparty/boto/test_s3_ec2_images.py
index 2c0d8ae..0f836d0 100644
--- a/tempest/thirdparty/boto/test_s3_ec2_images.py
+++ b/tempest/thirdparty/boto/test_s3_ec2_images.py
@@ -72,21 +72,22 @@
         retrieved_image = self.images_client.get_image(image["image_id"])
         self.assertTrue(retrieved_image.name == image["name"])
         self.assertTrue(retrieved_image.id == image["image_id"])
-        if retrieved_image.state != "available":
+        state = retrieved_image.state
+        if state != "available":
             def _state():
                 retr = self.images_client.get_image(image["image_id"])
                 return retr.state
             state = state_wait(_state, "available")
         self.assertEqual("available", state)
         self.images_client.deregister_image(image["image_id"])
-        #TODO(afazekas): double deregister ?
+        self.assertNotIn(image["image_id"], str(
+            self.images_client.get_all_images()))
         self.cancelResourceCleanUp(image["cleanUp"])
 
-    @testtools.skip("Skipped until the Bug #1074904 is resolved")
     def test_register_get_deregister_aki_image(self):
         # Register and deregister aki image
         image = {"name": rand_name("aki-name-"),
-                 "location": self.bucket_name + "/" + self.ari_manifest,
+                 "location": self.bucket_name + "/" + self.aki_manifest,
                  "type": "aki"}
         image["image_id"] = self.images_client.register_image(
             name=image["name"],
@@ -102,9 +103,8 @@
         if retrieved_image.state != "available":
             self.assertImageStateWait(retrieved_image, "available")
         self.images_client.deregister_image(image["image_id"])
-        #TODO(afazekas): verify deregister in  a better way
-        retrieved_image = self.images_client.get_image(image["image_id"])
-        self.assertIn(retrieved_image.state, self.valid_image_state)
+        self.assertNotIn(image["image_id"], str(
+            self.images_client.get_all_images()))
         self.cancelResourceCleanUp(image["cleanUp"])
 
     @testtools.skip("Skipped until the Bug #1074908 and #1074904 is resolved")
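One caveat on the new deregister checks: assertNotIn against
str(get_all_images()) matches on the string form of the whole listing, so any
field could produce a spurious hit. A stricter variant compares ids directly
(sketch, using boto's Image.id attribute):

    def assert_image_deregistered(test, images_client, image_id):
        # Compare against the actual image ids rather than a substring
        # of the repr of the whole listing.
        image_ids = [img.id for img in images_client.get_all_images()]
        test.assertNotIn(image_id, image_ids)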