Merge "Update service test case - V3"
diff --git a/.gitignore b/.gitignore
index f5f51ab..0f4880f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -14,3 +14,4 @@
 .venv
 dist
 build
+.testrepository
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index ac18490..b64b047 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -164,6 +164,9 @@
 # The version of the OpenStack Images API to use
 api_version = 1
 
+# HTTP-accessible image to use for the Glance HTTP image tests
+http_image = http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz
+
 [network]
 # This section contains configuration options used when executing tests
 # against the OpenStack Network API.
@@ -210,6 +213,12 @@
 # Number of seconds to time out on waiting for a volume
 # to be available or reach an expected status
 build_timeout = 300
+# Runs the Cinder multi-backend tests (requires 2 backends declared in cinder.conf)
+# The backends must have different volume_backend_name values
+# (backend1_name and backend2_name have to be different)
+multi_backend_enabled = false
+backend1_name = LVM_iSCSI
+backend2_name = LVM_iSCSI_1
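+# Example cinder.conf snippet declaring two backends (section names below are
+# hypothetical; only the volume_backend_name values must match the settings above):
+#   enabled_backends = lvm-1,lvm-2
+#   [lvm-1]
+#   volume_backend_name = LVM_iSCSI
+#   [lvm-2]
+#   volume_backend_name = LVM_iSCSI_1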
 
 [object-storage]
 # This section contains configuration options used when executing tests
diff --git a/tempest/config.py b/tempest/config.py
index 556e2a7..a90767e 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -243,7 +243,11 @@
                help="Version of the API"),
     cfg.StrOpt('catalog_type',
                default='image',
-               help='Catalog type of the Image service.')
+               help='Catalog type of the Image service.'),
+    cfg.StrOpt('http_image',
+               default='http://download.cirros-cloud.net/0.3.1/'
+               'cirros-0.3.1-x86_64-uec.tar.gz',
+               help='HTTP accessible image to use for the HTTP image tests')
 ]
 
 
@@ -303,6 +307,15 @@
     cfg.StrOpt('catalog_type',
                default='Volume',
                help="Catalog type of the Volume Service"),
+    cfg.BoolOpt('multi_backend_enabled',
+                default=False,
+                help="Runs Cinder multi-backend test (requires 2 backend)"),
+    cfg.StrOpt('backend1_name',
+               default='LVM_iSCSI',
+               help="Name of the backend1 (must be declared in cinder.conf)"),
+    cfg.StrOpt('backend2_name',
+               default='LVM_iSCSI_1',
+               help="Name of the backend2 (must be declared in cinder.conf)"),
 ]
 
 
diff --git a/tempest/tests/image/v1/test_images.py b/tempest/tests/image/v1/test_images.py
index c01aeaf..19c0aa0 100644
--- a/tempest/tests/image/v1/test_images.py
+++ b/tempest/tests/image/v1/test_images.py
@@ -81,32 +81,10 @@
         self.assertEqual(properties['key2'], 'value2')
 
     def test_register_http_image(self):
-        container_client = self.os.container_client
-        object_client = self.os.object_client
-        container_name = "image_container"
-        object_name = "test_image.img"
-        container_client.create_container(container_name)
-        self.addCleanup(container_client.delete_container, container_name)
-        cont_headers = {'X-Container-Read': '.r:*'}
-        resp, _ = container_client.update_container_metadata(
-            container_name,
-            metadata=cont_headers,
-            metadata_prefix='')
-        self.assertEqual(resp['status'], '204')
-
-        data = "TESTIMAGE"
-        resp, _ = object_client.create_object(container_name,
-                                              object_name, data)
-        self.addCleanup(object_client.delete_object, container_name,
-                        object_name)
-        self.assertEqual(resp['status'], '201')
-        object_url = '/'.join((object_client.base_url,
-                               container_name,
-                               object_name))
         resp, body = self.create_image(name='New Http Image',
                                        container_format='bare',
                                        disk_format='raw', is_public=True,
-                                       copy_from=object_url)
+                                       copy_from=self.config.images.http_image)
         self.assertTrue('id' in body)
         image_id = body.get('id')
         self.created_images.append(image_id)
@@ -115,7 +93,6 @@
         self.client.wait_for_image_status(image_id, 'active')
         resp, body = self.client.get_image(image_id)
         self.assertEqual(resp['status'], '200')
-        self.assertEqual(body, data)
 
     @attr(type='image')
     def test_register_image_with_min_ram(self):
diff --git a/tempest/tests/volume/admin/test_multi_backend.py b/tempest/tests/volume/admin/test_multi_backend.py
new file mode 100644
index 0000000..04007c9
--- /dev/null
+++ b/tempest/tests/volume/admin/test_multi_backend.py
@@ -0,0 +1,156 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import logging
+import testtools
+
+from tempest.common.utils.data_utils import rand_name
+from tempest import config
+from tempest.services.volume.json.admin import volume_types_client
+from tempest.services.volume.json import volumes_client
+from tempest.tests.volume import base
+
+LOG = logging.getLogger(__name__)
+
+
+class VolumeMultiBackendTest(base.BaseVolumeAdminTest):
+    _interface = "json"
+
+    _volume_config = config.TempestConfig().volume
+    multi_backend_enabled = _volume_config.multi_backend_enabled
+    backend1_name = _volume_config.backend1_name
+    backend2_name = _volume_config.backend2_name
+    backend_names_equal = (backend1_name == backend2_name)
+
+    @classmethod
+    @testtools.skipIf(not multi_backend_enabled,
+                      "Cinder multi-backend feature is not available")
+    def setUpClass(cls):
+        super(VolumeMultiBackendTest, cls).setUpClass()
+
+        adm_user = cls.config.identity.admin_username
+        adm_pass = cls.config.identity.admin_password
+        adm_tenant = cls.config.identity.admin_tenant_name
+        auth_url = cls.config.identity.uri
+
+        cls.client = volumes_client.VolumesClientJSON(cls.config,
+                                                      adm_user,
+                                                      adm_pass,
+                                                      auth_url,
+                                                      adm_tenant)
+        cls.client2 = volume_types_client.VolumeTypesClientJSON(cls.config,
+                                                                adm_user,
+                                                                adm_pass,
+                                                                auth_url,
+                                                                adm_tenant)
+
+        # variables initialization
+        type_name1 = rand_name('type-')
+        type_name2 = rand_name('type-')
+        cls.volume_type_list = []
+
+        vol_name1 = rand_name('Volume-')
+        vol_name2 = rand_name('Volume-')
+        cls.volume_id_list = []
+
+        try:
+            # Volume types creation
+            extra_specs1 = {"volume_backend_name": cls.backend1_name}
+            resp, cls.body1 = cls.client2.create_volume_type(
+                type_name1, extra_specs=extra_specs1)
+            cls.volume_type_list.append(cls.body1)
+
+            extra_specs2 = {"volume_backend_name": cls.backend2_name}
+            resp, cls.body2 = cls.client2.create_volume_type(
+                type_name2, extra_specs=extra_specs2)
+            cls.volume_type_list.append(cls.body2)
+
+            # Volumes creation
+            resp, cls.volume1 = cls.client.create_volume(
+                size=1, display_name=vol_name1, volume_type=type_name1)
+            cls.client.wait_for_volume_status(cls.volume1['id'], 'available')
+            cls.volume_id_list.append(cls.volume1['id'])
+
+            resp, cls.volume2 = cls.client.create_volume(
+                size=1, display_name=vol_name2, volume_type=type_name2)
+            cls.client.wait_for_volume_status(cls.volume2['id'], 'available')
+            cls.volume_id_list.append(cls.volume2['id'])
+        except Exception:
+            LOG.exception("setup failed")
+            cls.tearDownClass()
+            raise
+
+    @classmethod
+    def tearDownClass(cls):
+        super(VolumeMultiBackendTest, cls).tearDownClass()
+
+        # volumes deletion
+        for volume_id in cls.volume_id_list:
+            cls.client.delete_volume(volume_id)
+            cls.client.wait_for_resource_deletion(volume_id)
+
+        # volume types deletion
+        for volume_type in cls.volume_type_list:
+            cls.client2.delete_volume_type(volume_type)
+
+    def test_multi_backend_enabled(self):
+        # this test checks that multi-backend is enabled for at least the
+        # hosts of the volumes created in setUpClass
+        # if multi-backend is enabled, os-vol-host-attr:host should look
+        # like: host@backend_name
+        # this test fails if:
+        # - multi-backend is not enabled
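+        # e.g. with multi-backend enabled the host attribute is expected to
+        # look like 'myhost@LVM_iSCSI' (hypothetical value); without it,
+        # just 'myhost'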
+        resp, fetched_volume = self.client.get_volume(self.volume1['id'])
+        self.assertEqual(200, resp.status)
+
+        volume_host1 = fetched_volume['os-vol-host-attr:host']
+        msg = ("Multi-backend is not available for at least host "
+               "%(volume_host1)s") % locals()
+        self.assertTrue(len(volume_host1.split("@")) > 1, msg)
+
+        resp, fetched_volume = self.client.get_volume(self.volume2['id'])
+        self.assertEqual(200, resp.status)
+
+        volume_host2 = fetched_volume['os-vol-host-attr:host']
+        msg = ("Multi-backend is not available for at least host "
+               "%(volume_host2)s") % locals()
+        self.assertTrue(len(volume_host2.split("@")) > 1, msg)
+
+    def test_backend_name_distinction(self):
+        # this test checks that the two volumes created in setUpClass do not
+        # belong to the same backend (if they are in the same backend, the
+        # volume_backend_name distinction is not working properly)
+        # this test fails if:
+        # - tempest.conf is not properly configured
+        # - the two volumes belong to the same backend
+
+        # checks tempest.conf
+        msg = ("tempest.conf is not well configured, "
+               "backend1_name and backend2_name are equal")
+        self.assertEqual(self.backend_names_equal, False, msg)
+
+        # check that the two volumes belong to different backends
+        resp, fetched_volume = self.client.get_volume(self.volume1['id'])
+        volume_host1 = fetched_volume['os-vol-host-attr:host']
+
+        resp, fetched_volume = self.client.get_volume(self.volume2['id'])
+        volume_host2 = fetched_volume['os-vol-host-attr:host']
+
+        msg = ("volume2 was created in the same backend as volume1: "
+               "%(volume_host2)s.") % locals()
+        self.assertNotEqual(volume_host2, volume_host1, msg)
diff --git a/tempest/tests/volume/test_volumes_snapshots.py b/tempest/tests/volume/test_volumes_snapshots.py
index e7fa97d..ba8ba6c 100644
--- a/tempest/tests/volume/test_volumes_snapshots.py
+++ b/tempest/tests/volume/test_volumes_snapshots.py
@@ -12,27 +12,59 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import logging
+
+from tempest.test import attr
 from tempest.tests.volume import base
 
+LOG = logging.getLogger(__name__)
+
 
 class VolumesSnapshotTest(base.BaseVolumeTest):
     _interface = "json"
 
-    def test_volume_from_snapshot(self):
-        volume_origin = self.create_volume(size=1)
-        snapshot = self.create_snapshot(volume_origin['id'])
-        volume_snap = self.create_volume(size=1,
-                                         snapshot_id=
-                                         snapshot['id'])
+    @classmethod
+    def setUpClass(cls):
+        super(VolumesSnapshotTest, cls).setUpClass()
+        try:
+            cls.volume_origin = cls.create_volume()
+        except Exception:
+            LOG.exception("setup failed")
+            cls.tearDownClass()
+            raise
+
+    @classmethod
+    def tearDownClass(cls):
+        super(VolumesSnapshotTest, cls).tearDownClass()
+
+    @attr(type='smoke')
+    def test_snapshot_create_get_delete(self):
+        # Create a snapshot, get some of its details and then delete it
+        resp, snapshot = self.snapshots_client.create_snapshot(
+            self.volume_origin['id'])
+        self.assertEqual(200, resp.status)
+        self.snapshots_client.wait_for_snapshot_status(snapshot['id'],
+                                                       'available')
+        errmsg = "Referred volume origin ID mismatch"
+        self.assertEqual(self.volume_origin['id'],
+                         snapshot['volume_id'],
+                         errmsg)
         self.snapshots_client.delete_snapshot(snapshot['id'])
-        self.volumes_client.delete_volume(volume_snap['id'])
         self.snapshots_client.wait_for_resource_deletion(snapshot['id'])
-        self.snapshots.remove(snapshot)
-        self.volumes_client.delete_volume(volume_origin['id'])
-        self.volumes_client.wait_for_resource_deletion(volume_snap['id'])
-        self.volumes.remove(volume_snap)
-        self.volumes_client.wait_for_resource_deletion(volume_origin['id'])
-        self.volumes.remove(volume_origin)
+
+    def test_volume_from_snapshot(self):
+        # Create a temporary snapshot using the wrapper method from base,
+        # then create a snapshot-based volume, check the response code
+        # and delete it
+        snapshot = self.create_snapshot(self.volume_origin['id'])
+        # NOTE: size is also required when passing snapshot_id
+        resp, volume = self.volumes_client.create_volume(
+            size=1,
+            snapshot_id=snapshot['id'])
+        self.assertEqual(200, resp.status)
+        self.volumes_client.wait_for_volume_status(volume['id'], 'available')
+        self.volumes_client.delete_volume(volume['id'])
+        self.volumes_client.wait_for_resource_deletion(volume['id'])
+        self.clear_snapshots()
 
 
 class VolumesSnapshotTestXML(VolumesSnapshotTest):