Add a new test for iSCSI live block migration

Requires a new config value, as the test depends on a
specific XAPI patch that adds support for the
relax-xsm-sr-check behaviour.
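
To exercise the new test, live migration, block migration, and the
new flag must all be enabled. An illustrative tempest.conf excerpt
(option names are taken from this change; the [compute] section is
assumed from the CONF.compute.* lookups, and the values presume a
XenServer pool running a XAPI with the relax-xsm-sr-check patch):

    [compute]
    live_migration_available = true
    use_block_migration_for_live_migration = true
    block_migrate_supports_cinder_iscsi = true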

Change-Id: I81cc39fed392fce9dfbda5683ffba29eeba936be
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index b64b047..91d83a3 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -114,6 +114,10 @@
 # performed, which requires XenServer pools in case of using XS)
 use_block_migration_for_live_migration = false
 
+# Supports iSCSI block migration - depends on XAPI supporting the
+# relax-xsm-sr-check behaviour
+block_migrate_supports_cinder_iscsi = false
+
 # By default, rely on the status of the diskConfig extension to
 # decide if to execute disk config tests. When set to false, tests
 # are forced to skip, regardless of the extension status
diff --git a/tempest/config.py b/tempest/config.py
index a90767e..de3e11d 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -129,6 +129,10 @@
                 default=False,
                 help="Does the test environment use block devices for live "
                      "migration"),
+    cfg.BoolOpt('block_migrate_supports_cinder_iscsi',
+                default=False,
+                help="Does the test environment block migration support "
+                     "cinder iSCSI volumes"),
     cfg.BoolOpt('change_password_available',
                 default=False,
                 help="Does the test environment support changing the admin "
diff --git a/tempest/tests/compute/test_live_block_migration.py b/tempest/tests/compute/test_live_block_migration.py
index 30ff882..e22d45a 100644
--- a/tempest/tests/compute/test_live_block_migration.py
+++ b/tempest/tests/compute/test_live_block_migration.py
@@ -91,6 +91,13 @@
             self.created_server_ids.append(server_id)
             return server_id
 
+    def _volume_clean_up(self, server_id, volume_id):
+        resp, body = self.volumes_client.get_volume(volume_id)
+        if body['status'] == 'in-use':
+            self.servers_client.detach_volume(server_id, volume_id)
+            self.volumes_client.wait_for_volume_status(volume_id, 'available')
+        self.volumes_client.delete_volume(volume_id)
+
     @attr(type='positive')
     @testtools.skipIf(not CONF.compute.live_migration_available,
                       'Live migration not available')
@@ -117,6 +124,37 @@
                           server_id, target_host)
         self.assertEquals('ACTIVE', self._get_server_status(server_id))
 
+    @attr(type='positive')
+    @testtools.skipIf(not CONF.compute.live_migration_available or
+                      not CONF.compute.use_block_migration_for_live_migration,
+                      'Block Live migration not available')
+    @testtools.skipIf(not CONF.compute.block_migrate_supports_cinder_iscsi,
+                      'Block Live migration not configured for iSCSI')
+    def test_iscsi_volume(self):
+        # Block migrate an instance with an attached volume to another host
+        if len(self._get_compute_hostnames()) < 2:
+            self.skipTest(
+                "Less than 2 compute nodes, skipping migration test.")
+        server_id = self._get_an_active_server()
+        actual_host = self._get_host_for_server(server_id)
+        target_host = self._get_host_other_than(actual_host)
+
+        resp, volume = self.volumes_client.create_volume(1,
+                                                         display_name='test')
+
+        self.volumes_client.wait_for_volume_status(volume['id'],
+                                                   'available')
+        self.addCleanup(self._volume_clean_up, server_id, volume['id'])
+
+        # Attach the volume to the server
+        self.servers_client.attach_volume(server_id, volume['id'],
+                                          device='/dev/xvdb')
+        self.volumes_client.wait_for_volume_status(volume['id'], 'in-use')
+
+        self._migrate_server_to(server_id, target_host)
+        self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
+        self.assertEquals(target_host, self._get_host_for_server(server_id))
+
     @classmethod
     def tearDownClass(cls):
         for server_id in cls.created_server_ids:
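
With those options set, the new test can be targeted on its own. A
hypothetical invocation, assuming a nose-based runner is available
(the file path is the one added by this change):

    nosetests tempest/tests/compute/test_live_block_migration.py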