Merge "Add compute API admin test for swap volume"
diff --git a/tempest/api/compute/admin/test_volume_swap.py b/tempest/api/compute/admin/test_volume_swap.py
new file mode 100644
index 0000000..f603abd
--- /dev/null
+++ b/tempest/api/compute/admin/test_volume_swap.py
@@ -0,0 +1,83 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.compute import base
+from tempest.common import waiters
+from tempest import config
+from tempest import test
+
+CONF = config.CONF
+
+
+class TestVolumeSwap(base.BaseV2ComputeAdminTest):
+    """The test suite for swapping of volume with admin user.
+
+    The following is the scenario outline:
+    1. Create a volume "volume1" with non-admin.
+    2. Create a volume "volume2" with non-admin.
+    3. Boot an instance "instance1" with non-admin.
+    4. Attach "volume1" to "instance1" with non-admin.
+    5. Swap volume from "volume1" to "volume2" as admin.
+    6. Check that the swap succeeded: "volume2" is attached to
+       "instance1" and "volume1" is in the 'available' state.
+    """
+
+    @classmethod
+    def skip_checks(cls):
+        super(TestVolumeSwap, cls).skip_checks()
+        if not CONF.compute_feature_enabled.swap_volume:
+            raise cls.skipException("Swapping volumes is not supported.")
+
+    @classmethod
+    def setup_clients(cls):
+        super(TestVolumeSwap, cls).setup_clients()
+        # We need the admin client to perform the update (swap) volume call.
+        cls.servers_admin_client = cls.os_adm.servers_client
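+        # Nova's default policy restricts the swap volume operation to
+        # administrators, which is why this test uses the admin client.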
+
+    @test.idempotent_id('1769f00d-a693-4d67-a631-6a3496773813')
+    @test.services('volume')
+    def test_volume_swap(self):
+        # Create two volumes.
+        # NOTE(gmann): Volumes are created before the server so that their
+        # cleanup can happen successfully irrespective of which volume is
+        # attached to the server.
+        volume1 = self.create_volume()
+        volume2 = self.create_volume()
+        # Boot server
+        server = self.create_test_server(wait_until='ACTIVE')
+        # Attach "volume1" to server
+        self.attach_volume(server, volume1)
+        # Swap volume from "volume1" to "volume2"
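+        # The swap is requested through the volume-attachments update API:
+        # PUT /servers/{server_id}/os-volume_attachments/{volume_id} with
+        # the new volume's ID in the request body.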
+        self.servers_admin_client.update_attached_volume(
+            server['id'], volume1['id'], volumeId=volume2['id'])
+        waiters.wait_for_volume_status(self.volumes_client,
+                                       volume1['id'], 'available')
+        waiters.wait_for_volume_status(self.volumes_client,
+                                       volume2['id'], 'in-use')
+        self.addCleanup(self.servers_client.detach_volume,
+                        server['id'], volume2['id'])
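+        # The cleanups registered by attach_volume() still target "volume1";
+        # after the swap they are harmless: "volume1" is already detached
+        # and a 404 on its detach call is ignored.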
+        # Verify "volume2" is attached to the server
+        vol_attachments = self.servers_client.list_volume_attachments(
+            server['id'])['volumeAttachments']
+        self.assertEqual(1, len(vol_attachments))
+        self.assertEqual(volume2['id'], vol_attachments[0]['volumeId'])
+
+        # TODO(mriedem): Test swapping back from volume2 to volume1 after
+        # nova bug 1490236 is fixed.
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 27afff3..a4578ae 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -120,6 +120,7 @@
         cls.images = []
         cls.security_groups = []
         cls.server_groups = []
+        cls.volumes = []
 
     @classmethod
     def resource_cleanup(cls):
@@ -127,6 +128,7 @@
         cls.clear_servers()
         cls.clear_security_groups()
         cls.clear_server_groups()
+        cls.clear_volumes()
         super(BaseV2ComputeTest, cls).resource_cleanup()
 
     @classmethod
@@ -370,6 +372,55 @@
         self.useFixture(api_microversion_fixture.APIMicroversionFixture(
             self.request_microversion))
 
+    @classmethod
+    def create_volume(cls):
+        """Create a volume and wait for it to become 'available'.
+
+        :returns: The available volume.
+        """
+        vol_name = data_utils.rand_name(cls.__name__ + '-volume')
+        volume = cls.volumes_client.create_volume(
+            size=CONF.volume.volume_size, display_name=vol_name)['volume']
+        cls.volumes.append(volume)
+        waiters.wait_for_volume_status(cls.volumes_client,
+                                       volume['id'], 'available')
+        return volume
+
+    @classmethod
+    def clear_volumes(cls):
+        LOG.debug('Clearing volumes: %s', ','.join(
+            volume['id'] for volume in cls.volumes))
+        for volume in cls.volumes:
+            try:
+                test_utils.call_and_ignore_notfound_exc(
+                    cls.volumes_client.delete_volume, volume['id'])
+            except Exception:
+                LOG.exception('Deleting volume %s failed', volume['id'])
+
+        for volume in cls.volumes:
+            try:
+                cls.volumes_client.wait_for_resource_deletion(volume['id'])
+            except Exception:
+                LOG.exception('Waiting for deletion of volume %s failed',
+                              volume['id'])
+
+    def attach_volume(self, server, volume):
+        """Attaches volume to server and waits for 'in-use' volume status."""
+        self.servers_client.attach_volume(
+            server['id'], volumeId=volume['id'])
+        # On teardown detach the volume and wait for it to be available. This
+        # is so we don't error out when trying to delete the volume during
+        # teardown.
+        self.addCleanup(waiters.wait_for_volume_status,
+                        self.volumes_client, volume['id'], 'available')
+        # Ignore 404s on detach in case the server is deleted or the volume
+        # is already detached.
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.servers_client.detach_volume,
+                        server['id'], volume['id'])
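+        # Cleanups run in LIFO order, so on teardown the detach above runs
+        # before the wait for 'available' status registered first.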
+        waiters.wait_for_volume_status(self.volumes_client,
+                                       volume['id'], 'in-use')
+
 
 class BaseV2ComputeAdminTest(BaseV2ComputeTest):
     """Base test case class for Compute Admin API tests."""
diff --git a/tempest/config.py b/tempest/config.py
index b6fca7e..eeafea6 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -414,7 +414,10 @@
                      "list indicates all filters are disabled. The full "
                      "available list of filters is in nova.conf: "
                      "DEFAULT.scheduler_available_filters"),
-
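+    # Operators should enable this only if the deployment's compute driver
+    # supports Nova's swap volume operation (e.g. the libvirt driver).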
+    cfg.BoolOpt('swap_volume',
+                default=False,
+                help='Does the test environment support in-place swapping of '
+                     'volumes attached to a server instance?'),
 ]