Merge "Add service tagging for 'test_iscsi_volume'"
diff --git a/.zuul.yaml b/.zuul.yaml
index 04d60fe..8dcb935 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -274,6 +274,16 @@
               - ^setup.cfg$
               - ^tempest/hacking/.*$
               - ^tempest/tests/.*$
+        - nova-live-migration:
+            irrelevant-files:
+              - ^(test-|)requirements.txt$
+              - ^.*\.rst$
+              - ^doc/.*$
+              - ^etc/.*$
+              - ^releasenotes/.*$
+              - ^setup.cfg$
+              - ^tempest/hacking/.*$
+              - ^tempest/tests/.*$
     periodic-stable:
       jobs:
         - tempest-full-queens
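
The new nova-live-migration entry uses Zuul's irrelevant-files matcher: the job is skipped only when every file touched by a change matches one of the listed patterns. A minimal Python sketch of that behaviour (not Zuul's actual matching code; the helper name and the sample file lists are illustrative):

import re

# Patterns copied from the nova-live-migration entry above.
IRRELEVANT = [
    r"^(test-|)requirements.txt$",
    r"^.*\.rst$",
    r"^doc/.*$",
    r"^etc/.*$",
    r"^releasenotes/.*$",
    r"^setup.cfg$",
    r"^tempest/hacking/.*$",
    r"^tempest/tests/.*$",
]

def job_should_run(changed_files):
    # The job runs unless every changed file matches some irrelevant pattern.
    return any(not any(re.match(p, f) for p in IRRELEVANT)
               for f in changed_files)

# Docs-only change: the job is skipped.
assert not job_should_run(["doc/source/index.rst", "releasenotes/notes/x.yaml"])
# Touching test code still triggers the job.
assert job_should_run(
    ["tempest/api/volume/admin/test_volume_retype_with_migration.py"])
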
diff --git a/tempest/api/volume/admin/test_volume_retype_with_migration.py b/tempest/api/volume/admin/test_volume_retype_with_migration.py
index f0b3a4f..025c1be 100644
--- a/tempest/api/volume/admin/test_volume_retype_with_migration.py
+++ b/tempest/api/volume/admin/test_volume_retype_with_migration.py
@@ -46,13 +46,10 @@
         extra_specs_src = {"volume_backend_name": backend_src}
         extra_specs_dst = {"volume_backend_name": backend_dst}
 
-        src_vol_type = cls.create_volume_type(extra_specs=extra_specs_src)
+        cls.src_vol_type = cls.create_volume_type(extra_specs=extra_specs_src)
         cls.dst_vol_type = cls.create_volume_type(extra_specs=extra_specs_dst)
 
-        cls.src_vol = cls.create_volume(volume_type=src_vol_type['name'])
-
-    @classmethod
-    def resource_cleanup(cls):
+    def _wait_for_internal_volume_cleanup(self, vol):
         # When retyping a volume, Cinder creates an internal volume in the
         # target backend. The volume in the source backend is deleted after
         # the migration, so we need to wait for Cinder to delete this volume
@@ -60,40 +57,37 @@
 
         # This list should return 2 volumes until the copy and cleanup
         # process is finished.
-        fetched_list = cls.admin_volume_client.list_volumes(
+        fetched_list = self.admin_volume_client.list_volumes(
             params={'all_tenants': True,
-                    'display_name': cls.src_vol['name']})['volumes']
+                    'display_name': vol['name']})['volumes']
 
         for fetched_vol in fetched_list:
-            if fetched_vol['id'] != cls.src_vol['id']:
+            if fetched_vol['id'] != vol['id']:
                 # This is the Cinder internal volume
                 LOG.debug('Waiting for internal volume %s deletion',
                           fetched_vol['id'])
-                cls.admin_volume_client.wait_for_resource_deletion(
+                self.admin_volume_client.wait_for_resource_deletion(
                     fetched_vol['id'])
                 break
 
-        super(VolumeRetypeWithMigrationTest, cls).resource_cleanup()
-
-    @decorators.idempotent_id('a1a41f3f-9dad-493e-9f09-3ff197d477cd')
-    def test_available_volume_retype_with_migration(self):
-
+    def _retype_volume(self, volume):
         keys_with_no_change = ('id', 'size', 'description', 'name', 'user_id',
                                'os-vol-tenant-attr:tenant_id')
         keys_with_change = ('volume_type', 'os-vol-host-attr:host')
 
         volume_source = self.admin_volume_client.show_volume(
-            self.src_vol['id'])['volume']
+            volume['id'])['volume']
 
         self.volumes_client.retype_volume(
-            self.src_vol['id'],
+            volume['id'],
             new_type=self.dst_vol_type['name'],
             migration_policy='on-demand')
-
-        waiters.wait_for_volume_retype(self.volumes_client, self.src_vol['id'],
+        self.addCleanup(self._wait_for_internal_volume_cleanup, volume)
+        waiters.wait_for_volume_retype(self.volumes_client, volume['id'],
                                        self.dst_vol_type['name'])
+
         volume_dest = self.admin_volume_client.show_volume(
-            self.src_vol['id'])['volume']
+            volume['id'])['volume']
 
         # Check the volume information after the migration.
         self.assertEqual('success',
@@ -105,3 +99,27 @@
 
         for key in keys_with_change:
             self.assertNotEqual(volume_source[key], volume_dest[key])
+
+    @decorators.idempotent_id('a1a41f3f-9dad-493e-9f09-3ff197d477cd')
+    def test_available_volume_retype_with_migration(self):
+        src_vol = self.create_volume(volume_type=self.src_vol_type['name'])
+        self._retype_volume(src_vol)
+
+    @decorators.idempotent_id('d0d9554f-e7a5-4104-8973-f35b27ccb60d')
+    def test_volume_from_snapshot_retype_with_migration(self):
+        # Create a volume in the first backend
+        src_vol = self.create_volume(volume_type=self.src_vol_type['name'])
+
+        # Create a volume snapshot
+        snapshot = self.create_snapshot(src_vol['id'])
+
+        # Create a volume from the snapshot
+        src_vol = self.create_volume(volume_type=self.src_vol_type['name'],
+                                     snapshot_id=snapshot['id'])
+
+        # Delete the snapshot
+        self.snapshots_client.delete_snapshot(snapshot['id'])
+        self.snapshots_client.wait_for_resource_deletion(snapshot['id'])
+
+        # Migrate the volume created from the snapshot to the second backend
+        self._retype_volume(src_vol)
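
This refactor replaces the class-level resource_cleanup() with a per-test addCleanup() registered inside _retype_volume(), so the internal-volume wait is tied to the volume each test created. A self-contained sketch with plain unittest (class and test names are illustrative, not part of this change) showing that cleanups run after the test body in last-in, first-out order:

import unittest

class CleanupOrderExample(unittest.TestCase):
    events = []  # records the order in which things happen

    def test_per_resource_cleanup(self):
        for name in ("vol-a", "vol-b"):
            # Mirrors self.addCleanup(self._wait_for_internal_volume_cleanup,
            # volume) in _retype_volume(): one cleanup per created volume.
            self.addCleanup(self.events.append, "cleaned " + name)
        self.events.append("test body done")

if __name__ == "__main__":
    unittest.main(exit=False, verbosity=0)
    # Cleanups ran after the test, newest first (LIFO).
    assert CleanupOrderExample.events == [
        "test body done", "cleaned vol-b", "cleaned vol-a"]
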
diff --git a/tempest/cmd/init.py b/tempest/cmd/init.py
index 9a85d89..84c8631 100644
--- a/tempest/cmd/init.py
+++ b/tempest/cmd/init.py
@@ -136,7 +136,7 @@
         if not os.path.isdir(local_dir):
             LOG.debug('Creating local working dir: %s', local_dir)
             os.mkdir(local_dir)
-        elif not os.listdir(local_dir) == []:
+        elif os.listdir(local_dir):
             raise OSError("Directory you are trying to initialize already "
                           "exists and is not empty: %s" % local_dir)
 
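
The init.py change relies on os.listdir() returning a list, whose truthiness already expresses "directory is non-empty"; the explicit comparison to [] was redundant. A quick standard-library check of both branches:

import os
import tempfile

with tempfile.TemporaryDirectory() as d:
    assert not os.listdir(d)   # empty directory: falsy, init proceeds
    open(os.path.join(d, "marker"), "w").close()
    assert os.listdir(d)       # non-empty: truthy, the OSError branch fires
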
diff --git a/tempest/lib/api_schema/response/compute/v2_19/servers.py b/tempest/lib/api_schema/response/compute/v2_19/servers.py
index 05cc32c..fd9e933 100644
--- a/tempest/lib/api_schema/response/compute/v2_19/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_19/servers.py
@@ -14,9 +14,9 @@
 
 import copy
 
-from tempest.lib.api_schema.response.compute.v2_1 import servers as serversv21
 from tempest.lib.api_schema.response.compute.v2_16 import servers \
     as serversv216
+from tempest.lib.api_schema.response.compute.v2_9 import servers as serversv29
 
 list_servers = copy.deepcopy(serversv216.list_servers)
 
@@ -32,20 +32,20 @@
 list_servers_detail['response_body']['properties']['servers']['items'][
     'required'].append('description')
 
-update_server = copy.deepcopy(serversv21.update_server)
+update_server = copy.deepcopy(serversv29.update_server)
 update_server['response_body']['properties']['server'][
     'properties'].update({'description': {'type': ['string', 'null']}})
 update_server['response_body']['properties']['server'][
     'required'].append('description')
 
-rebuild_server = copy.deepcopy(serversv21.rebuild_server)
+rebuild_server = copy.deepcopy(serversv29.rebuild_server)
 rebuild_server['response_body']['properties']['server'][
     'properties'].update({'description': {'type': ['string', 'null']}})
 rebuild_server['response_body']['properties']['server'][
     'required'].append('description')
 
 rebuild_server_with_admin_pass = copy.deepcopy(
-    serversv21.rebuild_server_with_admin_pass)
+    serversv29.rebuild_server_with_admin_pass)
 rebuild_server_with_admin_pass['response_body']['properties']['server'][
     'properties'].update({'description': {'type': ['string', 'null']}})
 rebuild_server_with_admin_pass['response_body']['properties']['server'][
diff --git a/tempest/lib/api_schema/response/compute/v2_9/servers.py b/tempest/lib/api_schema/response/compute/v2_9/servers.py
index e260e48..7df02d5 100644
--- a/tempest/lib/api_schema/response/compute/v2_9/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_9/servers.py
@@ -14,6 +14,7 @@
 
 import copy
 
+from tempest.lib.api_schema.response.compute.v2_1 import servers as servers_21
 from tempest.lib.api_schema.response.compute.v2_6 import servers
 
 list_servers = copy.deepcopy(servers.list_servers)
@@ -29,3 +30,22 @@
     'properties'].update({'locked': {'type': 'boolean'}})
 list_servers_detail['response_body']['properties']['servers']['items'][
     'required'].append('locked')
+
+update_server = copy.deepcopy(servers_21.update_server)
+update_server['response_body']['properties']['server'][
+    'properties'].update({'locked': {'type': 'boolean'}})
+update_server['response_body']['properties']['server'][
+    'required'].append('locked')
+
+rebuild_server = copy.deepcopy(servers_21.rebuild_server)
+rebuild_server['response_body']['properties']['server'][
+    'properties'].update({'locked': {'type': 'boolean'}})
+rebuild_server['response_body']['properties']['server'][
+    'required'].append('locked')
+
+rebuild_server_with_admin_pass = copy.deepcopy(
+    servers_21.rebuild_server_with_admin_pass)
+rebuild_server_with_admin_pass['response_body']['properties']['server'][
+    'properties'].update({'locked': {'type': 'boolean'}})
+rebuild_server_with_admin_pass['response_body']['properties']['server'][
+    'required'].append('locked')
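
The new v2_9 update_server and rebuild_server schemas make 'locked' a required boolean in the validated response body. As a rough illustration of how such schema dicts are consumed, the snippet below validates a sample "update server" body with the jsonschema library directly (Tempest's rest client performs the equivalent check); the schema shown is a trimmed stand-in, not the real one:

import jsonschema

# Trimmed stand-in for the extended update_server response_body schema.
response_body = {
    'type': 'object',
    'properties': {
        'server': {
            'type': 'object',
            'properties': {
                'id': {'type': 'string'},
                'locked': {'type': 'boolean'},
            },
            'required': ['id', 'locked'],
        },
    },
    'required': ['server'],
}

# A v2.9+ response that includes 'locked' validates cleanly.
jsonschema.validate({'server': {'id': 'abc123', 'locked': False}}, response_body)

# A response missing 'locked' is rejected, which is what marking the field
# as required buys the compute API tests.
try:
    jsonschema.validate({'server': {'id': 'abc123'}}, response_body)
except jsonschema.ValidationError:
    print("missing 'locked' rejected")
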