Merge "Force os_primary and os_project_member to be the same" into mcp/epoxy
diff --git a/requirements.txt b/requirements.txt
index a9046fb..845422f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -22,3 +22,4 @@
 fasteners>=0.16.0 # Apache-2.0
 testscenarios>=0.5.0
 tenacity>=4.4.0 # Apache-2.0
+websocket-client # LGPLv2+
diff --git a/tempest/api/compute/admin/test_create_server.py b/tempest/api/compute/admin/test_create_server.py
index 3cc2928..aa358d7 100644
--- a/tempest/api/compute/admin/test_create_server.py
+++ b/tempest/api/compute/admin/test_create_server.py
@@ -172,3 +172,60 @@
         console_log = self.client.get_console_output(server['id'])['output']
         self.assertTrue(console_log, "Console output was empty.")
         self.assertIn(uefi_boot_loader, console_log)
+
+
+class WindowsServersBaseTest(base.BaseV2ComputeAdminTest):
+    """Test Windows OS guest servers"""
+
+    image_id = None
+    flavor_id = None
+
+    @classmethod
+    def skip_checks(cls):
+        super(WindowsServersBaseTest, cls).skip_checks()
+
+        if not (cls.image_id and cls.flavor_id):
+            skip_msg = ("Environment is not prepared for testing "
+                        "Windows servers")
+            raise cls.skipException(skip_msg)
+
+    @classmethod
+    def setup_credentials(cls):
+        cls.prepare_instance_network()
+        super(WindowsServersBaseTest, cls).setup_credentials()
+
+    @classmethod
+    def setup_clients(cls):
+        super(WindowsServersBaseTest, cls).setup_clients()
+        cls.client = cls.servers_client
+
+    def _test_create_server(self):
+        # Create the server and wait for it to become ready
+        validation_resources = self.get_class_validation_resources(
+            self.os_primary)
+        self.create_test_server(
+            image_id=self.image_id,
+            flavor=self.flavor_id,
+            validatable=True,
+            validation_resources=validation_resources,
+            wait_until='PINGABLE')
+
+
+class WindowsServers10Test(WindowsServersBaseTest):
+
+    image_id = CONF.compute.windows10_image_ref
+    flavor_id = CONF.compute.windows10_flavor_ref
+
+    @decorators.idempotent_id('4d54bcfa-08d3-48eb-b7a1-3568db4fc607')
+    def test_create_server(self):
+        self._test_create_server()
+
+
+class WindowsServers11Test(WindowsServersBaseTest):
+
+    image_id = CONF.compute.windows11_image_ref
+    flavor_id = CONF.compute.windows11_flavor_ref
+
+    @decorators.idempotent_id('1cff7fea-f251-4a05-a667-9b946913a3c5')
+    def test_create_server(self):
+        self._test_create_server()
diff --git a/tempest/api/compute/admin/test_live_migration.py b/tempest/api/compute/admin/test_live_migration.py
index f6a1ae9..20fa7bd 100644
--- a/tempest/api/compute/admin/test_live_migration.py
+++ b/tempest/api/compute/admin/test_live_migration.py
@@ -381,3 +381,6 @@
     min_microversion = '2.25'
     max_microversion = 'latest'
     block_migration = 'auto'
+
+    if CONF.compute_feature_enabled.volume_multiattach:
+        min_microversion = '2.60'
diff --git a/tempest/api/compute/admin/test_migrations.py b/tempest/api/compute/admin/test_migrations.py
index fa8a737..6b27355 100644
--- a/tempest/api/compute/admin/test_migrations.py
+++ b/tempest/api/compute/admin/test_migrations.py
@@ -38,8 +38,9 @@
         self.client.list_migrations()
 
     @decorators.idempotent_id('1b512062-8093-438e-b47a-37d2f597cd64')
-    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
-                          'Resize not available.')
+    @testtools.skipIf(not (CONF.compute_feature_enabled.cold_migration and
+                           CONF.compute_feature_enabled.resize),
+                      'Cold migration/Resize not available.')
     def test_list_migrations_in_flavor_resize_situation(self):
         """Admin can get the migrations list containing the resized server"""
         server = self.create_test_server(wait_until="ACTIVE")
@@ -60,8 +61,9 @@
             pass
 
     @decorators.idempotent_id('33f1fec3-ba18-4470-8e4e-1d888e7c3593')
-    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
-                          'Resize not available.')
+    @testtools.skipIf(not (CONF.compute_feature_enabled.cold_migration and
+                           CONF.compute_feature_enabled.resize),
+                      'Cold migration/Resize not available.')
     def test_resize_server_revert_deleted_flavor(self):
         """Test reverting resized server with original flavor deleted
 
diff --git a/tempest/api/compute/admin/test_servers_negative.py b/tempest/api/compute/admin/test_servers_negative.py
index c933c80..0d0e164 100644
--- a/tempest/api/compute/admin/test_servers_negative.py
+++ b/tempest/api/compute/admin/test_servers_negative.py
@@ -43,8 +43,9 @@
         cls.s1_id = server['id']
 
     @decorators.idempotent_id('28dcec23-f807-49da-822c-56a92ea3c687')
-    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
-                          'Resize not available.')
+    @testtools.skipIf(not (CONF.compute_feature_enabled.cold_migration and
+                           CONF.compute_feature_enabled.resize),
+                      'Cold migration/Resize not available.')
     @decorators.attr(type=['negative'])
     def test_resize_server_using_overlimit_ram(self):
         """Test resizing server using over limit ram should fail"""
@@ -78,8 +79,9 @@
                           self.s1_id)
 
     @decorators.idempotent_id('7368a427-2f26-4ad9-9ba9-911a0ec2b0db')
-    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
-                          'Resize not available.')
+    @testtools.skipIf(not (CONF.compute_feature_enabled.cold_migration and
+                           CONF.compute_feature_enabled.resize),
+                      'Cold migration/Resize not available.')
     @decorators.attr(type=['negative'])
     def test_resize_server_using_overlimit_vcpus(self):
         """Test resizing server using over limit vcpus should fail"""
diff --git a/tempest/api/compute/admin/test_servers_on_multinodes.py b/tempest/api/compute/admin/test_servers_on_multinodes.py
index e0290e4..db19522 100644
--- a/tempest/api/compute/admin/test_servers_on_multinodes.py
+++ b/tempest/api/compute/admin/test_servers_on_multinodes.py
@@ -146,6 +146,8 @@
     def skip_checks(cls):
         super(UnshelveToHostMultiNodesTest, cls).skip_checks()
 
+        if not CONF.compute_feature_enabled.shelve:
+            raise cls.skipException("Shelve is not available.")
         if CONF.compute.min_compute_nodes < 2:
             raise cls.skipException(
                 "Less than 2 compute nodes, skipping multi-nodes test.")
diff --git a/tempest/api/compute/admin/test_volume.py b/tempest/api/compute/admin/test_volume.py
index b0f20f6..9092f9c 100644
--- a/tempest/api/compute/admin/test_volume.py
+++ b/tempest/api/compute/admin/test_volume.py
@@ -43,6 +43,9 @@
 class AttachSCSIVolumeTestJSON(BaseAttachSCSIVolumeTest):
     """Test attaching scsi volume to server"""
 
+    if CONF.compute_feature_enabled.volume_multiattach:
+        min_microversion = '2.60'
+
     @testtools.skipIf(
         CONF.compute_feature_enabled.barbican_integration_enabled,
         "Not supported when barbican integration enabled.")
diff --git a/tempest/api/compute/admin/test_volume_swap.py b/tempest/api/compute/admin/test_volume_swap.py
index 0d8370d..46500cc 100644
--- a/tempest/api/compute/admin/test_volume_swap.py
+++ b/tempest/api/compute/admin/test_volume_swap.py
@@ -30,16 +30,34 @@
         super(TestVolumeSwapBase, cls).setup_credentials()
 
     @classmethod
+    def setup_clients(cls):
+        super(TestVolumeSwapBase, cls).setup_clients()
+
+        cls.volume_type_client = cls.os_admin.volume_types_client_latest
+        cls.encryption_client = cls.os_admin.encryption_types_client_latest
+
+    @classmethod
     def skip_checks(cls):
         super(TestVolumeSwapBase, cls).skip_checks()
         if not CONF.service_available.cinder:
             raise cls.skipException("Cinder is not available")
         if not CONF.compute_feature_enabled.swap_volume:
             raise cls.skipException("Swapping volumes is not supported.")
-        if CONF.compute_feature_enabled.attach_encrypted_volume:
-            raise cls.skipException(
-                'Volume swap is not available for OS configurations '
-                'with crypted volumes.')
+
+    @classmethod
+    def _check_default_volume_type(cls):
+        default_volume_type = cls.volume_type_client.\
+            show_default_volume_type()["volume_type"]["id"]
+        volume_encryption = cls.encryption_client.show_encryption_type(
+            default_volume_type)
+        if volume_encryption and volume_encryption.get("provider"):
+            raise cls.skipException("Not allowed to run this test with "
+                                    "encrypted volume")
+
+    @classmethod
+    def resource_setup(cls):
+        cls._check_default_volume_type()
+        super(TestVolumeSwapBase, cls).resource_setup()
 
     def wait_for_server_volume_swap(self, server_id, old_volume_id,
                                     new_volume_id):
@@ -80,6 +98,9 @@
 class TestVolumeSwap(TestVolumeSwapBase):
     """The test suite for swapping of volume with admin user"""
 
+    if CONF.compute_feature_enabled.volume_multiattach:
+        min_microversion = '2.60'
+
     # NOTE(mriedem): This is an uncommon scenario to call the compute API
     # to swap volumes directly; swap volume is primarily only for volume
     # live migration and retype callbacks from the volume service, and is slow
diff --git a/tempest/api/compute/admin/test_volumes_negative.py b/tempest/api/compute/admin/test_volumes_negative.py
index 8b07d9a..32892a6 100644
--- a/tempest/api/compute/admin/test_volumes_negative.py
+++ b/tempest/api/compute/admin/test_volumes_negative.py
@@ -27,6 +27,10 @@
 
     create_default_network = True
 
+    if CONF.compute_feature_enabled.volume_multiattach:
+        min_microversion = '2.60'
+        max_microversion = 'latest'
+
     @classmethod
     def setup_credentials(cls):
         cls.prepare_instance_network()
diff --git a/tempest/api/compute/images/test_images.py b/tempest/api/compute/images/test_images.py
index a90d500..8ce4434 100644
--- a/tempest/api/compute/images/test_images.py
+++ b/tempest/api/compute/images/test_images.py
@@ -157,6 +157,9 @@
         self.addCleanup(self.client.delete_image, image['id'])
         self.assertEqual(snapshot_name, image['name'])
 
+    @testtools.skipIf(
+        CONF.compute_feature_enabled.barbican_integration_enabled,
+        "Not supported when barbican integration enabled.")
     @decorators.idempotent_id('f3cac456-e3fe-4183-a7a7-a59f7f017088')
     def test_create_server_from_snapshot(self):
         # Create one server normally
diff --git a/tempest/api/compute/servers/test_console.py b/tempest/api/compute/servers/test_console.py
new file mode 100644
index 0000000..0cbeb41
--- /dev/null
+++ b/tempest/api/compute/servers/test_console.py
@@ -0,0 +1,327 @@
+# Copyright 2016-2017 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import ssl
+import struct
+import urllib.parse as urlparse
+import urllib3
+import websocket
+
+from tempest.api.compute import base
+from tempest.common import compute
+from tempest import config
+from tempest.lib import decorators
+
+CONF = config.CONF
+
+
+class ConsoleTestBase(base.BaseV2ComputeTest):
+    create_default_network = True
+
+    def setUp(self):
+        super(ConsoleTestBase, self).setUp()
+        self._websocket = None
+
+    def tearDown(self):
+        super(ConsoleTestBase, self).tearDown()
+        if self._websocket is not None:
+            self._websocket.close()
+        # NOTE(zhufl): Because server_check_teardown may raise an Exception
+        # which would prevent other cleanup steps from being executed,
+        # server_check_teardown should be called after super's tearDown.
+        self.server_check_teardown()
+
+    @classmethod
+    def setup_clients(cls):
+        super(ConsoleTestBase, cls).setup_clients()
+        cls.client = cls.servers_client
+
+    @classmethod
+    def resource_setup(cls):
+        super(ConsoleTestBase, cls).resource_setup()
+        cls.server = cls.create_test_server(wait_until="ACTIVE")
+        cls.use_get_remote_console = False
+        if not cls.is_requested_microversion_compatible("2.5"):
+            cls.use_get_remote_console = True
+
+    @property
+    def cert_params(self):
+        ssl_opt = {}
+        if CONF.identity.disable_ssl_certificate_validation:
+            ssl_opt["cert_reqs"] = ssl.CERT_NONE
+        else:
+            ssl_opt["ca_certs"] = CONF.identity.ca_certificates_file
+        return ssl_opt
+
+    def _validate_html(self, url, js_title):
+        """Verify we can connect to console and get back the javascript."""
+
+        resp = urllib3.PoolManager(**self.cert_params).request("GET", url)
+        # Make sure that the GET request was accepted by the console proxy
+        self.assertEqual(
+            resp.status,
+            200,
+            "Got a Bad HTTP Response on the "
+            "initial call: " + str(resp.status),
+        )
+        # Do some basic validation to make sure it is an expected HTML document
+        resp_data = resp.data.decode()
+        # This is needed in the case of example: <html lang="en">
+        self.assertRegex(
+            resp_data, "<html.*>", "Not a valid html document in the response."
+        )
+        self.assertIn(
+            "</html>", resp_data, "Not a valid html document in the response."
+        )
+        # Just try to make sure we got JavaScript back for console, since we
+        # won't actually use it since not inside of a browser
+        self.assertIn(
+            js_title,
+            resp_data,
+            "Not a valid console javascript html document.",
+        )
+        self.assertIn(
+            "<script",
+            resp_data,
+            "Not a valid console javascript html document.",
+        )
+
+    def _validate_websocket_upgrade(self):
+        """Verify that the websocket upgrade was successful.
+
+        Parses response and ensures that required response
+        fields are present and accurate.
+        (https://tools.ietf.org/html/rfc7231#section-6.2.2)
+        """
+
+        self.assertTrue(
+            self._websocket.response.startswith(
+                b"HTTP/1.1 101 Switching Protocols"
+            ),
+            "Incorrect HTTP return status code: {}".format(
+                str(self._websocket.response)
+            ),
+        )
+        _required_header = "upgrade: websocket"
+        _response = str(self._websocket.response).lower()
+        self.assertIn(
+            _required_header,
+            _response,
+            "Did not get the expected WebSocket HTTP Response.",
+        )
+
+    def _get_console_body(self, type, protocol, get_console):
+        if self.use_get_remote_console:
+            return self.client.get_remote_console(
+                self.server["id"], type=type, protocol=protocol
+            )["remote_console"]
+        return getattr(self.client, get_console)(self.server["id"], type=type)[
+            "console"
+        ]
+
+    def _test_console_bad_token(self, type, protocol, get_console):
+        body = self._get_console_body(type, protocol, get_console)
+        self.assertEqual(type, body["type"])
+        # Do the WebSockify HTTP Request to console proxy with a bad token
+        parts = urlparse.urlparse(body["url"])
+        qparams = urlparse.parse_qs(parts.query)
+        if "path" in qparams:
+            qparams["path"] = urlparse.unquote(qparams["path"][0]).replace(
+                "token=", "token=bad"
+            )
+        elif "token" in qparams:
+            qparams["token"] = "bad" + qparams["token"][0]
+        new_query = urlparse.urlencode(qparams)
+        new_parts = urlparse.ParseResult(
+            parts.scheme,
+            parts.netloc,
+            parts.path,
+            parts.params,
+            new_query,
+            parts.fragment,
+        )
+        url = urlparse.urlunparse(new_parts)
+        self._websocket = compute.create_websocket(url)
+        # Make sure the console proxy rejected the connection and closed it
+        data = self._websocket.receive_frame()
+        self.assertTrue(
+            data is None or not data,
+            "The console proxy actually sent us some data, but we "
+            "expected it to close the connection.",
+        )
+
+
+class NoVNCConsoleTestJSON(ConsoleTestBase):
+    """Test novnc console"""
+
+    @classmethod
+    def skip_checks(cls):
+        super(NoVNCConsoleTestJSON, cls).skip_checks()
+        if not CONF.compute_feature_enabled.vnc_console:
+            raise cls.skipException("VNC Console feature is disabled.")
+
+    def _validate_rfb_negotiation(self):
+        """Verify we can connect to novnc and do the websocket connection."""
+        # Turn the Socket into a WebSocket to do the communication
+        data = self._websocket.receive_frame()
+        self.assertFalse(
+            data is None or not data,
+            "Token must be invalid because the connection closed.",
+        )
+        # Parse the RFB version from the data to make sure it is valid
+        # and belong to the known supported RFB versions.
+        version = float(
+            "%d.%d" % (int(data[4:7], base=10), int(data[8:11], base=10))
+        )
+        # Add the max RFB versions supported
+        supported_versions = [3.3, 3.8]
+        self.assertIn(
+            version, supported_versions, "Bad RFB Version: " + str(version)
+        )
+        # Send our RFB version to the server
+        self._websocket.send_frame(data)
+        # Get the server authentication type and make sure None is supported
+        data = self._websocket.receive_frame()
+        self.assertIsNotNone(data, "Expected authentication type None.")
+        data_length = len(data)
+        if version == 3.3:
+            # For RFB 3.3: in the security handshake, rather than a two-way
+            # negotiation, the server decides the security type and sends a
+            # single word(4 bytes).
+            self.assertEqual(
+                data_length, 4, "Expected authentication type None."
+            )
+            self.assertIn(
+                1,
+                [int(data[i]) for i in (0, 3)],
+                "Expected authentication type None.",
+            )
+        else:
+            self.assertGreaterEqual(
+                len(data), 2, "Expected authentication type None."
+            )
+            self.assertIn(
+                1,
+                [int(data[i + 1]) for i in range(int(data[0]))],
+                "Expected authentication type None.",
+            )
+            # Send to the server that we only support authentication
+            # type None
+            self._websocket.send_frame(bytes((1,)))
+
+            # The server should send 4 bytes of 0's if security
+            # handshake succeeded
+            data = self._websocket.receive_frame()
+            self.assertEqual(
+                len(data), 4, "Server did not think security was successful."
+            )
+            self.assertEqual(
+                [int(i) for i in data],
+                [0, 0, 0, 0],
+                "Server did not think security was successful.",
+            )
+
+        # Say to leave the desktop as shared as part of client initialization
+        self._websocket.send_frame(bytes((1,)))
+        # Get the server initialization packet back and make sure it is the
+        # right structure where bytes 20-24 is the name length and
+        # 24-N is the name
+        data = self._websocket.receive_frame()
+        data_length = len(data) if data is not None else 0
+        self.assertFalse(
+            data_length <= 24 or
+            data_length != (struct.unpack(">L", data[20:24])[0] + 24),
+            "Server initialization was not the right format.",
+        )
+        # Since the rest of the data on the screen is arbitrary, we will
+        # close the socket and end our validation of the data at this point
+        # Assert that the latest check was false, meaning that the server
+        # initialization was the right format
+        self.assertFalse(
+            data_length <= 24 or
+            data_length != (struct.unpack(">L", data[20:24])[0] + 24)
+        )
+
+    @decorators.idempotent_id("c640fdff-8ab4-45a4-a5d8-7e6146cbd0dc")
+    def test_novnc(self):
+        """Test accessing novnc console of server"""
+        body = self._get_console_body("novnc", "vnc", "get_vnc_console")
+        self.assertEqual("novnc", body["type"])
+        # Do the initial HTTP Request to novncproxy to get the JavaScript
+        self._validate_html(body["url"], "noVNC")
+        # Do the WebSockify HTTP Request to novncproxy to do the RFB connection
+        self._websocket = compute.create_websocket(body["url"])
+        # Validate that we successfully connected and upgraded to Web Sockets
+        self._validate_websocket_upgrade()
+        # Validate the RFB Negotiation to determine if a valid VNC session
+        self._validate_rfb_negotiation()
+
+    @decorators.idempotent_id("f9c79937-addc-4aaa-9e0e-841eef02aeb7")
+    def test_novnc_bad_token(self):
+        """Test accessing novnc console with bad token
+
+        Do the WebSockify HTTP Request to novnc proxy with a bad token,
+        the novnc proxy should reject the connection and close it.
+        """
+        self._test_console_bad_token("novnc", "vnc", "get_vnc_console")
+
+
+class SpiceConsoleTestJSON(ConsoleTestBase):
+    """Test spice console"""
+
+    @classmethod
+    def skip_checks(cls):
+        super(SpiceConsoleTestJSON, cls).skip_checks()
+        if not CONF.compute_feature_enabled.spice_console:
+            raise cls.skipException("SPICE Console feature is disabled.")
+
+    def _validate_websocket_connection(self, body):
+        # Protocol Magic number UINT8[4] { 0x52, 0x45, 0x44, 0x51} // "REDQ"
+        spice_magic = b"REDQ"
+        scheme = {"https": "wss", "http": "ws"}
+
+        q = urlparse.urlparse(body["url"])
+        ws = websocket.WebSocket(sslopt=self.cert_params)
+        ws.connect(
+            f"{scheme[q.scheme]}://{q.netloc}/websockify", cookie=q.query,
+            subprotocols=["binary"]
+        )
+        ws.send_binary(b"\r\n\r\n")
+        opcode, data = ws.recv_data()
+        self.assertEqual(opcode, websocket.ABNF.OPCODE_BINARY)
+        self.assertTrue(data.startswith(spice_magic))
+
+    @decorators.idempotent_id("0914a681-72dd-4fad-8457-b45195373d3d")
+    def test_spice(self):
+        """Test accessing spice console of server"""
+        body = self._get_console_body(
+            "spice-html5", "spice", "get_spice_console"
+        )
+        self.assertEqual("spice-html5", body["type"])
+        # Do the initial HTTP Request to spiceproxy to get the JavaScript
+        self._validate_html(body["url"], "Spice Javascript client")
+        # Validate that we successfully connected to Web Sockets
+        self._validate_websocket_connection(body)
+
+    @decorators.idempotent_id("6f4b0690-d078-4a28-a2ce-33dafdfca7ac")
+    def test_spice_bad_token(self):
+        """Test accessing spice console with bad token
+
+        Do the WebSockify HTTP Request to spice proxy with a bad token,
+        the spice proxy should reject the connection and close it.
+        """
+        self._test_console_bad_token(
+            "spice-html5", "spice", "get_spice_console"
+        )
diff --git a/tempest/api/compute/servers/test_delete_server.py b/tempest/api/compute/servers/test_delete_server.py
index 596d2bd..bbaf21d 100644
--- a/tempest/api/compute/servers/test_delete_server.py
+++ b/tempest/api/compute/servers/test_delete_server.py
@@ -29,6 +29,9 @@
     """Test deleting servers in various states"""
     create_default_network = True
 
+    if CONF.compute_feature_enabled.volume_multiattach:
+        min_microversion = '2.60'
+
     # NOTE: Server creations of each test class should be under 10
     # for preventing "Quota exceeded for instances"
 
@@ -94,8 +97,9 @@
         waiters.wait_for_server_termination(self.client, server['id'])
 
     @decorators.idempotent_id('ab0c38b4-cdd8-49d3-9b92-0cb898723c01')
-    @testtools.skipIf(not CONF.compute_feature_enabled.resize,
-                      'Resize not available.')
+    @testtools.skipIf(not (CONF.compute_feature_enabled.cold_migration and
+                           CONF.compute_feature_enabled.resize),
+                      'Cold migration/Resize not available.')
     def test_delete_server_while_in_verify_resize_state(self):
         """Test deleting a server while it's VM state is VERIFY_RESIZE"""
         server = self.create_test_server(wait_until='ACTIVE')
diff --git a/tempest/api/compute/servers/test_device_tagging.py b/tempest/api/compute/servers/test_device_tagging.py
index d2fdd52..18f9652 100644
--- a/tempest/api/compute/servers/test_device_tagging.py
+++ b/tempest/api/compute/servers/test_device_tagging.py
@@ -83,6 +83,10 @@
     # 2.32 and 2.36 inclusive; the 2.37 microversion broke tags for networks.
     max_microversion = '2.32'
 
+    if CONF.compute_feature_enabled.volume_multiattach:
+        min_microversion = '2.60'
+        max_microversion = 'latest'
+
     def verify_device_metadata(self, md_json):
         try:
             md_dict = json.loads(md_json)
@@ -161,6 +165,13 @@
         subnet2 = self.subnets_client.create_subnet(
             network_id=net2['id'],
             cidr='10.2.2.0/24',
+            # Add allocation pool to prevent IP address conflicts.
+            allocation_pools=[
+                {
+                    "start": "10.2.2.10",
+                    "end": "10.2.2.90"
+                }
+            ],
             ip_version=4)['subnet']
         self.addCleanup(self.subnets_client.delete_subnet, subnet2['id'])
 
@@ -297,6 +308,10 @@
     min_microversion = '2.42'
     max_microversion = 'latest'
 
+    if CONF.compute_feature_enabled.volume_multiattach:
+        min_microversion = '2.60'
+        max_microversion = 'latest'
+
 
 class TaggedAttachmentsTest(DeviceTaggingBase):
     """Test tagged attachments with compute microversion greater than 2.48"""
@@ -304,6 +319,10 @@
     min_microversion = '2.49'
     max_microversion = 'latest'
 
+    if CONF.compute_feature_enabled.volume_multiattach:
+        min_microversion = '2.60'
+        max_microversion = 'latest'
+
     @classmethod
     def skip_checks(cls):
         super(TaggedAttachmentsTest, cls).skip_checks()
diff --git a/tempest/api/compute/servers/test_disk_config.py b/tempest/api/compute/servers/test_disk_config.py
index e5e051a..35c5619 100644
--- a/tempest/api/compute/servers/test_disk_config.py
+++ b/tempest/api/compute/servers/test_disk_config.py
@@ -88,8 +88,9 @@
         self.assertEqual('AUTO', server['OS-DCF:diskConfig'])
 
     @decorators.idempotent_id('414e7e93-45b5-44bc-8e03-55159c6bfc97')
-    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
-                          'Resize not available.')
+    @testtools.skipIf(not (CONF.compute_feature_enabled.cold_migration and
+                           CONF.compute_feature_enabled.resize),
+                      'Cold migration/Resize not available.')
     def test_resize_server_from_manual_to_auto(self):
         """A server should be resized from manual to auto disk config"""
         server = self.create_test_server(wait_until='ACTIVE')
@@ -104,8 +105,9 @@
         self.assertEqual('AUTO', server['OS-DCF:diskConfig'])
 
     @decorators.idempotent_id('693d16f3-556c-489a-8bac-3d0ca2490bad')
-    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
-                          'Resize not available.')
+    @testtools.skipIf(not (CONF.compute_feature_enabled.cold_migration and
+                           CONF.compute_feature_enabled.resize),
+                      'Cold migration/Resize not available.')
     def test_resize_server_from_auto_to_manual(self):
         """A server should be resized from auto to manual disk config"""
         server = self.create_test_server(wait_until='ACTIVE')
diff --git a/tempest/api/compute/servers/test_novnc.py b/tempest/api/compute/servers/test_novnc.py
deleted file mode 100644
index c90aea8..0000000
--- a/tempest/api/compute/servers/test_novnc.py
+++ /dev/null
@@ -1,238 +0,0 @@
-# Copyright 2016-2017 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import struct
-import urllib.parse as urlparse
-import urllib3
-
-from tempest.api.compute import base
-from tempest.common import compute
-from tempest import config
-from tempest.lib import decorators
-
-CONF = config.CONF
-
-
-class NoVNCConsoleTestJSON(base.BaseV2ComputeTest):
-    """Test novnc console"""
-
-    create_default_network = True
-
-    @classmethod
-    def skip_checks(cls):
-        super(NoVNCConsoleTestJSON, cls).skip_checks()
-        if not CONF.compute_feature_enabled.vnc_console:
-            raise cls.skipException('VNC Console feature is disabled.')
-
-    def setUp(self):
-        super(NoVNCConsoleTestJSON, self).setUp()
-        self._websocket = None
-
-    def tearDown(self):
-        super(NoVNCConsoleTestJSON, self).tearDown()
-        if self._websocket is not None:
-            self._websocket.close()
-        # NOTE(zhufl): Because server_check_teardown will raise Exception
-        # which will prevent other cleanup steps from being executed, so
-        # server_check_teardown should be called after super's tearDown.
-        self.server_check_teardown()
-
-    @classmethod
-    def setup_clients(cls):
-        super(NoVNCConsoleTestJSON, cls).setup_clients()
-        cls.client = cls.servers_client
-
-    @classmethod
-    def resource_setup(cls):
-        super(NoVNCConsoleTestJSON, cls).resource_setup()
-        cls.server = cls.create_test_server(wait_until="ACTIVE")
-        cls.use_get_remote_console = False
-        if not cls.is_requested_microversion_compatible('2.5'):
-            cls.use_get_remote_console = True
-
-    def _validate_novnc_html(self, vnc_url):
-        """Verify we can connect to novnc and get back the javascript."""
-        cert_params = {}
-
-        if CONF.identity.disable_ssl_certificate_validation:
-            cert_params['cert_reqs'] = "CERT_NONE"
-        else:
-            cert_params["cert_reqs"] = "CERT_REQUIRED"
-            cert_params["ca_certs"] = CONF.identity.ca_certificates_file
-
-        resp = urllib3.PoolManager(**cert_params).request('GET', vnc_url)
-        # Make sure that the GET request was accepted by the novncproxy
-        self.assertEqual(resp.status, 200, 'Got a Bad HTTP Response on the '
-                         'initial call: ' + str(resp.status))
-        # Do some basic validation to make sure it is an expected HTML document
-        resp_data = resp.data.decode()
-        # This is needed in the case of example: <html lang="en">
-        self.assertRegex(resp_data, '<html.*>',
-                         'Not a valid html document in the response.')
-        self.assertIn('</html>', resp_data,
-                      'Not a valid html document in the response.')
-        # Just try to make sure we got JavaScript back for noVNC, since we
-        # won't actually use it since not inside of a browser
-        self.assertIn('noVNC', resp_data,
-                      'Not a valid noVNC javascript html document.')
-        self.assertIn('<script', resp_data,
-                      'Not a valid noVNC javascript html document.')
-
-    def _validate_rfb_negotiation(self):
-        """Verify we can connect to novnc and do the websocket connection."""
-        # Turn the Socket into a WebSocket to do the communication
-        data = self._websocket.receive_frame()
-        self.assertFalse(data is None or not data,
-                         'Token must be invalid because the connection '
-                         'closed.')
-        # Parse the RFB version from the data to make sure it is valid
-        # and belong to the known supported RFB versions.
-        version = float("%d.%d" % (int(data[4:7], base=10),
-                                   int(data[8:11], base=10)))
-        # Add the max RFB versions supported
-        supported_versions = [3.3, 3.8]
-        self.assertIn(version, supported_versions,
-                      'Bad RFB Version: ' + str(version))
-        # Send our RFB version to the server
-        self._websocket.send_frame(data)
-        # Get the sever authentication type and make sure None is supported
-        data = self._websocket.receive_frame()
-        self.assertIsNotNone(data, 'Expected authentication type None.')
-        data_length = len(data)
-        if version == 3.3:
-            # For RFB 3.3: in the security handshake, rather than a two-way
-            # negotiation, the server decides the security type and sends a
-            # single word(4 bytes).
-            self.assertEqual(
-                data_length, 4, 'Expected authentication type None.')
-            self.assertIn(1, [int(data[i]) for i in (0, 3)],
-                          'Expected authentication type None.')
-        else:
-            self.assertGreaterEqual(
-                len(data), 2, 'Expected authentication type None.')
-            self.assertIn(
-                1,
-                [int(data[i + 1]) for i in range(int(data[0]))],
-                'Expected authentication type None.')
-            # Send to the server that we only support authentication
-            # type None
-            self._websocket.send_frame(bytes((1,)))
-
-            # The server should send 4 bytes of 0's if security
-            # handshake succeeded
-            data = self._websocket.receive_frame()
-            self.assertEqual(
-                len(data), 4,
-                'Server did not think security was successful.')
-            self.assertEqual(
-                [int(i) for i in data], [0, 0, 0, 0],
-                'Server did not think security was successful.')
-
-        # Say to leave the desktop as shared as part of client initialization
-        self._websocket.send_frame(bytes((1,)))
-        # Get the server initialization packet back and make sure it is the
-        # right structure where bytes 20-24 is the name length and
-        # 24-N is the name
-        data = self._websocket.receive_frame()
-        data_length = len(data) if data is not None else 0
-        self.assertFalse(data_length <= 24 or
-                         data_length != (struct.unpack(">L",
-                                                       data[20:24])[0] + 24),
-                         'Server initialization was not the right format.')
-        # Since the rest of the data on the screen is arbitrary, we will
-        # close the socket and end our validation of the data at this point
-        # Assert that the latest check was false, meaning that the server
-        # initialization was the right format
-        self.assertFalse(data_length <= 24 or
-                         data_length != (struct.unpack(">L",
-                                                       data[20:24])[0] + 24))
-
-    def _validate_websocket_upgrade(self):
-        """Verify that the websocket upgrade was successful.
-
-        Parses response and ensures that required response
-        fields are present and accurate.
-        (https://tools.ietf.org/html/rfc7231#section-6.2.2)
-        """
-
-        self.assertTrue(
-            self._websocket.response.startswith(b'HTTP/1.1 101 Switching '
-                                                b'Protocols'),
-            'Incorrect HTTP return status code: {}'.format(
-                str(self._websocket.response)
-            )
-        )
-        _required_header = 'upgrade: websocket'
-        _response = str(self._websocket.response).lower()
-        self.assertIn(
-            _required_header,
-            _response,
-            'Did not get the expected WebSocket HTTP Response.'
-        )
-
-    @decorators.idempotent_id('c640fdff-8ab4-45a4-a5d8-7e6146cbd0dc')
-    def test_novnc(self):
-        """Test accessing novnc console of server"""
-        if self.use_get_remote_console:
-            body = self.client.get_remote_console(
-                self.server['id'], console_type='novnc',
-                protocol='vnc')['remote_console']
-        else:
-            body = self.client.get_vnc_console(self.server['id'],
-                                               type='novnc')['console']
-        self.assertEqual('novnc', body['type'])
-        # Do the initial HTTP Request to novncproxy to get the NoVNC JavaScript
-        self._validate_novnc_html(body['url'])
-        # Do the WebSockify HTTP Request to novncproxy to do the RFB connection
-        self._websocket = compute.create_websocket(body['url'])
-        # Validate that we successfully connected and upgraded to Web Sockets
-        self._validate_websocket_upgrade()
-        # Validate the RFB Negotiation to determine if a valid VNC session
-        self._validate_rfb_negotiation()
-
-    @decorators.idempotent_id('f9c79937-addc-4aaa-9e0e-841eef02aeb7')
-    def test_novnc_bad_token(self):
-        """Test accessing novnc console with bad token
-
-        Do the WebSockify HTTP Request to novnc proxy with a bad token,
-        the novnc proxy should reject the connection and closed it.
-        """
-        if self.use_get_remote_console:
-            body = self.client.get_remote_console(
-                self.server['id'], console_type='novnc',
-                protocol='vnc')['remote_console']
-        else:
-            body = self.client.get_vnc_console(self.server['id'],
-                                               type='novnc')['console']
-        self.assertEqual('novnc', body['type'])
-        # Do the WebSockify HTTP Request to novncproxy with a bad token
-        parts = urlparse.urlparse(body['url'])
-        qparams = urlparse.parse_qs(parts.query)
-        if 'path' in qparams:
-            qparams['path'] = urlparse.unquote(qparams['path'][0]).replace(
-                'token=', 'token=bad')
-        elif 'token' in qparams:
-            qparams['token'] = 'bad' + qparams['token'][0]
-        new_query = urlparse.urlencode(qparams)
-        new_parts = urlparse.ParseResult(parts.scheme, parts.netloc,
-                                         parts.path, parts.params, new_query,
-                                         parts.fragment)
-        url = urlparse.urlunparse(new_parts)
-        self._websocket = compute.create_websocket(url)
-        # Make sure the novncproxy rejected the connection and closed it
-        data = self._websocket.receive_frame()
-        self.assertTrue(data is None or not data,
-                        "The novnc proxy actually sent us some data, but we "
-                        "expected it to close the connection.")
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index 938e4b4..5d4a5ed 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -37,6 +37,9 @@
 class ServerActionsBase(base.BaseV2ComputeTest):
     """Test server actions"""
 
+    if CONF.compute_feature_enabled.volume_multiattach:
+        min_microversion = '2.60'
+
     def setUp(self):
         # NOTE(afazekas): Normally we use the same server with all test cases,
         # but if it has an issue, we build a new one
@@ -292,15 +295,17 @@
             validation_resources=validation_resources)
 
     @decorators.idempotent_id('1499262a-9328-4eda-9068-db1ac57498d2')
-    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
-                          'Resize not available.')
+    @testtools.skipIf(not (CONF.compute_feature_enabled.cold_migration and
+                           CONF.compute_feature_enabled.resize),
+                      'Cold migration/Resize not available.')
     def test_resize_server_confirm(self):
         """Test resizing server and then confirming"""
         self._test_resize_server_confirm(self.server_id, stop=False)
 
     @decorators.idempotent_id('c03aab19-adb1-44f5-917d-c419577e9e68')
-    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
-                          'Resize not available.')
+    @testtools.skipIf(not (CONF.compute_feature_enabled.cold_migration and
+                           CONF.compute_feature_enabled.resize),
+                      'Cold migration/Resize not available.')
     def test_resize_server_revert(self):
         """Test resizing server and then reverting
 
@@ -490,8 +495,9 @@
 
     @decorators.idempotent_id('e6c28180-7454-4b59-b188-0257af08a63b')
     @decorators.related_bug('1728603')
-    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
-                          'Resize not available.')
+    @testtools.skipIf(not (CONF.compute_feature_enabled.cold_migration and
+                           CONF.compute_feature_enabled.resize),
+                      'Cold migration/Resize not available.')
     @utils.services('volume')
     def test_resize_volume_backed_server_confirm(self):
         """Test resizing a volume backed server and then confirming"""
@@ -528,16 +534,18 @@
 
 class ServerActionsTestOtherB(ServerActionsBase):
     @decorators.idempotent_id('138b131d-66df-48c9-a171-64f45eb92962')
-    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
-                          'Resize not available.')
+    @testtools.skipIf(not (CONF.compute_feature_enabled.cold_migration and
+                           CONF.compute_feature_enabled.resize),
+                      'Cold migration/Resize not available.')
     def test_resize_server_confirm_from_stopped(self):
         """Test resizing a stopped server and then confirming"""
         self._test_resize_server_confirm(self.server_id, stop=True)
 
     @decorators.idempotent_id('fbbf075f-a812-4022-bc5c-ccb8047eef12')
     @decorators.related_bug('1737599')
-    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
-                          'Resize not available.')
+    @testtools.skipIf(not (CONF.compute_feature_enabled.cold_migration and
+                           CONF.compute_feature_enabled.resize),
+                      'Cold migration/Resize not available.')
     @utils.services('volume')
     def test_resize_server_revert_with_volume_attached(self):
         """Test resizing a volume attached server and then reverting
@@ -880,7 +888,7 @@
         # 4.Plain username/password auth, if a password was given.
         linux_client = remote_client.RemoteClient(
             self.get_server_ip(server, self.validation_resources),
-            self.ssh_alt_user,
+            self.ssh_user,
             password=None,
             pkey=self.validation_resources['keypair']['private_key'],
             server=server,
diff --git a/tempest/api/compute/servers/test_server_rescue.py b/tempest/api/compute/servers/test_server_rescue.py
index 9404ebd..646f1cd 100644
--- a/tempest/api/compute/servers/test_server_rescue.py
+++ b/tempest/api/compute/servers/test_server_rescue.py
@@ -29,6 +29,9 @@
 class ServerRescueTestBase(base.BaseV2ComputeTest):
     create_default_network = True
 
+    if CONF.compute_feature_enabled.volume_multiattach:
+        min_microversion = '2.60'
+
     @classmethod
     def skip_checks(cls):
         super(ServerRescueTestBase, cls).skip_checks()
diff --git a/tempest/api/compute/servers/test_server_rescue_negative.py b/tempest/api/compute/servers/test_server_rescue_negative.py
index fd05ec6..ba1a7fc 100644
--- a/tempest/api/compute/servers/test_server_rescue_negative.py
+++ b/tempest/api/compute/servers/test_server_rescue_negative.py
@@ -30,6 +30,9 @@
 class ServerRescueNegativeTestJSON(base.BaseV2ComputeTest):
     """Negative tests of server rescue"""
 
+    if CONF.compute_feature_enabled.volume_multiattach:
+        min_microversion = '2.60'
+
     @classmethod
     def skip_checks(cls):
         super(ServerRescueNegativeTestJSON, cls).skip_checks()
diff --git a/tempest/api/compute/servers/test_servers_negative.py b/tempest/api/compute/servers/test_servers_negative.py
index fa40629..b9d4be22 100644
--- a/tempest/api/compute/servers/test_servers_negative.py
+++ b/tempest/api/compute/servers/test_servers_negative.py
@@ -128,8 +128,9 @@
                           self.create_test_server, accessIPv6=IPv6)
 
     @decorators.idempotent_id('7ea45b3e-e770-46fa-bfcc-9daaf6d987c0')
-    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
-                          'Resize not available.')
+    @testtools.skipIf(not (CONF.compute_feature_enabled.cold_migration and
+                           CONF.compute_feature_enabled.resize),
+                      'Cold migration/Resize not available.')
     @decorators.attr(type=['negative'])
     def test_resize_nonexistent_server(self):
         """Resizing a non-existent server should fail"""
@@ -139,8 +140,9 @@
                           nonexistent_server, self.flavor_ref)
 
     @decorators.idempotent_id('ced1a1d7-2ab6-45c9-b90f-b27d87b30efd')
-    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
-                          'Resize not available.')
+    @testtools.skipIf(not (CONF.compute_feature_enabled.cold_migration and
+                           CONF.compute_feature_enabled.resize),
+                      'Cold migration/Resize not available.')
     @decorators.attr(type=['negative'])
     def test_resize_server_with_non_existent_flavor(self):
         """Resizing a server with non existent flavor should fail"""
@@ -149,8 +151,9 @@
                           self.server_id, flavor_ref=nonexistent_flavor)
 
     @decorators.idempotent_id('45436a7d-a388-4a35-a9d8-3adc5d0d940b')
-    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
-                          'Resize not available.')
+    @testtools.skipIf(not (CONF.compute_feature_enabled.cold_migration and
+                           CONF.compute_feature_enabled.resize),
+                      'Cold migration/Resize not available.')
     @decorators.attr(type=['negative'])
     def test_resize_server_with_null_flavor(self):
         """Resizing a server with null flavor should fail"""
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index 81b5c9d..21eef18 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -61,6 +61,9 @@
 class AttachVolumeTestJSON(BaseAttachVolumeTest):
     """Test attaching volume to server"""
 
+    if CONF.compute_feature_enabled.volume_multiattach:
+        min_microversion = '2.60'
+
     @decorators.idempotent_id('52e9045a-e90d-4c0d-9087-79d657faffff')
     # This test is conditionally marked slow if SSH validation is enabled.
     @decorators.attr(type='slow', condition=CONF.validation.run_validation)
@@ -372,8 +375,19 @@
         volume = self.volumes_client.show_volume(volume_id)['volume']
         attachments = volume['attachments']
         wait_status = 'in-use' if len(attachments) > 1 else 'available'
+        attachment_id = [
+            attachment["attachment_id"] for attachment in attachments
+            if attachment["server_id"] == server_id
+        ][0]
         # Now detach the volume from the given server.
         self.servers_client.detach_volume(server_id, volume_id)
+        # Wait for both Cinder and Nova to finish detaching the volume
+        waiters.wait_for_volume_attachment_remove_from_server(
+            self.servers_client, server_id, volume_id
+        )
+        waiters.wait_for_volume_attachment_remove(
+            self.volumes_client, volume_id, attachment_id
+        )
         # Now wait for the volume status to change.
         waiters.wait_for_volume_resource_status(
             self.volumes_client, volume_id, wait_status)
@@ -573,8 +587,9 @@
         self.volumes_client.wait_for_resource_deletion(volume['id'])
 
     @decorators.idempotent_id('f01c7169-a124-4fc7-ae60-5e380e247c9c')
-    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
-                          'Resize not available.')
+    @testtools.skipIf(not (CONF.compute_feature_enabled.cold_migration and
+                           CONF.compute_feature_enabled.resize),
+                      'Cold migration/Resize not available.')
     def test_resize_server_with_multiattached_volume(self):
         """Test resizing servers with multiattached volume
 
diff --git a/tempest/api/compute/volumes/test_attach_volume_negative.py b/tempest/api/compute/volumes/test_attach_volume_negative.py
index 43b4bf5..09bf488 100644
--- a/tempest/api/compute/volumes/test_attach_volume_negative.py
+++ b/tempest/api/compute/volumes/test_attach_volume_negative.py
@@ -23,6 +23,9 @@
 class AttachVolumeNegativeTest(test_attach_volume.BaseAttachVolumeTest):
     """Negative tests of volume attaching"""
 
+    if CONF.compute_feature_enabled.volume_multiattach:
+        min_microversion = '2.60'
+
     @decorators.attr(type=['negative'])
     @decorators.related_bug('1630783', status_code=500)
     @decorators.idempotent_id('a313b5cd-fbd0-49cc-94de-870e99f763c7')
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index 65e1181..cca804f 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -237,7 +237,13 @@
         """Test listing ports filtered by part of ip address string"""
         # Create network and subnet
         network = self._create_network()
-        subnet = self._create_subnet(network)
+        address = self.cidr
+        # Add allocation pool to prevent IP address conflicts.
+        pool_start = ipaddress.ip_address(str(address[2]))
+        pool_end = ipaddress.ip_address(str(address[8]))
+        allocation_pools = {'allocation_pools': [{'start': str(pool_start),
+                                                  'end': str(pool_end)}]}
+        subnet = self._create_subnet(network, **allocation_pools)
         # Get two IP addresses
         ip_address_1 = None
         ip_address_2 = None
@@ -245,9 +251,7 @@
         for ip in ip_network:
             if ip == ip_network.network_address:
                 continue
-            if self.ports_client.list_ports(
-                network_id=network['id'],
-                fixed_ips='ip_address=' + str(ip))['ports']:
+            if pool_start <= ip <= pool_end:
                 continue
             if ip_address_1 is None:
                 ip_address_1 = str(ip)
diff --git a/tempest/api/network/test_tags.py b/tempest/api/network/test_tags.py
index a0c6342..3e7b2d5 100644
--- a/tempest/api/network/test_tags.py
+++ b/tempest/api/network/test_tags.py
@@ -113,7 +113,10 @@
 
     # NOTE(felipemonteiro): The supported resource names are plural. Use
     # the singular case for the corresponding class resource object.
-    SUPPORTED_RESOURCES = ['subnets', 'ports', 'routers', 'subnetpools']
+    if config.is_tungstenfabric_backend_enabled():
+        SUPPORTED_RESOURCES = ['subnets', 'ports', 'routers']
+    else:
+        SUPPORTED_RESOURCES = ['subnets', 'ports', 'routers', 'subnetpools']
 
     @classmethod
     def skip_checks(cls):
@@ -134,6 +137,9 @@
         cls.port = cls.create_port(cls.network)
         cls.router = cls.create_router()
 
+        if config.is_tungstenfabric_backend_enabled():
+            return
+
         subnetpool_name = data_utils.rand_name(
             cls.__name__ + '-Subnetpool', prefix=CONF.resource_name_prefix)
         prefix = CONF.network.default_network
diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py
index 028bf1a..b45824e 100644
--- a/tempest/api/volume/admin/test_multi_backend.py
+++ b/tempest/api/volume/admin/test_multi_backend.py
@@ -30,8 +30,9 @@
             raise cls.skipException("Cinder multi-backend feature disabled")
 
         if len(set(CONF.volume.backend_names)) < 2:
-            raise cls.skipException("Requires at least two different "
-                                    "backend names")
+            raise cls.skipException(
+                "Requires at least two different "
+                "backend names")
 
     @classmethod
     def resource_setup(cls):
@@ -66,21 +67,34 @@
             extra_specs = {spec_key_with_prefix: backend_name_key}
         else:
             extra_specs = {spec_key_without_prefix: backend_name_key}
-        cls.create_volume_type(name=type_name,
-                               extra_specs=extra_specs)
+        cls.create_volume_type(
+            name=type_name, extra_specs=extra_specs)
+        # Pick up AZ from volume_type
+        services = cls.admin_volume_services_client.list_services()
+        vol_svrs = [
+            srv
+            for srv in services.get("services")
+            if srv["binary"] == "cinder-volume" and backend_name_key
+                                in srv["host"]
+        ]
+        vol_type_zone = vol_svrs[0]["zone"]
 
-        params = {'name': vol_name, 'volume_type': type_name,
-                  'size': CONF.volume.volume_size}
+        params = {
+            "name": vol_name,
+            "volume_type": type_name,
+            "size": CONF.volume.volume_size,
+            "availability_zone": vol_type_zone,
+        }
         cls.volume = cls.create_volume(**params)
         if with_prefix:
-            cls.volume_id_list_with_prefix.append(cls.volume['id'])
+            cls.volume_id_list_with_prefix.append(cls.volume["id"])
         else:
-            cls.volume_id_list_without_prefix.append(
-                cls.volume['id'])
-        waiters.wait_for_volume_resource_status(cls.admin_volume_client,
-                                                cls.volume['id'], 'available')
+            cls.volume_id_list_without_prefix.append(cls.volume["id"])
+        waiters.wait_for_volume_resource_status(
+            cls.admin_volume_client, cls.volume["id"], "available"
+        )
 
-    @decorators.idempotent_id('c1a41f3f-9dad-493e-9f09-3ff197d477cc')
+    @decorators.idempotent_id("c1a41f3f-9dad-493e-9f09-3ff197d477cc")
     def test_backend_name_reporting(self):
         """Test backend name reporting for volume when type is without prefix
 
@@ -92,7 +106,7 @@
         for volume_id in self.volume_id_list_without_prefix:
             self._test_backend_name_reporting_by_volume_id(volume_id)
 
-    @decorators.idempotent_id('f38e647f-ab42-4a31-a2e7-ca86a6485215')
+    @decorators.idempotent_id("f38e647f-ab42-4a31-a2e7-ca86a6485215")
     def test_backend_name_reporting_with_prefix(self):
         """Test backend name reporting for volume when type is with prefix
 
@@ -105,7 +119,7 @@
         for volume_id in self.volume_id_list_with_prefix:
             self._test_backend_name_reporting_by_volume_id(volume_id)
 
-    @decorators.idempotent_id('46435ab1-a0af-4401-8373-f14e66b0dd58')
+    @decorators.idempotent_id("46435ab1-a0af-4401-8373-f14e66b0dd58")
     def test_backend_name_distinction(self):
         """Test volume backend distinction when type is without prefix
 
@@ -116,7 +130,7 @@
         """
         self._test_backend_name_distinction(self.volume_id_list_without_prefix)
 
-    @decorators.idempotent_id('4236305b-b65a-4bfc-a9d2-69cb5b2bf2ed')
+    @decorators.idempotent_id("4236305b-b65a-4bfc-a9d2-69cb5b2bf2ed")
     def test_backend_name_distinction_with_prefix(self):
         """Test volume backend distinction when type is with prefix
 
@@ -128,28 +142,29 @@
         self._test_backend_name_distinction(self.volume_id_list_with_prefix)
 
     def _get_volume_host(self, volume_id):
-        return self.admin_volume_client.show_volume(
-            volume_id)['volume']['os-vol-host-attr:host']
+        return self.admin_volume_client.show_volume(volume_id)["volume"][
+            "os-vol-host-attr:host"
+        ]
 
     def _test_backend_name_reporting_by_volume_id(self, volume_id):
         # this test checks if os-vol-attr:host is populated correctly after
         # the multi backend feature has been enabled
         # if multi-backend is enabled: os-vol-attr:host should be like:
         # host@backend_name
-        volume = self.admin_volume_client.show_volume(volume_id)['volume']
+        volume = self.admin_volume_client.show_volume(volume_id)["volume"]
 
-        volume1_host = volume['os-vol-host-attr:host']
-        msg = ("multi-backend reporting incorrect values for volume %s" %
-               volume_id)
+        volume1_host = volume["os-vol-host-attr:host"]
+        msg = ("multi-backend reporting incorrect values for volume %s"
+               % volume_id)
         self.assertGreater(len(volume1_host.split("@")), 1, msg)
 
     def _test_backend_name_distinction(self, volume_id_list):
         # this test checks that the volumes created at setUp don't
         # belong to the same backend (if they are, than the
         # volume backend distinction is not working properly)
-        volume_hosts = [self._get_volume_host(volume) for volume in
-                        volume_id_list]
+        volume_hosts = [self._get_volume_host(volume)
+                        for volume in volume_id_list]
         # assert that volumes are each created on separate hosts:
-        msg = ("volumes %s were created in the same backend" % ", "
-               .join(volume_hosts))
+        msg = "volumes %s were created in the same backend" % ", ".join(
+            volume_hosts)
         self.assertCountEqual(volume_hosts, set(volume_hosts), msg)
diff --git a/tempest/api/volume/admin/test_volume_retype.py b/tempest/api/volume/admin/test_volume_retype.py
index 4cb2262..586111c 100644
--- a/tempest/api/volume/admin/test_volume_retype.py
+++ b/tempest/api/volume/admin/test_volume_retype.py
@@ -63,9 +63,10 @@
         src_vol = self.create_volume(volume_type=self.src_vol_type['name'],
                                      snapshot_id=snapshot['id'])
 
-        # Delete the snapshot
-        self.snapshots_client.delete_snapshot(snapshot['id'])
-        self.snapshots_client.wait_for_resource_deletion(snapshot['id'])
+        if not CONF.volume_feature_enabled.snapshot_locked_by_volume:
+            # Delete the snapshot
+            self.snapshots_client.delete_snapshot(snapshot['id'])
+            self.snapshots_client.wait_for_resource_deletion(snapshot['id'])
 
         return src_vol
 
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 7a08545..3d82c8f 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -267,6 +267,15 @@
             kwargs['validatable'] = True
 
         tenant_network = self.get_tenant_network()
+
+        # Since compute microversion 2.37 the 'networks' field is required
+        if (
+            self.compute_request_microversion >= '2.37' and
+            'networks' not in kwargs and
+            not kwargs.get("validatable", False)
+        ):
+            kwargs['networks'] = 'none'
+
         body, _ = compute.create_test_server(
             self.os_primary,
             tenant_network=tenant_network,
diff --git a/tempest/api/volume/test_volume_transfers.py b/tempest/api/volume/test_volume_transfers.py
index 62cb203..d4634e6 100644
--- a/tempest/api/volume/test_volume_transfers.py
+++ b/tempest/api/volume/test_volume_transfers.py
@@ -32,10 +32,23 @@
         cls.alt_client = cls.os_alt.volume_transfers_client_latest
         cls.alt_volumes_client = cls.os_alt.volumes_client_latest
         cls.adm_volumes_client = cls.os_admin.volumes_client_latest
+        cls.volume_type_client = cls.os_admin.volume_types_client_latest
+        cls.encryption_client = cls.os_admin.encryption_types_client_latest
+
+    def _check_default_volume_type(self):
+        resp = self.volume_type_client.show_default_volume_type()
+        default_volume_type = resp["volume_type"]["id"]
+        volume_encryption = self.encryption_client.show_encryption_type(
+            default_volume_type)
+        if volume_encryption and volume_encryption.get("provider"):
+            raise self.skipException("Not allowed to run this test with "
+                                     "encrypted volume")
 
     @decorators.idempotent_id('4d75b645-a478-48b1-97c8-503f64242f1a')
     def test_create_get_list_accept_volume_transfer(self):
         """Test creating, getting, listing and accepting of volume transfer"""
+        self._check_default_volume_type()
+
         # Create a volume first
         volume = self.create_volume()
         self.addCleanup(self.delete_volume,
@@ -77,6 +90,8 @@
     @decorators.idempotent_id('ab526943-b725-4c07-b875-8e8ef87a2c30')
     def test_create_list_delete_volume_transfer(self):
         """Test creating, listing and deleting volume transfer"""
+        self._check_default_volume_type()
+
         # Create a volume first
         volume = self.create_volume()
         self.addCleanup(self.delete_volume,
diff --git a/tempest/api/volume/test_volumes_backup.py b/tempest/api/volume/test_volumes_backup.py
index a3ba974..eb581ee 100644
--- a/tempest/api/volume/test_volumes_backup.py
+++ b/tempest/api/volume/test_volumes_backup.py
@@ -29,6 +29,8 @@
     """Test volumes backup"""
 
     create_default_network = True
+    if CONF.compute_feature_enabled.volume_multiattach:
+        min_microversion = '2.60'
 
     @classmethod
     def skip_checks(cls):
diff --git a/tempest/api/volume/test_volumes_extend.py b/tempest/api/volume/test_volumes_extend.py
index c766db8..02b8745 100644
--- a/tempest/api/volume/test_volumes_extend.py
+++ b/tempest/api/volume/test_volumes_extend.py
@@ -180,6 +180,9 @@
 
 class VolumesExtendAttachedTest(BaseVolumesExtendAttachedTest):
 
+    if CONF.compute_feature_enabled.volume_multiattach:
+        min_microversion = '2.60'
+
     @classmethod
     def skip_checks(cls):
         super(VolumesExtendAttachedTest, cls).skip_checks()
diff --git a/tempest/api/volume/test_volumes_negative.py b/tempest/api/volume/test_volumes_negative.py
index c405a50..bc29246 100644
--- a/tempest/api/volume/test_volumes_negative.py
+++ b/tempest/api/volume/test_volumes_negative.py
@@ -268,8 +268,7 @@
         """Test reserving already reserved volume should fail"""
 
         # Skip test if the volume has "multiattach" property
-        volume = self.volumes_client.show_volume(self.volume['id'])
-        if volume['multiattach']:
+        if self.volume['multiattach']:
             raise self.skipException('Reserving multiattach volumes is not'
                                      ' supported.')
 
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index 35afffd..3e06304 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -29,6 +29,9 @@
 
     create_default_network = True
 
+    if CONF.compute_feature_enabled.volume_multiattach:
+        min_microversion = '2.60'
+
     @classmethod
     def skip_checks(cls):
         super(VolumesSnapshotTestJSON, cls).skip_checks()
diff --git a/tempest/common/credentials_factory.py b/tempest/common/credentials_factory.py
index 2d486a7..449bb90 100644
--- a/tempest/common/credentials_factory.py
+++ b/tempest/common/credentials_factory.py
@@ -87,7 +87,8 @@
         ('create_networks', (CONF.auth.create_isolated_networks and not
                              CONF.network.shared_physical_network)),
         ('resource_prefix', 'tempest'),
-        ('identity_admin_endpoint_type', endpoint_type)
+        ('identity_admin_endpoint_type', endpoint_type),
+        ('networking_timeout_409', CONF.network.timeout_409)
     ]))
 
 
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 6801f97..a13aff4 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -713,6 +713,19 @@
     raise lib_exc.TimeoutException()
 
 
+def wait_for_cloudinit(ssh_client, timeout=60):
+    """Waits for cloud-init completed"""
+    start_time = int(time.time())
+    while int(time.time()) - start_time < timeout:
+        try:
+            ssh_client.check_cloudinit()
+            return
+        except Exception:
+            pass
+        time.sleep(5)
+    raise lib_exc.TimeoutException()
+
+
 def wait_for_caching(client, cache_client, image_id):
     """Waits until image is cached"""
     start = int(time.time())
diff --git a/tempest/config.py b/tempest/config.py
index 2e51eb3..3ccce49 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -323,12 +323,24 @@
                 help="A list of trusted certificates to be used when the "
                      "image certificate validation compute feature is "
                      "enabled."),
+    cfg.StrOpt('windows10_image_ref',
+               default=None,
+               help="Valid image reference to be used in Windows 10 tests."),
+    cfg.StrOpt('windows11_image_ref',
+               default=None,
+               help="Valid image reference to be used in Windows 11 tests."),
     cfg.StrOpt('flavor_ref',
                default="1",
                help="Valid primary flavor to use in tests."),
     cfg.StrOpt('flavor_ref_alt',
                default="2",
                help='Valid secondary flavor to be used in tests.'),
+    cfg.StrOpt('windows10_flavor_ref',
+               default=None,
+               help="Valid flavor to be used for Windows 10 tests."),
+    cfg.StrOpt('windows11_flavor_ref',
+               default=None,
+               help="Valid flavor to be used for Windows 11 tests."),
     cfg.IntOpt('build_interval',
                default=1,
                help="Time in seconds between build status checks."),
@@ -845,6 +857,10 @@
     cfg.IntOpt('service_ports_number',
                default=0,
                help="Number of neutron service ports created per network"),
+    cfg.IntOpt('timeout_409',
+               default=120,
+               help="Total time in seconds to keep retrying a request that "
+                    "returns HTTP 409 (Conflict)."),
 ]
 
 network_feature_group = cfg.OptGroup(name='network-feature-enabled',
@@ -954,7 +970,7 @@
                default="root",
                help="User name used to authenticate to an instance."),
     cfg.StrOpt('image_alt_ssh_user',
-               default="cirros",
+               default="root",
                help="User name used to authenticate to an alt instance."),
     cfg.StrOpt('image_ssh_password',
                default="password",
@@ -991,7 +1007,7 @@
                choices=['ecdsa', 'rsa'],
                help='Type of key to use for ssh connections.'),
     cfg.FloatOpt('allowed_network_downtime',
-                 default=5.0,
+                 default=10.0,
                  help="Allowed VM network connection downtime during live "
                       "migration, in seconds. "
                       "When the measured downtime exceeds this value, an "
@@ -1163,6 +1179,10 @@
     cfg.ListOpt('supported_crypto_providers',
                 default=['luks'],
                 help='A list of enabled cryptoproviders for volumes'),
+    cfg.BoolOpt('snapshot_locked_by_volume',
+                default=False,
+                help='Whether snapshot can be deleted, i.e. there is no '
+                     'volume dependent on (created from) it'),
 ]
 
 
@@ -1714,6 +1734,17 @@
     return _parameters
 
 
+def is_tungstenfabric_backend_enabled():
+    """Return True if TungstenFabric is used as a backend, else False."""
+    try:
+        sdn = getattr(CONF, 'sdn')
+        # Explicitly return False for any other SDN backend (falling
+        # through here would implicitly return None).
+        return getattr(sdn, 'service_name') == 'tungstenfabric'
+    except cfg.NoSuchOptError:
+        return False
+
+
 def _register_tempest_service_clients():
     # Register tempest own service clients using the same mechanism used
     # for external plugins.
diff --git a/tempest/lib/api_schema/response/compute/v2_1/servers.py b/tempest/lib/api_schema/response/compute/v2_1/servers.py
index 14e2d3b..e066f7b 100644
--- a/tempest/lib/api_schema/response/compute/v2_1/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_1/servers.py
@@ -425,6 +425,8 @@
     }
 }
 
+get_spice_console = get_vnc_console
+
 get_console_output = {
     'status_code': [200],
     'response_body': {
diff --git a/tempest/lib/api_schema/response/volume/volume_types.py b/tempest/lib/api_schema/response/volume/volume_types.py
index 51b3a72..4d09bcd 100644
--- a/tempest/lib/api_schema/response/volume/volume_types.py
+++ b/tempest/lib/api_schema/response/volume/volume_types.py
@@ -31,8 +31,7 @@
         'qos_specs_id': {'type': ['string', 'null'], 'format': 'uuid'}
     },
     'additionalProperties': False,
-    'required': ['name', 'is_public', 'description', 'id',
-                 'os-volume-type-access:is_public']
+    'required': ['name', 'is_public', 'description', 'id']
 }
 
 show_volume_type = {
diff --git a/tempest/lib/common/cred_provider.py b/tempest/lib/common/cred_provider.py
index 93b9586..48fef94 100644
--- a/tempest/lib/common/cred_provider.py
+++ b/tempest/lib/common/cred_provider.py
@@ -13,6 +13,8 @@
 #    limitations under the License.
 
 import abc
+import time
+
 from oslo_log import log as logging
 
 from tempest.lib import auth
@@ -141,11 +143,24 @@
             name="default")
         secgroups_to_delete = resp_body['security_groups']
         for secgroup in secgroups_to_delete:
-            try:
-                security_group_client.delete_security_group(secgroup['id'])
-            except exceptions.NotFound:
-                LOG.warning('Security group %s, id %s not found for clean-up',
-                            secgroup['name'], secgroup['id'])
+            # Workaround for PRODX-4003
+            start_time = time.time()
+            while True:
+                try:
+                    security_group_client.delete_security_group(secgroup['id'])
+                    break
+                except exceptions.NotFound:
+                    LOG.warning('Security group %s, id %s not found for '
+                                'clean-up', secgroup['name'], secgroup['id'])
+                    break
+                except exceptions.Conflict:
+                    LOG.warning('Conflict with state of security group %s, '
+                                'id %s.', secgroup['name'], secgroup['id'])
+                    elapsed = time.time() - start_time
+                    if elapsed > self.networking_timeout_409:
+                        raise
+                    else:
+                        time.sleep(5)
 
 
 class TestResources(object):
diff --git a/tempest/lib/common/dynamic_creds.py b/tempest/lib/common/dynamic_creds.py
index d4bd302..4e87f70 100644
--- a/tempest/lib/common/dynamic_creds.py
+++ b/tempest/lib/common/dynamic_creds.py
@@ -76,7 +76,8 @@
                  neutron_available=False, create_networks=True,
                  project_network_cidr=None, project_network_mask_bits=None,
                  public_network_id=None, resource_prefix=None,
-                 identity_admin_endpoint_type='public', identity_uri=None):
+                 identity_admin_endpoint_type='public', identity_uri=None,
+                 networking_timeout_409=120):
         super(DynamicCredentialProvider, self).__init__(
             identity_version=identity_version, identity_uri=identity_uri,
             admin_role=admin_role, name=name,
@@ -121,6 +122,7 @@
             self.roles_admin_client,
             self.domains_admin_client,
             self.creds_domain_name)
+        self.networking_timeout_409 = networking_timeout_409
 
     def _get_admin_clients(self, endpoint_type):
         """Returns a tuple with instances of the following admin clients
diff --git a/tempest/lib/common/preprov_creds.py b/tempest/lib/common/preprov_creds.py
index 9d1078c..1583d73 100644
--- a/tempest/lib/common/preprov_creds.py
+++ b/tempest/lib/common/preprov_creds.py
@@ -114,7 +114,7 @@
                       object_storage_operator_role=None,
                       object_storage_reseller_admin_role=None):
         hash_dict = {'roles': {}, 'creds': {}, 'networks': {},
-                     'scoped_roles': {}}
+                     'scoped_roles': {}, 'projects': {}}
 
         # Loop over the accounts read from the yaml file
         for account in accounts:
@@ -180,6 +180,7 @@
                         'Unknown resource type %s, ignoring this field',
                         resource
                     )
+            hash_dict = cls._append_project(account, temp_hash_key, hash_dict)
         return hash_dict
 
     def is_multi_user(self):
@@ -246,6 +247,7 @@
             hashes = temp_list
         else:
             hashes = self.hash_dict['creds'].keys()
+        hashes = self._exclude_used_projects(hashes)
         # NOTE(mtreinish): admin is a special case because of the increased
         # privilege set which could potentially cause issues on tests where
         # that is not expected. So unless the admin role isn't specified do
@@ -321,7 +323,8 @@
     def get_alt_creds(self):
         if self._creds.get('alt'):
             return self._creds.get('alt')
-        net_creds = self._get_creds()
+        # NOTE(pas-ha) use the same call as get_project_member_creds
+        net_creds = self._get_creds(['member'], scope='project')
         self._creds['alt'] = net_creds
         return net_creds
 
@@ -491,3 +494,16 @@
             for attr in domain_fields.intersection(set(creds_dict.keys())):
                 creds_dict.pop(attr)
         return creds_dict
+
+    @classmethod
+    def _append_project(cls, account, account_hash, hash_dict):
+        key_to_add = account.get('project_name') or account.get('tenant_name')
+        hash_dict['projects'].setdefault(key_to_add, [])
+        hash_dict['projects'][key_to_add].append(account_hash)
+        return hash_dict
+
+    def _exclude_used_projects(self, hashes):
+        projects = self.hash_dict['projects']
+        used = (cred.tenant_name for cred in self._creds.values())
+        excluded = [h for p in used for h in projects.get(p, [])]
+        return set(hashes) - set(excluded)
diff --git a/tempest/lib/common/utils/linux/remote_client.py b/tempest/lib/common/utils/linux/remote_client.py
index bdf35e7..2f019ce 100644
--- a/tempest/lib/common/utils/linux/remote_client.py
+++ b/tempest/lib/common/utils/linux/remote_client.py
@@ -120,6 +120,17 @@
         """
         self.ssh_client.test_connection_auth()
 
+    @debug_ssh
+    def check_cloudinit(self):
+        """Check cloud-init is completed
+
+           This method raises an Exception when the status is not 'done'.
+        """
+        out = self.ssh_client.exec_command("cloud-init status")
+        res = [s.strip() for s in out.split(' ')]
+        if len(res) < 2 or res[1] != "done":
+            raise ValueError("Cloud init is not done, {res}".format(res=res))
+
     def ping_host(self, host, count=None, size=None, nic=None):
         if count is None:
             count = self.ping_count
diff --git a/tempest/lib/services/compute/servers_client.py b/tempest/lib/services/compute/servers_client.py
index 4c91599..e9a70fd 100644
--- a/tempest/lib/services/compute/servers_client.py
+++ b/tempest/lib/services/compute/servers_client.py
@@ -812,6 +812,16 @@
         return self.action(server_id, "os-getVNCConsole",
                            schema.get_vnc_console, **kwargs)
 
+    def get_spice_console(self, server_id, **kwargs):
+        """Get URL of SPICE console.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/compute/#get-spice-console-os-getspiceconsole-action-deprecated
+        """
+        return self.action(server_id, "os-getSPICEConsole",
+                           schema.get_spice_console, **kwargs)
+
     def add_fixed_ip(self, server_id, **kwargs):
         """Add a fixed IP to server instance.
 
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index feb2cf1..20abbb7 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -52,7 +52,10 @@
 
     credentials = ['primary', 'admin']
 
-    compute_min_microversion = None
+    if CONF.compute_feature_enabled.volume_multiattach:
+        compute_min_microversion = '2.60'
+    else:
+        compute_min_microversion = None
     compute_max_microversion = LATEST_MICROVERSION
     volume_min_microversion = None
     volume_max_microversion = LATEST_MICROVERSION
@@ -1400,6 +1403,14 @@
 
         return self.create_server(**create_kwargs)
 
+    def wait_for_cloud_init(
+            self, ip_address, server, private_key, username, timeout=60):
+        ssh_client = self.get_remote_client(ip_address,
+                                            private_key=private_key,
+                                            server=server,
+                                            username=username)
+        waiters.wait_for_cloudinit(ssh_client, timeout)
+
     def create_volume_from_image(self, **kwargs):
         """Create volume from image.
 
@@ -1417,6 +1428,26 @@
                 prefix=CONF.resource_name_prefix, name=namestart)
         return self.create_volume(name=name, imageRef=image_id, **kwargs)
 
+    def run_sync(self, ip_address, private_key=None, server=None,
+                 username=None):
+        """Syncs server filesystem cached writes
+
+        This wrapper utility does ssh and syncs server's filesystem caches
+        to persistent storage.
+
+        :param ip_address: The floating IP or fixed IP of the remote server
+        :param private_key: The SSH private key to use for authentication
+        :param server: Server dict, used for debugging purposes
+        :param username: Name of the Linux account on the remote server
+        """
+
+        ssh_client = self.get_remote_client(ip_address,
+                                            private_key=private_key,
+                                            server=server,
+                                            username=username)
+
+        ssh_client.exec_command('sudo sh -c "sync"')
+
 
 class ScenarioTestWithNetwork(ScenarioTest):
     """Base class for tests with default network"""
diff --git a/tempest/scenario/test_encrypted_cinder_volumes.py b/tempest/scenario/test_encrypted_cinder_volumes.py
index 4cc9e9c..cb5e673 100644
--- a/tempest/scenario/test_encrypted_cinder_volumes.py
+++ b/tempest/scenario/test_encrypted_cinder_volumes.py
@@ -92,6 +92,8 @@
         self.attach_detach_volume(server, volume)
 
     @decorators.idempotent_id('cbc752ed-b716-4717-910f-956cce965722')
+    @testtools.skipIf(CONF.volume.storage_protocol == 'ceph',
+                      'Skip because ceph does not support Provider plain.')
     @decorators.attr(type='slow')
     @testtools.skipUnless(
         'plain' in CONF.volume_feature_enabled.supported_crypto_providers,
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index 23e5ae6..c7a5d81 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -156,7 +156,7 @@
             self.assertIn(self.router['id'],
                           seen_router_ids)
 
-    def _create_server(self, network, port_id=None):
+    def _create_server(self, network, port_id=None, **kwargs):
         keypair = self.create_keypair()
         self.keypairs[keypair['name']] = keypair
         security_groups = [
@@ -169,7 +169,8 @@
         server = self.create_server(
             networks=[network],
             key_name=keypair['name'],
-            security_groups=security_groups)
+            security_groups=security_groups,
+            **kwargs)
         self.servers.append(server)
         return server
 
@@ -396,6 +397,21 @@
             router['id'], **kwargs)['router']
         self.assertEqual(admin_state_up, router['admin_state_up'])
 
+    def _live_migrate_server(self, server, host_id=None):
+        src_host = self.get_host_for_server(server['id'])
+
+        self.os_adm.servers_client.live_migrate_server(
+            server_id=server['id'],
+            block_migration=False,
+            host=host_id)
+
+        waiters.wait_for_server_status(
+            self.servers_client, server['id'], 'ACTIVE')
+
+        dst_host = self.get_host_for_server(server['id'])
+        self.assertNotEqual(src_host, dst_host,
+                            msg="Live migration failed, servers are equal")
+
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('f323b3ba-82f8-4db7-8ea6-6a895869ec49')
     @utils.services('compute', 'network')
@@ -925,3 +941,56 @@
                                       security_groups=[])
         self.check_remote_connectivity(ssh_client, dest=peer_address,
                                        nic=spoof_nic, should_succeed=True)
+
+    @decorators.idempotent_id('463caa51-0967-4d6d-8ee9-11db1557c710')
+    @decorators.attr(type='slow')
+    @utils.services('compute', 'network')
+    def test_connectivity_between_vms_after_live_migration(self):
+        """Test the live-migration of the instances and ping
+
+        1. Create server 1 and 2 on the same host
+        2. Ping server 1 from server 2
+        3. Live migrate server 1 to other host
+        4. Ping server 1 from server 2
+        5. Migrate back server 1 to the first host
+        6. Ping server 1 from server 2
+        """
+
+        # Create server 1 with network, subnetwork, router, host
+        # and ping server 1
+        self._setup_network_and_servers()
+
+        server01 = self.servers[0]
+        hints = {'same_host': server01['id']}
+
+        # Create server 2 with network on the same host
+        self._create_new_network(create_gateway=True)
+        server02 = self._create_server(self.network,
+                                       scheduler_hints=hints)
+        server02_ip = [addr['addr'] for addr in
+                       server02['addresses'][self.network['name']]]
+
+        # Check if both instances are on the same host
+        host01_id = self.get_host_for_server(server01['id'])
+        host02_id = self.get_host_for_server(server02['id'])
+
+        self.assertEqual(host01_id, host02_id,
+                         message="Created servers have different hosts")
+
+        # Check ping between servers before live migration
+        self._check_server_connectivity(self.floating_ip_tuple.floating_ip,
+                                        server02_ip, should_connect=True)
+
+        # Live migrate server 1 to the new host
+        self._live_migrate_server(server=server01)
+
+        # Check ping between servers after live migration
+        self._check_server_connectivity(self.floating_ip_tuple.floating_ip,
+                                        server02_ip, should_connect=True)
+
+        # Migrate back server 1 to the first host, wait for status Active
+        self._live_migrate_server(server=server01, host_id=host01_id)
+
+        # Check ping between servers after live migration
+        self._check_server_connectivity(self.floating_ip_tuple.floating_ip,
+                                        server02_ip, should_connect=True)
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index 8a2641f..bab2582 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -58,6 +58,11 @@
                           'The public_network_id option must be specified.')
     @testtools.skipUnless(CONF.volume_feature_enabled.snapshot,
                           'Cinder volume snapshots are disabled')
+    @testtools.skipUnless(all([
+        CONF.compute.image_full_ref,
+        CONF.compute.image_full_username,
+        CONF.compute.image_full_flavor_ref]),
+        'Test requires image_full_* options to be set.')
     @utils.services('compute', 'volume', 'image')
     def test_volume_boot_pattern(self):
         """This test case attempts to reproduce the following steps:
@@ -71,27 +76,35 @@
         * Boot an additional instance from the new snapshot based volume
         * Check written content in the instance booted from snapshot
         """
-
         LOG.info("Creating keypair and security group")
         keypair = self.create_keypair()
         security_group = self.create_security_group()
+        username = CONF.compute.image_full_username
 
         # create an instance from volume
         LOG.info("Booting instance 1 from volume")
-        volume_origin = self.create_volume_from_image()
+        volume_origin = self.create_volume_from_image(
+            image_id=CONF.compute.image_full_ref)
         instance_1st = self.boot_instance_from_resource(
             source_id=volume_origin['id'],
             source_type='volume',
             keypair=keypair,
-            security_group=security_group)
+            security_group=security_group,
+            flavor=CONF.compute.image_full_flavor_ref)
         LOG.info("Booted first instance: %s", instance_1st)
 
         # write content to volume on instance
         LOG.info("Setting timestamp in instance %s", instance_1st)
         ip_instance_1st = self.get_server_ip(instance_1st)
+        self.wait_for_cloud_init(
+            ip_instance_1st,
+            private_key=keypair['private_key'],
+            server=instance_1st,
+            username=username)
         timestamp = self.create_timestamp(ip_instance_1st,
                                           private_key=keypair['private_key'],
-                                          server=instance_1st)
+                                          server=instance_1st,
+                                          username=username)
 
         # delete instance
         LOG.info("Deleting first instance: %s", instance_1st)
@@ -102,17 +115,29 @@
             source_id=volume_origin['id'],
             source_type='volume',
             keypair=keypair,
-            security_group=security_group)
+            security_group=security_group,
+            flavor=CONF.compute.image_full_flavor_ref)
         LOG.info("Booted second instance %s", instance_2nd)
 
         # check the content of written file
         LOG.info("Getting timestamp in instance %s", instance_2nd)
         ip_instance_2nd = self.get_server_ip(instance_2nd)
+        self.wait_for_cloud_init(
+            ip_instance_2nd,
+            private_key=keypair['private_key'],
+            server=instance_2nd,
+            username=username)
         timestamp2 = self.get_timestamp(ip_instance_2nd,
                                         private_key=keypair['private_key'],
-                                        server=instance_2nd)
+                                        server=instance_2nd,
+                                        username=username)
         self.assertEqual(timestamp, timestamp2)
 
+        # Sync filesystem caches to persistent storage before doing snapshot
+        self.run_sync(ip_instance_2nd,
+                      private_key=keypair['private_key'],
+                      server=instance_2nd,
+                      username=username)
         # snapshot a volume
         LOG.info("Creating snapshot from volume: %s", volume_origin['id'])
         snapshot = self.create_volume_snapshot(volume_origin['id'], force=True)
@@ -123,19 +148,26 @@
                                     size=snapshot['size'])
         LOG.info("Booting third instance from snapshot")
         server_from_snapshot = (
-            self.boot_instance_from_resource(source_id=volume['id'],
-                                             source_type='volume',
-                                             keypair=keypair,
-                                             security_group=security_group))
+            self.boot_instance_from_resource(
+                source_id=volume['id'],
+                source_type='volume', keypair=keypair,
+                security_group=security_group,
+                flavor=CONF.compute.image_full_flavor_ref))
         LOG.info("Booted third instance %s", server_from_snapshot)
 
         # check the content of written file
         LOG.info("Logging into third instance to get timestamp: %s",
                  server_from_snapshot)
         server_from_snapshot_ip = self.get_server_ip(server_from_snapshot)
+        self.wait_for_cloud_init(
+            server_from_snapshot_ip,
+            private_key=keypair['private_key'],
+            server=server_from_snapshot,
+            username=username)
         timestamp3 = self.get_timestamp(server_from_snapshot_ip,
                                         private_key=keypair['private_key'],
-                                        server=server_from_snapshot)
+                                        server=server_from_snapshot,
+                                        username=username)
         self.assertEqual(timestamp, timestamp3)
 
     @decorators.idempotent_id('e3f4f2fc-5c6a-4be6-9c54-aedfc0954da7')
diff --git a/tempest/test_discover/test_discover.py b/tempest/test_discover/test_discover.py
index 679d58b..20896bb 100644
--- a/tempest/test_discover/test_discover.py
+++ b/tempest/test_discover/test_discover.py
@@ -24,8 +24,8 @@
     suite = unittest.TestSuite()
     base_path = os.path.split(os.path.dirname(os.path.abspath(__file__)))[0]
     base_path = os.path.split(base_path)[0]
-    # Load local tempest tests
-    for test_dir in ['api', 'scenario', 'serial_tests']:
+    # Load local parallel tempest tests
+    for test_dir in ['api', 'scenario']:
         full_test_dir = os.path.join(base_path, 'tempest', test_dir)
         if not pattern:
             suite.addTests(loader.discover(full_test_dir,
@@ -33,17 +33,25 @@
         else:
             suite.addTests(loader.discover(full_test_dir, pattern=pattern,
                                            top_level_dir=base_path))
-
-    plugin_load_tests = ext_plugins.get_plugin_load_tests_tuple()
-    if not plugin_load_tests:
-        return suite
-
     # Load any installed plugin tests
-    for plugin in plugin_load_tests:
-        test_dir, top_path = plugin_load_tests[plugin]
-        if not pattern:
-            suite.addTests(loader.discover(test_dir, top_level_dir=top_path))
-        else:
-            suite.addTests(loader.discover(test_dir, pattern=pattern,
-                                           top_level_dir=top_path))
+    plugin_load_tests = ext_plugins.get_plugin_load_tests_tuple()
+    if plugin_load_tests:
+        for plugin in plugin_load_tests:
+            test_dir, top_path = plugin_load_tests[plugin]
+            if not pattern:
+                suite.addTests(loader.discover(
+                    test_dir, top_level_dir=top_path))
+            else:
+                suite.addTests(loader.discover(test_dir, pattern=pattern,
+                                               top_level_dir=top_path))
+    # Serial tests can block execution of tests which are loaded after,
+    # so loading them always in the end
+    serial_test_dir = os.path.join(base_path, 'tempest', 'serial_tests')
+    if not pattern:
+        suite.addTests(loader.discover(serial_test_dir,
+                                       top_level_dir=base_path))
+    else:
+        suite.addTests(loader.discover(serial_test_dir, pattern=pattern,
+                                       top_level_dir=base_path))
+
     return suite