Cumulative commit of patches from the mcp/rocky branch

Includes the following patches:
https://gerrit.mcp.mirantis.com/c/packaging/sources/tempest/+/65661
https://gerrit.mcp.mirantis.com/c/packaging/sources/tempest/+/68408
https://gerrit.mcp.mirantis.com/c/packaging/sources/tempest/+/73158

Related-Prod: PRODX-22696
Change-Id: I3b869324580bf44f5c1fc1cbf959e736ccf9d549
diff --git a/releasenotes/notes/add-boot-from-volume-option-312d02c0c84f2092.yaml b/releasenotes/notes/add-boot-from-volume-option-312d02c0c84f2092.yaml
new file mode 100644
index 0000000..0a0b78e
--- /dev/null
+++ b/releasenotes/notes/add-boot-from-volume-option-312d02c0c84f2092.yaml
@@ -0,0 +1,7 @@
+---
+features:
+  - A new config option boot_from_volume in the
+    compute-feature-enabled group, which specifies whether Nova
+    allows booting instances from volume. This functionality is
+    not available on some hypervisors and Cinder backends, such
+    as Ironic and Ceph.
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index 672cebf..53088e3 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -170,3 +170,5 @@
             msg = "Volume tests are not available for Ironic with Ceph."
         if msg is not None:
             raise cls.skipException(msg)
+        if not CONF.compute_feature_enabled.boot_from_volume:
+            raise cls.skipException("Booting from volume is not enabled.")
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index ca09ac6..dcbea55 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -35,35 +35,17 @@
 
 class ServerActionsTestJSON(base.BaseV2ComputeTest):
     def setUp(self):
-        # NOTE(afazekas): Normally we use the same server with all test cases,
-        # but if it has an issue, we build a new one
         super(ServerActionsTestJSON, self).setUp()
-        # Check if the server is in a clean state after test
-        try:
-            waiters.wait_for_server_status(self.client,
-                                           self.server_id, 'ACTIVE')
-        except lib_exc.NotFound:
-            # The server was deleted by previous test, create a new one
-            # Use class level validation resources to avoid them being
-            # deleted once a test is over
-            validation_resources = self.get_class_validation_resources(
-                self.os_primary)
-            server = self.create_test_server(
-                validatable=True,
-                validation_resources=validation_resources,
-                wait_until='ACTIVE')
-            self.__class__.server_id = server['id']
-        except Exception:
-            # Rebuild server if something happened to it during a test
-            self.__class__.server_id = self.recreate_server(
-                self.server_id, validatable=True)
-
-    def tearDown(self):
-        super(ServerActionsTestJSON, self).tearDown()
-        # NOTE(zhufl): Because server_check_teardown will raise Exception
-        # which will prevent other cleanup steps from being executed, so
-        # server_check_teardown should be called after super's tearDown.
-        self.server_check_teardown()
+        # Instead of reusing an instance created earlier, a new instance
+        # is created for each test in order to increase test stability.
+        self.validation_resources = self.get_test_validation_resources(
+            self.os_primary)
+        server = self.create_test_server(
+            validatable=True,
+            validation_resources=self.validation_resources,
+            wait_until='ACTIVE')
+        self.server_id = server['id']
+        self.addCleanup(self.delete_server, self.server_id)
 
     @classmethod
     def setup_credentials(cls):
@@ -75,24 +57,11 @@
         super(ServerActionsTestJSON, cls).setup_clients()
         cls.client = cls.servers_client
 
-    @classmethod
-    def resource_setup(cls):
-        super(ServerActionsTestJSON, cls).resource_setup()
-        cls.server_id = cls.recreate_server(None, validatable=True)
-
     @decorators.idempotent_id('6158df09-4b82-4ab3-af6d-29cf36af858d')
     @testtools.skipUnless(CONF.compute_feature_enabled.change_password,
                           'Change password not available.')
     def test_change_server_password(self):
-        # Since this test messes with the password and makes the
-        # server unreachable, it should create its own server
-        validation_resources = self.get_test_validation_resources(
-            self.os_primary)
-        newserver = self.create_test_server(
-            validatable=True,
-            validation_resources=validation_resources,
-            wait_until='ACTIVE')
-        # The server's password should be set to the provided password
+        newserver = self.client.show_server(self.server_id)['server']
         new_password = 'Newpass1234'
         self.client.change_password(newserver['id'], adminPass=new_password)
         waiters.wait_for_server_status(self.client, newserver['id'], 'ACTIVE')
@@ -101,7 +70,7 @@
             # Verify that the user can authenticate with the new password
             server = self.client.show_server(newserver['id'])['server']
             linux_client = remote_client.RemoteClient(
-                self.get_server_ip(server, validation_resources),
+                self.get_server_ip(server, self.validation_resources),
                 self.ssh_user,
                 new_password,
                 server=server,
@@ -110,15 +79,13 @@
 
     def _test_reboot_server(self, reboot_type):
         if CONF.validation.run_validation:
-            validation_resources = self.get_class_validation_resources(
-                self.os_primary)
             # Get the time the server was last rebooted,
             server = self.client.show_server(self.server_id)['server']
             linux_client = remote_client.RemoteClient(
-                self.get_server_ip(server, validation_resources),
+                self.get_server_ip(server, self.validation_resources),
                 self.ssh_user,
-                self.password,
-                validation_resources['keypair']['private_key'],
+                getattr(self, "password", None),
+                self.validation_resources['keypair']['private_key'],
                 server=server,
                 servers_client=self.client)
             boot_time = linux_client.get_boot_time()
@@ -133,10 +100,10 @@
         if CONF.validation.run_validation:
             # Log in and verify the boot time has changed
             linux_client = remote_client.RemoteClient(
-                self.get_server_ip(server, validation_resources),
+                self.get_server_ip(server, self.validation_resources),
                 self.ssh_user,
-                self.password,
-                validation_resources['keypair']['private_key'],
+                getattr(self, "password", None),
+                self.validation_resources['keypair']['private_key'],
                 server=server,
                 servers_client=self.client)
             new_boot_time = linux_client.get_boot_time()
@@ -157,7 +124,7 @@
 
     @decorators.idempotent_id('1d1c9104-1b0a-11e7-a3d4-fa163e65f5ce')
     def test_remove_server_all_security_groups(self):
-        server = self.create_test_server(wait_until='ACTIVE')
+        server = self.client.show_server(self.server_id)['server']
 
         # Remove all Security group
         self.client.remove_security_group(
@@ -177,6 +144,22 @@
         self.assertEqual(image_ref, rebuilt_server['image']['id'], msg)
 
     def _test_rebuild_server(self):
+
+        def floating_ip_ready():
+            addresses = self.client.show_server(
+                self.server_id)['server']['addresses']
+            self.assertTrue(
+                self.validation_resources["floating_ip"]["ip"] in
+                [addr["addr"] for net in addresses.values() for addr in net]
+            )
+
+        # NOTE(pas-ha): we now always create a 'validating' server.
+        # create_test_server only waits for the ACTIVE state, but the
+        # floating IP may be assigned a bit later. As we compare network
+        # addresses before and after the rebuild, we need to ensure the
+        # floating IP is already recorded among the server addresses.
+        self.wait_for(floating_ip_ready)
+
         # Get the IPs the server has before rebuilding it
         original_addresses = (self.client.show_server(self.server_id)['server']
                               ['addresses'])
@@ -191,11 +174,6 @@
             metadata=meta,
             adminPass=password)['server']
 
-        # If the server was rebuilt on a different image, restore it to the
-        # original image once the test ends
-        if self.image_ref_alt != self.image_ref:
-            self.addCleanup(self._rebuild_server_and_check, self.image_ref)
-
         # Verify the properties in the initial response are correct
         self.assertEqual(self.server_id, rebuilt_server['id'])
         rebuilt_image_id = rebuilt_server['image']['id']
@@ -212,8 +190,6 @@
         self.assertEqual(original_addresses, server['addresses'])
 
         if CONF.validation.run_validation:
-            validation_resources = self.get_class_validation_resources(
-                self.os_primary)
             # Authentication is attempted in the following order of priority:
             # 1.The key passed in, if one was passed in.
             # 2.Any key we can find through an SSH agent (if allowed).
@@ -221,10 +197,10 @@
             #   ~/.ssh/ (if allowed).
             # 4.Plain username/password auth, if a password was given.
             linux_client = remote_client.RemoteClient(
-                self.get_server_ip(rebuilt_server, validation_resources),
+                self.get_server_ip(rebuilt_server, self.validation_resources),
                 self.ssh_user,
                 password,
-                validation_resources['keypair']['private_key'],
+                self.validation_resources['keypair']['private_key'],
                 server=rebuilt_server,
                 servers_client=self.client)
             linux_client.validate_authentication()
@@ -328,6 +304,10 @@
         # from setUp is not volume-backed.
         server = self.create_test_server(
             volume_backed=True, wait_until='ACTIVE')
+        self.addCleanup(self.delete_server, server['id'])
+        # NOTE(mgoddard): Get detailed server to ensure addresses are present
+        # in fixed IP case.
+        server = self.servers_client.show_server(server['id'])['server']
         self._test_resize_server_confirm(server['id'])
         if CONF.compute_feature_enabled.console_output:
             # Now do something interactive with the guest like get its console
@@ -335,13 +315,11 @@
             # just that it doesn't raise an error.
             self.client.get_console_output(server['id'])
         if CONF.validation.run_validation:
-            validation_resources = self.get_class_validation_resources(
-                self.os_primary)
             linux_client = remote_client.RemoteClient(
-                self.get_server_ip(server, validation_resources),
+                self.get_server_ip(server, self.validation_resources),
                 self.ssh_user,
                 password=None,
-                pkey=validation_resources['keypair']['private_key'],
+                pkey=self.validation_resources['keypair']['private_key'],
                 server=server,
                 servers_client=self.client)
             linux_client.validate_authentication()
@@ -555,7 +533,7 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.console_output,
                           'Console output not supported.')
     def test_get_console_output_with_unlimited_size(self):
-        server = self.create_test_server(wait_until='ACTIVE')
+        server = self.client.show_server(self.server_id)['server']
 
         def _check_full_length_console_log():
             output = self.client.get_console_output(server['id'])['output']
@@ -576,14 +554,8 @@
         # Positive test:Should be able to GET the console output
         # for a given server_id in SHUTOFF status
 
-        # NOTE: SHUTOFF is irregular status. To avoid test instability,
-        #       one server is created only for this test without using
-        #       the server that was created in setUpClass.
-        server = self.create_test_server(wait_until='ACTIVE')
-        temp_server_id = server['id']
-
-        self.client.stop_server(temp_server_id)
-        waiters.wait_for_server_status(self.client, temp_server_id, 'SHUTOFF')
+        self.client.stop_server(self.server_id)
+        waiters.wait_for_server_status(self.client, self.server_id, 'SHUTOFF')
         self.wait_for(self._get_output)
 
     @decorators.idempotent_id('bd61a9fd-062f-4670-972b-2d6c3e3b9e73')
@@ -652,11 +624,10 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.pause,
                           'Pause is not available.')
     def test_shelve_paused_server(self):
-        server = self.create_test_server(wait_until='ACTIVE')
-        self.client.pause_server(server['id'])
-        waiters.wait_for_server_status(self.client, server['id'], 'PAUSED')
+        self.client.pause_server(self.server_id)
+        waiters.wait_for_server_status(self.client, self.server_id, 'PAUSED')
         # Check if Shelve operation is successful on paused server.
-        compute.shelve_server(self.client, server['id'],
+        compute.shelve_server(self.client, self.server_id,
                               force_shelve_offload=True)
 
     @decorators.idempotent_id('af8eafd4-38a7-4a4b-bdbc-75145a580560')
diff --git a/tempest/api/volume/admin/test_volume_retype.py b/tempest/api/volume/admin/test_volume_retype.py
index 08943bc..1f1f678 100644
--- a/tempest/api/volume/admin/test_volume_retype.py
+++ b/tempest/api/volume/admin/test_volume_retype.py
@@ -160,10 +160,11 @@
         keys_with_change = ('volume_type',)
 
         # NOTE(vsaienko): with active-active cluster deployment volume
-        # services registered with different hostname.
-        if CONF.volume_feature_enabled.cluster_active_active:
-            keys_with_change += ('os-vol-host-attr:host',)
-        else:
+        # services are registered with different hostnames; since we do
+        # not know which service processes the request, the host may or
+        # may not be changed.
+        # TODO(vsaienko): Revisit this logic once the following bug is fixed:
+        if not CONF.volume_feature_enabled.cluster_active_active:
             keys_with_no_change += ('os-vol-host-attr:host',)
 
         # Check the volume information after the retype
diff --git a/tempest/config.py b/tempest/config.py
index fc67ef7..d8f6e9a 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -497,6 +497,11 @@
     cfg.BoolOpt('barbican_integration_enabled',
                 default=False,
                 help='Does the test environment support Barbican integration'),
+    cfg.BoolOpt('boot_from_volume',
+                default=True,
+                help='Does the test environment support booting instances '
+                     'from volume. This depends on hypervisor and volume '
+                     'backend/type.'),
 ]
 
 
@@ -679,7 +684,7 @@
 
 ValidationGroup = [
     cfg.BoolOpt('run_validation',
-                default=False,
+                default=True,
                 help='Enable ssh on created servers and creation of additional'
                      ' validation resources to enable remote access'),
     cfg.BoolOpt('security_group',