Merge "Do not check os-vol-host-attr:host in retype when using active/active" into mcp/caracal
diff --git a/releasenotes/notes/add-boot-from-volume-option-312d02c0c84f2092.yaml b/releasenotes/notes/add-boot-from-volume-option-312d02c0c84f2092.yaml
new file mode 100644
index 0000000..0a0b78e
--- /dev/null
+++ b/releasenotes/notes/add-boot-from-volume-option-312d02c0c84f2092.yaml
@@ -0,0 +1,7 @@
+---
+features:
+  - A new config option ``boot_from_volume`` in the
+    ``compute-feature-enabled`` group which specifies whether Nova
+    allows booting instances from volume. This functionality is not
+    available with some hypervisors and Cinder backends, such as
+    ironic and ceph.
diff --git a/tempest/api/compute/admin/test_create_server.py b/tempest/api/compute/admin/test_create_server.py
index 293e284..5bc9206 100644
--- a/tempest/api/compute/admin/test_create_server.py
+++ b/tempest/api/compute/admin/test_create_server.py
@@ -136,3 +136,36 @@
             servers_client=self.client)
         disks_num_eph = len(linux_client.get_disks().split('\n'))
         self.assertEqual(disks_num + 1, disks_num_eph)
+
+
+class ServersTestUEFI(base.BaseV2ComputeAdminTest):
+    """Test creating server with UEFI firmware type"""
+
+    @classmethod
+    def setup_credentials(cls):
+        cls.prepare_instance_network()
+        super(ServersTestUEFI, cls).setup_credentials()
+
+    @classmethod
+    def setup_clients(cls):
+        super(ServersTestUEFI, cls).setup_clients()
+        cls.client = cls.servers_client
+
+    @decorators.idempotent_id('94feb6c3-d07e-b3b9-def8-64fd082d9b21')
+    def test_created_server_uefi(self):
+        # create custom image with uefi type
+        custom_img = self.create_image_with_custom_property(
+            hw_machine_type='q35',
+            hw_firmware_type='uefi',
+            )
+        # create the server and wait for it to become ready
+        validation_resources = self.get_class_validation_resources(
+            self.os_primary)
+        server = self.create_test_server(
+            image_id=custom_img, validatable=True,
+            validation_resources=validation_resources, wait_until='SSHABLE')
+        # check UEFI boot loader in console log server
+        uefi_boot_loader = "UEFI Misc Device"
+        console_log = self.client.get_console_output(server['id'])['output']
+        self.assertTrue(console_log, "Console output was empty.")
+        self.assertIn(uefi_boot_loader, console_log)
diff --git a/tempest/api/compute/admin/test_volume.py b/tempest/api/compute/admin/test_volume.py
index 2813d7a..dc631e5 100644
--- a/tempest/api/compute/admin/test_volume.py
+++ b/tempest/api/compute/admin/test_volume.py
@@ -37,46 +37,6 @@
         cls.prepare_instance_network()
         super(BaseAttachSCSIVolumeTest, cls).setup_credentials()
 
-    def _create_image_with_custom_property(self, **kwargs):
-        """Wrapper utility that returns the custom image.
-
-        Creates a new image by downloading the default image's bits and
-        uploading them to a new image. Any kwargs are set as image properties
-        on the new image.
-
-        :param return image_id: The UUID of the newly created image.
-        """
-        image = self.admin_image_client.show_image(CONF.compute.image_ref)
-        # NOTE(danms): We need to stream this, so chunked=True means we get
-        # back a urllib3.HTTPResponse and have to carefully pass it to
-        # store_image_file() to upload it in pieces.
-        image_data_resp = self.admin_image_client.show_image_file(
-            CONF.compute.image_ref, chunked=True)
-        create_dict = {
-            'container_format': image['container_format'],
-            'disk_format': image['disk_format'],
-            'min_disk': image['min_disk'],
-            'min_ram': image['min_ram'],
-            'visibility': 'public',
-        }
-        if 'kernel_id' in image:
-            create_dict['kernel_id'] = image['kernel_id']
-        if 'ramdisk_id' in image:
-            create_dict['ramdisk_id'] = image['ramdisk_id']
-
-        create_dict.update(kwargs)
-        try:
-            new_image = self.admin_image_client.create_image(**create_dict)
-            self.addCleanup(self.admin_image_client.wait_for_resource_deletion,
-                            new_image['id'])
-            self.addCleanup(
-                self.admin_image_client.delete_image, new_image['id'])
-            self.admin_image_client.store_image_file(new_image['id'],
-                                                     image_data_resp)
-        finally:
-            image_data_resp.release_conn()
-        return new_image['id']
-
 
 class AttachSCSIVolumeTestJSON(BaseAttachSCSIVolumeTest):
     """Test attaching scsi volume to server"""
@@ -90,7 +50,7 @@
         virtio-scsi mode with further asserting list volume attachments
         in instance after attach and detach of the volume.
         """
-        custom_img = self._create_image_with_custom_property(
+        custom_img = self.create_image_with_custom_property(
             hw_scsi_model='virtio-scsi',
             hw_disk_bus='scsi',
             hw_cdrom_bus='scsi')
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 2557e47..313f73d 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -701,3 +701,37 @@
         for target_host in hosts:
             if source_host != target_host:
                 return target_host
+
+    def create_image_with_custom_property(self, base_image=None, **kwargs):
+        """Wrapper utility that returns the custom image.
+
+        Creates a new image by downloading the base image bits and
+        uploading them to a new image. Any kwargs are set as image properties
+        on the new image.
+
+        :param return image_id: The UUID of the newly created image.
+        """
+        if base_image is None:
+            base_image = CONF.compute.image_ref
+        image = self.admin_image_client.show_image(base_image)
+        image_data_resp = self.admin_image_client.show_image_file(
+            base_image, chunked=True)
+        create_dict = {
+            'container_format': image['container_format'],
+            'disk_format': image['disk_format'],
+            'min_disk': image['min_disk'],
+            'min_ram': image['min_ram'],
+            'visibility': 'public',
+        }
+        create_dict.update(kwargs)
+        try:
+            new_image = self.admin_image_client.create_image(**create_dict)
+            self.addCleanup(self.admin_image_client.wait_for_resource_deletion,
+                            new_image['id'])
+            self.addCleanup(
+                self.admin_image_client.delete_image, new_image['id'])
+            self.admin_image_client.store_image_file(new_image['id'],
+                                                     image_data_resp)
+        finally:
+            image_data_resp.release_conn()
+        return new_image['id']
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index 6664e15..6f97b1f 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -182,6 +182,8 @@
         if not utils.get_service_list()['volume']:
             msg = "Volume service not enabled."
             raise cls.skipException(msg)
+        if not CONF.compute_feature_enabled.boot_from_volume:
+            raise cls.skipException("Booting from volume is not enabled.")
 
 
 class ServersTestFqdnHostnames(base.BaseV2ComputeTest):
diff --git a/tempest/api/network/admin/test_routers.py b/tempest/api/network/admin/test_routers.py
index 60e6218..d8ef4a3 100644
--- a/tempest/api/network/admin/test_routers.py
+++ b/tempest/api/network/admin/test_routers.py
@@ -141,7 +141,7 @@
 
     def _verify_gateway_port(self, router_id):
         # Workaround for PRODX-8489
-        if CONF.get('sdn').get('service_name') == "tungstenfabric":
+        if config.is_tungstenfabric_backend_enabled():
             self._wait_for_ports(router_id)
         list_body = self.admin_ports_client.list_ports(
             network_id=CONF.network.public_network_id,
diff --git a/tempest/api/network/test_security_groups.py b/tempest/api/network/test_security_groups.py
index 41a0ecb..e6d0f7e 100644
--- a/tempest/api/network/test_security_groups.py
+++ b/tempest/api/network/test_security_groups.py
@@ -192,7 +192,7 @@
         port_range_min = 77
         port_range_max = 77
 
-        if CONF.get('sdn').get('service_name') == "tungstenfabric":
+        if config.is_tungstenfabric_backend_enabled():
             if self.ethertype == 'IPv6':
                 remote_ip_prefix = '::/0'
             else:
@@ -229,7 +229,7 @@
         protocol = 'ipv6-icmp' if self._ip_version == 6 else 'icmp'
         icmp_type_codes = [(3, 2), (3, 0), (8, 0), (0, 0), (11, None)]
         for icmp_type, icmp_code in icmp_type_codes:
-            if CONF.get('sdn').get('service_name') == "tungstenfabric":
+            if config.is_tungstenfabric_backend_enabled():
                 if self.ethertype == 'IPv6':
                     remote_ip_prefix = '::/0'
                 else:
diff --git a/tempest/api/network/test_subnetpools_extensions.py b/tempest/api/network/test_subnetpools_extensions.py
index 689844b..f398062 100644
--- a/tempest/api/network/test_subnetpools_extensions.py
+++ b/tempest/api/network/test_subnetpools_extensions.py
@@ -45,6 +45,9 @@
         if not utils.is_extension_enabled('subnet_allocation', 'network'):
             msg = "subnet_allocation extension not enabled."
             raise cls.skipException(msg)
+        if not utils.is_extension_enabled('default-subnetpools', 'network'):
+            msg = "default-subnetpools extension not enabled."
+            raise cls.skipException(msg)
 
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('62595970-ab1c-4b7f-8fcc-fddfe55e9811')
diff --git a/tempest/api/volume/test_volumes_filters.py b/tempest/api/volume/test_volumes_filters.py
new file mode 100644
index 0000000..74ba9cb
--- /dev/null
+++ b/tempest/api/volume/test_volumes_filters.py
@@ -0,0 +1,50 @@
+# Copyright 2021 Mirantis Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+import testtools
+
+from tempest.api.volume import base
+from tempest import config
+from tempest.lib import decorators
+
+
+CONF = config.CONF
+
+
+class VolumesFilter(base.BaseVolumeAdminTest):
+    @testtools.skipUnless(
+        "InstanceLocalityFilter" in CONF.volume.scheduler_default_filters,
+        "Cinder InstanceLocalityFilter is disabled",
+    )
+    @testtools.skipUnless(
+        CONF.volume_feature_enabled.instance_locality_enabled,
+        "InstanceLocalityFilter test is disabled",
+    )
+    @decorators.idempotent_id("5c13f4f7-5add-4fad-8ef7-dccca0f76295")
+    def test_instancelocalityfilter(self):
+        # 1. Create instance
+        # 2. Create volume by using local_to_instance hint
+        # 3. Compare server host and volume host are the same.
+        server = self.create_server()
+        server_host = self.admin_manager.servers_client.show_server(
+            server["id"])["server"]["OS-EXT-SRV-ATTR:host"]
+        volume = self.create_volume(hints={"local_to_instance": server["id"]})
+        fetched_volume = self.admin_volume_client.show_volume(volume["id"])[
+            "volume"]
+        self.assertEqual(
+            server_host, fetched_volume["os-vol-host-attr:host"].split("@")
+            [0],
+            "The fetched Volume host is different "
+            "from the created instance",)
diff --git a/tempest/config.py b/tempest/config.py
index 5dce260..ae065ef 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -622,6 +622,14 @@
     cfg.BoolOpt('unified_limits',
                 default=False,
                 help='Does the test environment support unified limits?'),
+    cfg.BoolOpt('boot_from_volume',
+                default=True,
+                help='Does the test environment support booting instances '
+                     'from volume? This depends on the hypervisor and '
+                     'volume backend/type.'),
+    cfg.BoolOpt('barbican_integration_enabled',
+                default=False,
+                help='Does the test environment support Barbican integration'),
 ]
 
 
@@ -1041,6 +1049,9 @@
                     "If both values are not specified, Tempest avoids tests "
                     "which require a microversion. Valid values are string "
                     "with format 'X.Y' or string 'latest'",),
+    cfg.ListOpt('scheduler_default_filters',
+                default=[],
+                help="The list of enabled scheduler filters.",),
 ]
 
 volume_feature_group = cfg.OptGroup(name='volume-feature-enabled',
@@ -1093,7 +1104,11 @@
     cfg.BoolOpt('cluster_active_active',
                 default=False,
                 help='The boolean flag to indicate if active-active mode '
-                     'is used by volume backend.')
+                     'is used by volume backend.'),
+    cfg.BoolOpt('instance_locality_enabled',
+                default=False,
+                help='The boolean flag to run instance locality tests '
+                     'in the environment.')
 ]
 
 
diff --git a/tempest/lib/services/volume/v3/volumes_client.py b/tempest/lib/services/volume/v3/volumes_client.py
index c6f8973..65dc258 100644
--- a/tempest/lib/services/volume/v3/volumes_client.py
+++ b/tempest/lib/services/volume/v3/volumes_client.py
@@ -105,14 +105,17 @@
         self.validate_response(schema.show_volume, resp, body)
         return rest_client.ResponseBody(resp, body)
 
-    def create_volume(self, **kwargs):
+    def create_volume(self, hints=None, **kwargs):
         """Creates a new Volume.
 
         For a full list of available parameters, please refer to the official
         API reference:
         https://docs.openstack.org/api-ref/block-storage/v3/index.html#create-a-volume
         """
-        post_body = json.dumps({'volume': kwargs})
+        obj = {'volume': kwargs}
+        if hints is not None:
+            obj['OS-SCH-HNT:scheduler_hints'] = hints
+        post_body = json.dumps(obj)
         resp, body = self.post('volumes', post_body)
         body = json.loads(body)
         schema = self.get_schema(self.schema_versions_info)
diff --git a/tempest/serial_tests/api/admin/test_aggregates.py b/tempest/serial_tests/api/admin/test_aggregates.py
index cedeec0..ce54957 100644
--- a/tempest/serial_tests/api/admin/test_aggregates.py
+++ b/tempest/serial_tests/api/admin/test_aggregates.py
@@ -222,6 +222,9 @@
     @decorators.idempotent_id('96be03c7-570d-409c-90f8-e4db3c646996')
     def test_aggregate_add_host_create_server_with_az(self):
         """Test adding a host to the given aggregate and creating a server"""
+        if CONF.production:
+            raise self.skipException("Not allowed to run this test "
+                                     "on production environment")
         self.useFixture(fixtures.LockFixture('availability_zone'))
         az_name = data_utils.rand_name(
             prefix=CONF.resource_name_prefix, name=self.az_name_prefix)
@@ -235,12 +238,20 @@
             if agg['availability_zone']:
                 hosts_in_zone.extend(agg['hosts'])
         hosts = [v for v in self.hosts_available if v not in hosts_in_zone]
-        if not hosts:
+        hosts_available = []
+        for host in hosts:
+            hypervisor_servers = (
+                self.os_admin.hypervisor_client.list_servers_on_hypervisor(
+                    host)["hypervisors"][0].get("servers", None))
+            if not hypervisor_servers:
+                hosts_available.append(host)
+        if not hosts_available:
             raise self.skipException("All hosts are already in other "
-                                     "availability zones, so can't add "
+                                     "availability zones or have running "
+                                     "instances, so can't add "
                                      "host to aggregate. \nAggregates list: "
                                      "%s" % aggregates)
-        host = hosts[0]
+        host = hosts_available[0]
 
         self.client.add_host(aggregate['id'], host=host)
         self.addCleanup(self.client.remove_host, aggregate['id'], host=host)
diff --git a/tempest/serial_tests/scenario/test_aggregates_basic_ops.py b/tempest/serial_tests/scenario/test_aggregates_basic_ops.py
index a831fe5..cc45297 100644
--- a/tempest/serial_tests/scenario/test_aggregates_basic_ops.py
+++ b/tempest/serial_tests/scenario/test_aggregates_basic_ops.py
@@ -63,7 +63,11 @@
         hosts_available = []
         for host in svc_list:
             if (host['state'] == 'up' and host['status'] == 'enabled'):
-                hosts_available.append(host['host'])
+                hypervisor_servers = (
+                    self.os_admin.hypervisor_client.list_servers_on_hypervisor(
+                        host["host"])["hypervisors"][0].get("servers", None))
+                if not hypervisor_servers:
+                    hosts_available.append(host["host"])
         aggregates = self.aggregates_client.list_aggregates()['aggregates']
         hosts_in_zone = []
         for agg in aggregates:
@@ -72,7 +76,8 @@
         hosts = [v for v in hosts_available if v not in hosts_in_zone]
         if not hosts:
             raise self.skipException("All hosts are already in other "
-                                     "availability zones, so can't add "
+                                     "availability zones or have running "
+                                     "instances, so can't add "
                                      "host to aggregate. \nAggregates list: "
                                      "%s" % aggregates)
         return hosts[0]
@@ -120,6 +125,9 @@
     @decorators.attr(type='slow')
     @utils.services('compute')
     def test_aggregate_basic_ops(self):
+        if CONF.production:
+            raise self.skipException("Not allowed to run this test "
+                                     "on production environment")
         self.useFixture(fixtures.LockFixture('availability_zone'))
         az = 'foo_zone'
         aggregate_name = data_utils.rand_name(