Merge "Add a new test case to migrate VM with two networks" into mcp/epoxy
diff --git a/tempest/api/compute/admin/test_servers_on_multinodes.py b/tempest/api/compute/admin/test_servers_on_multinodes.py
index db19522..3e18807 100644
--- a/tempest/api/compute/admin/test_servers_on_multinodes.py
+++ b/tempest/api/compute/admin/test_servers_on_multinodes.py
@@ -148,6 +148,13 @@
 
         if not CONF.compute_feature_enabled.shelve:
             raise cls.skipException("Shelve is not available.")
+
+        # The test uses two users from different projects: the member
+        # user creates a VM and shelves it, while the admin user
+        # unshelves it. With encrypted Nova storage the admin user has
+        # no access to the member's encryption keys and cannot unshelve.
+        if CONF.ephemeral_storage_encryption.enabled:
+            raise cls.skipException("Nova has encrypted storage.")
         if CONF.compute.min_compute_nodes < 2:
             raise cls.skipException(
                 "Less than 2 compute nodes, skipping multi-nodes test.")
diff --git a/tempest/api/compute/servers/test_console.py b/tempest/api/compute/servers/test_console.py
index 0cbeb41..0ebb268 100644
--- a/tempest/api/compute/servers/test_console.py
+++ b/tempest/api/compute/servers/test_console.py
@@ -33,6 +33,13 @@
     def setUp(self):
         super(ConsoleTestBase, self).setUp()
         self._websocket = None
+        self.server = self.create_test_server(wait_until="ACTIVE")
+        self.use_get_remote_console = False
+        if not self.is_requested_microversion_compatible("2.5"):
+            self.use_get_remote_console = True
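+        # NOTE: compute microversion 2.6 replaced the per-protocol
+        # console actions with a unified remote-consoles API, so any
+        # requested microversion newer than 2.5 uses get_remote_console.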
 
     def tearDown(self):
         super(ConsoleTestBase, self).tearDown()
@@ -43,19 +47,6 @@
         # server_check_teardown should be called after super's tearDown.
         self.server_check_teardown()
 
-    @classmethod
-    def setup_clients(cls):
-        super(ConsoleTestBase, cls).setup_clients()
-        cls.client = cls.servers_client
-
-    @classmethod
-    def resource_setup(cls):
-        super(ConsoleTestBase, cls).resource_setup()
-        cls.server = cls.create_test_server(wait_until="ACTIVE")
-        cls.use_get_remote_console = False
-        if not cls.is_requested_microversion_compatible("2.5"):
-            cls.use_get_remote_console = True
-
     @property
     def cert_params(self):
         ssl_opt = {}
@@ -124,10 +115,11 @@
 
     def _get_console_body(self, type, protocol, get_console):
         if self.use_get_remote_console:
-            return self.client.get_remote_console(
+            return self.servers_client.get_remote_console(
                 self.server["id"], type=type, protocol=protocol
             )["remote_console"]
-        return getattr(self.client, get_console)(self.server["id"], type=type)[
+        return getattr(self.servers_client, get_console)(self.server["id"],
+                                                         type=type)[
             "console"
         ]
 
diff --git a/tempest/api/image/v2/admin/test_images.py b/tempest/api/image/v2/admin/test_images.py
index b380f11..4927ac2 100644
--- a/tempest/api/image/v2/admin/test_images.py
+++ b/tempest/api/image/v2/admin/test_images.py
@@ -12,6 +12,7 @@
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
 import io
+import time
 
@@ -235,3 +236,49 @@
             observed_image,
             stores,
             first_image_store_deleted)
+
+
+class ImageWebUploadAdminTest(base.BaseV2ImageAdminTest):
+    @classmethod
+    def skip_checks(cls):
+        super(ImageWebUploadAdminTest, cls).skip_checks()
+        enabled_methods = CONF.image_feature_enabled.enabled_import_methods
+        if "web-download" not in enabled_methods:
+            raise cls.skipException(
+                "Glance image upload via url feature disabled")
+
+    @decorators.idempotent_id('5b2ce43c-924c-4bae-bac0-f5d6ed69d72e')
+    def test_image_upload_via_url(self):
+        # Create image
+        image_name = data_utils.rand_name("image")
+        container_format = CONF.image.container_formats[0]
+        disk_format = CONF.image.disk_formats[0]
+        image = self.create_image(name=image_name,
+                                  container_format=container_format,
+                                  disk_format=disk_format,
+                                  visibility='private')
+        self.assertEqual('queued', image['status'])
+
+        # Upload image via url
+        image_uri = CONF.image.http_image
+        method = {"name": "web-download", "uri": image_uri}
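+        # With the web-download method Glance itself fetches the image
+        # bytes from the given URL instead of receiving an upload.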
+        self.admin_client.import_image(image_id=image["id"], method=method)
+
+        timeout = CONF.image.build_timeout
+        interval = CONF.image.build_interval
+
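+        # Poll the image status until it becomes active, failing with
+        # a timeout once CONF.image.build_timeout is exceeded.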
+        start_time = int(time.time())
+        while True:
+            body = self.admin_client.show_image(image['id'])
+            if body["status"] == "active":
+                break
+            if int(time.time()) - start_time >= timeout:
+                message = ('Image %(id)s failed to become active within '
+                           'the required time (%(timeout)s s).' %
+                           {'id': image['id'], 'timeout': timeout})
+                raise lib_exc.TimeoutException(message)
+            time.sleep(interval)
diff --git a/tempest/api/volume/admin/test_volume_retype.py b/tempest/api/volume/admin/test_volume_retype.py
index 586111c..b98fa3c 100644
--- a/tempest/api/volume/admin/test_volume_retype.py
+++ b/tempest/api/volume/admin/test_volume_retype.py
@@ -63,7 +63,7 @@
         src_vol = self.create_volume(volume_type=self.src_vol_type['name'],
                                      snapshot_id=snapshot['id'])
 
-        if not CONF.volume_feature_enabled.snapshot_locked_by_volume:
+        if not CONF.volume_feature_enabled.volume_locked_by_snapshot:
             # Delete the snapshot
             self.snapshots_client.delete_snapshot(snapshot['id'])
             self.snapshots_client.wait_for_resource_deletion(snapshot['id'])
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index a13aff4..3e9fef2 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -713,8 +713,9 @@
     raise lib_exc.TimeoutException()
 
 
-def wait_for_cloudinit(ssh_client, timeout=60):
+def wait_for_cloudinit(ssh_client):
     """Waits for cloud-init completed"""
+    timeout = CONF.compute.cloudinit_timeout
     start_time = int(time.time())
     while int(time.time()) - start_time < timeout:
         try:
diff --git a/tempest/config.py b/tempest/config.py
index 3ccce49..777ee29 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -349,6 +349,9 @@
                help="Timeout in seconds to wait for an instance to build. "
                     "Other services that do not define build_timeout will "
                     "inherit this value."),
+    cfg.IntOpt('cloudinit_timeout',
+               default=90,
+               help="Timeout in seconds to wait for cloud-init on a VM."),
     cfg.IntOpt('ready_wait',
                default=0,
                help="Additional wait time for clean state, when there is "
@@ -751,11 +754,11 @@
                                   'fixed in Victoria, and this feature works '
                                   'in any deployment architecture now.'),
     cfg.BoolOpt('os_glance_reserved',
-                default=True,
-                help="Should we check that os_glance namespace is reserved",
-                deprecated_for_removal=True,
-                deprecated_reason='os_glance namespace is always reserved '
-                                  'since Wallaby'),
+                default=False,
+                help="Should we check that os_glance namespace is reserved"),
+    cfg.ListOpt('enabled_import_methods',
+                default=[],
+                help="List of enabled image import methods"),
     cfg.BoolOpt('manage_locations',
                 default=False,
                 help=('Is show_multiple_locations enabled in glance. '
@@ -1110,6 +1113,21 @@
                     "If both values are not specified, Tempest avoids tests "
                     "which require a microversion. Valid values are string "
                     "with format 'X.Y' or string 'latest'",),
+    cfg.ListOpt('scheduler_default_filters',
+                default=[],
+                help="The list of enabled scheduler filters.",),
+    cfg.StrOpt('volume_type_luks',
+               default='luks',
+               help="The name of the volume type used by tests to create "
+                    "volumes with LUKS encryption.",),
+    cfg.StrOpt('volume_type_luks_v2',
+               default='luks2',
+               help="The name of the volume type used by tests to create "
+                    "volumes with LUKS v2 encryption.",),
+    cfg.StrOpt('volume_type_cryptsetup',
+               default='cryptsetup',
+               help="The name of the volume type used by tests to create "
+                    "volumes with cryptsetup encryption.",),
 ]
 
 volume_feature_group = cfg.OptGroup(name='volume-feature-enabled',
@@ -1179,10 +1197,11 @@
     cfg.ListOpt('supported_crypto_providers',
                 default=['luks'],
                 help='A list of enabled cryptoproviders for volumes'),
-    cfg.BoolOpt('snapshot_locked_by_volume',
+    cfg.BoolOpt('volume_locked_by_snapshot',
                 default=False,
-                help='Whether snapshot can be deleted, i.e. there is no '
-                     'volume dependent on (created from) it'),
+                help='Whether the volume is locked by its snapshots, i.e. '
+                     'a volume can be removed only when no dependent '
+                     'snapshots exist.'),
 ]
 
 
diff --git a/tempest/lib/services/image/v2/images_client.py b/tempest/lib/services/image/v2/images_client.py
index a6a1623..9223daf 100644
--- a/tempest/lib/services/image/v2/images_client.py
+++ b/tempest/lib/services/image/v2/images_client.py
@@ -55,6 +55,19 @@
         body = json.loads(body)
         return rest_client.ResponseBody(resp, body)
 
+    def import_image(self, image_id, **kwargs):
+        """Import image.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/image/v2/#import-an-image
+        """
+        data = json.dumps(kwargs)
+        url = 'images/%s/import' % image_id
+        resp, body = self.post(url, data)
+        self.expected_success(202, resp.status)
+        return rest_client.ResponseBody(resp, body)
+
     def deactivate_image(self, image_id):
         """Deactivate image.
 
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 20abbb7..24150c7 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -1264,10 +1264,10 @@
                                             server=server,
                                             username=username)
 
-        # Default the directory in which to write the timestamp file to /tmp
+        # Default the directory in which to write the timestamp file to /root
         # and only use the mount_path as the target directory if we mounted
         # dev_name to mount_path.
-        target_dir = CONF.scenario.target_dir
+        target_dir = '/root'
         if dev_name is not None:
             mount_path = os.path.join(mount_path, dev_name)
             ssh_client.make_fs(dev_name, fs=fs)
@@ -1303,10 +1303,10 @@
                                             server=server,
                                             username=username)
 
-        # Default the directory from which to read the timestamp file to /tmp
+        # Default the directory from which to read the timestamp file to /root
         # and only use the mount_path as the target directory if we mounted
         # dev_name to mount_path.
-        target_dir = CONF.scenario.target_dir
+        target_dir = '/root'
         if dev_name is not None:
             mount_path = os.path.join(mount_path, dev_name)
             ssh_client.mkdir(mount_path)
@@ -1404,12 +1404,12 @@
         return self.create_server(**create_kwargs)
 
     def wait_for_cloud_init(
-            self, ip_address, server, private_key, username, timeout=60):
+            self, ip_address, server, private_key, username):
         ssh_client = self.get_remote_client(ip_address,
                                             private_key=private_key,
                                             server=server,
                                             username=username)
-        waiters.wait_for_cloudinit(ssh_client, timeout)
+        waiters.wait_for_cloudinit(ssh_client)
 
     def create_volume_from_image(self, **kwargs):
         """Create volume from image.
diff --git a/tempest/scenario/test_encrypted_cinder_volumes.py b/tempest/scenario/test_encrypted_cinder_volumes.py
index cb5e673..1afb940 100644
--- a/tempest/scenario/test_encrypted_cinder_volumes.py
+++ b/tempest/scenario/test_encrypted_cinder_volumes.py
@@ -60,9 +60,10 @@
     @utils.services('compute', 'volume', 'image')
     def test_encrypted_cinder_volumes_luks(self):
         """LUKs v1 decrypts volume through libvirt."""
-        volume = self.create_encrypted_volume('luks',
-                                              volume_type='luks',
-                                              wait_until=None)
+        volume = self.create_encrypted_volume(
+            'luks',
+            volume_type=CONF.volume.volume_type_luks,
+            wait_until=None)
         server = self.launch_instance()
         waiters.wait_for_volume_resource_status(self.volumes_client,
                                                 volume['id'], 'available')
@@ -79,9 +80,10 @@
     @utils.services('compute', 'volume', 'image')
     def test_encrypted_cinder_volumes_luksv2(self):
         """LUKs v2 decrypts volume through os-brick."""
-        volume = self.create_encrypted_volume('luks2',
-                                              volume_type='luksv2',
-                                              wait_until=None)
+        volume = self.create_encrypted_volume(
+            'luks2',
+            volume_type=CONF.volume.volume_type_luks_v2,
+            wait_until=None)
         server = self.launch_instance()
         waiters.wait_for_volume_resource_status(self.volumes_client,
                                                 volume['id'], 'available')
@@ -100,9 +102,10 @@
         'plain cryptoprovider is not supported.')
     @utils.services('compute', 'volume', 'image')
     def test_encrypted_cinder_volumes_cryptsetup(self):
-        volume = self.create_encrypted_volume('plain',
-                                              volume_type='cryptsetup',
-                                              wait_until=None)
+        volume = self.create_encrypted_volume(
+            'plain',
+            volume_type=CONF.volume.volume_type_cryptsetup,
+            wait_until=None)
         server = self.launch_instance()
         waiters.wait_for_volume_resource_status(self.volumes_client,
                                                 volume['id'], 'available')
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index f4ee98d..64d1d71 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
 
@@ -201,12 +201,12 @@
         # check if the microversion is less than 2.25, because
         # disk_over_commit is deprecated since compute API version 2.25;
         # if no microversion is requested, disk_over_commit is left unset
-        min_v = api_version_request.APIVersionRequest(
-            CONF.compute.min_microversion)
-        api_v = api_version_request.APIVersionRequest('2.25')
-        if not migration and (CONF.compute.min_microversion is None or
-                              min_v < api_v):
-            migration_kwargs['disk_over_commit'] = False
+        if (self.compute_request_microversion and
+                api_version_request.APIVersionRequest(
+                    self.compute_request_microversion) <
+                api_version_request.APIVersionRequest('2.25')):
+            if not migration:
+                migration_kwargs['disk_over_commit'] = False
 
         if dest_host:
             migration_kwargs['host'] = dest_host
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index df63785..2d1da6e 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -340,7 +340,8 @@
                 project_id=server['tenant_id'],
                 network_id=network['id'])['ports']
             if ((p['device_owner'].startswith('network') and
-                 not p['device_owner'] == 'network:distributed') or
+                 p['device_owner'] not in ['network:distributed',
+                                           'network:portprober']) or
                 p['device_owner'].startswith('compute'))
         )
 
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index bab2582..4eb51fc 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -322,10 +322,10 @@
         # Assert that the underlying volume is gone.
         self.volumes_client.wait_for_resource_deletion(volume_origin['id'])
 
-    def _do_test_boot_server_from_encrypted_volume_luks(self, provider):
+    def _do_test_boot_server_from_encrypted_volume_luks(self, provider, vtype):
         # Create an encrypted volume
         volume = self.create_encrypted_volume(provider,
-                                              volume_type=provider)
+                                              volume_type=vtype)
 
         self.volumes_client.set_bootable_volume(volume['id'], bootable=True)
 
@@ -348,7 +348,10 @@
     @utils.services('compute', 'volume')
     def test_boot_server_from_encrypted_volume_luks(self):
         """LUKs v1 decrypts volume through libvirt."""
-        self._do_test_boot_server_from_encrypted_volume_luks('luks')
+        self._do_test_boot_server_from_encrypted_volume_luks(
+            "luks",
+            CONF.volume.volume_type_luks
+        )
 
     @decorators.idempotent_id('5ab6100f-1b31-4dd0-a774-68cfd837ef77')
     @testtools.skipIf(CONF.volume.storage_protocol == 'ceph',
@@ -361,4 +364,7 @@
     @utils.services('compute', 'volume')
     def test_boot_server_from_encrypted_volume_luksv2(self):
         """LUKs v2 decrypts volume through os-brick."""
-        self._do_test_boot_server_from_encrypted_volume_luks('luks2')
+        self._do_test_boot_server_from_encrypted_volume_luks(
+            "luks2",
+            CONF.volume.volume_type_luks_v2
+        )
diff --git a/tempest/serial_tests/api/admin/test_aggregates.py b/tempest/serial_tests/api/admin/test_aggregates.py
new file mode 100644
index 0000000..4555dff
--- /dev/null
+++ b/tempest/serial_tests/api/admin/test_aggregates.py
@@ -0,0 +1,306 @@
+# Copyright 2013 NEC Corporation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import testtools
+
+from tempest.api.compute import base
+from tempest.common import tempest_fixtures as fixtures
+from tempest.common import waiters
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
+from tempest.lib import decorators
+
+CONF = config.CONF
+
+
+@decorators.serial
+class AggregatesAdminTestBase(base.BaseV2ComputeAdminTest):
+    """Tests Aggregates API that require admin privileges"""
+
+    @classmethod
+    def setup_clients(cls):
+        super(AggregatesAdminTestBase, cls).setup_clients()
+        cls.client = cls.os_admin.aggregates_client
+
+    @classmethod
+    def resource_setup(cls):
+        super(AggregatesAdminTestBase, cls).resource_setup()
+        cls.aggregate_name_prefix = 'test_aggregate'
+        cls.az_name_prefix = 'test_az'
+
+        cls.host = None
+        hypers = cls.os_admin.hypervisor_client.list_hypervisors(
+            detail=True)['hypervisors']
+
+        if CONF.compute.hypervisor_type:
+            hypers = [hyper for hyper in hypers
+                      if (hyper['hypervisor_type'] ==
+                          CONF.compute.hypervisor_type)]
+
+        cls.hosts_available = [hyper['service']['host'] for hyper in hypers
+                               if (hyper['state'] == 'up' and
+                                   hyper['status'] == 'enabled')]
+        if cls.hosts_available:
+            cls.host = cls.hosts_available[0]
+        else:
+            msg = "no available compute node found"
+            if CONF.compute.hypervisor_type:
+                msg += " for hypervisor_type %s" % CONF.compute.hypervisor_type
+            raise testtools.TestCase.failureException(msg)
+
+    def _create_test_aggregate(self, **kwargs):
+        if 'name' not in kwargs:
+            kwargs['name'] = data_utils.rand_name(
+                prefix=CONF.resource_name_prefix,
+                name=self.aggregate_name_prefix)
+        aggregate = self.client.create_aggregate(**kwargs)['aggregate']
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.client.delete_aggregate, aggregate['id'])
+        self.assertEqual(kwargs['name'], aggregate['name'])
+
+        return aggregate
+
+
+class AggregatesAdminTestJSON(AggregatesAdminTestBase):
+    """Tests Aggregates API that require admin privileges"""
+
+    @decorators.idempotent_id('0d148aa3-d54c-4317-aa8d-42040a475e20')
+    def test_aggregate_create_delete(self):
+        """Test create/delete aggregate"""
+        aggregate = self._create_test_aggregate()
+        self.assertIsNone(aggregate['availability_zone'])
+
+        self.client.delete_aggregate(aggregate['id'])
+        self.client.wait_for_resource_deletion(aggregate['id'])
+
+    @decorators.idempotent_id('5873a6f8-671a-43ff-8838-7ce430bb6d0b')
+    def test_aggregate_create_delete_with_az(self):
+        """Test create/delete aggregate with availability_zone"""
+        az_name = data_utils.rand_name(
+            prefix=CONF.resource_name_prefix, name=self.az_name_prefix)
+        aggregate = self._create_test_aggregate(availability_zone=az_name)
+        self.assertEqual(az_name, aggregate['availability_zone'])
+
+        self.client.delete_aggregate(aggregate['id'])
+        self.client.wait_for_resource_deletion(aggregate['id'])
+
+    @decorators.idempotent_id('68089c38-04b1-4758-bdf0-cf0daec4defd')
+    def test_aggregate_create_verify_entry_in_list(self):
+        """Test listing aggregate should contain the created aggregate"""
+        aggregate = self._create_test_aggregate()
+        aggregates = self.client.list_aggregates()['aggregates']
+        self.assertIn((aggregate['id'], aggregate['availability_zone']),
+                      map(lambda x: (x['id'], x['availability_zone']),
+                          aggregates))
+
+    @decorators.idempotent_id('36ec92ca-7a73-43bc-b920-7531809e8540')
+    def test_aggregate_create_update_metadata_get_details(self):
+        """Test set/get aggregate metadata"""
+        aggregate = self._create_test_aggregate()
+        body = self.client.show_aggregate(aggregate['id'])['aggregate']
+        self.assertEqual(aggregate['name'], body['name'])
+        self.assertEqual(aggregate['availability_zone'],
+                         body['availability_zone'])
+        self.assertEqual({}, body["metadata"])
+
+        # set the metadata of the aggregate
+        meta = {"key": "value"}
+        body = self.client.set_metadata(aggregate['id'], metadata=meta)
+        self.assertEqual(meta, body['aggregate']["metadata"])
+
+        # verify the metadata has been set
+        body = self.client.show_aggregate(aggregate['id'])['aggregate']
+        self.assertEqual(meta, body["metadata"])
+
+    @decorators.idempotent_id('4d2b2004-40fa-40a1-aab2-66f4dab81beb')
+    def test_aggregate_create_update_with_az(self):
+        """Test create/update aggregate with availability_zone"""
+        aggregate_name = data_utils.rand_name(
+            prefix=CONF.resource_name_prefix, name=self.aggregate_name_prefix)
+        az_name = data_utils.rand_name(
+            prefix=CONF.resource_name_prefix, name=self.az_name_prefix)
+        aggregate = self._create_test_aggregate(
+            name=aggregate_name, availability_zone=az_name)
+
+        self.assertEqual(az_name, aggregate['availability_zone'])
+
+        aggregate_id = aggregate['id']
+        new_aggregate_name = aggregate_name + '_new'
+        new_az_name = az_name + '_new'
+
+        resp_aggregate = self.client.update_aggregate(
+            aggregate_id,
+            name=new_aggregate_name,
+            availability_zone=new_az_name)['aggregate']
+        self.assertEqual(new_aggregate_name, resp_aggregate['name'])
+        self.assertEqual(new_az_name, resp_aggregate['availability_zone'])
+
+        aggregates = self.client.list_aggregates()['aggregates']
+        self.assertIn((aggregate_id, new_aggregate_name, new_az_name),
+                      map(lambda x:
+                          (x['id'], x['name'], x['availability_zone']),
+                          aggregates))
+
+    @decorators.idempotent_id('c8e85064-e79b-4906-9931-c11c24294d02')
+    def test_aggregate_add_remove_host(self):
+        """Test adding host to and removing host from aggregate"""
+        self.useFixture(fixtures.LockFixture('availability_zone'))
+        aggregate_name = data_utils.rand_name(
+            prefix=CONF.resource_name_prefix, name=self.aggregate_name_prefix)
+        aggregate = self._create_test_aggregate(name=aggregate_name)
+
+        body = (self.client.add_host(aggregate['id'], host=self.host)
+                ['aggregate'])
+        self.assertEqual(aggregate_name, body['name'])
+        self.assertEqual(aggregate['availability_zone'],
+                         body['availability_zone'])
+        self.assertIn(self.host, body['hosts'])
+
+        body = (self.client.remove_host(aggregate['id'], host=self.host)
+                ['aggregate'])
+        self.assertEqual(aggregate_name, body['name'])
+        self.assertEqual(aggregate['availability_zone'],
+                         body['availability_zone'])
+        self.assertNotIn(self.host, body['hosts'])
+
+    @decorators.idempotent_id('7f6a1cc5-2446-4cdb-9baa-b6ae0a919b72')
+    def test_aggregate_add_host_list(self):
+        """Test listing aggregate contains the host added to the aggregate
+
+        Add a host to the given aggregate and list.
+        """
+        self.useFixture(fixtures.LockFixture('availability_zone'))
+        aggregate_name = data_utils.rand_name(
+            prefix=CONF.resource_name_prefix, name=self.aggregate_name_prefix)
+        aggregate = self._create_test_aggregate(name=aggregate_name)
+
+        self.client.add_host(aggregate['id'], host=self.host)
+        self.addCleanup(self.client.remove_host, aggregate['id'],
+                        host=self.host)
+
+        aggregates = self.client.list_aggregates()['aggregates']
+        aggs = [agg for agg in aggregates if agg['id'] == aggregate['id']]
+        self.assertEqual(1, len(aggs))
+        agg = aggs[0]
+        self.assertEqual(aggregate_name, agg['name'])
+        self.assertIsNone(agg['availability_zone'])
+        self.assertIn(self.host, agg['hosts'])
+
+    @decorators.idempotent_id('eeef473c-7c52-494d-9f09-2ed7fc8fc036')
+    def test_aggregate_add_host_get_details(self):
+        """Test showing aggregate contains the host added to the aggregate
+
+        Add a host to the given aggregate and get details.
+        """
+        self.useFixture(fixtures.LockFixture('availability_zone'))
+        aggregate_name = data_utils.rand_name(
+            prefix=CONF.resource_name_prefix, name=self.aggregate_name_prefix)
+        aggregate = self._create_test_aggregate(name=aggregate_name)
+
+        self.client.add_host(aggregate['id'], host=self.host)
+        self.addCleanup(self.client.remove_host, aggregate['id'],
+                        host=self.host)
+
+        body = self.client.show_aggregate(aggregate['id'])['aggregate']
+        self.assertEqual(aggregate_name, body['name'])
+        self.assertIsNone(body['availability_zone'])
+        self.assertIn(self.host, body['hosts'])
+
+    @decorators.idempotent_id('96be03c7-570d-409c-90f8-e4db3c646996')
+    def test_aggregate_add_host_create_server_with_az(self):
+        """Test adding a host to the given aggregate and creating a server"""
+        if CONF.production:
+            raise self.skipException("Not allowed to run this test "
+                                     "on production environment")
+        self.useFixture(fixtures.LockFixture('availability_zone'))
+        az_name = data_utils.rand_name(
+            prefix=CONF.resource_name_prefix, name=self.az_name_prefix)
+        aggregate = self._create_test_aggregate(availability_zone=az_name)
+
+        # Find a host that has not been added to other availability zone,
+        # for one host can't be added to different availability zones.
+        aggregates = self.client.list_aggregates()['aggregates']
+        hosts_in_zone = []
+        for agg in aggregates:
+            if agg['availability_zone']:
+                hosts_in_zone.extend(agg['hosts'])
+        hosts = [v for v in self.hosts_available if v not in hosts_in_zone]
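+        # Also exclude hosts that already run VMs: the server created
+        # below must land on the aggregate host for the assertion to hold.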
+        hosts_available = []
+        list_hypervisors = (
+            self.os_admin.hypervisor_client.list_hypervisors(
+                {"with_servers": True}))["hypervisors"]
+        for host in hosts:
+            hypervisor_vms = next((
+                hyper["running_vms"] for hyper in list_hypervisors if
+                hyper['service']['host'] == host), None)
+            if hypervisor_vms == 0:
+                hosts_available.append(host)
+        if not hosts_available:
+            raise self.skipException("All hosts are already in other "
+                                     "availability zones or have running "
+                                     "instances, so can't add "
+                                     "host to aggregate. \nAggregates list: "
+                                     "%s" % aggregates)
+        host = hosts_available[0]
+
+        self.client.add_host(aggregate['id'], host=host)
+        self.addCleanup(self.client.remove_host, aggregate['id'], host=host)
+        server = self.create_test_server(availability_zone=az_name,
+                                         wait_until='ACTIVE')
+        server_host = self.get_host_for_server(server['id'])
+        self.assertEqual(host, server_host)
+        self.servers_client.delete_server(server['id'])
+        # NOTE(gmann): We need to wait for the server to delete before
+        # addCleanup remove the host from aggregate.
+        waiters.wait_for_server_termination(self.servers_client, server['id'])
+
+
+class AggregatesAdminTestV241(AggregatesAdminTestBase):
+    """Tests Aggregates API that require admin privileges
+
+    Tests Aggregates API that require admin privileges with compute
+    microversion greater than 2.40.
+    """
+    min_microversion = '2.41'
+
+    # NOTE(gmann): This test tests the Aggregate APIs response schema
+    # for 2.41 microversion. No specific assert or behaviour verification
+    # is needed.
+
+    @decorators.idempotent_id('fdf24d9e-8afa-4700-b6aa-9c498351504f')
+    def test_create_update_show_aggregate_add_remove_host(self):
+        """Test response schema of aggregates API
+
+        Test response schema of aggregates API(create/update/show/add host/
+        remove host) with compute microversion greater than 2.40.
+        """
+        # Update and add a host to the given aggregate and get details.
+        self.useFixture(fixtures.LockFixture('availability_zone'))
+        # Checking create aggregate API response schema
+        aggregate = self._create_test_aggregate()
+
+        new_aggregate_name = data_utils.rand_name(
+            prefix=CONF.resource_name_prefix, name=self.aggregate_name_prefix)
+        # Checking update aggregate API response schema
+        self.client.update_aggregate(aggregate['id'], name=new_aggregate_name)
+        # Checking show aggregate API response schema
+        self.client.show_aggregate(aggregate['id'])['aggregate']
+        # Checking add host to aggregate API response schema
+        self.client.add_host(aggregate['id'], host=self.host)
+        # Checking remove host from aggregate API response schema
+        self.client.remove_host(aggregate['id'], host=self.host)
diff --git a/tempest/serial_tests/scenario/test_aggregates_basic_ops.py b/tempest/serial_tests/scenario/test_aggregates_basic_ops.py
index cc45297..ba10322 100644
--- a/tempest/serial_tests/scenario/test_aggregates_basic_ops.py
+++ b/tempest/serial_tests/scenario/test_aggregates_basic_ops.py
@@ -61,12 +61,17 @@
             binary='nova-compute')['services']
         self.assertNotEmpty(svc_list)
         hosts_available = []
+        list_hypervisors = (
+            self.os_admin.hypervisor_client.list_hypervisors(
+                {"with_servers": True}))["hypervisors"]
         for host in svc_list:
             if (host['state'] == 'up' and host['status'] == 'enabled'):
-                hypervisor_servers = (
-                    self.os_admin.hypervisor_client.list_servers_on_hypervisor(
-                        host["host"])["hypervisors"][0].get("servers", None))
-                if not hypervisor_servers:
+                hypervisor_vms = next((
+                    hyper["running_vms"] for hyper in list_hypervisors if
+                    hyper['service']['host'] == host["host"]), None)
+                if hypervisor_vms == 0:
                     hosts_available.append(host["host"])
         aggregates = self.aggregates_client.list_aggregates()['aggregates']
         hosts_in_zone = []