Merge "Add fields in hypervisor schema for 2.33 and 2.53"
diff --git a/doc/source/microversion_testing.rst b/doc/source/microversion_testing.rst
index 2529c9e..ecf2930 100644
--- a/doc/source/microversion_testing.rst
+++ b/doc/source/microversion_testing.rst
@@ -432,7 +432,11 @@
 
   * `2.79`_
 
-  .. _2.79: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#maximum-in-train 
+  .. _2.79: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#maximum-in-train
+
+  * `2.86`_
+
+  .. _2.86: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id79
 
 * Volume
 
diff --git a/releasenotes/notes/set-default-value-of-concurrency-to-2-d916d5c31e3725d5.yaml b/releasenotes/notes/set-default-value-of-concurrency-to-2-d916d5c31e3725d5.yaml
new file mode 100644
index 0000000..0d964a9
--- /dev/null
+++ b/releasenotes/notes/set-default-value-of-concurrency-to-2-d916d5c31e3725d5.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+  - |
+    [`bug 1948935 <https://bugs.launchpad.net/tempest/+bug/1948935>`_]
+    The default value of the account-generator ``--concurrency`` parameter is
+    now set to 2 instead of 1.
diff --git a/tempest/api/compute/admin/test_flavors_extra_specs.py b/tempest/api/compute/admin/test_flavors_extra_specs.py
index 4c531b3..10018fe 100644
--- a/tempest/api/compute/admin/test_flavors_extra_specs.py
+++ b/tempest/api/compute/admin/test_flavors_extra_specs.py
@@ -127,3 +127,34 @@
             self.flavor['id'], 'hw:numa_nodes')
         self.assertEqual(body['hw:numa_nodes'], '1')
         self.assertNotIn('hw:cpu_policy', body)
+
+
+class FlavorMetadataValidation(base.BaseV2ComputeAdminTest):
+
+    min_microversion = '2.86'
+
+    @classmethod
+    def resource_setup(cls):
+        super(FlavorMetadataValidation, cls).resource_setup()
+        cls.flavor_name_prefix = 'test_flavor_validate_metadata_'
+        cls.ram = 512
+        cls.vcpus = 1
+        cls.disk = 10
+        cls.ephemeral = 10
+        cls.swap = 1024
+        cls.rxtx = 2
+
+    @decorators.idempotent_id('d3114f03-b0f2-4dc7-be11-70c0abc178b3')
+    def test_flavor_update_with_custom_namespace(self):
+        """Test flavor creation with a custom namespace, key and value"""
+        flavor_name = data_utils.rand_name(self.flavor_name_prefix)
+        flavor_id = self.create_flavor(ram=self.ram,
+                                       vcpus=self.vcpus,
+                                       disk=self.disk,
+                                       name=flavor_name)['id']
+        specs = {'hw:cpu_policy': 'shared', 'foo:bar': 'baz'}
+        body = self.admin_flavors_client.set_flavor_extra_spec(
+            flavor_id,
+            **specs)['extra_specs']
+        self.assertEqual(body['foo:bar'], 'baz')
+        self.assertEqual(body['hw:cpu_policy'], 'shared')
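
A hedged aside on why this class is gated on microversion 2.86: from that
version on, Nova validates flavor extra specs in recognized namespaces, while
specs in custom namespaces such as ``foo:bar`` pass through untouched, which
is what the test above asserts. A complementary negative sketch, not part of
this patch (the method name is hypothetical, ``lib_exc`` is assumed to be
``tempest.lib.exceptions``, and the 400 response is the expected behaviour of
the >= 2.86 validation):

    # Sketch only: would sit in FlavorMetadataValidation next to the test
    # above and relies on Nova's 2.86+ extra spec value validation.
    def test_flavor_update_with_invalid_known_key(self):
        flavor_name = data_utils.rand_name(self.flavor_name_prefix)
        flavor_id = self.create_flavor(ram=self.ram,
                                       vcpus=self.vcpus,
                                       disk=self.disk,
                                       name=flavor_name)['id']
        self.assertRaises(lib_exc.BadRequest,
                          self.admin_flavors_client.set_flavor_extra_spec,
                          flavor_id,
                          **{'hw:cpu_policy': 'not-a-real-policy'})
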
diff --git a/tempest/api/volume/admin/test_group_snapshots.py b/tempest/api/volume/admin/test_group_snapshots.py
index ddfc78a..73903cf 100644
--- a/tempest/api/volume/admin/test_group_snapshots.py
+++ b/tempest/api/volume/admin/test_group_snapshots.py
@@ -256,7 +256,6 @@
     volume_max_microversion = 'latest'
 
     @decorators.idempotent_id('3b42c9b9-c984-4444-816e-ca2e1ed30b40')
-    @decorators.skip_because(bug='1770179')
     def test_reset_group_snapshot_status(self):
         """Test resetting group snapshot status to creating/available/error"""
         # Create volume type
diff --git a/tempest/clients.py b/tempest/clients.py
index 3d799c5..327f0da 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -74,6 +74,8 @@
         self.qos_client = self.network.QosClient()
         self.qos_min_bw_client = self.network.QosMinimumBandwidthRulesClient()
         self.qos_limit_bw_client = self.network.QosLimitBandwidthRulesClient()
+        self.qos_min_pps_client = (
+            self.network.QosMinimumPacketRateRulesClient())
         self.segments_client = self.network.SegmentsClient()
         self.trunks_client = self.network.TrunksClient()
         self.log_resource_client = self.network.LogResourceClient()
diff --git a/tempest/cmd/account_generator.py b/tempest/cmd/account_generator.py
index 917262e..ad0b547 100755
--- a/tempest/cmd/account_generator.py
+++ b/tempest/cmd/account_generator.py
@@ -81,11 +81,11 @@
   will have the prefix with the given TAG in its name. Using tag is recommended
   for the further using, cleaning resources.
 
-* ``-r, --concurrency CONCURRENCY`` (Optional) Concurrency count
-  (default: 1). The number of accounts required can be estimated as
-  CONCURRENCY x 2. Each user provided in *accounts.yaml* file will be in
-  a different tenant. This is required to provide isolation between test for
-  running in parallel.
+* ``-r, --concurrency CONCURRENCY`` (Optional) Concurrency count (default: 2).
+  The number of accounts generated will be the same as CONCURRENCY. The
+  higher the number, the more tests can run in parallel. To run tests
+  sequentially, use 1 as the concurrency value (but beware that tests which
+  need more than one set of credentials will fail).
 
 * ``--with-admin`` (Optional) Creates admin for each concurrent group
   (default: False).
@@ -236,7 +236,7 @@
                         dest='tag',
                         help='Resources tag')
     parser.add_argument('-r', '--concurrency',
-                        default=1,
+                        default=2,
                         type=positive_int,
                         required=False,
                         dest='concurrency',
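
A usage note on the new default: an invocation along the lines of
``tempest account-generator -r 2 -t mytag accounts.yaml`` (tag and file name
illustrative, the exact CLI wiring depends on the installation) now produces
two sets of credentials instead of one, so two test workers can each use
their own account; pass ``-r 1`` explicitly to keep the previous
single-account behaviour.
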
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index a062f6f..2443a67 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -197,6 +197,7 @@
     body = clients.servers_client.create_server(name=name, imageRef=image_id,
                                                 flavorRef=flavor,
                                                 **kwargs)
+    request_id = body.response['x-openstack-request-id']
 
     # handle the case of multiple servers
     if multiple_create_request:
@@ -234,7 +235,8 @@
         for server in servers:
             try:
                 waiters.wait_for_server_status(
-                    clients.servers_client, server['id'], wait_until)
+                    clients.servers_client, server['id'], wait_until,
+                    request_id=request_id)
 
                 # Multiple validatable servers are not supported for now. Their
                 # creation will fail with the condition above.
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 1b69349..21d0109 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -32,7 +32,8 @@
 
 # NOTE(afazekas): This function needs to know a token and a subject.
 def wait_for_server_status(client, server_id, status, ready_wait=True,
-                           extra_timeout=0, raise_on_error=True):
+                           extra_timeout=0, raise_on_error=True,
+                           request_id=None):
     """Waits for a server to reach a given status."""
 
     # NOTE(afazekas): UNKNOWN status possible on ERROR
@@ -71,11 +72,12 @@
                      '/'.join((server_status, str(task_state))),
                      time.time() - start_time)
         if (server_status == 'ERROR') and raise_on_error:
+            details = ''
             if 'fault' in body:
-                raise exceptions.BuildErrorException(body['fault'],
-                                                     server_id=server_id)
-            else:
-                raise exceptions.BuildErrorException(server_id=server_id)
+                details += 'Fault: %s.' % body['fault']
+            if request_id:
+                details += ' Server boot request ID: %s.' % request_id
+            raise exceptions.BuildErrorException(details, server_id=server_id)
 
         timed_out = int(time.time()) - start_time >= timeout
 
@@ -88,6 +90,8 @@
                         'status': status,
                         'expected_task_state': expected_task_state,
                         'timeout': timeout})
+            if request_id:
+                message += ' Server boot request ID: %s.' % request_id
             message += ' Current status: %s.' % server_status
             message += ' Current task state: %s.' % task_state
             caller = test_utils.find_test_caller()
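
The new ``request_id`` argument exists to make a failed or timed-out boot
traceable back to the original ``POST /servers`` call. A minimal sketch of
the plumbing, reusing the names from the tempest/common/compute.py hunk
above:

    # Capture the request id of the boot request from the response headers
    # and hand it to the waiter so it shows up in BuildErrorException and
    # timeout messages.
    body = clients.servers_client.create_server(name=name, imageRef=image_id,
                                                flavorRef=flavor)
    request_id = body.response['x-openstack-request-id']
    waiters.wait_for_server_status(clients.servers_client,
                                   body['server']['id'], 'ACTIVE',
                                   request_id=request_id)
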
diff --git a/tempest/config.py b/tempest/config.py
index 662a249..a840a97 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -875,7 +875,10 @@
                     'bandwidth allocation.'),
     cfg.StrOpt('provider_net_base_segmentation_id', default=3000,
                help='Base segmentation ID to create provider networks. '
-                    'This value will be increased in case of conflict.')
+                    'This value will be increased in case of conflict.'),
+    cfg.BoolOpt('qos_min_bw_and_pps', default=False,
+                help='Does the test environment have minimum bandwidth and '
+                     'packet rate inventories configured?'),
 ]
 
 dashboard_group = cfg.OptGroup(name="dashboard",
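
The new option gates the QoSBandwidthAndPacketRateTests scenario class added
further down: it is only meaningful on deployments that expose both bandwidth
(NET_BW_*) and packet rate (NET_PACKET_RATE_*) inventories in Placement. Such
deployments can opt in with ``qos_min_bw_and_pps = True`` in the
``[network-feature-enabled]`` section of tempest.conf; with the False default
the class skips itself.
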
diff --git a/tempest/lib/api_schema/response/volume/volumes.py b/tempest/lib/api_schema/response/volume/volumes.py
index ffcf488..4f44526 100644
--- a/tempest/lib/api_schema/response/volume/volumes.py
+++ b/tempest/lib/api_schema/response/volume/volumes.py
@@ -21,7 +21,7 @@
     'items': {
         'type': 'object',
         'properties': {
-            'server_id': {'type': 'string', 'format': 'uuid'},
+            'server_id': {'type': ['string', 'null'], 'format': 'uuid'},
             'attachment_id': {'type': 'string', 'format': 'uuid'},
             'attached_at': parameter_types.date_time_or_null,
             'host_name': {'type': ['string', 'null']},
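
The widened ``server_id`` type accounts for attachment records whose server
id is legitimately null (an attachment that is not tied to a Nova instance).
A self-contained sketch with the ``jsonschema`` library, separate from
Tempest's own response validator, showing what the relaxed schema accepts:

    import jsonschema

    attachment = {
        'type': 'object',
        'properties': {
            'server_id': {'type': ['string', 'null'], 'format': 'uuid'},
        },
    }

    # Passes with ['string', 'null']; the previous {'type': 'string'}
    # raised ValidationError for a null server_id.
    jsonschema.validate({'server_id': None}, attachment)
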
diff --git a/tempest/lib/cmd/check_uuid.py b/tempest/lib/cmd/check_uuid.py
index 0ae11ca..466222d 100755
--- a/tempest/lib/cmd/check_uuid.py
+++ b/tempest/lib/cmd/check_uuid.py
@@ -38,7 +38,7 @@
 
 class SourcePatcher(object):
 
-    """"Lazy patcher for python source files"""
+    """Lazy patcher for python source files"""
 
     def __init__(self):
         self.source_files = None
@@ -431,14 +431,21 @@
                         help='Package with tests')
     parser.add_argument('--fix', action='store_true', dest='fix_tests',
                         help='Attempt to fix tests without UUIDs')
+    parser.add_argument('--libpath', action='store', dest='libpath',
+                        default=".", type=str,
+                        help='Path to package')
+
     args = parser.parse_args()
-    sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
+    sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
+    sys.path.insert(0, args.libpath)
     pkg = importlib.import_module(args.package)
+
     checker = TestChecker(pkg)
     errors = False
     tests = checker.get_tests()
     untagged = checker.find_untagged(tests)
     errors = checker.report_collisions(tests) or errors
+
     if args.fix_tests and untagged:
         checker.fix_tests(untagged)
     else:
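
With ``--libpath`` the checker can be pointed at a test package that is not
installed in the current environment: the given path is prepended to
``sys.path`` before ``importlib.import_module(args.package)`` runs, so an
invocation carrying, say, ``--package my_plugin.tests --libpath
/opt/stack/my-plugin`` (both values hypothetical) resolves the package from
the plugin's source tree instead of failing with an ImportError.
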
diff --git a/tempest/lib/common/rest_client.py b/tempest/lib/common/rest_client.py
index 3f735f5..ef14dfc 100644
--- a/tempest/lib/common/rest_client.py
+++ b/tempest/lib/common/rest_client.py
@@ -907,8 +907,8 @@
             if int(time.time()) - start_time >= self.build_timeout:
                 message = ('Failed to delete %(resource_type)s %(id)s within '
                            'the required time (%(timeout)s s). Timer started '
-                           'at %(start_time)s. Timer ended at %(end_time)s'
-                           'waited for %(wait_time)s' %
+                           'at %(start_time)s. Timer ended at %(end_time)s. '
+                           'Waited for %(wait_time)s s.' %
                            {'resource_type': self.resource_type, 'id': id,
                             'timeout': self.build_timeout,
                             'start_time': start_time,
diff --git a/tempest/lib/services/network/__init__.py b/tempest/lib/services/network/__init__.py
index 98d7482..faf35d1 100644
--- a/tempest/lib/services/network/__init__.py
+++ b/tempest/lib/services/network/__init__.py
@@ -31,6 +31,8 @@
     QosLimitBandwidthRulesClient
 from tempest.lib.services.network.qos_minimum_bandwidth_rules_client import \
     QosMinimumBandwidthRulesClient
+from tempest.lib.services.network.qos_minimum_packet_rate_rules_client import \
+    QosMinimumPacketRateRulesClient
 from tempest.lib.services.network.quotas_client import QuotasClient
 from tempest.lib.services.network.routers_client import RoutersClient
 from tempest.lib.services.network.security_group_rules_client import \
@@ -54,4 +56,4 @@
            'SecurityGroupRulesClient', 'SecurityGroupsClient',
            'SegmentsClient', 'ServiceProvidersClient', 'SubnetpoolsClient',
            'SubnetsClient', 'TagsClient', 'TrunksClient', 'LogResourceClient',
-           'LoggableResourceClient']
+           'LoggableResourceClient', 'QosMinimumPacketRateRulesClient']
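
Together with the ``qos_min_pps_client`` attribute added to the Manager in
tempest/clients.py above, this export makes the Neutron minimum packet rate
API reachable from tests. A minimal usage sketch, following the calls made by
the scenario class added below (``policy`` is assumed to be an existing QoS
policy):

    pps_client = self.os_admin.qos_min_pps_client

    # Create an 'any'-direction minimum packet rate rule on the policy.
    rule = pps_client.create_minimum_packet_rate_rule(
        policy['id'], min_kpps=1000, direction='any'
    )['minimum_packet_rate_rule']

    # Remove the rule again.
    pps_client.delete_minimum_packet_rate_rule(policy['id'], rule['id'])
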
diff --git a/tempest/scenario/test_network_qos_placement.py b/tempest/scenario/test_network_qos_placement.py
index db4751b..a8e9174 100644
--- a/tempest/scenario/test_network_qos_placement.py
+++ b/tempest/scenario/test_network_qos_placement.py
@@ -49,6 +49,7 @@
     compute_max_microversion = 'latest'
 
     INGRESS_DIRECTION = 'ingress'
+    ANY_DIRECTION = 'any'
     BW_RESOURCE_CLASS = "NET_BW_IGR_KILOBIT_PER_SEC"
 
     # For any realistic inventory value (that is inventory != MAX_INT) an
@@ -508,3 +509,500 @@
             **{'description': 'foo'})
         self._assert_allocation_is_as_expected(server1['id'], [port['id']],
                                                self.BANDWIDTH_1)
+
+
+class QoSBandwidthAndPacketRateTests(NetworkQoSPlacementTestBase):
+
+    PPS_RESOURCE_CLASS = "NET_PACKET_RATE_KILOPACKET_PER_SEC"
+
+    @classmethod
+    def skip_checks(cls):
+        super().skip_checks()
+        if not CONF.network_feature_enabled.qos_min_bw_and_pps:
+            msg = (
+                "Skipped as no resource inventories are configured for QoS "
+                "minimum bandwidth and packet rate testing.")
+            raise cls.skipException(msg)
+
+    @classmethod
+    def setup_clients(cls):
+        super().setup_clients()
+        cls.qos_min_pps_client = cls.os_admin.qos_min_pps_client
+
+    def setUp(self):
+        super().setUp()
+        self.network = self._create_network()
+
+    def _create_qos_policy_with_bw_and_pps_rules(self, min_kbps, min_kpps):
+        policy = self.qos_client.create_qos_policy(
+            name=data_utils.rand_name(),
+            shared=True
+        )['policy']
+        self.addCleanup(
+            test_utils.call_and_ignore_notfound_exc,
+            self.qos_client.delete_qos_policy,
+            policy['id']
+        )
+
+        if min_kbps > 0:
+            bw_rule = self.qos_min_bw_client.create_minimum_bandwidth_rule(
+                policy['id'],
+                min_kbps=min_kbps,
+                direction=self.INGRESS_DIRECTION
+            )['minimum_bandwidth_rule']
+            self.addCleanup(
+                test_utils.call_and_ignore_notfound_exc,
+                self.qos_min_bw_client.delete_minimum_bandwidth_rule,
+                policy['id'],
+                bw_rule['id']
+            )
+
+        if min_kpps > 0:
+            pps_rule = self.qos_min_pps_client.create_minimum_packet_rate_rule(
+                policy['id'],
+                min_kpps=min_kpps,
+                direction=self.ANY_DIRECTION
+            )['minimum_packet_rate_rule']
+            self.addCleanup(
+                test_utils.call_and_ignore_notfound_exc,
+                self.qos_min_pps_client.delete_minimum_packet_rate_rule,
+                policy['id'],
+                pps_rule['id']
+            )
+
+        return policy
+
+    def _create_network(self):
+        physnet_name = CONF.network_feature_enabled.qos_placement_physnet
+        base_segm = (
+            CONF.network_feature_enabled.provider_net_base_segmentation_id)
+
+        # setup_network_subnet_with_router will add the necessary cleanup calls
+        network, _, _ = self.setup_network_subnet_with_router(
+            networks_client=self.networks_client,
+            routers_client=self.routers_client,
+            subnets_client=self.subnets_client,
+            shared=True,
+            **{
+                'provider:network_type': 'vlan',
+                'provider:physical_network': physnet_name,
+                # +1 to be different from the segmentation_id used in
+                # MinBwAllocationPlacementTest
+                'provider:segmentation_id': int(base_segm) + 1,
+            }
+        )
+        return network
+
+    def _create_port_with_qos_policy(self, policy):
+        port = self.ports_client.create_port(
+            name=data_utils.rand_name(self.__class__.__name__),
+            network_id=self.network['id'],
+            qos_policy_id=policy['id'] if policy else None,
+        )['port']
+        self.addCleanup(
+            test_utils.call_and_ignore_notfound_exc,
+            self.ports_client.delete_port, port['id']
+        )
+        return port
+
+    def assert_allocations(
+            self, server, port, expected_min_kbps, expected_min_kpps
+    ):
+        allocations = self.placement_client.list_allocations(
+            server['id'])['allocations']
+
+        # one allocation for the flavor related resources on the compute RP
+        expected_allocation = 1
+        # one allocation due to bw rule
+        if expected_min_kbps > 0:
+            expected_allocation += 1
+        # one allocation due to pps rule
+        if expected_min_kpps > 0:
+            expected_allocation += 1
+        self.assertEqual(expected_allocation, len(allocations), allocations)
+
+        expected_rp_uuids_in_binding_allocation = set()
+
+        if expected_min_kbps > 0:
+            bw_rp_allocs = {
+                rp: alloc['resources'][self.BW_RESOURCE_CLASS]
+                for rp, alloc in allocations.items()
+                if self.BW_RESOURCE_CLASS in alloc['resources']
+            }
+            self.assertEqual(1, len(bw_rp_allocs))
+            bw_rp, bw_alloc = list(bw_rp_allocs.items())[0]
+            self.assertEqual(expected_min_kbps, bw_alloc)
+            expected_rp_uuids_in_binding_allocation.add(bw_rp)
+
+        if expected_min_kpps > 0:
+            pps_rp_allocs = {
+                rp: alloc['resources'][self.PPS_RESOURCE_CLASS]
+                for rp, alloc in allocations.items()
+                if self.PPS_RESOURCE_CLASS in alloc['resources']
+            }
+            self.assertEqual(1, len(pps_rp_allocs))
+            pps_rp, pps_alloc = list(pps_rp_allocs.items())[0]
+            self.assertEqual(expected_min_kpps, pps_alloc)
+            expected_rp_uuids_in_binding_allocation.add(pps_rp)
+
+        # Check that port['binding:profile']['allocation'] points to the
+        # resource provider(s) the allocations were made from
+        port = self.os_admin.ports_client.show_port(port['id'])
+        port_binding_alloc = port[
+            'port']['binding:profile'].get('allocation', {})
+        self.assertEqual(
+            expected_rp_uuids_in_binding_allocation,
+            set(port_binding_alloc.values())
+        )
+
+    def assert_no_allocation(self, server, port):
+        # check that there are no allocations
+        allocations = self.placement_client.list_allocations(
+            server['id'])['allocations']
+        self.assertEqual(0, len(allocations))
+
+        # check that binding_profile of the port is empty
+        port = self.os_admin.ports_client.show_port(port['id'])
+        self.assertEqual(0, len(port['port']['binding:profile']))
+
+    @decorators.idempotent_id('93d1a88d-235e-4b7b-b44d-2a17dcf4e213')
+    @utils.services('compute', 'network')
+    def test_server_create_delete(self):
+        min_kbps = 1000
+        min_kpps = 100
+        policy = self._create_qos_policy_with_bw_and_pps_rules(
+            min_kbps, min_kpps)
+        port = self._create_port_with_qos_policy(policy)
+
+        server = self.create_server(
+            networks=[{'port': port['id']}],
+            wait_until='ACTIVE'
+        )
+
+        self.assert_allocations(server, port, min_kbps, min_kpps)
+
+        self.servers_client.delete_server(server['id'])
+        waiters.wait_for_server_termination(self.servers_client, server['id'])
+
+        self.assert_no_allocation(server, port)
+
+    def _test_create_server_negative(self, min_kbps=1000, min_kpps=100):
+        policy = self._create_qos_policy_with_bw_and_pps_rules(
+            min_kbps, min_kpps)
+        port = self._create_port_with_qos_policy(policy)
+
+        server = self.create_server(
+            networks=[{'port': port['id']}],
+            wait_until=None)
+        waiters.wait_for_server_status(
+            client=self.servers_client, server_id=server['id'],
+            status='ERROR', ready_wait=False, raise_on_error=False)
+
+        # check that the creation failed with No valid host
+        server = self.servers_client.show_server(server['id'])['server']
+        self.assertIn('fault', server)
+        self.assertIn('No valid host', server['fault']['message'])
+
+        self.assert_no_allocation(server, port)
+
+    @decorators.idempotent_id('915dd2ce-4890-40c8-9db6-f3e04080c6c1')
+    @utils.services('compute', 'network')
+    def test_server_create_no_valid_host_due_to_bandwidth(self):
+        self._test_create_server_negative(min_kbps=self.PLACEMENT_MAX_INT)
+
+    @decorators.idempotent_id('2d4a755e-10b9-4ac0-bef2-3f89de1f150b')
+    @utils.services('compute', 'network')
+    def test_server_create_no_valid_host_due_to_packet_rate(self):
+        self._test_create_server_negative(min_kpps=self.PLACEMENT_MAX_INT)
+
+    @decorators.idempotent_id('69d93e4f-0dfc-4d17-8d84-cc5c3c842cd5')
+    @testtools.skipUnless(
+        CONF.compute_feature_enabled.resize, 'Resize not available.')
+    @utils.services('compute', 'network')
+    def test_server_resize(self):
+        min_kbps = 1000
+        min_kpps = 100
+        policy = self._create_qos_policy_with_bw_and_pps_rules(
+            min_kbps, min_kpps)
+        port = self._create_port_with_qos_policy(policy)
+
+        server = self.create_server(
+            networks=[{'port': port['id']}],
+            wait_until='ACTIVE'
+        )
+
+        self.assert_allocations(server, port, min_kbps, min_kpps)
+
+        new_flavor = self._create_flavor_to_resize_to()
+
+        self.servers_client.resize_server(
+            server_id=server['id'], flavor_ref=new_flavor['id']
+        )
+        waiters.wait_for_server_status(
+            client=self.servers_client, server_id=server['id'],
+            status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False)
+
+        self.assert_allocations(server, port, min_kbps, min_kpps)
+
+        self.servers_client.confirm_resize_server(server_id=server['id'])
+        waiters.wait_for_server_status(
+            client=self.servers_client, server_id=server['id'],
+            status='ACTIVE', ready_wait=False, raise_on_error=True)
+
+        self.assert_allocations(server, port, min_kbps, min_kpps)
+
+    @decorators.idempotent_id('d01d4aee-ca06-4e4e-add7-8a47fe0daf96')
+    @testtools.skipUnless(
+        CONF.compute_feature_enabled.resize, 'Resize not available.')
+    @utils.services('compute', 'network')
+    def test_server_resize_revert(self):
+        min_kbps = 1000
+        min_kpps = 100
+        policy = self._create_qos_policy_with_bw_and_pps_rules(
+            min_kbps, min_kpps)
+        port = self._create_port_with_qos_policy(policy)
+
+        server = self.create_server(
+            networks=[{'port': port['id']}],
+            wait_until='ACTIVE'
+        )
+
+        self.assert_allocations(server, port, min_kbps, min_kpps)
+
+        new_flavor = self._create_flavor_to_resize_to()
+
+        self.servers_client.resize_server(
+            server_id=server['id'], flavor_ref=new_flavor['id']
+        )
+        waiters.wait_for_server_status(
+            client=self.servers_client, server_id=server['id'],
+            status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False)
+
+        self.assert_allocations(server, port, min_kbps, min_kpps)
+
+        self.servers_client.revert_resize_server(server_id=server['id'])
+        waiters.wait_for_server_status(
+            client=self.servers_client, server_id=server['id'],
+            status='ACTIVE', ready_wait=False, raise_on_error=True)
+
+        self.assert_allocations(server, port, min_kbps, min_kpps)
+
+    @decorators.idempotent_id('bdd0b31c-c8b0-4b7b-b80a-545a46b32abe')
+    @testtools.skipUnless(
+        CONF.compute_feature_enabled.cold_migration,
+        'Cold migration is not available.')
+    @testtools.skipUnless(
+        CONF.compute.min_compute_nodes > 1,
+        'Less than 2 compute nodes, skipping multinode tests.')
+    @utils.services('compute', 'network')
+    def test_server_migrate(self):
+        min_kbps = 1000
+        min_kpps = 100
+        policy = self._create_qos_policy_with_bw_and_pps_rules(
+            min_kbps, min_kpps)
+        port = self._create_port_with_qos_policy(policy)
+
+        server = self.create_server(
+            networks=[{'port': port['id']}],
+            wait_until='ACTIVE'
+        )
+
+        self.assert_allocations(server, port, min_kbps, min_kpps)
+
+        self.os_adm.servers_client.migrate_server(server_id=server['id'])
+        waiters.wait_for_server_status(
+            client=self.servers_client, server_id=server['id'],
+            status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False)
+
+        self.assert_allocations(server, port, min_kbps, min_kpps)
+
+        self.os_adm.servers_client.confirm_resize_server(
+            server_id=server['id'])
+        waiters.wait_for_server_status(
+            client=self.servers_client, server_id=server['id'],
+            status='ACTIVE', ready_wait=False, raise_on_error=True)
+
+        self.assert_allocations(server, port, min_kbps, min_kpps)
+
+    @decorators.idempotent_id('fdb260e3-caa5-482d-ac7c-8c22adf3d750')
+    @utils.services('compute', 'network')
+    def test_qos_policy_update_on_bound_port(self):
+        min_kbps = 1000
+        min_kpps = 100
+        policy = self._create_qos_policy_with_bw_and_pps_rules(
+            min_kbps, min_kpps)
+
+        min_kbps2 = 2000
+        min_kpps2 = 50
+        policy2 = self._create_qos_policy_with_bw_and_pps_rules(
+            min_kbps2, min_kpps2)
+
+        port = self._create_port_with_qos_policy(policy)
+
+        server = self.create_server(
+            networks=[{'port': port['id']}],
+            wait_until='ACTIVE'
+        )
+
+        self.assert_allocations(server, port, min_kbps, min_kpps)
+
+        self.ports_client.update_port(
+            port['id'],
+            qos_policy_id=policy2['id'])
+
+        self.assert_allocations(server, port, min_kbps2, min_kpps2)
+
+    @decorators.idempotent_id('e6a20125-a02e-49f5-bcf6-894305ee3715')
+    @utils.services('compute', 'network')
+    def test_qos_policy_update_on_bound_port_from_null_policy(self):
+        min_kbps = 1000
+        min_kpps = 100
+        policy = self._create_qos_policy_with_bw_and_pps_rules(
+            min_kbps, min_kpps)
+
+        port = self._create_port_with_qos_policy(policy=None)
+
+        server = self.create_server(
+            networks=[{'port': port['id']}],
+            wait_until='ACTIVE'
+        )
+
+        self.assert_allocations(server, port, 0, 0)
+
+        self.ports_client.update_port(
+            port['id'],
+            qos_policy_id=policy['id'])
+
+        # NOTE(gibi): This is unintuitive but it is the expected behavior.
+        # If there was no policy attached to the port when the server was
+        # created then neutron still allows adding a policy to the port later
+        # as this operation was supported before placement enforcement was
+        # added for the qos minimum bandwidth rule. However, neutron cannot
+        # create the placement resource allocation for this port.
+        self.assert_allocations(server, port, 0, 0)
+
+    @decorators.idempotent_id('f5864761-966c-4e49-b430-ac0044b7d658')
+    @utils.services('compute', 'network')
+    def test_qos_policy_update_on_bound_port_additional_rule(self):
+        min_kbps = 1000
+        policy = self._create_qos_policy_with_bw_and_pps_rules(
+            min_kbps, 0)
+
+        min_kbps2 = 2000
+        min_kpps2 = 50
+        policy2 = self._create_qos_policy_with_bw_and_pps_rules(
+            min_kbps2, min_kpps2)
+
+        port = self._create_port_with_qos_policy(policy=policy)
+
+        server = self.create_server(
+            networks=[{'port': port['id']}],
+            wait_until='ACTIVE'
+        )
+
+        self.assert_allocations(server, port, min_kbps, 0)
+
+        self.ports_client.update_port(
+            port['id'],
+            qos_policy_id=policy2['id'])
+
+        # FIXME(gibi): Agree in the spec: do we ignore the pps request or
+        # reject the update? The current implementation seems to ignore the
+        # additional pps rule.
+        self.assert_allocations(server, port, min_kbps2, 0)
+
+    @decorators.idempotent_id('fbbb9c81-ed21-48c3-bdba-ce2361e93aad')
+    @utils.services('compute', 'network')
+    def test_qos_policy_update_on_bound_port_to_null_policy(self):
+        min_kbps = 1000
+        min_kpps = 100
+        policy = self._create_qos_policy_with_bw_and_pps_rules(
+            min_kbps, min_kpps)
+
+        port = self._create_port_with_qos_policy(policy=policy)
+
+        server = self.create_server(
+            networks=[{'port': port['id']}],
+            wait_until='ACTIVE'
+        )
+
+        self.assert_allocations(server, port, min_kbps, min_kpps)
+
+        self.ports_client.update_port(
+            port['id'],
+            qos_policy_id=None)
+
+        self.assert_allocations(server, port, 0, 0)
+
+    @decorators.idempotent_id('0393d038-03ad-4844-a0e4-83010f69dabb')
+    @utils.services('compute', 'network')
+    def test_interface_attach_detach(self):
+        min_kbps = 1000
+        min_kpps = 100
+        policy = self._create_qos_policy_with_bw_and_pps_rules(
+            min_kbps, min_kpps)
+
+        port = self._create_port_with_qos_policy(policy=None)
+
+        port2 = self._create_port_with_qos_policy(policy=policy)
+
+        server = self.create_server(
+            networks=[{'port': port['id']}],
+            wait_until='ACTIVE'
+        )
+
+        self.assert_allocations(server, port, 0, 0)
+
+        self.interface_client.create_interface(
+            server_id=server['id'],
+            port_id=port2['id'])
+        waiters.wait_for_interface_status(
+            self.interface_client, server['id'], port2['id'], 'ACTIVE')
+
+        self.assert_allocations(server, port2, min_kbps, min_kpps)
+
+        req_id = self.interface_client.delete_interface(
+            server_id=server['id'],
+            port_id=port2['id']).response['x-openstack-request-id']
+        waiters.wait_for_interface_detach(
+            self.servers_client, server['id'], port2['id'], req_id)
+
+        self.assert_allocations(server, port2, 0, 0)
+
+    @decorators.idempotent_id('36ffdb85-6cc2-4cc9-a426-cad5bac8626b')
+    @testtools.skipUnless(
+        CONF.compute.min_compute_nodes > 1,
+        'Less than 2 compute nodes, skipping multinode tests.')
+    @testtools.skipUnless(
+        CONF.compute_feature_enabled.live_migration,
+        'Live migration not available')
+    @utils.services('compute', 'network')
+    def test_server_live_migrate(self):
+        min_kbps = 1000
+        min_kpps = 100
+        policy = self._create_qos_policy_with_bw_and_pps_rules(
+            min_kbps, min_kpps)
+
+        port = self._create_port_with_qos_policy(policy=policy)
+
+        server = self.create_server(
+            networks=[{'port': port['id']}],
+            wait_until='ACTIVE'
+        )
+
+        self.assert_allocations(server, port, min_kbps, min_kpps)
+
+        server_details = self.os_adm.servers_client.show_server(server['id'])
+        source_host = server_details['server']['OS-EXT-SRV-ATTR:host']
+
+        self.os_adm.servers_client.live_migrate_server(
+            server['id'], block_migration=True, host=None)
+        waiters.wait_for_server_status(
+            self.servers_client, server['id'], 'ACTIVE')
+
+        server_details = self.os_adm.servers_client.show_server(server['id'])
+        new_host = server_details['server']['OS-EXT-SRV-ATTR:host']
+
+        self.assertNotEqual(source_host, new_host, "Live migration failed")
+
+        self.assert_allocations(server, port, min_kbps, min_kpps)
diff --git a/zuul.d/integrated-gate.yaml b/zuul.d/integrated-gate.yaml
index b86268a..98cff6e 100644
--- a/zuul.d/integrated-gate.yaml
+++ b/zuul.d/integrated-gate.yaml
@@ -131,6 +131,8 @@
 - job:
     name: tempest-integrated-compute-centos-8-stream
     parent: tempest-integrated-compute
+    # TODO(gmann): Make this job non-voting until bug#1957941 is fixed.
+    voting: false
     nodeset: devstack-single-node-centos-8-stream
     branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train|ussuri|victoria)).*$
     description: |
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index 36f4920..9ab10d7 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -43,8 +43,6 @@
             irrelevant-files: *tempest-irrelevant-files
         - tempest-full-ussuri-py3:
             irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-train-py3:
-            irrelevant-files: *tempest-irrelevant-files
         - tempest-multinode-full-py3:
             irrelevant-files: *tempest-irrelevant-files
         - tempest-tox-plugin-sanity-check:
@@ -169,7 +167,6 @@
         - tempest-full-wallaby-py3
         - tempest-full-victoria-py3
         - tempest-full-ussuri-py3
-        - tempest-full-train-py3
     periodic:
       jobs:
         - tempest-all
diff --git a/zuul.d/stable-jobs.yaml b/zuul.d/stable-jobs.yaml
index e682457..da6cc46 100644
--- a/zuul.d/stable-jobs.yaml
+++ b/zuul.d/stable-jobs.yaml
@@ -21,12 +21,6 @@
     override-checkout: stable/ussuri
 
 - job:
-    name: tempest-full-train-py3
-    parent: tempest-full-py3
-    nodeset: openstack-single-node-bionic
-    override-checkout: stable/train
-
-- job:
     name: tempest-full-py3
     parent: devstack-tempest
     # This job version is with swift disabled on py3
diff --git a/zuul.d/tempest-specific.yaml b/zuul.d/tempest-specific.yaml
index 051d8b0..a24c73d 100644
--- a/zuul.d/tempest-specific.yaml
+++ b/zuul.d/tempest-specific.yaml
@@ -80,6 +80,8 @@
 - job:
     name: tempest-full-py3-centos-8-stream
     parent: tempest-full-py3
+    # TODO(gmann): Make this job non-voting until bug#1957941 is fixed.
+    voting: false
     nodeset: devstack-single-node-centos-8-stream
     description: |
       Base integration test with Neutron networking and py36 running