Merge "Use CONF.identity.disable_ssl_certificate_validation in object_client"
diff --git a/doc/source/microversion_testing.rst b/doc/source/microversion_testing.rst
index c7004dd..c1981f9 100644
--- a/doc/source/microversion_testing.rst
+++ b/doc/source/microversion_testing.rst
@@ -126,16 +126,16 @@
 
 .. code-block:: python
 
-    class BaseTestCase1(api_version_utils.BaseMicroversionTest):
+   class BaseTestCase1(api_version_utils.BaseMicroversionTest):
 
-        [..]
-    @classmethod
-    def skip_checks(cls):
-        super(BaseTestCase1, cls).skip_checks()
-        api_version_utils.check_skip_with_microversion(cls.min_microversion,
-                                                       cls.max_microversion,
-                                                       CONF.compute.min_microversion,
-                                                       CONF.compute.max_microversion)
+       [..]
+       @classmethod
+       def skip_checks(cls):
+           super(BaseTestCase1, cls).skip_checks()
+           api_version_utils.check_skip_with_microversion(cls.min_microversion,
+                                                          cls.max_microversion,
+                                                          CONF.compute.min_microversion,
+                                                          CONF.compute.max_microversion)
 
 Skip logic can be added in the tests' base class or in any specific test
 class, depending on the test class structure.
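
For illustration, a test class built on this base then pins its supported
window with the ``min_microversion`` and ``max_microversion`` class
attributes (the subclass name and version range below are hypothetical):

.. code-block:: python

   class TestExampleMicroversions(BaseTestCase1):
       # Tests in this class only run when the configured compute
       # microversion range overlaps [2.2, 2.28]; otherwise the
       # skip_checks() above raises a skip.
       min_microversion = '2.2'
       max_microversion = '2.28'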
diff --git a/doc/source/plugins/plugin.rst b/doc/source/plugins/plugin.rst
index ab1b0b1..6726def 100644
--- a/doc/source/plugins/plugin.rst
+++ b/doc/source/plugins/plugin.rst
@@ -268,12 +268,12 @@
 
    class MyAPIClient(rest_client.RestClient):
 
-    def __init__(self, auth_provider, service, region,
-                 my_arg, my_arg2=True, **kwargs):
-        super(MyAPIClient, self).__init__(
-            auth_provider, service, region, **kwargs)
-        self.my_arg = my_arg
-        self.my_args2 = my_arg
+       def __init__(self, auth_provider, service, region,
+                    my_arg, my_arg2=True, **kwargs):
+           super(MyAPIClient, self).__init__(
+               auth_provider, service, region, **kwargs)
+           self.my_arg = my_arg
+           self.my_arg2 = my_arg2
 
 Finally the service client should be structured in a python module, so that all
 service client classes are importable from it. Each major API version should
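
As a hedged sketch of that layout (module path and names are hypothetical),
the plugin's client module could simply re-export every service client
class:

.. code-block:: python

   # my_plugin/services/client/__init__.py -- hypothetical layout
   from my_plugin.services.client.v1.my_api_client import MyAPIClient

   __all__ = ['MyAPIClient']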
diff --git a/doc/source/supported_version.rst b/doc/source/supported_version.rst
index 388b4cd..4ca7f0d 100644
--- a/doc/source/supported_version.rst
+++ b/doc/source/supported_version.rst
@@ -9,9 +9,9 @@
 
 Tempest master supports the below OpenStack Releases:
 
+* Victoria
 * Ussuri
 * Train
-* Stein
 
 For older OpenStack Release:
 
@@ -34,3 +34,4 @@
 
 * Python 3.6
 * Python 3.7
+* Python 3.8
diff --git a/doc/source/write_tests.rst b/doc/source/write_tests.rst
index 0a29b7b..34df089 100644
--- a/doc/source/write_tests.rst
+++ b/doc/source/write_tests.rst
@@ -76,54 +76,54 @@
 
   class TestExampleCase(test.BaseTestCase):
 
-    @classmethod
-    def skip_checks(cls):
-        """This section is used to evaluate config early and skip all test
-           methods based on these checks
-        """
-        super(TestExampleCase, cls).skip_checks()
-        if not CONF.section.foo
-            cls.skip('A helpful message')
+      @classmethod
+      def skip_checks(cls):
+          """This section is used to evaluate config early and skip all test
+             methods based on these checks
+          """
+          super(TestExampleCase, cls).skip_checks()
+          if not CONF.section.foo:
+              raise cls.skipException('A helpful message')
 
-    @classmethod
-    def setup_credentials(cls):
-        """This section is used to do any manual credential allocation and also
-           in the case of dynamic credentials to override the default network
-           resource creation/auto allocation
-        """
-        # This call is used to tell the credential allocator to not create any
-        # network resources for this test case. It also enables selective
-        # creation of other neutron resources. NOTE: it must go before the
-        # super call
-        cls.set_network_resources()
-        super(TestExampleCase, cls).setup_credentials()
+      @classmethod
+      def setup_credentials(cls):
+          """This section is used to do any manual credential allocation and also
+             in the case of dynamic credentials to override the default network
+             resource creation/auto allocation
+          """
+          # This call is used to tell the credential allocator to not create any
+          # network resources for this test case. It also enables selective
+          # creation of other neutron resources. NOTE: it must go before the
+          # super call
+          cls.set_network_resources()
+          super(TestExampleCase, cls).setup_credentials()
 
-    @classmethod
-    def setup_clients(cls):
-        """This section is used to setup client aliases from the manager object
-           or to initialize any additional clients. Except in a few very
-           specific situations you should not need to use this.
-        """
-        super(TestExampleCase, cls).setup_clients()
-        cls.servers_client = cls.os_primary.servers_client
+      @classmethod
+      def setup_clients(cls):
+          """This section is used to setup client aliases from the manager object
+             or to initialize any additional clients. Except in a few very
+             specific situations you should not need to use this.
+          """
+          super(TestExampleCase, cls).setup_clients()
+          cls.servers_client = cls.os_primary.servers_client
 
-    @classmethod
-    def resource_setup(cls):
-        """This section is used to create any resources or objects which are
-           going to be used and shared by **all** test methods in the
-           TestCase. Note then anything created in this section must also be
-           destroyed in the corresponding resource_cleanup() method (which will
-           be run during tearDownClass())
-        """
-        super(TestExampleCase, cls).resource_setup()
-        cls.shared_server = cls.servers_client.create_server(...)
-        cls.addClassResourceCleanup(waiters.wait_for_server_termination,
-                                    cls.servers_client,
-                                    cls.shared_server['id'])
-        cls.addClassResourceCleanup(
-            test_utils.call_and_ignore_notfound_exc(
-                cls.servers_client.delete_server,
-                cls.shared_server['id']))
+      @classmethod
+      def resource_setup(cls):
+          """This section is used to create any resources or objects which are
+             going to be used and shared by **all** test methods in the
+             TestCase. Note that anything created in this section must also be
+             destroyed in the corresponding resource_cleanup() method (which will
+             be run during tearDownClass())
+          """
+          super(TestExampleCase, cls).resource_setup()
+          cls.shared_server = cls.servers_client.create_server(...)
+          cls.addClassResourceCleanup(waiters.wait_for_server_termination,
+                                      cls.servers_client,
+                                      cls.shared_server['id'])
+          cls.addClassResourceCleanup(
+              test_utils.call_and_ignore_notfound_exc,
+              cls.servers_client.delete_server,
+              cls.shared_server['id'])
 
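To round out the example, a test method can then consume the class-level
resources directly (a sketch; the test method name is illustrative):

.. code-block:: python

      def test_example_uses_shared_server(self):
          # shared_server was created once in resource_setup() and is
          # cleaned up by the class-level cleanups registered there.
          server = self.servers_client.show_server(
              self.shared_server['id'])['server']
          self.assertEqual(self.shared_server['id'], server['id'])
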
 .. _credentials:
 
@@ -150,9 +150,9 @@
 
         credentials = ['primary', 'admin']
 
-    @classmethod
-    def skip_checks(cls):
-    ...
+        @classmethod
+        def skip_checks(cls):
+            ...
 
 In this example the ``TestExampleAdmin`` TestCase will allocate 2 sets of
 credentials, one regular user and one admin user. The corresponding manager
@@ -225,10 +225,10 @@
 
   class TestExampleCase(test.BaseTestCase):
 
-  @classmethod
-  def setup_credentials(cls):
-      cls.set_network_resources(network=True, subnet=True, router=False)
-      super(TestExampleCase, cls).setup_credentials()
+      @classmethod
+      def setup_credentials(cls):
+          cls.set_network_resources(network=True, subnet=True, router=False)
+          super(TestExampleCase, cls).setup_credentials()
 
 There are 2 quirks with the usage here. First, for the set_network_resources
 function to work properly, it **must be called before super()**. This is so
@@ -242,10 +242,10 @@
 
   class TestExampleCase(test.BaseTestCase):
 
-  @classmethod
-  def setup_credentials(cls):
-      cls.set_network_resources()
-      super(TestExampleCase, cls).setup_credentials()
+      @classmethod
+      def setup_credentials(cls):
+          cls.set_network_resources()
+          super(TestExampleCase, cls).setup_credentials()
 
 This will not allocate any networking resources. This is because all of the
 arguments default to False.
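
Spelled out, the bare call above is equivalent to passing every resource
flag explicitly (the ``dhcp`` flag is assumed here from the credential
provider interface):

.. code-block:: python

      @classmethod
      def setup_credentials(cls):
          # Every flag defaults to False, so no network resources are
          # allocated for this test class.
          cls.set_network_resources(network=False, subnet=False,
                                    router=False, dhcp=False)
          super(TestExampleCase, cls).setup_credentials()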
@@ -282,8 +282,8 @@
 
 
   class TestExampleCase(test.BaseTestCase):
-    def test_example_create_server(self):
-      self.os_primary.servers_client.create_server(...)
+      def test_example_create_server(self):
+          self.os_primary.servers_client.create_server(...)
 
 is all you need to do. As described previously, in the above example the
 ``self.os_primary`` is created automatically because the base test class sets the
@@ -305,8 +305,8 @@
 
 
   class TestExampleCase(test.BaseTestCase):
-    def test_example_create_server(self):
-      credentials = self.os_primary.credentials
+      def test_example_create_server(self):
+          credentials = self.os_primary.credentials
 
 The credentials object provides access to all of the credential information you
 would need to make API requests. For example, building off the previous
@@ -316,9 +316,9 @@
 
 
   class TestExampleCase(test.BaseTestCase):
-    def test_example_create_server(self):
-      credentials = self.os_primary.credentials
-      username = credentials.username
-      user_id = credentials.user_id
-      password = credentials.password
-      tenant_id = credentials.tenant_id
+      def test_example_create_server(self):
+          credentials = self.os_primary.credentials
+          username = credentials.username
+          user_id = credentials.user_id
+          password = credentials.password
+          tenant_id = credentials.tenant_id
diff --git a/releasenotes/notes/end-of-support-for-stein-f795b968d83497a9.yaml b/releasenotes/notes/end-of-support-for-stein-f795b968d83497a9.yaml
new file mode 100644
index 0000000..fd7a874
--- /dev/null
+++ b/releasenotes/notes/end-of-support-for-stein-f795b968d83497a9.yaml
@@ -0,0 +1,12 @@
+---
+prelude: |
+    This is an intermediate release during the Wallaby development cycle to
+    mark the end of support for the EM Stein release in Tempest.
+    After this release, Tempest will support the below OpenStack Releases:
+
+    * Victoria
+    * Ussuri
+    * Train
+
+    Current development of Tempest is for OpenStack Wallaby development
+    cycle.
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index d8702f9..21f414e 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,7 @@
    :maxdepth: 1
 
    unreleased
+   v26.0.0
    v24.0.0
    v23.0.0
    v22.1.0
diff --git a/releasenotes/source/v26.0.0.rst b/releasenotes/source/v26.0.0.rst
new file mode 100644
index 0000000..4161f89
--- /dev/null
+++ b/releasenotes/source/v26.0.0.rst
@@ -0,0 +1,5 @@
+=====================
+v26.0.0 Release Notes
+=====================
+.. release-notes:: 26.0.0 Release Notes
+   :version: 26.0.0
diff --git a/tempest/api/compute/admin/test_volume.py b/tempest/api/compute/admin/test_volume.py
index 9340997..342380e 100644
--- a/tempest/api/compute/admin/test_volume.py
+++ b/tempest/api/compute/admin/test_volume.py
@@ -112,7 +112,5 @@
             server['id'], attachment['volumeId'])
         waiters.wait_for_volume_resource_status(
             self.volumes_client, attachment['volumeId'], 'available')
-        volume_after_detach = self.servers_client.list_volume_attachments(
-            server['id'])['volumeAttachments']
-        self.assertEqual(0, len(volume_after_detach),
-                         "Failed to detach volume")
+        waiters.wait_for_volume_attachment_remove_from_server(
+            self.servers_client, server['id'], attachment['volumeId'])
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index d85e4f7..7251e36 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -200,6 +200,10 @@
         super(AttachVolumeShelveTestJSON, cls).skip_checks()
         if not CONF.compute_feature_enabled.shelve:
             raise cls.skipException('Shelve is not available.')
+        if CONF.compute.compute_volume_common_az:
+            # assuming cross_az_attach is set to false in nova.conf
+            # per the compute_volume_common_az option description
+            raise cls.skipException('Cross AZ attach not available.')
 
     def _count_volumes(self, server, validation_resources):
         # Count number of volumes on an instance
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 625e08e..e3c33c7 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -317,6 +317,32 @@
              'seconds', attachment_id, volume_id, time.time() - start)
 
 
+def wait_for_volume_attachment_remove_from_server(
+        client, server_id, volume_id):
+    """Waits for a volume to be removed from a given server.
+
+    This waiter polls the compute API until the volume attachment is removed.
+    """
+    start = int(time.time())
+    volumes = client.list_volume_attachments(server_id)['volumeAttachments']
+
+    while any(volume for volume in volumes if volume['volumeId'] == volume_id):
+        time.sleep(client.build_interval)
+
+        timed_out = int(time.time()) - start >= client.build_timeout
+        if timed_out:
+            message = ('Volume %s failed to detach from server %s within '
+                       'the required time (%s s) from the compute API '
+                       'perspective' %
+                       (volume_id, server_id, client.build_timeout))
+            raise lib_exc.TimeoutException(message)
+
+        volumes = client.list_volume_attachments(server_id)[
+            'volumeAttachments']
+
+    return volumes
+
+
 def wait_for_volume_migration(client, volume_id, new_host):
     """Waits for a Volume to move to a new host."""
     body = client.show_volume(volume_id)['volume']
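
A minimal usage sketch of the new waiter, mirroring the nova_volume_detach
change to tempest/scenario/manager.py further down in this patch:

.. code-block:: python

   # Detach the volume, then block until the compute API stops listing
   # the attachment for this server; a lib_exc.TimeoutException is
   # raised if it is still attached after client.build_timeout seconds.
   servers_client.detach_volume(server['id'], volume['id'])
   waiters.wait_for_volume_attachment_remove_from_server(
       servers_client, server['id'], volume['id'])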
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 85a6488..ce13166 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -504,7 +504,14 @@
         self.addCleanup(self._cleanup_volume_type, volume_type)
         return volume_type
 
-    def _create_loginable_secgroup_rule(self, secgroup_id=None):
+    def _create_loginable_secgroup_rule(self, secgroup_id=None,
+                                        rulesets=None):
+        """Create loginable security group rule using the compute clients.
+
+        By default this function creates the following rules:
+        1. a tcp port 22 allow rule, to allow ssh access over ipv4
+        2. an ipv4 icmp allow rule, to allow ping over ipv4
+        """
+
         _client = self.compute_security_groups_client
         _client_rules = self.compute_security_group_rules_client
         if secgroup_id is None:
@@ -517,22 +524,23 @@
         # traffic from all sources, so no group_id is provided.
         # Setting a group_id would only permit traffic from ports
         # belonging to the same security group.
-        rulesets = [
-            {
-                # ssh
-                'ip_protocol': 'tcp',
-                'from_port': 22,
-                'to_port': 22,
-                'cidr': '0.0.0.0/0',
-            },
-            {
-                # ping
-                'ip_protocol': 'icmp',
-                'from_port': -1,
-                'to_port': -1,
-                'cidr': '0.0.0.0/0',
-            }
-        ]
+        if not rulesets:
+            rulesets = [
+                {
+                    # ssh
+                    'ip_protocol': 'tcp',
+                    'from_port': 22,
+                    'to_port': 22,
+                    'cidr': '0.0.0.0/0',
+                },
+                {
+                    # ping
+                    'ip_protocol': 'icmp',
+                    'from_port': -1,
+                    'to_port': -1,
+                    'cidr': '0.0.0.0/0',
+                }
+            ]
         rules = list()
         for ruleset in rulesets:
             sg_rule = _client_rules.create_security_group_rule(
@@ -540,14 +548,16 @@
             rules.append(sg_rule)
         return rules
 
-    def _create_security_group(self):
+    def _create_security_group(self, **kwargs):
         """Create security group and add rules to security group"""
-        sg_name = data_utils.rand_name(self.__class__.__name__)
-        sg_desc = sg_name + " description"
+        if not kwargs.get('name'):
+            kwargs['name'] = data_utils.rand_name(self.__class__.__name__)
+        if not kwargs.get('description'):
+            kwargs['description'] = kwargs['name'] + " description"
         secgroup = self.compute_security_groups_client.create_security_group(
-            name=sg_name, description=sg_desc)['security_group']
-        self.assertEqual(secgroup['name'], sg_name)
-        self.assertEqual(secgroup['description'], sg_desc)
+            **kwargs)['security_group']
+        self.assertEqual(secgroup['name'], kwargs['name'])
+        self.assertEqual(secgroup['description'], kwargs['description'])
         self.addCleanup(
             test_utils.call_and_ignore_notfound_exc,
             self.compute_security_groups_client.delete_security_group,
@@ -731,14 +741,12 @@
     def nova_volume_detach(self, server, volume):
         """Compute volume detach
 
-        This utility detaches volume from compute and check whether the
-        volume status is 'available' state, and if not, an exception
-        will be thrown.
+        This utility detaches the volume from the server and checks whether the
+        volume attachment has been removed from Nova.
         """
         self.servers_client.detach_volume(server['id'], volume['id'])
-        waiters.wait_for_volume_resource_status(self.volumes_client,
-                                                volume['id'], 'available')
-        volume = self.volumes_client.show_volume(volume['id'])['volume']
+        waiters.wait_for_volume_attachment_remove_from_server(
+            self.servers_client, server['id'], volume['id'])
 
     def ping_ip_address(self, ip_address, should_succeed=True,
                         ping_timeout=None, mtu=None, server=None):
@@ -869,15 +877,25 @@
         return timestamp
 
     def get_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
-                      private_key=None, server=None):
+                      private_key=None, server=None, username=None):
         """Returns timestamp
 
         This wrapper utility does ssh and returns the timestamp.
+
+        :param ip_address: The floating IP or fixed IP of the remote server
+        :param dev_name: Name of the device that stores the timestamp
+        :param mount_path: Path which should be used as mount point for
+                           dev_name
+        :param private_key: The SSH private key to use for authentication
+        :param server: Server dict, used for debugging purposes
+        :param username: Name of the Linux account on the remote server
         """
 
         ssh_client = self.get_remote_client(ip_address,
                                             private_key=private_key,
-                                            server=server)
+                                            server=server,
+                                            username=username)
+
         if dev_name is not None:
             ssh_client.mount(dev_name, mount_path)
         timestamp = ssh_client.exec_command('sudo cat %s/timestamp'
@@ -1047,12 +1065,19 @@
         def cidr_in_use(cidr, project_id):
             """Check cidr existence
 
-            :returns: True if subnet with cidr already exist in tenant
-                  False else
+            :returns: True if a subnet with the cidr already exists in the
+                  tenant or in an external network, False otherwise
             """
-            cidr_in_use = self.os_admin.subnets_client.list_subnets(
+            tenant_subnets = self.os_admin.subnets_client.list_subnets(
                 project_id=project_id, cidr=cidr)['subnets']
-            return len(cidr_in_use) != 0
+            external_nets = self.os_admin.networks_client.list_networks(
+                **{"router:external": True})['networks']
+            external_subnets = []
+            for ext_net in external_nets:
+                external_subnets.extend(
+                    self.os_admin.subnets_client.list_subnets(
+                        network_id=ext_net['id'], cidr=cidr)['subnets'])
+            return len(tenant_subnets + external_subnets) != 0
 
         ip_version = kwargs.pop('ip_version', 4)
 
@@ -1402,7 +1427,7 @@
     def _create_loginable_secgroup_rule(self, security_group_rules_client=None,
                                         secgroup=None,
                                         security_groups_client=None):
-        """Create loginable security group rule
+        """Create loginable security group rule by neutron clients by default.
 
         This function will create:
         1. egress and ingress tcp port 22 allow rule in order to allow ssh
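
A hypothetical caller of the two extended helpers above, exercising the new
``kwargs`` and ``rulesets`` parameters (the values are illustrative only):

.. code-block:: python

   # Custom name; the description falls back to '<name> description'.
   secgroup = self._create_security_group(name='my-scenario-sg')

   # Replace the default ssh/icmp rules with a single HTTPS rule.
   self._create_loginable_secgroup_rule(
       secgroup_id=secgroup['id'],
       rulesets=[{
           'ip_protocol': 'tcp',
           'from_port': 443,
           'to_port': 443,
           'cidr': '0.0.0.0/0',
       }])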
diff --git a/tempest/scenario/test_minbw_allocation_placement.py b/tempest/scenario/test_minbw_allocation_placement.py
index 74d4ed9..a9d15bc 100644
--- a/tempest/scenario/test_minbw_allocation_placement.py
+++ b/tempest/scenario/test_minbw_allocation_placement.py
@@ -12,7 +12,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_log import log as logging
+import testtools
 
 from tempest.common import utils
 from tempest.common import waiters
@@ -23,7 +23,6 @@
 from tempest.scenario import manager
 
 
-LOG = logging.getLogger(__name__)
 CONF = config.CONF
 
 
@@ -65,6 +64,8 @@
         cls.routers_client = cls.os_adm.routers_client
         cls.qos_client = cls.os_admin.qos_client
         cls.qos_min_bw_client = cls.os_admin.qos_min_bw_client
+        cls.flavors_client = cls.os_adm.flavors_client
+        cls.servers_client = cls.os_adm.servers_client
 
     @classmethod
     def skip_checks(cls):
@@ -74,6 +75,11 @@
                   "placement based QoS allocation."
             raise cls.skipException(msg)
 
+    def setUp(self):
+        super(MinBwAllocationPlacementTest, self).setUp()
+        self._check_if_allocation_is_possible()
+        self._create_network_and_qos_policies()
+
     def _create_policy_and_min_bw_rule(self, name_prefix, min_kbps):
         policy = self.qos_client.create_qos_policy(
             name=data_utils.rand_name(name_prefix),
@@ -139,8 +145,34 @@
             self.fail('For %s:%s there should be no available candidate!' %
                       (self.INGRESS_RESOURCE_CLASS, self.PLACEMENT_MAX_INT))
 
+    def _boot_vm_with_min_bw(self, qos_policy_id, status='ACTIVE'):
+        wait_until = (None if status == 'ERROR' else status)
+        port = self.create_port(
+            self.prov_network['id'], qos_policy_id=qos_policy_id)
+
+        server = self.create_server(networks=[{'port': port['id']}],
+                                    wait_until=wait_until)
+        waiters.wait_for_server_status(
+            client=self.os_primary.servers_client, server_id=server['id'],
+            status=status, ready_wait=False, raise_on_error=False)
+        return server, port
+
+    def _assert_allocation_is_as_expected(self, allocations, port_id):
+        self.assertGreater(len(allocations['allocations']), 0)
+        bw_resource_in_alloc = False
+        for rp, resources in allocations['allocations'].items():
+            if self.INGRESS_RESOURCE_CLASS in resources['resources']:
+                bw_resource_in_alloc = True
+                allocation_rp = rp
+        self.assertTrue(bw_resource_in_alloc)
+
+        # Check that the binding:profile of the port is not empty and
+        # matches the rp uuid
+        port = self.os_admin.ports_client.show_port(port_id)
+        self.assertEqual(allocation_rp,
+                         port['port']['binding:profile']['allocation'])
+
     @decorators.idempotent_id('78625d92-212c-400e-8695-dd51706858b8')
-    @decorators.attr(type='slow')
     @utils.services('compute', 'network')
     def test_qos_min_bw_allocation_basic(self):
         """"Basic scenario with QoS min bw allocation in placement.
@@ -162,40 +194,13 @@
         it should fail.
         """
 
-        self._check_if_allocation_is_possible()
-
-        self._create_network_and_qos_policies()
-
-        valid_port = self.create_port(
-            self.prov_network['id'], qos_policy_id=self.qos_policy_valid['id'])
-
-        server1 = self.create_server(
-            networks=[{'port': valid_port['id']}])
+        server1, valid_port = self._boot_vm_with_min_bw(
+            qos_policy_id=self.qos_policy_valid['id'])
         allocations = self.placement_client.list_allocations(server1['id'])
+        self._assert_allocation_is_as_expected(allocations, valid_port['id'])
 
-        self.assertGreater(len(allocations['allocations']), 0)
-        bw_resource_in_alloc = False
-        for rp, resources in allocations['allocations'].items():
-            if self.INGRESS_RESOURCE_CLASS in resources['resources']:
-                bw_resource_in_alloc = True
-                allocation_rp = rp
-        self.assertTrue(bw_resource_in_alloc)
-        # Check that binding_profile of the port is not empty and equals with
-        # the rp uuid
-        port = self.os_admin.ports_client.show_port(valid_port['id'])
-        self.assertEqual(allocation_rp,
-                         port['port']['binding:profile']['allocation'])
-
-        # boot another vm with max int bandwidth
-        not_valid_port = self.create_port(
-            self.prov_network['id'],
-            qos_policy_id=self.qos_policy_not_valid['id'])
-        server2 = self.create_server(
-            wait_until=None,
-            networks=[{'port': not_valid_port['id']}])
-        waiters.wait_for_server_status(
-            client=self.os_primary.servers_client, server_id=server2['id'],
-            status='ERROR', ready_wait=False, raise_on_error=False)
+        server2, not_valid_port = self._boot_vm_with_min_bw(
+            self.qos_policy_not_valid['id'], status='ERROR')
         allocations = self.placement_client.list_allocations(server2['id'])
 
         self.assertEqual(0, len(allocations['allocations']))
@@ -205,3 +210,90 @@
         # Check that binding_profile of the port is empty
         port = self.os_admin.ports_client.show_port(not_valid_port['id'])
         self.assertEqual(0, len(port['port']['binding:profile']))
+
+    @decorators.idempotent_id('8a98150c-a506-49a5-96c6-73a5e7b04ada')
+    @testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
+                          'Cold migration is not available.')
+    @testtools.skipUnless(CONF.compute.min_compute_nodes > 1,
+                          'Less than 2 compute nodes, skipping multinode '
+                          'tests.')
+    @utils.services('compute', 'network')
+    def test_migrate_with_qos_min_bw_allocation(self):
+        """Scenario to migrate VM with QoS min bw allocation in placement
+
+        Boot a VM like in test_qos_min_bw_allocation_basic, do the same
+        checks, and
+        * migrate the server
+        * confirm the resize, if the VM state is VERIFY_RESIZE
+        * If the VM goes to ACTIVE state check that allocations are as
+        expected.
+        """
+        server, valid_port = self._boot_vm_with_min_bw(
+            qos_policy_id=self.qos_policy_valid['id'])
+        allocations = self.placement_client.list_allocations(server['id'])
+        self._assert_allocation_is_as_expected(allocations, valid_port['id'])
+
+        self.servers_client.migrate_server(server_id=server['id'])
+        waiters.wait_for_server_status(
+            client=self.os_primary.servers_client, server_id=server['id'],
+            status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False)
+        allocations = self.placement_client.list_allocations(server['id'])
+
+        # TODO(lajoskatona): Check that the allocations are ok for the
+        #  migration?
+        self._assert_allocation_is_as_expected(allocations, valid_port['id'])
+
+        self.servers_client.confirm_resize_server(server_id=server['id'])
+        waiters.wait_for_server_status(
+            client=self.os_primary.servers_client, server_id=server['id'],
+            status='ACTIVE', ready_wait=False, raise_on_error=True)
+        allocations = self.placement_client.list_allocations(server['id'])
+        self._assert_allocation_is_as_expected(allocations, valid_port['id'])
+
+    @decorators.idempotent_id('c29e7fd3-035d-4993-880f-70819847683f')
+    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+                          'Resize not available.')
+    @utils.services('compute', 'network')
+    def test_resize_with_qos_min_bw_allocation(self):
+        """Scenario to resize VM with QoS min bw allocation in placement.
+
+        Boot a VM like in test_qos_min_bw_allocation_basic, do the same
+        checks, and
+        * resize the server with new flavor
+        * confirm the resize, if the VM state is VERIFY_RESIZE
+        * If the VM goes to ACTIVE state check that allocations are as
+        expected.
+        """
+        server, valid_port = self._boot_vm_with_min_bw(
+            qos_policy_id=self.qos_policy_valid['id'])
+        allocations = self.placement_client.list_allocations(server['id'])
+        self._assert_allocation_is_as_expected(allocations, valid_port['id'])
+
+        old_flavor = self.flavors_client.show_flavor(
+            CONF.compute.flavor_ref)['flavor']
+        new_flavor = self.flavors_client.create_flavor(**{
+            'ram': old_flavor['ram'],
+            'vcpus': old_flavor['vcpus'],
+            'name': old_flavor['name'] + 'extra',
+            'disk': old_flavor['disk'] + 1
+        })['flavor']
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.flavors_client.delete_flavor, new_flavor['id'])
+
+        self.servers_client.resize_server(
+            server_id=server['id'], flavor_ref=new_flavor['id'])
+        waiters.wait_for_server_status(
+            client=self.os_primary.servers_client, server_id=server['id'],
+            status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False)
+        allocations = self.placement_client.list_allocations(server['id'])
+
+        # TODO(lajoskatona): Check that the allocations are ok for the
+        #  migration?
+        self._assert_allocation_is_as_expected(allocations, valid_port['id'])
+
+        self.servers_client.confirm_resize_server(server_id=server['id'])
+        waiters.wait_for_server_status(
+            client=self.os_primary.servers_client, server_id=server['id'],
+            status='ACTIVE', ready_wait=False, raise_on_error=True)
+        allocations = self.placement_client.list_allocations(server['id'])
+        self._assert_allocation_is_as_expected(allocations, valid_port['id'])
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index c3b3670..a8e4c30 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -13,7 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_log import log as logging
 import testtools
 
 from tempest.common import utils
@@ -24,7 +23,6 @@
 from tempest.scenario import manager
 
 CONF = config.CONF
-LOG = logging.getLogger(__name__)
 
 
 class TestStampPattern(manager.ScenarioTest):
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index f45eec0..ff74877 100755
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -20,6 +20,7 @@
 from tempest.common import waiters
 from tempest import exceptions
 from tempest.lib import exceptions as lib_exc
+from tempest.lib.services.compute import servers_client
 from tempest.lib.services.volume.v2 import volumes_client
 from tempest.tests import base
 import tempest.tests.utils as utils
@@ -384,3 +385,54 @@
                                                   uuids.attachment_id)
         # Assert that show volume is only called once before we return
         show_volume.assert_called_once_with(uuids.volume_id)
+
+    def test_wait_for_volume_attachment_remove_from_server(self):
+        volume_attached = {
+            "volumeAttachments": [{"volumeId": uuids.volume_id}]}
+        volume_not_attached = {"volumeAttachments": []}
+        mock_list_volume_attachments = mock.Mock(
+            side_effect=[volume_attached, volume_not_attached])
+        mock_client = mock.Mock(
+            spec=servers_client.ServersClient,
+            build_interval=1,
+            build_timeout=1,
+            list_volume_attachments=mock_list_volume_attachments)
+        self.patch(
+            'time.time',
+            side_effect=[0., 0.5, mock_client.build_timeout + 1.])
+        self.patch('time.sleep')
+
+        waiters.wait_for_volume_attachment_remove_from_server(
+            mock_client, uuids.server_id, uuids.volume_id)
+
+        # Assert that list_volume_attachments is called until the
+        # attachment is removed.
+        mock_list_volume_attachments.assert_has_calls([
+            mock.call(uuids.server_id),
+            mock.call(uuids.server_id)])
+
+    def test_wait_for_volume_attachment_remove_from_server_timeout(self):
+        volume_attached = {
+            "volumeAttachments": [{"volumeId": uuids.volume_id}]}
+        mock_list_volume_attachments = mock.Mock(
+            side_effect=[volume_attached, volume_attached])
+        mock_client = mock.Mock(
+            spec=servers_client.ServersClient,
+            build_interval=1,
+            build_timeout=1,
+            list_volume_attachments=mock_list_volume_attachments)
+        self.patch(
+            'time.time',
+            side_effect=[0., 0.5, mock_client.build_timeout + 1.])
+        self.patch('time.sleep')
+
+        self.assertRaises(
+            lib_exc.TimeoutException,
+            waiters.wait_for_volume_attachment_remove_from_server,
+            mock_client, uuids.server_id, uuids.volume_id)
+
+        # Assert that list_volume_attachments keeps being called until
+        # the wait times out.
+        mock_list_volume_attachments.assert_has_calls([
+            mock.call(uuids.server_id),
+            mock.call(uuids.server_id)])
diff --git a/zuul.d/integrated-gate.yaml b/zuul.d/integrated-gate.yaml
index 1a1d523..4c1ee5a 100644
--- a/zuul.d/integrated-gate.yaml
+++ b/zuul.d/integrated-gate.yaml
@@ -76,6 +76,18 @@
         FORCE_CONFIG_DRIVE: true
         ENABLE_VOLUME_MULTIATTACH: true
         GLANCE_USE_IMPORT_WORKFLOW: True
+      devstack_plugins:
+        neutron: https://opendev.org/openstack/neutron
+      devstack_local_conf:
+        post-config:
+          "/$NEUTRON_CORE_PLUGIN_CONF":
+            ovs:
+              bridge_mappings: public:br-ex
+              resource_provider_bandwidths: br-ex:1000000:1000000
+        test-config:
+          $TEMPEST_CONFIG:
+            network-feature-enabled:
+              qos_placement_physnet: public
       devstack_services:
         s-account: false
         s-container: false
@@ -89,6 +101,8 @@
         # especially because the tests fail at a high rate (see bugs 1483434,
         # 1813217, 1745168)
         c-bak: false
+        neutron-placement: true
+        neutron-qos: true
 
 - job:
     name: tempest-integrated-networking
@@ -244,6 +258,21 @@
     vars:
       devstack_localrc:
         USE_PYTHON3: true
+      devstack_plugins:
+        neutron: https://opendev.org/openstack/neutron
+      devstack_local_conf:
+        post-config:
+          "/$NEUTRON_CORE_PLUGIN_CONF":
+            ovs:
+              bridge_mappings: public:br-ex
+              resource_provider_bandwidths: br-ex:1000000:1000000
+        test-config:
+          $TEMPEST_CONFIG:
+            network-feature-enabled:
+              qos_placement_physnet: public
+      devstack_services:
+        neutron-placement: true
+        neutron-qos: true
     group-vars:
       subnode:
         devstack_localrc:
@@ -271,16 +300,6 @@
       devstack_services:
         neutron-placement: true
         neutron-qos: true
-      devstack_local_conf:
-        post-config:
-          "/$NEUTRON_CORE_PLUGIN_CONF":
-            ovs:
-              bridge_mappings: public:br-ex
-              resource_provider_bandwidths: br-ex:1000000:1000000
-        test-config:
-          $TEMPEST_CONFIG:
-            network-feature-enabled:
-              qos_placement_physnet: public
       tempest_concurrency: 2
     group-vars:
       # NOTE(mriedem): The ENABLE_VOLUME_MULTIATTACH variable is used on both
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index f2522af..5dcd27f 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -48,8 +48,6 @@
             irrelevant-files: *tempest-irrelevant-files
         - tempest-full-train-py3:
             irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-stein-py3:
-            irrelevant-files: *tempest-irrelevant-files
         - tempest-multinode-full-py3:
             irrelevant-files: *tempest-irrelevant-files
         - tempest-tox-plugin-sanity-check:
@@ -139,7 +137,6 @@
         - tempest-full-victoria-py3
         - tempest-full-ussuri-py3
         - tempest-full-train-py3
-        - tempest-full-stein-py3
     periodic:
       jobs:
         - tempest-all
diff --git a/zuul.d/stable-jobs.yaml b/zuul.d/stable-jobs.yaml
index 832a0d5..769b280 100644
--- a/zuul.d/stable-jobs.yaml
+++ b/zuul.d/stable-jobs.yaml
@@ -15,9 +15,3 @@
     parent: tempest-full-py3
     nodeset: openstack-single-node-bionic
     override-checkout: stable/train
-
-- job:
-    name: tempest-full-stein-py3
-    parent: tempest-full-py3
-    nodeset: openstack-single-node-bionic
-    override-checkout: stable/stein