Merge "Fix system & domain scoped admin dynamic credential"
diff --git a/releasenotes/notes/log_console_output-dae6b8740b5a5821.yaml b/releasenotes/notes/log_console_output-dae6b8740b5a5821.yaml
new file mode 100644
index 0000000..2779b26
--- /dev/null
+++ b/releasenotes/notes/log_console_output-dae6b8740b5a5821.yaml
@@ -0,0 +1,8 @@
+---
+features:
+  - |
+    Added the public interface log_console_output().
+    It used to be a private method named _log_console_output().
+    Since this interface is meant to be used by tempest plugins,
+    it does not necessarily need to be a private API.
+
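For context, a minimal sketch of how a tempest plugin might call the now-public helper; the plugin class and test names below are hypothetical and not part of this change:

    from tempest.scenario import manager

    class MyPluginScenarioTest(manager.ScenarioTest):
        # Hypothetical plugin test class, for illustration only.

        def test_boot_and_debug(self):
            server = self.create_server()
            # Previously this meant reaching into the private
            # _log_console_output(); plugins can now use the public name.
            self.log_console_output(servers=[server])
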
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index d1f6f98..ca72388 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -90,7 +90,7 @@
         self.assertEqual('uploading', body['status'])
         # import image from staging to backend
         self.client.image_import(image['id'], method='glance-direct')
-        self.client.wait_for_resource_activation(image['id'])
+        waiters.wait_for_image_imported_to_stores(self.client, image['id'])
 
     @decorators.idempotent_id('f6feb7a4-b04f-4706-a011-206129f83e62')
     def test_image_web_download_import(self):
@@ -111,7 +111,7 @@
         image_uri = CONF.image.http_image
         self.client.image_import(image['id'], method='web-download',
                                  image_uri=image_uri)
-        self.client.wait_for_resource_activation(image['id'])
+        waiters.wait_for_image_imported_to_stores(self.client, image['id'])
 
 
 class MultiStoresImportImagesTest(base.BaseV2ImageTest):
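For reference, a hedged sketch of the glance interoperable-import flow these tests exercise, written as a hypothetical test method in this file's context (the image payload and method name are illustrative):

    import io

    def test_glance_direct_import_sketch(self):
        # Create an image record, stage data, trigger the import, then
        # use the common waiter instead of the removed client helper.
        image = self.client.create_image(name='import-test',
                                         container_format='bare',
                                         disk_format='raw')
        self.client.stage_image_file(image['id'], io.BytesIO(b'data'))
        self.client.image_import(image['id'], method='glance-direct')
        waiters.wait_for_image_imported_to_stores(self.client, image['id'])
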
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index e3c33c7..eaac05e 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -193,26 +193,34 @@
     raise lib_exc.TimeoutException(message)
 
 
-def wait_for_image_imported_to_stores(client, image_id, stores):
+def wait_for_image_imported_to_stores(client, image_id, stores=None):
     """Waits for an image to be imported to all requested stores.
 
+    Short-circuits to fail if the server reports failure of any store.
+    If stores is None, just wait for status == 'active'.
+
     The client should also have build_interval and build_timeout attributes.
     """
 
+    exc_cls = lib_exc.TimeoutException
     start = int(time.time())
     while int(time.time()) - start < client.build_timeout:
         image = client.show_image(image_id)
-        if image['status'] == 'active' and image['stores'] == stores:
+        if image['status'] == 'active' and (stores is None or
+                                            image['stores'] == stores):
             return
+        if image.get('os_glance_failed_import'):
+            exc_cls = lib_exc.OtherRestClientException
+            break
 
         time.sleep(client.build_interval)
 
     message = ('Image %s failed to import on stores: %s' %
-               (image_id, str(image['os_glance_failed_import'])))
+               (image_id, str(image.get('os_glance_failed_import'))))
     caller = test_utils.find_test_caller()
     if caller:
         message = '(%s) %s' % (caller, message)
-    raise lib_exc.TimeoutException(message)
+    raise exc_cls(message)
 
 
 def wait_for_image_copied_to_stores(client, image_id):
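In short, the waiter now has two modes; a sketch of both calls, with an illustrative image ID and store list:

    # Wait until the image is active AND imported to exactly these stores:
    waiters.wait_for_image_imported_to_stores(
        client, 'image-id', stores='store1,store2')

    # With stores omitted (None), wait only for status == 'active'.
    # In either mode, an os_glance_failed_import property on the image
    # now fails fast with OtherRestClientException instead of burning
    # the whole build_timeout:
    waiters.wait_for_image_imported_to_stores(client, 'image-id')
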
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index acc563a..8866a22 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -660,7 +660,7 @@
         LOG.debug("image:%s", image['id'])
         return image['id']
 
-    def _log_console_output(self, servers=None, client=None, **kwargs):
+    def log_console_output(self, servers=None, client=None, **kwargs):
         """Console log output"""
         if not CONF.compute_feature_enabled.console_output:
             LOG.debug('Console output not supported, cannot log')
@@ -796,7 +796,7 @@
                       'result': 'expected' if result else 'unexpected'
                   })
         if server:
-            self._log_console_output([server])
+            self.log_console_output([server])
         return result
 
     def check_vm_connectivity(self, ip_address,
@@ -1285,7 +1285,7 @@
                                                should_connect=should_connect)
         except Exception as e:
             LOG.exception('Tenant network connectivity check failed')
-            self._log_console_output(servers_for_debug)
+            self.log_console_output(servers_for_debug)
             self._log_net_info(e)
             raise
 
@@ -1328,7 +1328,7 @@
                 % (dest, source_host)
         else:
             msg = "%s is reachable from %s" % (dest, source_host)
-        self._log_console_output()
+        self.log_console_output()
         self.fail(msg)
 
     def _create_security_group(self, security_group_rules_client=None,
diff --git a/tempest/scenario/test_minbw_allocation_placement.py b/tempest/scenario/test_minbw_allocation_placement.py
index a9d15bc..8c2752d 100644
--- a/tempest/scenario/test_minbw_allocation_placement.py
+++ b/tempest/scenario/test_minbw_allocation_placement.py
@@ -20,6 +20,7 @@
 from tempest.lib.common.utils import data_utils
 from tempest.lib.common.utils import test_utils
 from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
 from tempest.scenario import manager
 
 
@@ -54,6 +55,8 @@
     # https://github.com/openstack/placement/blob/master/placement/
     # db/constants.py#L16
     PLACEMENT_MAX_INT = 0x7FFFFFFF
+    BANDWIDTH_1 = 1000
+    BANDWIDTH_2 = 2000
 
     @classmethod
     def setup_clients(cls):
@@ -61,6 +64,7 @@
         cls.placement_client = cls.os_admin.placement_client
         cls.networks_client = cls.os_admin.networks_client
         cls.subnets_client = cls.os_admin.subnets_client
+        cls.ports_client = cls.os_primary.ports_client
         cls.routers_client = cls.os_adm.routers_client
         cls.qos_client = cls.os_admin.qos_client
         cls.qos_min_bw_client = cls.os_admin.qos_min_bw_client
@@ -78,7 +82,6 @@
     def setUp(self):
         super(MinBwAllocationPlacementTest, self).setUp()
         self._check_if_allocation_is_possible()
-        self._create_network_and_qos_policies()
 
     def _create_policy_and_min_bw_rule(self, name_prefix, min_kbps):
         policy = self.qos_client.create_qos_policy(
@@ -99,7 +102,7 @@
 
         return policy
 
-    def _create_qos_policies(self):
+    def _create_qos_basic_policies(self):
         self.qos_policy_valid = self._create_policy_and_min_bw_rule(
             name_prefix='test_policy_valid',
             min_kbps=self.SMALLEST_POSSIBLE_BW)
@@ -107,7 +110,20 @@
             name_prefix='test_policy_not_valid',
             min_kbps=self.PLACEMENT_MAX_INT)
 
-    def _create_network_and_qos_policies(self):
+    def _create_qos_policies_from_life(self):
+        # For tempest-slow the max bandwidth configured is 1000000,
+        # https://opendev.org/openstack/tempest/src/branch/master/
+        # .zuul.yaml#L416-L420
+        self.qos_policy_1 = self._create_policy_and_min_bw_rule(
+            name_prefix='test_policy_1',
+            min_kbps=self.BANDWIDTH_1
+        )
+        self.qos_policy_2 = self._create_policy_and_min_bw_rule(
+            name_prefix='test_policy_2',
+            min_kbps=self.BANDWIDTH_2
+        )
+
+    def _create_network_and_qos_policies(self, policy_method):
         physnet_name = CONF.network_feature_enabled.qos_placement_physnet
         base_segm = \
             CONF.network_feature_enabled.provider_net_base_segmentation_id
@@ -123,7 +139,7 @@
                 'provider:segmentation_id': base_segm
             })
 
-        self._create_qos_policies()
+        policy_method()
 
     def _check_if_allocation_is_possible(self):
         alloc_candidates = self.placement_client.list_allocation_candidates(
@@ -157,20 +173,29 @@
             status=status, ready_wait=False, raise_on_error=False)
         return server, port
 
-    def _assert_allocation_is_as_expected(self, allocations, port_id):
-        self.assertGreater(len(allocations['allocations']), 0)
+    def _assert_allocation_is_as_expected(self, consumer, port_ids,
+                                          min_kbps=SMALLEST_POSSIBLE_BW):
+        allocations = self.placement_client.list_allocations(
+            consumer)['allocations']
+        self.assertGreater(len(allocations), 0)
         bw_resource_in_alloc = False
-        for rp, resources in allocations['allocations'].items():
+        for rp, resources in allocations.items():
             if self.INGRESS_RESOURCE_CLASS in resources['resources']:
+                self.assertEqual(
+                    min_kbps,
+                    resources['resources'][self.INGRESS_RESOURCE_CLASS])
                 bw_resource_in_alloc = True
                 allocation_rp = rp
-        self.assertTrue(bw_resource_in_alloc)
+        if min_kbps:
+            self.assertTrue(bw_resource_in_alloc)
 
-        # Check binding_profile of the port is not empty and equals with the
-        # rp uuid
-        port = self.os_admin.ports_client.show_port(port_id)
-        self.assertEqual(allocation_rp,
-                         port['port']['binding:profile']['allocation'])
+            # Check that binding_profile of the port is not empty and
+            # equals the rp uuid
+            for port_id in port_ids:
+                port = self.os_admin.ports_client.show_port(port_id)
+                self.assertEqual(
+                    allocation_rp,
+                    port['port']['binding:profile']['allocation'])
 
     @decorators.idempotent_id('78625d92-212c-400e-8695-dd51706858b8')
     @utils.services('compute', 'network')
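For orientation, the shape of the placement payload the rewritten helper walks; the provider UUID and values are illustrative:

    # list_allocations(consumer)['allocations'] maps resource provider
    # UUIDs to their allocated resources, e.g.:
    allocations = {
        'rp-uuid': {
            'resources': {
                # The class's INGRESS_RESOURCE_CLASS; min_kbps is
                # asserted against this value.
                'NET_BW_IGR_KILOBIT_PER_SEC': 1000,
            }
        }
    }
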
@@ -193,11 +218,11 @@
         * Create port with invalid QoS policy, and try to boot VM with that,
         it should fail.
         """
-
+        self._create_network_and_qos_policies(self._create_qos_basic_policies)
         server1, valid_port = self._boot_vm_with_min_bw(
             qos_policy_id=self.qos_policy_valid['id'])
-        allocations = self.placement_client.list_allocations(server1['id'])
-        self._assert_allocation_is_as_expected(allocations, valid_port['id'])
+        self._assert_allocation_is_as_expected(server1['id'],
+                                               [valid_port['id']])
 
         server2, not_valid_port = self._boot_vm_with_min_bw(
             self.qos_policy_not_valid['id'], status='ERROR')
@@ -228,27 +253,28 @@
         * If the VM goes to ACTIVE state check that allocations are as
         expected.
         """
+        self._create_network_and_qos_policies(self._create_qos_basic_policies)
         server, valid_port = self._boot_vm_with_min_bw(
             qos_policy_id=self.qos_policy_valid['id'])
-        allocations = self.placement_client.list_allocations(server['id'])
-        self._assert_allocation_is_as_expected(allocations, valid_port['id'])
+        self._assert_allocation_is_as_expected(server['id'],
+                                               [valid_port['id']])
 
         self.servers_client.migrate_server(server_id=server['id'])
         waiters.wait_for_server_status(
             client=self.os_primary.servers_client, server_id=server['id'],
             status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False)
-        allocations = self.placement_client.list_allocations(server['id'])
 
         # TODO(lajoskatona): Check that the allocations are ok for the
         #  migration?
-        self._assert_allocation_is_as_expected(allocations, valid_port['id'])
+        self._assert_allocation_is_as_expected(server['id'],
+                                               [valid_port['id']])
 
         self.servers_client.confirm_resize_server(server_id=server['id'])
         waiters.wait_for_server_status(
             client=self.os_primary.servers_client, server_id=server['id'],
             status='ACTIVE', ready_wait=False, raise_on_error=True)
-        allocations = self.placement_client.list_allocations(server['id'])
-        self._assert_allocation_is_as_expected(allocations, valid_port['id'])
+        self._assert_allocation_is_as_expected(server['id'],
+                                               [valid_port['id']])
 
     @decorators.idempotent_id('c29e7fd3-035d-4993-880f-70819847683f')
     @testtools.skipUnless(CONF.compute_feature_enabled.resize,
@@ -264,10 +290,11 @@
         * If the VM goes to ACTIVE state check that allocations are as
         expected.
         """
+        self._create_network_and_qos_policies(self._create_qos_basic_policies)
         server, valid_port = self._boot_vm_with_min_bw(
             qos_policy_id=self.qos_policy_valid['id'])
-        allocations = self.placement_client.list_allocations(server['id'])
-        self._assert_allocation_is_as_expected(allocations, valid_port['id'])
+        self._assert_allocation_is_as_expected(server['id'],
+                                               [valid_port['id']])
 
         old_flavor = self.flavors_client.show_flavor(
             CONF.compute.flavor_ref)['flavor']
@@ -285,15 +312,176 @@
         waiters.wait_for_server_status(
             client=self.os_primary.servers_client, server_id=server['id'],
             status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False)
-        allocations = self.placement_client.list_allocations(server['id'])
 
         # TODO(lajoskatona): Check that the allocations are ok for the
         #  migration?
-        self._assert_allocation_is_as_expected(allocations, valid_port['id'])
+        self._assert_allocation_is_as_expected(server['id'],
+                                               [valid_port['id']])
 
         self.servers_client.confirm_resize_server(server_id=server['id'])
         waiters.wait_for_server_status(
             client=self.os_primary.servers_client, server_id=server['id'],
             status='ACTIVE', ready_wait=False, raise_on_error=True)
-        allocations = self.placement_client.list_allocations(server['id'])
-        self._assert_allocation_is_as_expected(allocations, valid_port['id'])
+        self._assert_allocation_is_as_expected(server['id'],
+                                               [valid_port['id']])
+
+    @decorators.idempotent_id('79fdaa1c-df62-4738-a0f0-1cff9dc415f6')
+    @utils.services('compute', 'network')
+    def test_qos_min_bw_allocation_update_policy(self):
+        """Test the update of QoS policy on bound port
+
+        Related RFE in neutron: #1882804
+        The scenario is the following:
+        * Have a port with QoS policy and minimum bandwidth rule.
+        * Boot a VM with the port.
+        * Update the port with a new policy with different minimum bandwidth
+        values.
+        * The allocation on the placement side should reflect the new
+        rules.
+        """
+        if not utils.is_network_feature_enabled('update_port_qos'):
+            raise self.skipException("update_port_qos feature is not enabled")
+
+        self._create_network_and_qos_policies(
+            self._create_qos_policies_from_life)
+
+        port = self.create_port(
+            self.prov_network['id'], qos_policy_id=self.qos_policy_1['id'])
+
+        server1 = self.create_server(
+            networks=[{'port': port['id']}])
+
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']],
+                                               self.BANDWIDTH_1)
+
+        self.ports_client.update_port(
+            port['id'],
+            **{'qos_policy_id': self.qos_policy_2['id']})
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']],
+                                               self.BANDWIDTH_2)
+
+        # Switch back to the original policy
+        self.ports_client.update_port(
+            port['id'],
+            **{'qos_policy_id': self.qos_policy_1['id']})
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']],
+                                               self.BANDWIDTH_1)
+
+        # Updating to an invalid policy must fail with Conflict
+        self.qos_policy_not_valid = self._create_policy_and_min_bw_rule(
+            name_prefix='test_policy_not_valid',
+            min_kbps=self.PLACEMENT_MAX_INT)
+        port_orig = self.ports_client.show_port(port['id'])['port']
+        self.assertRaises(
+            lib_exc.Conflict,
+            self.ports_client.update_port,
+            port['id'], **{'qos_policy_id': self.qos_policy_not_valid['id']})
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']],
+                                               self.BANDWIDTH_1)
+
+        port_upd = self.ports_client.show_port(port['id'])['port']
+        self.assertEqual(port_orig['qos_policy_id'],
+                         port_upd['qos_policy_id'])
+        self.assertEqual(self.qos_policy_1['id'], port_upd['qos_policy_id'])
+
+    @decorators.idempotent_id('9cfc3bb8-f433-4c91-87b6-747cadc8958a')
+    @utils.services('compute', 'network')
+    def test_qos_min_bw_allocation_update_policy_from_zero(self):
+        """Test port without QoS policy to have QoS policy
+
+        This scenario checks that updating a port without a QoS policy
+        to one with a minimum_bandwidth rule succeeds only on the
+        control plane, while the placement allocation remains 0.
+        """
+        if not utils.is_network_feature_enabled('update_port_qos'):
+            raise self.skipException("update_port_qos feature is not enabled")
+
+        self._create_network_and_qos_policies(
+            self._create_qos_policies_from_life)
+
+        port = self.create_port(self.prov_network['id'])
+
+        server1 = self.create_server(
+            networks=[{'port': port['id']}])
+
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']], 0)
+
+        self.ports_client.update_port(
+            port['id'], **{'qos_policy_id': self.qos_policy_2['id']})
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']], 0)
+
+    @decorators.idempotent_id('a9725a70-1d28-4e3b-ae0e-450abc235962')
+    @utils.services('compute', 'network')
+    def test_qos_min_bw_allocation_update_policy_to_zero(self):
+        """Test port with QoS policy to remove QoS policy
+
+        In this scenario, removing the QoS policy from a port with a
+        minimum_bandwidth rule results in a placement allocation of 0.
+        """
+        if not utils.is_network_feature_enabled('update_port_qos'):
+            raise self.skipException("update_port_qos feature is not enabled")
+
+        self._create_network_and_qos_policies(
+            self._create_qos_policies_from_life)
+
+        port = self.create_port(
+            self.prov_network['id'], qos_policy_id=self.qos_policy_1['id'])
+
+        server1 = self.create_server(
+            networks=[{'port': port['id']}])
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']],
+                                               self.BANDWIDTH_1)
+
+        self.ports_client.update_port(
+            port['id'],
+            **{'qos_policy_id': None})
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']], 0)
+
+    @decorators.idempotent_id('756ced7f-6f1a-43e7-a851-2fcfc16f3dd7')
+    @utils.services('compute', 'network')
+    def test_qos_min_bw_allocation_update_with_multiple_ports(self):
+        if not utils.is_network_feature_enabled('update_port_qos'):
+            raise self.skipException("update_port_qos feature is not enabled")
+
+        self._create_network_and_qos_policies(
+            self._create_qos_policies_from_life)
+
+        port1 = self.create_port(
+            self.prov_network['id'], qos_policy_id=self.qos_policy_1['id'])
+        port2 = self.create_port(
+            self.prov_network['id'], qos_policy_id=self.qos_policy_2['id'])
+
+        server1 = self.create_server(
+            networks=[{'port': port1['id']}, {'port': port2['id']}])
+        self._assert_allocation_is_as_expected(
+            server1['id'], [port1['id'], port2['id']],
+            self.BANDWIDTH_1 + self.BANDWIDTH_2)
+
+        self.ports_client.update_port(
+            port1['id'],
+            **{'qos_policy_id': self.qos_policy_2['id']})
+        self._assert_allocation_is_as_expected(
+            server1['id'], [port1['id'], port2['id']],
+            2 * self.BANDWIDTH_2)
+
+    @decorators.idempotent_id('0805779e-e03c-44fb-900f-ce97a790653b')
+    @utils.services('compute', 'network')
+    def test_empty_update(self):
+        if not utils.is_network_feature_enabled('update_port_qos'):
+            raise self.skipException("update_port_qos feature is not enabled")
+
+        self._create_network_and_qos_policies(
+            self._create_qos_policies_from_life)
+
+        port = self.create_port(
+            self.prov_network['id'], qos_policy_id=self.qos_policy_1['id'])
+
+        server1 = self.create_server(
+            networks=[{'port': port['id']}])
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']],
+                                               self.BANDWIDTH_1)
+        self.ports_client.update_port(
+            port['id'],
+            **{'description': 'foo'})
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']],
+                                               self.BANDWIDTH_1)
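The expected totals in the multiple-ports test above, spelled out:

    BANDWIDTH_1, BANDWIDTH_2 = 1000, 2000
    # port1 on policy_1 plus port2 on policy_2:
    assert BANDWIDTH_1 + BANDWIDTH_2 == 3000
    # after updating port1 to policy_2, both ports consume policy_2:
    assert 2 * BANDWIDTH_2 == 4000
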
diff --git a/tempest/scenario/test_network_v6.py b/tempest/scenario/test_network_v6.py
index 14f24c7..9be28c4 100644
--- a/tempest/scenario/test_network_v6.py
+++ b/tempest/scenario/test_network_v6.py
@@ -218,7 +218,7 @@
                     guest_has_address,
                     CONF.validation.ping_timeout, 1, ssh, ip)
                 if not result:
-                    self._log_console_output(servers=[srv])
+                    self.log_console_output(servers=[srv])
                     self.fail(
                         'Address %s not configured for instance %s, '
                         'ip address output is\n%s' %
diff --git a/tempest/scenario/test_security_groups_basic_ops.py b/tempest/scenario/test_security_groups_basic_ops.py
index 3fc93e4..03a4a39 100644
--- a/tempest/scenario/test_security_groups_basic_ops.py
+++ b/tempest/scenario/test_security_groups_basic_ops.py
@@ -464,9 +464,9 @@
     def _log_console_output_for_all_tenants(self):
         for tenant in self.tenants.values():
             client = tenant.manager.servers_client
-            self._log_console_output(servers=tenant.servers, client=client)
+            self.log_console_output(servers=tenant.servers, client=client)
             if tenant.access_point is not None:
-                self._log_console_output(
+                self.log_console_output(
                     servers=[tenant.access_point], client=client)
 
     def _create_protocol_ruleset(self, protocol, port=80):
diff --git a/tempest/scenario/test_server_advanced_ops.py b/tempest/scenario/test_server_advanced_ops.py
index 8aa729b..990b325 100644
--- a/tempest/scenario/test_server_advanced_ops.py
+++ b/tempest/scenario/test_server_advanced_ops.py
@@ -37,7 +37,7 @@
 
     @classmethod
     def setup_credentials(cls):
-        cls.set_network_resources()
+        cls.set_network_resources(network=True, subnet=True)
         super(TestServerAdvancedOps, cls).setup_credentials()
 
     @decorators.attr(type='slow')
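The behavior relied on here, sketched with the full keyword set of tempest.test.BaseTestCase.set_network_resources (all defaults assumed to be False):

    # Calling with no arguments disables all auto-created network
    # resources for the class; each keyword opts one back in. Here
    # router and dhcp stay disabled:
    cls.set_network_resources(network=True, subnet=True,
                              router=False, dhcp=False)
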
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index ff74877..d64d7b0 100755
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -66,7 +66,7 @@
         # Ensure waiter returns before build_timeout
         self.assertLess((end_time - start_time), 10)
 
-    def test_wait_for_image_imported_to_stores_timeout(self):
+    def test_wait_for_image_imported_to_stores_failure(self):
         time_mock = self.patch('time.time')
         client = mock.MagicMock()
         client.build_timeout = 2
@@ -77,6 +77,20 @@
             'status': 'saving',
             'stores': 'fake_store',
             'os_glance_failed_import': 'fake_os_glance_failed_import'})
+        self.assertRaises(lib_exc.OtherRestClientException,
+                          waiters.wait_for_image_imported_to_stores,
+                          client, 'fake_image_id', 'fake_store')
+
+    def test_wait_for_image_imported_to_stores_timeout(self):
+        time_mock = self.patch('time.time')
+        client = mock.MagicMock()
+        client.build_timeout = 2
+        self.patch('time.time', side_effect=[0., 1., 2.])
+        time_mock.side_effect = utils.generate_timeout_series(1)
+
+        client.show_image.return_value = ({
+            'status': 'saving',
+            'stores': 'fake_store'})
         self.assertRaises(lib_exc.TimeoutException,
                           waiters.wait_for_image_imported_to_stores,
                           client, 'fake_image_id', 'fake_store')
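
For reference, a hedged sketch of what the timeout-series helper provides; the real implementation lives in tempest.tests.utils, and this is an equivalent, not a copy:

    import itertools

    def generate_timeout_series(timeout):
        # Yield a strictly increasing fake clock so a waiter comparing
        # time.time() - start against build_timeout is guaranteed to
        # exceed `timeout` after a few iterations.
        for i in itertools.count(1):
            yield i * (timeout + 1)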