Merge "Test deleting volume snapshots while instance is stopped"
diff --git a/releasenotes/notes/bug-2132971-a89a576348dcd1d6.yaml b/releasenotes/notes/bug-2132971-a89a576348dcd1d6.yaml
new file mode 100644
index 0000000..d21289c
--- /dev/null
+++ b/releasenotes/notes/bug-2132971-a89a576348dcd1d6.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+  - |
+    Fixed bug #2132971. ``test_rebuild_server`` will no longer expect a
+    floating ip when floating ip networks are disabled.
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index 33c141d..29ec4d5 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,7 @@
    :maxdepth: 1
 
    unreleased
+   v46.0.0
    v45.0.0
    v44.0.0
    v43.0.0
diff --git a/releasenotes/source/v46.0.0.rst b/releasenotes/source/v46.0.0.rst
new file mode 100644
index 0000000..cbd2e95
--- /dev/null
+++ b/releasenotes/source/v46.0.0.rst
@@ -0,0 +1,6 @@
+=====================
+v46.0.0 Release Notes
+=====================
+
+.. release-notes:: 46.0.0 Release Notes
+   :version: 46.0.0
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 3ac2b46..3b44ded 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -560,8 +560,17 @@
                             'tagging metadata was not checked in the '
                             'metadata API')
                 return True
+
             cmd = 'curl %s' % md_url
-            md_json = ssh_client.exec_command(cmd)
+            try:
+                md_json = ssh_client.exec_command(cmd)
+            except lib_exc.SSHExecCommandFailed:
+                # NOTE(eolivare): We cannot guarantee that the metadata service
+                # is available right after the VM is ssh-able, because it could
+                # obtain authorized ssh keys from config_drive or it could use
+                # password. Hence, retries may be needed.
+                LOG.exception('metadata service not available yet')
+                return False
             return verify_method(md_json)
         # NOTE(gmann) Keep refreshing the metadata info until the metadata
         # cache is refreshed. For safer side, we will go with wait loop of
diff --git a/tempest/api/compute/security_groups/base.py b/tempest/api/compute/security_groups/base.py
index ef69a13..f57ed59 100644
--- a/tempest/api/compute/security_groups/base.py
+++ b/tempest/api/compute/security_groups/base.py
@@ -27,6 +27,15 @@
     create_default_network = True
 
     @classmethod
+    def setup_clients(cls):
+        super(BaseSecurityGroupsTest, cls).setup_clients()
+        if CONF.enforce_scope.nova and hasattr(cls, 'os_project_reader'):
+            cls.reader_security_groups_client = (
+                cls.os_project_reader.compute_security_groups_client)
+        else:
+            cls.reader_security_groups_client = cls.security_groups_client
+
+    @classmethod
     def skip_checks(cls):
         super(BaseSecurityGroupsTest, cls).skip_checks()
         if not utils.get_service_list()['network']:
diff --git a/tempest/api/compute/security_groups/test_security_group_rules.py b/tempest/api/compute/security_groups/test_security_group_rules.py
index 3c4daf6..ed6dcd4 100644
--- a/tempest/api/compute/security_groups/test_security_group_rules.py
+++ b/tempest/api/compute/security_groups/test_security_group_rules.py
@@ -148,7 +148,7 @@
             rule2_id)
 
         # Get rules of the created Security Group
-        rules = self.security_groups_client.show_security_group(
+        rules = self.reader_security_groups_client.show_security_group(
             securitygroup_id)['security_group']['rules']
         self.assertNotEmpty([i for i in rules if i['id'] == rule1_id])
         self.assertNotEmpty([i for i in rules if i['id'] == rule2_id])
@@ -173,7 +173,8 @@
         # Delete group2
         self.security_groups_client.delete_security_group(sg2_id)
         # Get rules of the Group1
-        rules = (self.security_groups_client.show_security_group(sg1_id)
-                 ['security_group']['rules'])
+        rules = (
+            self.reader_security_groups_client.show_security_group(sg1_id)
+            ['security_group']['rules'])
         # The group1 has no rules because group2 has deleted
         self.assertEmpty(rules)
diff --git a/tempest/api/compute/security_groups/test_security_groups.py b/tempest/api/compute/security_groups/test_security_groups.py
index 01a7986..89b1025 100644
--- a/tempest/api/compute/security_groups/test_security_groups.py
+++ b/tempest/api/compute/security_groups/test_security_groups.py
@@ -45,7 +45,9 @@
             security_group_list.append(body)
         # Fetch all Security Groups and verify the list
         # has all created Security Groups
-        fetched_list = self.client.list_security_groups()['security_groups']
+        fetched_list = (
+            self.reader_security_groups_client.list_security_groups()
+            ['security_groups'])
         # Now check if all the created Security Groups are in fetched list
         missing_sgs = \
             [sg for sg in security_group_list if sg not in fetched_list]
@@ -58,7 +60,9 @@
             self.client.delete_security_group(sg['id'])
             self.client.wait_for_resource_deletion(sg['id'])
         # Now check if all the created Security Groups are deleted
-        fetched_list = self.client.list_security_groups()['security_groups']
+        fetched_list = (
+            self.reader_security_groups_client.list_security_groups()
+            ['security_groups'])
         deleted_sgs = [sg for sg in security_group_list if sg in fetched_list]
         self.assertFalse(deleted_sgs,
                          "Failed to delete Security Group %s "
@@ -80,8 +84,9 @@
                          "The created Security Group name is "
                          "not equal to the requested name")
         # Now fetch the created Security Group by its 'id'
-        fetched_group = (self.client.show_security_group(securitygroup['id'])
-                         ['security_group'])
+        fetched_group = (
+            self.reader_security_groups_client.show_security_group(
+                securitygroup['id'])['security_group'])
         self.assertEqual(securitygroup, fetched_group,
                          "The fetched Security Group is different "
                          "from the created Group")
@@ -144,8 +149,9 @@
                                           name=s_new_name,
                                           description=s_new_des)
         # get the security group
-        fetched_group = (self.client.show_security_group(securitygroup_id)
-                         ['security_group'])
+        fetched_group = (
+            self.reader_security_groups_client.show_security_group(
+                securitygroup_id)['security_group'])
         self.assertEqual(s_new_name, fetched_group['name'])
         self.assertEqual(s_new_des, fetched_group['description'])
 
@@ -170,7 +176,7 @@
 
         # list security groups for a server
         fetched_groups = (
-            self.servers_client.list_security_groups_by_server(
+            self.reader_servers_client.list_security_groups_by_server(
                 server_id)['security_groups'])
         fetched_security_groups_ids = [i['id'] for i in fetched_groups]
         # verifying the security groups ids in list
diff --git a/tempest/api/compute/security_groups/test_security_groups_negative.py b/tempest/api/compute/security_groups/test_security_groups_negative.py
index c7d873f..b4f060f 100644
--- a/tempest/api/compute/security_groups/test_security_groups_negative.py
+++ b/tempest/api/compute/security_groups/test_security_groups_negative.py
@@ -41,8 +41,10 @@
     def test_security_group_get_nonexistent_group(self):
         """Test getting non existent security group details should fail"""
         non_exist_id = self.generate_random_security_group_id()
-        self.assertRaises(lib_exc.NotFound, self.client.show_security_group,
-                          non_exist_id)
+        self.assertRaises(
+            lib_exc.NotFound,
+            self.reader_security_groups_client.show_security_group,
+            non_exist_id)
 
     @decorators.skip_because(bug="1161411",
                              condition=CONF.service_available.neutron)
@@ -111,7 +113,9 @@
     def test_delete_the_default_security_group(self):
         """Test deleting "default" security group should fail"""
         default_security_group_id = None
-        body = self.client.list_security_groups()['security_groups']
+        body = (
+            self.reader_security_groups_client.list_security_groups()
+            ['security_groups'])
         for i in range(len(body)):
             if body[i]['name'] == 'default':
                 default_security_group_id = body[i]['id']
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index 1fe4a65..10c2e91 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -283,10 +283,11 @@
         # a situation when a newly created server doesn't have a floating
         # ip attached at the beginning of the test_rebuild_server let's
         # make sure right here the floating ip is attached
-        waiters.wait_for_server_floating_ip(
-            self.servers_client,
-            server,
-            validation_resources['floating_ip'])
+        if 'floating_ip' in validation_resources:
+            waiters.wait_for_server_floating_ip(
+                self.servers_client,
+                server,
+                validation_resources['floating_ip'])
 
         self.addCleanup(waiters.wait_for_server_termination,
                         self.servers_client, server['id'])
diff --git a/tempest/api/object_storage/test_account_quotas.py b/tempest/api/object_storage/test_account_quotas.py
index 37783b8..6c472a6 100644
--- a/tempest/api/object_storage/test_account_quotas.py
+++ b/tempest/api/object_storage/test_account_quotas.py
@@ -109,6 +109,57 @@
         nafter = self._get_bytes_used()
         self.assertEqual(nbefore, nafter)
 
+    @decorators.idempotent_id('aab68903-cc9f-493a-b17e-b387db3e4e44')
+    @utils.requires_ext(extension='account_quotas', service='object')
+    def test_storage_policy_quota_limit(self):
+        """Verify quota limits are enforced per storage policy"""
+        policy_names = [p["name"] for p in self.policies]
+        if 'silver' not in policy_names:
+            raise self.skipException("Missing storage policy 'silver'")
+
+        policy_quota = 10
+        policy_quota_header = {
+            "X-Account-Quota-Bytes-Policy-silver": str(policy_quota)
+        }
+        self.account_client.auth_provider.set_alt_auth_data(
+            request_part='headers',
+            auth_data=self.reselleradmin_auth_data
+        )
+        self.os_roles_operator.account_client.request(
+            "POST", url="", headers=policy_quota_header, body=""
+        )
+
+        # Create a new container using the "silver" storage policy
+        silver_container = data_utils.rand_name("silver-container")
+        headers = {'X-Storage-Policy': 'silver'}
+        self.container_client.create_container(
+            silver_container, **headers
+        )
+
+        # Try uploading an object larger than the quota
+        large_data = data_utils.arbitrary_string(size=policy_quota + 1)
+        object_name = data_utils.rand_name(name='large_object')
+        self.assertRaises(
+            lib_exc.OverLimit,
+            self.object_client.create_object,
+            silver_container,
+            object_name,
+            large_data
+            )
+
+        # Upload same large object to default container
+        default_container = data_utils.rand_name(
+            "default_container"
+        )
+        self.container_client.create_container(default_container)
+        default_object = data_utils.rand_name(name='default_object')
+        resp, _ = self.object_client.create_object(
+            default_container,
+            default_object,
+            large_data
+        )
+        self.assertHeaders(resp, 'Object', 'PUT')
+
     @decorators.attr(type=["smoke"])
     @decorators.idempotent_id('63f51f9f-5f1d-4fc6-b5be-d454d70949d6')
     @utils.requires_ext(extension='account_quotas', service='object')
diff --git a/tempest/scenario/test_network_qos_placement.py b/tempest/scenario/test_network_qos_placement.py
index 055dcb6..faff6f9 100644
--- a/tempest/scenario/test_network_qos_placement.py
+++ b/tempest/scenario/test_network_qos_placement.py
@@ -152,21 +152,36 @@
             min_kbps=self.BANDWIDTH_2
         )
 
-    def _create_network_and_qos_policies(self, policy_method):
-        physnet_name = CONF.network_feature_enabled.qos_placement_physnet
-        base_segm = \
-            CONF.network_feature_enabled.provider_net_base_segmentation_id
-
-        self.prov_network, _, _ = self.setup_network_subnet_with_router(
-            networks_client=self.networks_client,
-            routers_client=self.routers_client,
-            subnets_client=self.subnets_client,
+    def _use_or_create_network_and_qos_policies(self, policy_method):
+        vlan_ext_nets = self.networks_client.list_networks(
             **{
-                'shared': True,
                 'provider:network_type': 'vlan',
-                'provider:physical_network': physnet_name,
-                'provider:segmentation_id': base_segm
-            })
+                'router:external': True}
+        )['networks']
+        if vlan_ext_nets:
+            self.prov_network = vlan_ext_nets[0]
+            if not self.prov_network['shared']:
+                self.prov_network = self.networks_client.update_network(
+                    self.prov_network['id'], shared=True)['network']
+                self.addClassResourceCleanup(
+                    self.networks_client.update_network,
+                    self.prov_network['id'],
+                    shared=False)
+        else:
+            physnet_name = CONF.network_feature_enabled.qos_placement_physnet
+            base_segm = \
+                CONF.network_feature_enabled.provider_net_base_segmentation_id
+
+            self.prov_network, _, _ = self.setup_network_subnet_with_router(
+                networks_client=self.networks_client,
+                routers_client=self.routers_client,
+                subnets_client=self.subnets_client,
+                **{
+                    'shared': True,
+                    'provider:network_type': 'vlan',
+                    'provider:physical_network': physnet_name,
+                    'provider:segmentation_id': base_segm
+                })
 
         policy_method()
 
@@ -261,7 +276,8 @@
         * Create port with invalid QoS policy, and try to boot VM with that,
         it should fail.
         """
-        self._create_network_and_qos_policies(self._create_qos_basic_policies)
+        self._use_or_create_network_and_qos_policies(
+            self._create_qos_basic_policies)
         server1, valid_port = self._boot_vm_with_min_bw(
             qos_policy_id=self.qos_policy_valid['id'])
         self._assert_allocation_is_as_expected(server1['id'],
@@ -297,7 +313,8 @@
         * If the VM goes to ACTIVE state check that allocations are as
         expected.
         """
-        self._create_network_and_qos_policies(self._create_qos_basic_policies)
+        self._use_or_create_network_and_qos_policies(
+            self._create_qos_basic_policies)
         server, valid_port = self._boot_vm_with_min_bw(
             qos_policy_id=self.qos_policy_valid['id'])
         self._assert_allocation_is_as_expected(server['id'],
@@ -335,7 +352,8 @@
         * If the VM goes to ACTIVE state check that allocations are as
         expected.
         """
-        self._create_network_and_qos_policies(self._create_qos_basic_policies)
+        self._use_or_create_network_and_qos_policies(
+            self._create_qos_basic_policies)
         server, valid_port = self._boot_vm_with_min_bw(
             qos_policy_id=self.qos_policy_valid['id'])
         self._assert_allocation_is_as_expected(server['id'],
@@ -378,7 +396,7 @@
         if not utils.is_network_feature_enabled('update_port_qos'):
             raise self.skipException("update_port_qos feature is not enabled")
 
-        self._create_network_and_qos_policies(
+        self._use_or_create_network_and_qos_policies(
             self._create_qos_policies_from_life)
 
         port = self.create_port(
@@ -432,7 +450,7 @@
         if not utils.is_network_feature_enabled('update_port_qos'):
             raise self.skipException("update_port_qos feature is not enabled")
 
-        self._create_network_and_qos_policies(
+        self._use_or_create_network_and_qos_policies(
             self._create_qos_policies_from_life)
 
         port = self.create_port(self.prov_network['id'])
@@ -457,7 +475,7 @@
         if not utils.is_network_feature_enabled('update_port_qos'):
             raise self.skipException("update_port_qos feature is not enabled")
 
-        self._create_network_and_qos_policies(
+        self._use_or_create_network_and_qos_policies(
             self._create_qos_policies_from_life)
 
         port = self.create_port(
@@ -479,7 +497,7 @@
         if not utils.is_network_feature_enabled('update_port_qos'):
             raise self.skipException("update_port_qos feature is not enabled")
 
-        self._create_network_and_qos_policies(
+        self._use_or_create_network_and_qos_policies(
             self._create_qos_policies_from_life)
 
         port1 = self.create_port(
@@ -506,7 +524,7 @@
         if not utils.is_network_feature_enabled('update_port_qos'):
             raise self.skipException("update_port_qos feature is not enabled")
 
-        self._create_network_and_qos_policies(
+        self._use_or_create_network_and_qos_policies(
             self._create_qos_policies_from_life)
 
         port = self.create_port(
@@ -552,7 +570,7 @@
                 direction=self.EGRESS_DIRECTION,
             )
 
-        self._create_network_and_qos_policies(create_policies)
+        self._use_or_create_network_and_qos_policies(create_policies)
 
         port = self.create_port(
             self.prov_network['id'],
diff --git a/test-requirements.txt b/test-requirements.txt
index b925921..f599d53 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,4 +1,4 @@
 hacking>=7.0.0,<7.1.0
 coverage!=4.4,>=4.0 # Apache-2.0
 oslotest>=3.2.0 # Apache-2.0
-flake8-import-order>=0.18.0,<0.19.0 # LGPLv3
+flake8-import-order>=0.19.0 # LGPLv3
diff --git a/tools/generate-tempest-plugins-list.py b/tools/generate-tempest-plugins-list.py
index 2e8ced5..0690d57 100644
--- a/tools/generate-tempest-plugins-list.py
+++ b/tools/generate-tempest-plugins-list.py
@@ -79,6 +79,10 @@
     # No changes are merging in this
     # https://review.opendev.org/q/project:x%252Fnetworking-fortinet
-    'x/networking-fortinet'
+    'x/networking-fortinet',
+    # It is broken and it uses the retired plugin 'patrol'. The last
+    # change made to this plugin was 7 years ago.
+    # https://opendev.org/airship/tempest-plugin
+    'airship/tempest-plugin'
 ]
 
 url = 'https://review.opendev.org/projects/'
diff --git a/zuul.d/integrated-gate.yaml b/zuul.d/integrated-gate.yaml
index d151274..2fc7aea 100644
--- a/zuul.d/integrated-gate.yaml
+++ b/zuul.d/integrated-gate.yaml
@@ -82,8 +82,8 @@
       Former names for this job where:
         * legacy-tempest-dsvm-py35
         * gate-tempest-dsvm-py35
-    required-projects:
-      - openstack/horizon
+    # required-projects:
+    #  - openstack/horizon
     vars:
       # NOTE(gmann): Default concurrency is higher (number of cpu -2) which
       # end up 6 in upstream CI. Higher concurrency means high parallel
@@ -101,7 +101,11 @@
         neutron: https://opendev.org/openstack/neutron
       devstack_services:
         # Enable horizon so that we can run horizon test.
-        horizon: true
+        # horizon: true
+        # FIXME(sean-k-mooney): restore horizon deployment
+        # once horizon does not depend on setuptools to provide
+        # pkg_resources or bug #2141277 is resolved by other means
+        horizon: false
 
 - job:
     name: tempest-full-centos-9-stream
@@ -436,29 +440,6 @@
       run on neutron gate only.
     check:
       jobs:
-        - grenade
-        # NOTE(gmann): These template are generic and used on stable branch
-        # as well as master testing. So grenade-skip-level on stable/2023.1
-        # which test stable/yoga to stable/2023.1 upgrade is non-voting.
-        - grenade-skip-level:
-            voting: false
-            branches:
-              - stable/2023.1
-        # on stable/2024.1(SLURP) grenade-skip-level is voting which test
-        # stable/2023.1 to stable/2024.1 upgrade. This is supposed to run on
-        # SLURP release only.
-        - grenade-skip-level:
-            branches:
-              - ^.*/2024.1
-        # on 2025.1(SLURP) grenade-skip-level-always is voting.
-        # which test stable/2024.1 to 2025.1 upgrade.
-        # As extra testing, we do run it voting on current master(even that is non SLURP).
-        # but if project feel that is not required to run for non SLURP releases then they can opt to make it non-voting or remove it.
-        - grenade-skip-level-always:
-            branches:
-              - ^.*/2025.2
-              - ^.*/2025.1
-              - master
         - tempest-integrated-networking
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
@@ -470,23 +451,7 @@
               negate: true
     gate:
       jobs:
-        - grenade
         - tempest-integrated-networking
-        # on stable/2024.1(SLURP) grenade-skip-level is voting which test
-        # stable/2023.1 to stable/2024.1 upgrade. This is supposed to run on
-        # SLURP release only.
-        - grenade-skip-level:
-            branches:
-              - ^.*/2024.1
-        # on 2025.1(SLURP) grenade-skip-level-always is voting.
-        # which test stable/2024.1 to 2025.1 upgrade.
-        # As extra testing, we do run it voting on current master(even that is non SLURP).
-        # but if project feel that is not required to run for non SLURP releases then they can opt to make it non-voting or remove it.
-        - grenade-skip-level-always:
-            branches:
-              - ^.*/2025.2
-              - ^.*/2025.1
-              - master
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
         # and job is broken up to wallaby branch due to the issue