Merge "Add project reader to account-generator"
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index ccd5fe1..882413f 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,7 @@
    :maxdepth: 1
 
    unreleased
+   v34.0.0
    v33.0.0
    v32.0.0
    v31.1.0
diff --git a/releasenotes/source/v34.0.0.rst b/releasenotes/source/v34.0.0.rst
new file mode 100644
index 0000000..94d3b67
--- /dev/null
+++ b/releasenotes/source/v34.0.0.rst
@@ -0,0 +1,6 @@
+=====================
+v34.0.0 Release Notes
+=====================
+
+.. release-notes:: 34.0.0 Release Notes
+   :version: 34.0.0
diff --git a/tempest/api/compute/admin/test_volume_swap.py b/tempest/api/compute/admin/test_volume_swap.py
index 7da87c7..36148c5 100644
--- a/tempest/api/compute/admin/test_volume_swap.py
+++ b/tempest/api/compute/admin/test_volume_swap.py
@@ -199,11 +199,12 @@
            "server1"
         8. Check "volume2" is attached to "server1".
         """
+        multiattach_vol_type = CONF.volume.volume_type_multiattach
         # Create two volumes.
         # NOTE(gmann): Volumes are created before server creation so that
         # volumes cleanup can happen successfully irrespective of which volume
         # is attached to server.
-        volume1 = self.create_volume(multiattach=True)
+        volume1 = self.create_volume(volume_type=multiattach_vol_type)
         # Make volume1 read-only since you can't swap from a volume with
         # multiple read/write attachments, and you can't change the readonly
         # flag on an in-use volume so we have to do this before attaching
@@ -211,7 +212,7 @@
         # attach modes, then we can handle this differently.
         self.admin_volumes_client.update_volume_readonly(
             volume1['id'], readonly=True)
-        volume2 = self.create_volume(multiattach=True)
+        volume2 = self.create_volume(volume_type=multiattach_vol_type)
 
         # Create two servers and wait for them to be ACTIVE.
         validation_resources = self.get_class_validation_resources(
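The two hunks above replace the old `multiattach=True` create flag with a deployer-configured multiattach volume type (in Cinder, multiattach capability is expressed through a volume type whose extra specs set multiattach="<is> True"; stated here as background, not part of this diff). A minimal sketch of the pattern, with the surrounding test class assumed:

    # Sketch only: CONF.volume.volume_type_multiattach is the option this
    # change introduces; create_volume() is tempest's existing base-class
    # helper.
    multiattach_vol_type = CONF.volume.volume_type_multiattach
    volume = self.create_volume(volume_type=multiattach_vol_type)
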
diff --git a/tempest/api/compute/admin/test_volumes_negative.py b/tempest/api/compute/admin/test_volumes_negative.py
index 91ab09e..55c842f 100644
--- a/tempest/api/compute/admin/test_volumes_negative.py
+++ b/tempest/api/compute/admin/test_volumes_negative.py
@@ -115,9 +115,11 @@
         5. Check "vol1" is still attached to both servers
         6. Check "vol2" is not attached to any server
         """
+        multiattach_vol_type = CONF.volume.volume_type_multiattach
+
         # Create two multiattach capable volumes.
-        vol1 = self.create_volume(multiattach=True)
-        vol2 = self.create_volume(multiattach=True)
+        vol1 = self.create_volume(volume_type=multiattach_vol_type)
+        vol2 = self.create_volume(volume_type=multiattach_vol_type)
 
         # Create two instances.
         validation_resources = self.get_class_validation_resources(
diff --git a/tempest/api/compute/servers/test_create_server_multi_nic.py b/tempest/api/compute/servers/test_create_server_multi_nic.py
index bd3f58d..6ec058d 100644
--- a/tempest/api/compute/servers/test_create_server_multi_nic.py
+++ b/tempest/api/compute/servers/test_create_server_multi_nic.py
@@ -23,6 +23,28 @@
 CONF = config.CONF
 
 
+def get_subnets(count=2):
+    """Returns a list of requested subnets from project_network_cidr block.
+
+    Args:
+        count (int):    Number of subnets required.
+
+    Returns:
+        CIDRs as a list of strings
+            e.g. ['19.80.0.0/24', '19.86.0.0/24']
+    """
+    default_rtn = ['19.80.0.0/24', '19.86.0.0/24']
+    _net = netaddr.IPNetwork(CONF.network.project_network_cidr)
+
+    # Split the subnet into the requested number of smaller subnets.
+    sub_prefix_len = (32 - _net.prefixlen) // count
+    if sub_prefix_len < 1:
+        return default_rtn
+
+    _new_cidr = _net.prefixlen + sub_prefix_len
+    return [str(net) for _, net in zip(range(count), _net.subnet(_new_cidr))]
+
+
 class ServersTestMultiNic(base.BaseV2ComputeTest):
     """Test multiple networks in servers"""
 
@@ -65,8 +87,9 @@
         The networks order given at the server creation is preserved within
         the server.
         """
-        net1 = self._create_net_subnet_ret_net_from_cidr('19.80.0.0/24')
-        net2 = self._create_net_subnet_ret_net_from_cidr('19.86.0.0/24')
+        _cidrs = get_subnets()
+        net1 = self._create_net_subnet_ret_net_from_cidr(_cidrs[0])
+        net2 = self._create_net_subnet_ret_net_from_cidr(_cidrs[1])
 
         networks = [{'uuid': net1['network']['id']},
                     {'uuid': net2['network']['id']}]
@@ -86,14 +109,12 @@
                      ['addresses'])
 
         # We can't predict the ip addresses assigned to the server on networks.
-        # Sometimes the assigned addresses are ['19.80.0.2', '19.86.0.2'], at
-        # other times ['19.80.0.3', '19.86.0.3']. So we check if the first
-        # address is in first network, similarly second address is in second
-        # network.
+        # So we check that the first address is in the first network and,
+        # similarly, that the second address is in the second network.
         addr = [addresses[net1['network']['name']][0]['addr'],
                 addresses[net2['network']['name']][0]['addr']]
-        networks = [netaddr.IPNetwork('19.80.0.0/24'),
-                    netaddr.IPNetwork('19.86.0.0/24')]
+        networks = [netaddr.IPNetwork(_cidrs[0]),
+                    netaddr.IPNetwork(_cidrs[1])]
         for address, network in zip(addr, networks):
             self.assertIn(address, network)
 
@@ -107,8 +128,9 @@
         """
         # Verify that server creation does not fail when more than one nic
         # is created on the same network.
-        net1 = self._create_net_subnet_ret_net_from_cidr('19.80.0.0/24')
-        net2 = self._create_net_subnet_ret_net_from_cidr('19.86.0.0/24')
+        _cidrs = get_subnets()
+        net1 = self._create_net_subnet_ret_net_from_cidr(_cidrs[0])
+        net2 = self._create_net_subnet_ret_net_from_cidr(_cidrs[1])
 
         networks = [{'uuid': net1['network']['id']},
                     {'uuid': net2['network']['id']},
@@ -124,8 +146,8 @@
         addr = [addresses[net1['network']['name']][0]['addr'],
                 addresses[net2['network']['name']][0]['addr'],
                 addresses[net1['network']['name']][1]['addr']]
-        networks = [netaddr.IPNetwork('19.80.0.0/24'),
-                    netaddr.IPNetwork('19.86.0.0/24'),
-                    netaddr.IPNetwork('19.80.0.0/24')]
+        networks = [netaddr.IPNetwork(_cidrs[0]),
+                    netaddr.IPNetwork(_cidrs[1]),
+                    netaddr.IPNetwork(_cidrs[0])]
         for address, network in zip(addr, networks):
             self.assertIn(address, network)
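get_subnets() above derives the two test CIDRs from CONF.network.project_network_cidr instead of hard-coding 19.80.0.0/24 and 19.86.0.0/24, falling back to those defaults when the configured block has too few host bits for the requested count. A standalone sketch of the same splitting logic (netaddr is already a tempest dependency; the input CIDR here is only for illustration):

    import netaddr

    net = netaddr.IPNetwork('10.1.0.0/20')
    count = 2
    # Extra prefix bits used to carve the block into smaller subnets.
    sub_prefix_len = (32 - net.prefixlen) // count   # (32 - 20) // 2 = 6
    new_prefixlen = net.prefixlen + sub_prefix_len   # /26
    subnets = [str(s) for _, s in zip(range(count), net.subnet(new_prefixlen))]
    # subnets == ['10.1.0.0/26', '10.1.0.64/26']
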
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index 5380c67..7ea8f09 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -369,7 +369,9 @@
         kwargs = {}
         if bootable:
             kwargs['image_ref'] = CONF.compute.image_ref
-        return self.create_volume(multiattach=True, **kwargs)
+        multiattach_vol_type = CONF.volume.volume_type_multiattach
+        return self.create_volume(volume_type=multiattach_vol_type,
+                                  **kwargs)
 
     def _create_and_multiattach(self):
         """Creates two server instances and a volume and attaches to both.
diff --git a/tempest/config.py b/tempest/config.py
index dfc0a8e..551578e 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -1015,6 +1015,10 @@
     cfg.StrOpt('volume_type',
                default='',
                help='Volume type to be used while creating volume.'),
+    cfg.StrOpt('volume_type_multiattach',
+               default='',
+               help='Multiattach volume type to be used while creating a '
+                    'multiattach volume.'),
     cfg.StrOpt('storage_protocol',
                default='iSCSI',
                help='Backend protocol to target when creating volume types'),
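With the new [volume] option in place, a deployment that defines a multiattach-capable volume type can expose it to the tests. A hedged sketch of the wiring (the option name comes from the hunk above; the tempest.conf value is illustrative):

    # If tempest.conf contains, for example:
    #   [volume]
    #   volume_type_multiattach = multiattach
    # then tests resolve it through the generated config object:
    from tempest import config

    CONF = config.CONF
    vol_type = CONF.volume.volume_type_multiattach  # '' unless configured
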
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index bf3f62f..db0aa5a 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -325,13 +325,15 @@
 
     def create_volume(self, size=None, name=None, snapshot_id=None,
                       imageRef=None, volume_type=None, wait_until='available',
-                      **kwargs):
+                      client=None, **kwargs):
         """Creates volume
 
         This wrapper utility creates volume and waits for volume to be
         in 'available' state by default. If wait_until is None, it does not
         wait. This method returns the volume's full representation via GET.
         """
+        if client is None:
+            client = self.volumes_client
 
         if size is None:
             size = CONF.volume.volume_size
@@ -355,19 +357,20 @@
             kwargs.setdefault('availability_zone',
                               CONF.compute.compute_volume_common_az)
 
-        volume = self.volumes_client.create_volume(**kwargs)['volume']
+        volume = client.create_volume(**kwargs)['volume']
 
-        self.addCleanup(self.volumes_client.wait_for_resource_deletion,
+        self.addCleanup(client.wait_for_resource_deletion,
                         volume['id'])
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        self.volumes_client.delete_volume, volume['id'])
+                        client.delete_volume, volume['id'])
         self.assertEqual(name, volume['name'])
         if wait_until:
-            waiters.wait_for_volume_resource_status(self.volumes_client,
+            waiters.wait_for_volume_resource_status(client,
                                                     volume['id'], wait_until)
             # The volume retrieved on creation has a non-up-to-date status.
             # Retrieval after it becomes active ensures correct details.
-            volume = self.volumes_client.show_volume(volume['id'])['volume']
+            volume = client.show_volume(volume['id'])['volume']
+
         return volume
 
     def create_backup(self, volume_id, name=None, description=None,
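The new `client` parameter lets callers route volume creation through a non-default volumes client (for example one bound to admin or to a second project), while the waiter and both cleanups consistently use that same client. A hedged usage sketch (the attribute name is an assumption based on tempest's usual client wiring, not part of this diff):

    # Inside a scenario test class; os_admin.volumes_client_latest is
    # assumed to be available on admin-capable test classes.
    volume = self.create_volume(size=1,
                                client=self.os_admin.volumes_client_latest)
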
@@ -858,32 +861,43 @@
                   image_name, server['name'])
         return snapshot_image
 
-    def nova_volume_attach(self, server, volume_to_attach, **kwargs):
+    def nova_volume_attach(self, server, volume_to_attach,
+                           volumes_client=None, servers_client=None,
+                           **kwargs):
         """Compute volume attach
 
         This utility attaches a volume via the compute API and waits for
         the volume status to reach 'in-use'.
         """
-        volume = self.servers_client.attach_volume(
+        if volumes_client is None:
+            volumes_client = self.volumes_client
+        if servers_client is None:
+            servers_client = self.servers_client
+
+        volume = servers_client.attach_volume(
             server['id'], volumeId=volume_to_attach['id'],
             **kwargs)['volumeAttachment']
         self.assertEqual(volume_to_attach['id'], volume['id'])
-        waiters.wait_for_volume_resource_status(self.volumes_client,
+        waiters.wait_for_volume_resource_status(volumes_client,
                                                 volume['id'], 'in-use')
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        self.nova_volume_detach, server, volume)
+                        self.nova_volume_detach, server, volume,
+                        servers_client)
         # Return the updated volume after the attachment
-        return self.volumes_client.show_volume(volume['id'])['volume']
+        return volumes_client.show_volume(volume['id'])['volume']
 
-    def nova_volume_detach(self, server, volume):
+    def nova_volume_detach(self, server, volume, servers_client=None):
         """Compute volume detach
 
         This utility detaches the volume from the server and checks whether the
         volume attachment has been removed from Nova.
         """
-        self.servers_client.detach_volume(server['id'], volume['id'])
+        if servers_client is None:
+            servers_client = self.servers_client
+
+        servers_client.detach_volume(server['id'], volume['id'])
         waiters.wait_for_volume_attachment_remove_from_server(
-            self.servers_client, server['id'], volume['id'])
+            servers_client, server['id'], volume['id'])
 
     def ping_ip_address(self, ip_address, should_succeed=True,
                         ping_timeout=None, mtu=None, server=None):
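nova_volume_attach() and nova_volume_detach() gain the same client-injection pattern, so the attach call, the 'in-use' waiter, the cleanup, and the final detach can all run against one explicit set of clients. A hedged sketch (the client attribute names are assumptions; only the two new keyword arguments come from this diff):

    attached = self.nova_volume_attach(
        server, volume,
        volumes_client=self.os_admin.volumes_client_latest,
        servers_client=self.os_admin.servers_client)
    self.nova_volume_detach(server, volume,
                            servers_client=self.os_admin.servers_client)
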
diff --git a/zuul.d/integrated-gate.yaml b/zuul.d/integrated-gate.yaml
index 4f21956..6c495b7 100644
--- a/zuul.d/integrated-gate.yaml
+++ b/zuul.d/integrated-gate.yaml
@@ -62,7 +62,7 @@
 
 - job:
     name: tempest-extra-tests
-    parent: devstack-tempest
+    parent: tempest-full-py3
     description: |
       This job runs the extra tests mentioned in
       tools/tempest-extra-tests-list.txt.
@@ -410,6 +410,8 @@
         - grenade
         - grenade-skip-level:
             voting: false
+            branches:
+              - stable/2023.1
         - tempest-integrated-networking
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
@@ -444,6 +446,17 @@
       jobs:
         - grenade-skip-level:
             voting: false
+            branches:
+              - stable/2023.1
+        # NOTE(gmann): Nova decided to run grenade skip-level testing
+        # always (on SLURP as well as non-SLURP releases), so we are adding
+        # the grenade-skip-level-always job to the integrated gate and do
+        # not need to update the skip-level job here until Nova changes
+        # that decision. This job exists from the 2023.2 release cycle
+        # onwards, so we use a branch variant to keep it off pre-2023.2 gates.
+        - grenade-skip-level-always:
+            branches:
+              - master
         - tempest-integrated-compute
         # centos-8-stream is tested from wallaby -> yoga branches
         - tempest-integrated-compute-centos-8-stream:
@@ -456,6 +469,9 @@
             branches: ^(?!stable/(ussuri|victoria|wallaby)).*$
     gate:
       jobs:
+        - grenade-skip-level-always:
+            branches:
+              - master
         - tempest-integrated-compute
         - openstacksdk-functional-devstack:
             branches: ^(?!stable/(ussuri|victoria|wallaby)).*$
@@ -477,6 +493,8 @@
         - grenade
         - grenade-skip-level:
             voting: false
+            branches:
+              - stable/2023.1
         - tempest-integrated-placement
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
@@ -507,6 +525,8 @@
         - grenade
         - grenade-skip-level:
             voting: false
+            branches:
+              - stable/2023.1
         - tempest-integrated-storage
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index 3df61d8..be8442a 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -33,7 +33,12 @@
         - glance-multistore-cinder-import:
             voting: false
             irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-zed:
+        # NOTE(gmann): We test the latest and oldest supported stable
+        # branches in the Tempest master gate, on the assumption that if
+        # things work on the latest and oldest branches they will also
+        # work on the branches in between. Any breakage there will be
+        # caught in the respective stable branch gate.
+        - tempest-full-2023-1:
             irrelevant-files: *tempest-irrelevant-files
         - tempest-full-xena:
             irrelevant-files: *tempest-irrelevant-files
@@ -161,6 +166,7 @@
         - tempest-full-zed-extra-tests
         - tempest-full-yoga-extra-tests
         - tempest-full-xena-extra-tests
+        - tempest-full-enforce-scope-new-defaults-zed
         - neutron-ovs-tempest-dvr-ha-multinode-full:
             irrelevant-files: *tempest-irrelevant-files
         - nova-tempest-v2-api:
@@ -179,12 +185,15 @@
             irrelevant-files: *tempest-irrelevant-files
     periodic-stable:
       jobs:
+        - tempest-full-2023-1
         - tempest-full-zed
         - tempest-full-yoga
         - tempest-full-xena
+        - tempest-slow-2023-1
         - tempest-slow-zed
         - tempest-slow-yoga
         - tempest-slow-xena
+        - tempest-full-2023-1-extra-tests
         - tempest-full-zed-extra-tests
         - tempest-full-yoga-extra-tests
         - tempest-full-xena-extra-tests
@@ -199,3 +208,4 @@
         - tempest-centos9-stream-fips
         - tempest-full-centos-9-stream
         - tempest-full-test-account-no-admin-py3
+        - tempest-full-enforce-scope-new-defaults-zed
diff --git a/zuul.d/stable-jobs.yaml b/zuul.d/stable-jobs.yaml
index 8aeb748..c5fc063 100644
--- a/zuul.d/stable-jobs.yaml
+++ b/zuul.d/stable-jobs.yaml
@@ -1,5 +1,11 @@
 # NOTE(gmann): This file includes all stable release jobs definition.
 - job:
+    name: tempest-full-2023-1
+    parent: tempest-full-py3
+    nodeset: openstack-single-node-jammy
+    override-checkout: stable/2023.1
+
+- job:
     name: tempest-full-zed
     parent: tempest-full-py3
     nodeset: openstack-single-node-focal
@@ -18,6 +24,12 @@
     override-checkout: stable/xena
 
 - job:
+    name: tempest-full-2023-1-extra-tests
+    parent: tempest-extra-tests
+    nodeset: openstack-single-node-jammy
+    override-checkout: stable/2023.1
+
+- job:
     name: tempest-full-zed-extra-tests
     parent: tempest-extra-tests
     nodeset: openstack-single-node-focal
@@ -36,6 +48,18 @@
     override-checkout: stable/xena
 
 - job:
+    name: tempest-slow-2023-1
+    parent: tempest-slow-py3
+    nodeset: openstack-two-node-jammy
+    override-checkout: stable/2023.1
+
+- job:
+    name: tempest-full-enforce-scope-new-defaults-zed
+    parent: tempest-full-enforce-scope-new-defaults
+    nodeset: openstack-single-node-focal
+    override-checkout: stable/zed
+
+- job:
     name: tempest-slow-zed
     parent: tempest-slow-py3
     nodeset: openstack-two-node-focal