Prepared fixes for the MOS Victoria-based image, MOS 23.2

The list of fixes and improvements:

* Dockerfiles: set the FROM image to the Mirantis Heat
  Victoria image of MOS 23.2. Fixed the installation of some
  packages.
* Python cleanup.py script: rewrote the operations on volume
  groups and volume group types to use a specific volume API
  version, as well as resetting volume status and finding
  server groups. Added functions for the methods missing from
  the downstream openstacksdk (see the sketches after this list).
* FIO scripts: fixed attaching/detaching of volumes, along with
  some other minor fixes (a usage sketch follows the list).
* Refreshed the archived si-tests, mos-spt, and cfg-checker
  repositories (the .tgz sources).
* Changed the tag of the toolset image for Victoria:
  toolset:23.2-victoria
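
The raw REST helpers added to cleanup.py return plain JSON dicts,
while _filter_test_resources() reads attributes from each item; the
new items_to_object() helper bridges the two by converting dicts into
SimpleNamespace objects. A minimal illustration (the group data here
is made up):

    groups = items_to_object([{'id': 'uuid-1', 'name': 'cvp.group'}])
    assert groups[0].name == 'cvp.group'  # attribute access, as with SDK objects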
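
Volume group deletion still waits with a fixed time.sleep(10) (see
the TODO in cleanup_volume_groups()). A proper waiter could poll the
raw groups endpoint until the group disappears; a minimal sketch,
assuming the _get_volume_groups() helper from this change and
hypothetical timeout/interval parameters:

    def _wait_volume_group_deleted(uuid, timeout=60, interval=5):
        # Poll the raw /groups/detail endpoint until the group is gone
        # or the timeout expires.
        deadline = time.time() + timeout
        while time.time() < deadline:
            if not any(g['id'] == uuid for g in _get_volume_groups()):
                return
            time.sleep(interval)
        raise TimeoutError(f"volume group {uuid} not deleted in {timeout}s")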
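
detach_volume() in fio/connection.py now takes the VolumeAttachment
object first, matching the compute.delete_volume_attachment(attachment,
server) call it wraps. A usage sketch, assuming the compute and volume
proxies as used in fio_cleanup.py:

    for att in compute.volume_attachments(server=vm):
        vol = volume.get_volume(att.volume_id)
        conn.detach_volume(att, vm, vol)  # attachment first, then server, volume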

Change-Id: I762eaa813c4c864979a2dfb5151dd082aff98cb2
diff --git a/cleanup.py b/cleanup.py
index 247dcbe..6f82823 100644
--- a/cleanup.py
+++ b/cleanup.py
@@ -1,7 +1,11 @@
 import argparse
+import json
 import os
 import re
 import sys
+import time
+
+from types import SimpleNamespace
 
 import openstack
 
@@ -9,6 +13,8 @@
 # Send logs to both, a log file and stdout
 openstack.enable_logging(debug=False, path='openstack.log', stream=sys.stdout)
 
+volume_api_version = "3.43"
+
 # Connect to cloud
 TEST_CLOUD = os.getenv('OS_TEST_CLOUD', 'os-cloud')
 cloud = openstack.connect(cloud=TEST_CLOUD)
@@ -41,6 +47,11 @@
         return default
 
 
+def items_to_object(items):
+    data = json.dumps(items)
+    return json.loads(data, object_hook=lambda d: SimpleNamespace(**d))
+
+
 def _filter_test_resources(resources, attribute, pattern=mask_pattern):
     filtered_resources = {}
     for item in resources:
@@ -64,15 +75,67 @@
     log.info(f"... deleting {name} (id={id_}) {type_}")
 
 
-def _force_delete_load_balancer(id_):
-    log.info(f"... ... force deleting {id_} load balancer")
-    lb_ep = load_balancer.get_endpoint()
-    lb_uri = f"{lb_ep}/lbaas/loadbalancers/{id_}"
+def _generate_volume_headers():
     headers = {'X-Auth-Token': cloud.session.get_token(),
-               'Content-Type': 'application/json'}
-    params = {'cascade': 'true', 'force': 'true'}
-    cloud.session.request(url=lb_uri, method='DELETE',
-                          headers=headers, params=params)
+               'Accept': 'application/json',
+               'Content-Type': 'application/json',
+               'OpenStack-API-Version': f'volume {volume_api_version}'}
+    return headers
+
+
+def _get_volume_group_types(all_tenants='true'):
+    # this method is not implemented in Victoria yet in openstacksdk
+    ep = volume.get_endpoint()
+    uri = f"{ep}/group_types"
+    headers = _generate_volume_headers()
+    params = {'all_tenants': all_tenants}
+    response = cloud.session.request(url=uri, method='GET',
+                                     headers=headers, params=params).json()
+    for g_type in response['group_types']:
+        yield g_type
+
+
+def _delete_volume_group_type(uuid):
+    # this method is not implemented in Victoria yet in openstacksdk
+    ep = volume.get_endpoint()
+    uri = f"{ep}/group_types/{uuid}"
+    headers = _generate_volume_headers()
+    cloud.session.request(url=uri, method='DELETE', headers=headers)
+
+
+def _get_volume_groups(all_tenants='true'):
+    # this method is not implemented in Victoria yet in openstacksdk
+    ep = volume.get_endpoint()
+    uri = f"{ep}/groups/detail"
+    headers = _generate_volume_headers()
+    params = {'all_tenants': all_tenants}
+    response = cloud.session.request(url=uri, method='GET',
+                                     headers=headers, params=params).json()
+    for group in response['groups']:
+        yield group
+
+
+def _delete_volume_group(uuid, delete_volumes='false'):
+    # this method is not implemented in Victoria yet in openstacksdk
+    ep = volume.get_endpoint()
+    uri = f"{ep}/groups/{uuid}/action"
+    headers = _generate_volume_headers()
+    body = {"delete": {"delete-volumes": delete_volumes}}
+    cloud.session.request(
+        url=uri, method='POST', headers=headers, json=body)
+
+
+def _reset_volume_status(uuid, status='available', attach_status='detached',
+                         migration_status='None'):
+    # this method is not implemented in Victoria yet in openstacksdk
+    ep = volume.get_endpoint()
+    uri = f"{ep}/volumes/{uuid}/action"
+    headers = _generate_volume_headers()
+    body = {"os-reset_status": {
+        "status": status, "attach_status": attach_status,
+        "migration_status": migration_status}}
+    cloud.session.request(
+        url=uri, method='POST', headers=headers, json=body)
 
 
 def cleanup_users():
@@ -225,7 +288,7 @@
     if args.dry_run:
         return
     for id_ in volumes_to_delete:
-        volume.reset_volume_status(id_, 'available', 'detached', 'None')
+        _reset_volume_status(id_, 'available', 'detached', 'None')
         _log_resource_delete(id_, volumes_to_delete[id_], 'volume')
         volume.delete_volume(id_)
         vol_obj = volume.get_volume(id_)
@@ -233,14 +296,16 @@
 
 
 def cleanup_volume_groups():
-    groups = volume.groups()
+    groups_in_response = _get_volume_groups()
+    groups = items_to_object([g for g in groups_in_response])
     groups_to_delete = _filter_test_resources(groups, 'name')
     _log_resources_count(len(groups_to_delete), 'volume group(s)')
     if args.dry_run:
         return
     for id_ in groups_to_delete:
         _log_resource_delete(id_, groups_to_delete[id_], 'volume group')
-        volume.delete_group(id_)
+        _delete_volume_group(id_)
+        time.sleep(10)      # TODO : need to add a proper waiter
 
 
 def cleanup_volume_backups():
@@ -257,7 +322,8 @@
 
 
 def cleanup_volume_group_types():
-    group_types = volume.group_types()
+    types_in_response = _get_volume_group_types()
+    group_types = items_to_object([g for g in types_in_response])
     group_types_to_delete = _filter_test_resources(group_types, 'name')
     _log_resources_count(len(group_types_to_delete), 'volume group type(s)')
     if args.dry_run:
@@ -265,7 +331,7 @@
     for id_ in group_types_to_delete:
         _log_resource_delete(
             id_, group_types_to_delete[id_], 'volume group type')
-        volume.delete_group_type(id_)
+        _delete_volume_group_type(id_)
 
 
 def cleanup_volume_types():
@@ -357,7 +423,8 @@
             load_balancer.delete_load_balancer(id_, cascade=True)
         except openstack.exceptions.ConflictException:
             # force delete the LB in case it is in some PENDING_* state
-            _force_delete_load_balancer(id_)
+            log.info(f"... ... force deleting {id_} load balancer")
+            load_balancer.delete_load_balancer(id_, cascade=True, force=True)
         except Exception as e:
             log.info(f"... ... could not delete {id_} load balancer: {e}")
 
diff --git a/fio/connection.py b/fio/connection.py
index 6d82179..40018c4 100644
--- a/fio/connection.py
+++ b/fio/connection.py
@@ -78,13 +78,25 @@
 
 
 def detach_volume(
+    att: openstack.compute.v2.volume_attachment.VolumeAttachment,
     srv: openstack.compute.v2.server.Server,
     vol: openstack.block_storage.v3.volume.Volume
 ) -> None:
-    cloud.compute.delete_volume_attachment(srv, vol)
+    cloud.compute.delete_volume_attachment(att, srv)
     cloud.volume.wait_for_status(vol, status='available')
 
 
+def find_server_group(
+        name: str
+) -> Union[openstack.compute.v2.server_group.ServerGroup, None]:
+    server_groups = cloud.compute.server_groups(all_projects=True)
+    matching_server_groups = [s for s in server_groups
+                              if s.name == name]
+    server_group = matching_server_groups[0] \
+        if matching_server_groups else None
+    return server_group
+
+
 if __name__ == "__main__":
     print(UBUNTU_IMAGE_NAME)
     print(FIO_SG_NAME)
diff --git a/fio/fio_cleanup.py b/fio/fio_cleanup.py
index 753df87..f403823 100644
--- a/fio/fio_cleanup.py
+++ b/fio/fio_cleanup.py
@@ -14,6 +14,7 @@
 FLAVOR_NAME: Final[str] = conn.FIO_FLAVOR_NAME
 KEYPAIR_NAME: Final[str] = conn.FIO_KEYPAIR_NAME
 SG_NAME: Final[str] = conn.FIO_SG_NAME
+VOL_NAME_MASK: Final[str] = conn.FIO_VOL_NAME_MASK
 
 ROUTER_NAME: Final[str] = conn.FIO_ROUTER_NAME
 NET_NAME: Final[str] = conn.FIO_NET_NAME
@@ -30,19 +31,19 @@
         vol_id = att.volume_id
         vol = volume.get_volume(vol_id)
         try:
-            conn.detach_volume(vm, vol)
+            conn.detach_volume(att, vm, vol)
             print(
                 f"'{vol.id}' volume has been detached from fio '{vm.name}'"
                 " server.")
             conn.delete_volume(vol)
             print(f"'{vol.id}' volume has been deleted.")
-            conn.delete_server(vm)
-            print(f"'{vm.name}' server has been deleted.")
         except ResourceFailure as e:
             print(
                 f"Cleanup of '{vm.id}' with volume '{vol.id}' attached "
                 f"failed with '{e.message}' error.")
             conn.delete_volume(vol)
+    conn.delete_server(vm)
+    print(f"'{vm.name}' server has been deleted.")
 
 
 if __name__ == "__main__":
@@ -59,11 +60,12 @@
     router = network.find_router(ROUTER_NAME)
     if router:
         network.update_router(router.id, external_gateway_info={})
-        print("Externa GW port has been deleted from fio router.")
+        print("External GW port has been deleted from fio router.")
         router_ports = network.ports(device_id=router.id)
         for p in router_ports:
-            network.remove_interface_from_router(router.id, port_id=p.id)
-            print(f"'{p.id}' port has been deleted from fio router.")
+            if p.device_owner != "network:router_ha_interface":
+                network.remove_interface_from_router(router.id, port_id=p.id)
+                print(f"'{p.id}' port has been deleted from fio router.")
 
     # Delete fio network topology
     net = network.find_network(NET_NAME)
@@ -92,9 +94,15 @@
         network.delete_security_group(sg)
         print(f"fio '{sg.id}' security group has been deleted.")
 
+    # Delete the orphan fio volumes which are not attached (if any)
+    volumes = volume.volumes(name=VOL_NAME_MASK)
+    for v in volumes:
+        if not v.attachments:
+            volume.delete_volume(v.id)
+            print(f"'{v.id}' volume has been deleted.")
+
     # Delete fio server group
-    server_group = compute.find_server_group(
-        AA_SERVER_GROUP_NAME, all_projects=True)
+    server_group = conn.find_server_group(AA_SERVER_GROUP_NAME)
     if server_group:
         compute.delete_server_group(server_group)
         print(f"fio '{server_group.name}' server group has been deleted.")
diff --git a/fio/fio_setup.py b/fio/fio_setup.py
index 3220aab..be33d28 100644
--- a/fio/fio_setup.py
+++ b/fio/fio_setup.py
@@ -114,7 +114,7 @@
         conn.delete_volume(vol)
 
     # Attach the volume to the fio client
-    compute.create_volume_attachment(vm, volume=vol)
+    compute.create_volume_attachment(vm, volume_id=vol.id)
     try:
         vol = volume.wait_for_status(vol, status='in-use')
         print(f"Volume '{vol.name}' is attached to '{vm.name}' fio client")
@@ -195,8 +195,7 @@
             router.id, subnet_id=fio_subnet.id)
 
     # Create fio server group with anti-affinity scheduling policy
-    server_group = compute.find_server_group(
-        AA_SERVER_GROUP_NAME, all_projects=True)
+    server_group = conn.find_server_group(AA_SERVER_GROUP_NAME)
     if not server_group:
         server_group = compute.create_server_group(
             name=AA_SERVER_GROUP_NAME, policies=['soft-anti-affinity'])
diff --git a/k8s/docker-mos-rally b/k8s/docker-mos-rally
index 7d1b5d6..c2d5406 100644
--- a/k8s/docker-mos-rally
+++ b/k8s/docker-mos-rally
@@ -1,6 +1,6 @@
 FROM xrally/xrally-openstack:2.1.0 
 
-MAINTAINER Alex Savatieiev (a.savex@gmail.com)
+LABEL maintainer="qa-ps@mirantis.com"
 
 WORKDIR /rally
 RUN mkdir /rally/rally-files && \
diff --git a/k8s/docker-mos-toolset-full b/k8s/docker-mos-toolset-full
index 0ce01e3..729a3d1 100644
--- a/k8s/docker-mos-toolset-full
+++ b/k8s/docker-mos-toolset-full
@@ -1,4 +1,4 @@
-FROM ubuntu:20.04
+FROM mirantis.azurecr.io/openstack/heat:victoria-focal-20230912131036
 
 LABEL maintainer="qa-ps@mirantis.com"
 
@@ -30,9 +30,9 @@
     apt-get install -y helm && \
     rm -rf /var/lib/apt/lists/*
 
-RUN pip3 install --no-cache-dir python-openstackclient python-neutronclient python-heatclient pyghmi python-octaviaclient tempestparser python-ironicclient aodhclient gnocchiclient python-barbicanclient python-glanceclient
+RUN pip3 install --no-cache-dir pyghmi tempestparser
 
-RUN git clone https://gerrit.mcp.mirantis.com/mcp/cvp-configuration /opt/res-files && \
+RUN git clone https://gerrit.mcp.mirantis.com/mcp/cvp-configuration -b 23.2-victoria /opt/res-files && \
     cd /opt/cfg-checker && \
     python3 -m venv .checkervenv && \
     . .checkervenv/bin/activate && \
@@ -50,6 +50,7 @@
     python3 -m venv .sivenv && \
     . .sivenv/bin/activate && \
     pip3 install --no-cache-dir -r si_tests/requirements.txt && \
+    pip3 install urllib3==1.26.16 && \
     deactivate
 
 RUN mkdir /opt/packages/ && \
diff --git a/k8s/docker-mos-toolset-min b/k8s/docker-mos-toolset-min
index 53e6c55..f1350c2 100644
--- a/k8s/docker-mos-toolset-min
+++ b/k8s/docker-mos-toolset-min
@@ -1,6 +1,6 @@
-FROM ubuntu:20.04
+FROM mirantis.azurecr.io/openstack/heat:victoria-focal-20230912131036
 
-MAINTAINER Alex Savatieiev (a.savex@gmail.com)
+LABEL maintainer="qa-ps@mirantis.com"
 
 ADD ./src/mos-spt.tgz /opt/mos-spt/
 WORKDIR /opt
@@ -20,7 +20,7 @@
     apt autoremove --yes && \
     rm -rf /var/lib/apt/lists/*
 
-RUN pip3 install --no-cache-dir python-openstackclient python-neutronclient python-heatclient pyghmi python-octaviaclient tempestparser python-ironicclient aodhclient gnocchiclient python-barbicanclient python-glanceclient
+RUN pip3 install --no-cache-dir tempestparser
 
 RUN mkdir /opt/packages/ && \
     cd /opt/packages && \
diff --git a/k8s/docker-rally-2.1.0 b/k8s/docker-rally-2.1.0
index c0996d1..510638d 100644
--- a/k8s/docker-rally-2.1.0
+++ b/k8s/docker-rally-2.1.0
@@ -1,6 +1,6 @@
 FROM xrally/xrally-openstack:2.1.0
 
-MAINTAINER Alex Savatieiev (a.savex@gmail.com)
+LABEL maintainer="qa-ps@mirantis.com"
 
 WORKDIR /rally
 RUN sudo apt update && \
diff --git a/k8s/src/mos-checker.tgz b/k8s/src/mos-checker.tgz
index 9c7949a..543da28 100644
--- a/k8s/src/mos-checker.tgz
+++ b/k8s/src/mos-checker.tgz
Binary files differ
diff --git a/k8s/src/mos-spt.tgz b/k8s/src/mos-spt.tgz
index 978a9e6..be7571d 100644
--- a/k8s/src/mos-spt.tgz
+++ b/k8s/src/mos-spt.tgz
Binary files differ
diff --git a/k8s/src/si-tests.tgz b/k8s/src/si-tests.tgz
index c5f69a7..a55d434 100644
--- a/k8s/src/si-tests.tgz
+++ b/k8s/src/si-tests.tgz
Binary files differ
diff --git a/k8s/yamls/qa-toolset-bare.yaml b/k8s/yamls/qa-toolset-bare.yaml
index 2a41b98..cb8bae9 100644
--- a/k8s/yamls/qa-toolset-bare.yaml
+++ b/k8s/yamls/qa-toolset-bare.yaml
@@ -10,8 +10,8 @@
     - infinity
     imagePullPolicy: IfNotPresent
     name: toolset
-    #image: savex13/toolset:latest
-    image: registry.mirantis.com/osavatieiev/toolset:latest
+    #image: registry.mirantis.com/professional-services-qa/toolset:latest
+    image: registry.mirantis.com/professional-services-qa/toolset:23.2-victoria
   dnsPolicy: ClusterFirst
   volumes:
   - emptyDir: {}
diff --git a/k8s/yamls/qa-toolset.yaml b/k8s/yamls/qa-toolset.yaml
index 8487ac1..5f64332 100644
--- a/k8s/yamls/qa-toolset.yaml
+++ b/k8s/yamls/qa-toolset.yaml
@@ -67,7 +67,8 @@
     name: toolset
     #image: 127.0.0.1:44301/general/external/docker.io/library/ubuntu:bionic-20201119
     #image: savex13/toolset:latest
-    image: registry.mirantis.com/osavatieiev/toolset:latest
+    #image: registry.mirantis.com/osavatieiev/toolset:latest
+    image: registry.mirantis.com/osavatieiev/toolset:23.2-victoria
     volumeMounts:
     - mountPath: /artifacts
       name: qa-pv-a