Prepared fixes for the MOS Yoga-based image, MOS 23.2
The list of fixes and improvements:
* Dockerfiles: set the FROM image to the Mirantis Heat
Yoga image of MOS 23.2. Fixed the installation of some
packages.
* Python cleanup.py script: rewrote the volume group
operations to use a specific API version, the resetting of
volume status, and the lookup of server groups. Added
functions for the methods missing from the downstream
openstacksdk.
* FIO scripts: fixed attaching/detaching of volumes, plus
some other fixes.
* Fixed the e2e script to find the hypervisor.
* Refreshed and archived the si-tests and mos-spt
repositories.
Change-Id: I1153ebcf3691cc43eb3fc0a7e98a0d88da3a8154
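
Note: the downstream openstacksdk used here lacks several Block Storage
methods, so cleanup.py calls the REST API directly through the
authenticated session and pins a microversion via the
OpenStack-API-Version header. A minimal sketch of that pattern (the
helper name and version value are illustrative):

    import openstack

    cloud = openstack.connect(cloud='os-cloud')
    volume = cloud.block_storage  # proxy, used only for its endpoint

    def _list_groups(api_version='3.43'):
        # GET <volume endpoint>/groups/detail with an explicit microversion
        uri = f"{volume.get_endpoint()}/groups/detail"
        headers = {'X-Auth-Token': cloud.session.get_token(),
                   'Accept': 'application/json',
                   'OpenStack-API-Version': f'volume {api_version}'}
        return cloud.session.request(url=uri, method='GET',
                                     headers=headers).json()['groups']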
diff --git a/cleanup.py b/cleanup.py
index 247dcbe..b69cd62 100644
--- a/cleanup.py
+++ b/cleanup.py
@@ -1,7 +1,11 @@
import argparse
+import json
import os
import re
import sys
+import time
+
+from types import SimpleNamespace
import openstack
@@ -9,6 +13,8 @@
# Send logs to both, a log file and stdout
openstack.enable_logging(debug=False, path='openstack.log', stream=sys.stdout)
+volume_api_version = "3.43"
+
# Connect to cloud
TEST_CLOUD = os.getenv('OS_TEST_CLOUD', 'os-cloud')
cloud = openstack.connect(cloud=TEST_CLOUD)
@@ -41,6 +47,11 @@
return default
+def items_to_object(items):
+ data = json.dumps(items)
+ return json.loads(data, object_hook=lambda d: SimpleNamespace(**d))
+
+
def _filter_test_resources(resources, attribute, pattern=mask_pattern):
filtered_resources = {}
for item in resources:
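
The items_to_object() helper above gives raw API dicts the attribute
access that the generic filters expect from SDK objects. A quick
illustration with hypothetical data:

    groups = items_to_object([{'id': 'abc', 'name': 'cvp.test-group'}])
    assert groups[0].name == 'cvp.test-group'  # attribute, not key, access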
@@ -64,15 +75,43 @@
log.info(f"... deleting {name} (id={id_}) {type_}")
-def _force_delete_load_balancer(id_):
- log.info(f"... ... force deleting {id_} load balancer")
- lb_ep = load_balancer.get_endpoint()
- lb_uri = f"{lb_ep}/lbaas/loadbalancers/{id_}"
+def _get_volume_groups(all_tenants='true'):
+ ep = volume.get_endpoint()
+ uri = f"{ep}/groups/detail"
headers = {'X-Auth-Token': cloud.session.get_token(),
- 'Content-Type': 'application/json'}
- params = {'cascade': 'true', 'force': 'true'}
- cloud.session.request(url=lb_uri, method='DELETE',
- headers=headers, params=params)
+ 'Accept': 'application/json',
+ 'OpenStack-API-Version': f'volume {volume_api_version}'}
+ params = {'all_tenants': all_tenants}
+ response = cloud.session.request(url=uri, method='GET',
+ headers=headers, params=params).json()
+ for group in response['groups']:
+ yield group
+
+
+def _delete_volume_group(uuid, delete_volumes='false'):
+ ep = volume.get_endpoint()
+ uri = f"{ep}/groups/{uuid}/action"
+ headers = {'X-Auth-Token': cloud.session.get_token(),
+ 'OpenStack-API-Version': f'volume {volume_api_version}',
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json'}
+ body = {"delete": {"delete-volumes": delete_volumes}}
+ cloud.session.request(
+ url=uri, method='POST', headers=headers, json=body)
+
+
+def _reset_volume_status(uuid, status='available', attach_status='detached',
+ migration_status='None'):
+ ep = volume.get_endpoint()
+ uri = f"{ep}/volumes/{uuid}/action"
+ headers = {'X-Auth-Token': cloud.session.get_token(),
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json'}
+ body = {"os-reset_status": {
+ "status": status, "attach_status": attach_status,
+ "migration_status": migration_status}}
+ cloud.session.request(
+ url=uri, method='POST', headers=headers, json=body)
def cleanup_users():
@@ -225,7 +264,7 @@
if args.dry_run:
return
for id_ in volumes_to_delete:
- volume.reset_volume_status(id_, 'available', 'detached', 'None')
+ _reset_volume_status(id_, 'available', 'detached', 'None')
_log_resource_delete(id_, volumes_to_delete[id_], 'volume')
volume.delete_volume(id_)
vol_obj = volume.get_volume(id_)
@@ -233,14 +272,16 @@
def cleanup_volume_groups():
- groups = volume.groups()
+ groups_in_response = _get_volume_groups()
+ groups = items_to_object([g for g in groups_in_response])
groups_to_delete = _filter_test_resources(groups, 'name')
_log_resources_count(len(groups_to_delete), 'volume group(s)')
if args.dry_run:
return
for id_ in groups_to_delete:
_log_resource_delete(id_, groups_to_delete[id_], 'volume group')
- volume.delete_group(id_)
+ _delete_volume_group(id_)
+ time.sleep(10) # TODO(izadorozhna): need to add a proper waiter
def cleanup_volume_backups():
@@ -357,7 +398,8 @@
load_balancer.delete_load_balancer(id_, cascade=True)
except openstack.exceptions.ConflictException:
# force delete the LB in case it is in some PENDING_* state
- _force_delete_load_balancer(id_)
+ log.info(f"... ... force deleting {id_} load balancer")
+ load_balancer.delete_load_balancer(id_, cascade=True, force=True)
except Exception as e:
log.info(f"... ... could not delete {id_} load balancer: {e}")
diff --git a/fio/connection.py b/fio/connection.py
index 6d82179..40018c4 100644
--- a/fio/connection.py
+++ b/fio/connection.py
@@ -78,13 +78,25 @@
def detach_volume(
+ att: openstack.compute.v2.volume_attachment.VolumeAttachment,
srv: openstack.compute.v2.server.Server,
vol: openstack.block_storage.v3.volume.Volume
) -> None:
- cloud.compute.delete_volume_attachment(srv, vol)
+ cloud.compute.delete_volume_attachment(att, srv)
cloud.volume.wait_for_status(vol, status='available')
+def find_server_group(
+ name: str
+) -> Union[openstack.compute.v2.server_group.ServerGroup, None]:
+ server_groups = cloud.compute.server_groups(all_projects=True)
+ matching_server_groups = [s for s in server_groups
+ if s.name == name]
+ server_group = matching_server_groups[0] \
+ if matching_server_groups else None
+ return server_group
+
+
if __name__ == "__main__":
print(UBUNTU_IMAGE_NAME)
print(FIO_SG_NAME)
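
With the new signature, detach_volume() takes the attachment record
itself, and find_server_group() searches across all projects.
Hypothetical call sites, assuming an existing server object srv:

    for att in cloud.compute.volume_attachments(srv):
        vol = cloud.volume.get_volume(att.volume_id)
        detach_volume(att, srv, vol)

    group = find_server_group('fio-aa-group')  # name is illustrative
    if group is None:
        print('no matching server group found')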
diff --git a/fio/fio_cleanup.py b/fio/fio_cleanup.py
index 753df87..933959f 100644
--- a/fio/fio_cleanup.py
+++ b/fio/fio_cleanup.py
@@ -30,19 +30,19 @@
vol_id = att.volume_id
vol = volume.get_volume(vol_id)
try:
- conn.detach_volume(vm, vol)
+ conn.detach_volume(att, vm, vol)
print(
f"'{vol.id}' volume has been detached from fio '{vm.name}'"
" server.")
conn.delete_volume(vol)
print(f"'{vol.id}' volume has been deleted.")
- conn.delete_server(vm)
- print(f"'{vm.name}' server has been deleted.")
except ResourceFailure as e:
print(
f"Cleanup of '{vm.id}' with volume '{vol.id}' attached "
f"failed with '{e.message}' error.")
conn.delete_volume(vol)
+ conn.delete_server(vm)
+ print(f"'{vm.name}' server has been deleted.")
if __name__ == "__main__":
@@ -59,11 +59,12 @@
router = network.find_router(ROUTER_NAME)
if router:
network.update_router(router.id, external_gateway_info={})
- print("Externa GW port has been deleted from fio router.")
+ print("External GW port has been deleted from fio router.")
router_ports = network.ports(device_id=router.id)
for p in router_ports:
- network.remove_interface_from_router(router.id, port_id=p.id)
- print(f"'{p.id}' port has been deleted from fio router.")
+ if p.device_owner != "network:router_ha_interface":
+ network.remove_interface_from_router(router.id, port_id=p.id)
+ print(f"'{p.id}' port has been deleted from fio router.")
# Delete fio network topology
net = network.find_network(NET_NAME)
@@ -93,8 +94,7 @@
print(f"fio '{sg.id}' security group has been deleted.")
# Delete fio server group
- server_group = compute.find_server_group(
- AA_SERVER_GROUP_NAME, all_projects=True)
+ server_group = conn.find_server_group(AA_SERVER_GROUP_NAME)
if server_group:
compute.delete_server_group(server_group)
print(f"fio '{server_group.name}' server group has been deleted.")
diff --git a/fio/fio_setup.py b/fio/fio_setup.py
index 3220aab..be33d28 100644
--- a/fio/fio_setup.py
+++ b/fio/fio_setup.py
@@ -114,7 +114,7 @@
conn.delete_volume(vol)
# Attach the volume to the fio client
- compute.create_volume_attachment(vm, volume=vol)
+ compute.create_volume_attachment(vm, volume_id=vol.id)
try:
vol = volume.wait_for_status(vol, status='in-use')
print(f"Volume '{vol.name}' is attached to '{vm.name}' fio client")
@@ -195,8 +195,7 @@
router.id, subnet_id=fio_subnet.id)
# Create fio server group with anti-affinity scheduling policy
- server_group = compute.find_server_group(
- AA_SERVER_GROUP_NAME, all_projects=True)
+ server_group = conn.find_server_group(AA_SERVER_GROUP_NAME)
if not server_group:
server_group = compute.create_server_group(
name=AA_SERVER_GROUP_NAME, policies=['soft-anti-affinity'])
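
The attachment call now passes volume_id explicitly, which is what the
newer openstacksdk compute proxy expects. A hedged sketch of the
attach-and-wait flow (the wait value is illustrative):

    att = compute.create_volume_attachment(vm, volume_id=vol.id)
    vol = volume.wait_for_status(vol, status='in-use', wait=120)
    print(f"Volume '{vol.name}' is attached to '{vm.name}'")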
diff --git a/k8s/docker-mos-rally b/k8s/docker-mos-rally
index 7d1b5d6..c2d5406 100644
--- a/k8s/docker-mos-rally
+++ b/k8s/docker-mos-rally
@@ -1,6 +1,6 @@
FROM xrally/xrally-openstack:2.1.0
-MAINTAINER Alex Savatieiev (a.savex@gmail.com)
+LABEL maintainer="qa-ps@mirantis.com"
WORKDIR /rally
RUN mkdir /rally/rally-files && \
diff --git a/k8s/docker-mos-toolset-full b/k8s/docker-mos-toolset-full
index 0ce01e3..61ba1dc 100644
--- a/k8s/docker-mos-toolset-full
+++ b/k8s/docker-mos-toolset-full
@@ -1,4 +1,4 @@
-FROM ubuntu:20.04
+FROM mirantis.azurecr.io/openstack/heat:yoga-focal-20230821170130
LABEL maintainer="qa-ps@mirantis.com"
@@ -30,7 +30,7 @@
apt-get install -y helm && \
rm -rf /var/lib/apt/lists/*
-RUN pip3 install --no-cache-dir python-openstackclient python-neutronclient python-heatclient pyghmi python-octaviaclient tempestparser python-ironicclient aodhclient gnocchiclient python-barbicanclient python-glanceclient
+RUN pip3 install --no-cache-dir pyghmi tempestparser
RUN git clone https://gerrit.mcp.mirantis.com/mcp/cvp-configuration /opt/res-files && \
cd /opt/cfg-checker && \
@@ -50,6 +50,7 @@
python3 -m venv .sivenv && \
. .sivenv/bin/activate && \
pip3 install --no-cache-dir -r si_tests/requirements.txt && \
+ pip3 install urllib3==1.26.16 && \
deactivate
RUN mkdir /opt/packages/ && \
diff --git a/k8s/docker-mos-toolset-min b/k8s/docker-mos-toolset-min
index 53e6c55..9f9065e 100644
--- a/k8s/docker-mos-toolset-min
+++ b/k8s/docker-mos-toolset-min
@@ -1,6 +1,6 @@
-FROM ubuntu:20.04
+FROM mirantis.azurecr.io/openstack/heat:yoga-focal-20230821170130
-MAINTAINER Alex Savatieiev (a.savex@gmail.com)
+LABEL maintainer="qa-ps@mirantis.com"
ADD ./src/mos-spt.tgz /opt/mos-spt/
WORKDIR /opt
@@ -20,7 +20,7 @@
apt autoremove --yes && \
rm -rf /var/lib/apt/lists/*
-RUN pip3 install --no-cache-dir python-openstackclient python-neutronclient python-heatclient pyghmi python-octaviaclient tempestparser python-ironicclient aodhclient gnocchiclient python-barbicanclient python-glanceclient
+RUN pip3 install --no-cache-dir tempestparser
RUN mkdir /opt/packages/ && \
cd /opt/packages && \
diff --git a/k8s/docker-rally-2.1.0 b/k8s/docker-rally-2.1.0
index c0996d1..510638d 100644
--- a/k8s/docker-rally-2.1.0
+++ b/k8s/docker-rally-2.1.0
@@ -1,6 +1,6 @@
FROM xrally/xrally-openstack:2.1.0
-MAINTAINER Alex Savatieiev (a.savex@gmail.com)
+LABEL maintainer="qa-ps@mirantis.com"
WORKDIR /rally
RUN sudo apt update && \
diff --git a/k8s/src/mos-spt.tgz b/k8s/src/mos-spt.tgz
index 978a9e6..be7571d 100644
--- a/k8s/src/mos-spt.tgz
+++ b/k8s/src/mos-spt.tgz
Binary files differ
diff --git a/k8s/src/si-tests.tgz b/k8s/src/si-tests.tgz
index c5f69a7..40e0353 100644
--- a/k8s/src/si-tests.tgz
+++ b/k8s/src/si-tests.tgz
Binary files differ
diff --git a/scripts/cmp_check.sh b/scripts/cmp_check.sh
index 07bc9ca..58e841f 100644
--- a/scripts/cmp_check.sh
+++ b/scripts/cmp_check.sh
@@ -83,7 +83,7 @@
fi
function cmp_stats() {
- cmpid=$(openstack hypervisor list --matching ${1} -f value -c ID)
+ cmpid=$(openstack hypervisor list | grep ${1} | awk '{print $2}')
vars=( $(openstack hypervisor show ${cmpid} -f shell -c state -c running_vms -c vcpus -c vcpus_used -c memory_mb -c memory_mb_used) )
[ ! 0 -eq $? ] && errors+=("${1}: $(cat ${vars[@]})")
if [ ! $state == '"up"' ]; then