Updates to the image for MOSK 25.2.x

* Fixed an issue in mos-spt; improved the report name and location
* Improved the cleanup script: show resource details on dry-run, skip the tempest fixed network
* Reverted to the Jammy image as the base for the toolset Dockerfile
* Improved the Ceph collect report location
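
For reference, a minimal dry-run invocation of the updated cleanup script (a sketch; it assumes the
argparse flag is spelled --dry-run, matching args.dry_run in cleanup.py):

    cd /opt/res-files
    # logs each resource that would be deleted (id and name) without deleting anything
    python3 cleanup.py --dry-run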

Related-PROD: PROD-37187
Change-Id: I138d9e550cd244be122ff035bbb67433eb866910
diff --git a/cleanup.py b/cleanup.py
index 55a2f90..6e9305a 100644
--- a/cleanup.py
+++ b/cleanup.py
@@ -45,7 +45,7 @@
 if manila_present:
     shared_file_system = cloud.shared_file_system
 
-mask = "cvp|s_rally|rally_|tempest-|tempest_|spt|fio"
+mask = "cvp|s_rally|rally_|tempest-|tempest_|spt-test|fio"
 full_mask = f"^(?!.*(manual|-static-)).*({mask}).*$"
 mask_pattern = re.compile(full_mask, re.IGNORECASE)
 stack_mask = "api-[0-9]+-[a-z]+"
@@ -83,10 +83,28 @@
     log.info(f"{count} {resource} containing '{pattern}' are found.")
 
 
+def _log_resources_details_at_dry_run(list_to_delete, resource):
+    if len(list_to_delete) > 0:
+        log.info(f"... {len(list_to_delete)} {resource} would be deleted:")
+        for id_ in list_to_delete:
+            log.info(f"... ... {id_} ({list_to_delete[id_]})")
+
+
 def _log_resource_delete(id_, name, type_):
     log.info(f"... deleting {name} (id={id_}) {type_}")
 
 
+def _skip_tempest_fixed_net(nets_to_delete: dict) -> dict:
+    # the "tempest-fixed-net" comes from MOSK itself and should not be deleted
+    keys_to_remove = [
+        net_id for net_id, name in nets_to_delete.items()
+        if name == "tempest-fixed-net"
+    ]
+    for net_id in keys_to_remove:
+        nets_to_delete.pop(net_id, None)
+    return nets_to_delete
+
+
 def _get_volume_groups(all_tenants='true'):
     ep = volume.get_endpoint()
     uri = f"{ep}/groups/detail"
@@ -204,6 +222,7 @@
     users_to_delete = _filter_test_resources(users, 'name')
     _log_resources_count(len(users_to_delete), 'user(s)')
     if args.dry_run:
+        _log_resources_details_at_dry_run(users_to_delete, 'user(s)')
         return
     for id_ in users_to_delete:
         _log_resource_delete(id_, users_to_delete[id_], 'user')
@@ -215,6 +234,7 @@
     roles_to_delete = _filter_test_resources(roles, 'name')
     _log_resources_count(len(roles_to_delete), 'role(s)')
     if args.dry_run:
+        _log_resources_details_at_dry_run(roles_to_delete, 'role(s)')
         return
     for id_ in roles_to_delete:
         _log_resource_delete(id_, roles_to_delete[id_], 'role')
@@ -226,6 +246,7 @@
     projects_to_delete = _filter_test_resources(projects, 'name')
     _log_resources_count(len(projects_to_delete), 'project(s)')
     if args.dry_run:
+        _log_resources_details_at_dry_run(projects_to_delete, 'project(s)')
         return
     for id_ in projects_to_delete:
         _log_resource_delete(id_, projects_to_delete[id_], 'project')
@@ -237,6 +258,7 @@
     regions_to_delete = _filter_test_resources(regions, 'id')
     _log_resources_count(len(regions_to_delete), 'region(s)')
     if args.dry_run:
+        _log_resources_details_at_dry_run(regions_to_delete, 'region(s)')
         return
     for id_ in regions_to_delete:
         _log_resource_delete(id_, id_, 'region')
@@ -248,6 +270,7 @@
     services_to_delete = _filter_test_resources(services, 'name')
     _log_resources_count(len(services_to_delete), 'service(s)')
     if args.dry_run:
+        _log_resources_details_at_dry_run(services_to_delete, 'service(s)')
         return
     for id_ in services_to_delete:
         _log_resource_delete(id_, services_to_delete[id_], 'service')
@@ -268,6 +291,7 @@
         stacks_to_delete.update(stacks_alt_to_delete)
 
     if args.dry_run:
+        _log_resources_details_at_dry_run(stacks_to_delete, 'stack(s)')
         return
 
     for id_ in stacks_to_delete:
@@ -282,6 +306,7 @@
     flavors_to_delete = _filter_test_resources(flavors, 'name')
     _log_resources_count(len(flavors_to_delete), 'flavor(s)')
     if args.dry_run:
+        _log_resources_details_at_dry_run(flavors_to_delete, 'flavor(s)')
         return
     for id_ in flavors_to_delete:
         _log_resource_delete(id_, flavors_to_delete[id_], 'flavor')
@@ -293,6 +318,7 @@
     images_to_delete = _filter_test_resources(images, 'name')
     _log_resources_count(len(images_to_delete), 'image(s)')
     if args.dry_run:
+        _log_resources_details_at_dry_run(images_to_delete, 'image(s)')
         return
     for id_ in images_to_delete:
         _log_resource_delete(id_, images_to_delete[id_], 'image')
@@ -304,6 +330,7 @@
     keypairs_to_delete = _filter_test_resources(keypairs, 'name')
     _log_resources_count(len(keypairs_to_delete), 'keypair(s)')
     if args.dry_run:
+        _log_resources_details_at_dry_run(keypairs_to_delete, 'keypair(s)')
         return
     for id_ in keypairs_to_delete:
         _log_resource_delete(id_, keypairs_to_delete[id_], 'keypair')
@@ -315,6 +342,7 @@
     servers_to_delete = _filter_test_resources(servers, 'name')
     _log_resources_count(len(servers_to_delete), 'server(s)')
     if args.dry_run:
+        _log_resources_details_at_dry_run(servers_to_delete, 'server(s)')
         return
     for id_ in servers_to_delete:
         if args.servers_active:
@@ -334,6 +362,7 @@
     shares_to_delete = _filter_test_resources(shares, 'name')
     _log_resources_count(len(shares_to_delete), 'share(s)')
     if args.dry_run:
+        _log_resources_details_at_dry_run(shares_to_delete, 'share(s)')
         return
     for id_ in shares_to_delete:
         _log_resource_delete(id_, shares_to_delete[id_], 'share')
@@ -349,6 +378,8 @@
     share_types_to_delete = _filter_test_resources(share_types, 'name')
     _log_resources_count(len(share_types_to_delete), 'share type(s)')
     if args.dry_run:
+        _log_resources_details_at_dry_run(share_types_to_delete,
+                                          'share type(s)')
         return
     for id_ in share_types_to_delete:
         _log_resource_delete(id_, share_types_to_delete[id_], 'type')
@@ -361,6 +392,8 @@
     share_networks_to_delete = _filter_test_resources(share_networks, 'name')
     _log_resources_count(len(share_networks_to_delete), 'share network(s)')
     if args.dry_run:
+        _log_resources_details_at_dry_run(share_networks_to_delete,
+                                          'share network(s)')
         return
     for id_ in share_networks_to_delete:
         _log_resource_delete(id_, share_networks_to_delete[id_], 'type')
@@ -372,6 +405,7 @@
     snapshots_to_delete = _filter_test_resources(snapshots, 'name')
     _log_resources_count(len(snapshots_to_delete), 'snapshot(s)')
     if args.dry_run:
+        _log_resources_details_at_dry_run(snapshots_to_delete, 'snapshot(s)')
         return
     for id_ in snapshots_to_delete:
         snapshot_obj = volume.get_snapshot(id_)
@@ -386,6 +420,7 @@
     volumes_to_delete = _filter_test_resources(volumes, 'name')
     _log_resources_count(len(volumes_to_delete), 'volume(s)')
     if args.dry_run:
+        _log_resources_details_at_dry_run(volumes_to_delete, 'volume(s)')
         return
     for id_ in volumes_to_delete:
         _reset_volume_status(id_, 'available', 'detached', 'None')
@@ -401,6 +436,7 @@
     groups_to_delete = _filter_test_resources(groups, 'name')
     _log_resources_count(len(groups_to_delete), 'volume group(s)')
     if args.dry_run:
+        _log_resources_details_at_dry_run(groups_to_delete, 'volume group(s)')
         return
     for id_ in groups_to_delete:
         _log_resource_delete(id_, groups_to_delete[id_], 'volume group')
@@ -413,6 +449,8 @@
     backups_to_delete = _filter_test_resources(backups, 'name')
     _log_resources_count(len(backups_to_delete), 'volume backup(s)')
     if args.dry_run:
+        _log_resources_details_at_dry_run(backups_to_delete,
+                                          'volume backup(s)')
         return
     for id_ in backups_to_delete:
         backup_obj = volume.get_backup(id_)
@@ -426,6 +464,8 @@
     group_types_to_delete = _filter_test_resources(group_types, 'name')
     _log_resources_count(len(group_types_to_delete), 'volume group type(s)')
     if args.dry_run:
+        _log_resources_details_at_dry_run(group_types_to_delete,
+                                          'volume group type(s)')
         return
     for id_ in group_types_to_delete:
         _log_resource_delete(
@@ -438,6 +478,8 @@
     volume_types_to_delete = _filter_test_resources(volume_types, 'name')
     _log_resources_count(len(volume_types_to_delete), 'volume type(s)')
     if args.dry_run:
+        _log_resources_details_at_dry_run(volume_types_to_delete,
+                                          'volume type(s)')
         return
     for id_ in volume_types_to_delete:
         _log_resource_delete(id_, volume_types_to_delete[id_], 'volume type')
@@ -449,6 +491,8 @@
     sec_groups_to_delete = _filter_test_resources(sec_groups, 'name')
     _log_resources_count(len(sec_groups_to_delete), 'security group(s)')
     if args.dry_run:
+        _log_resources_details_at_dry_run(sec_groups_to_delete,
+                                          'security group(s)')
         return
     for id_ in sec_groups_to_delete:
         _log_resource_delete(id_, sec_groups_to_delete[id_], 'security group')
@@ -460,6 +504,7 @@
     containers_to_delete = _filter_test_resources(containers, 'name')
     _log_resources_count(len(containers_to_delete), 'container(s)')
     if args.dry_run:
+        _log_resources_details_at_dry_run(containers_to_delete, 'container(s)')
         return
     for id_ in containers_to_delete:
         _log_resource_delete(id_, containers_to_delete[id_], 'container')
@@ -471,6 +516,7 @@
     routers_to_delete = _filter_test_resources(routers, 'name')
     _log_resources_count(len(routers_to_delete), 'router(s)')
     if args.dry_run:
+        _log_resources_details_at_dry_run(routers_to_delete, 'router(s)')
         return
     for id_ in routers_to_delete:
         _log_resource_delete(id_, routers_to_delete[id_], 'router')
@@ -490,8 +536,10 @@
 def cleanup_networks():
     nets = network.networks()
     nets_to_delete = _filter_test_resources(nets, 'name')
+    _skip_tempest_fixed_net(nets_to_delete)
     _log_resources_count(len(nets_to_delete), 'network(s)')
     if args.dry_run:
+        _log_resources_details_at_dry_run(nets_to_delete, 'network(s)')
         return
     for id_ in nets_to_delete:
         _log_resource_delete(id_, nets_to_delete[id_], 'network')
@@ -515,6 +563,7 @@
     lbs_to_delete = _filter_test_resources(lbs, 'name')
     _log_resources_count(len(lbs_to_delete), 'load_balancer(s)')
     if args.dry_run:
+        _log_resources_details_at_dry_run(lbs_to_delete, 'load_balancer(s)')
         return
     for id_ in lbs_to_delete:
         _log_resource_delete(id_, lbs_to_delete[id_], 'load_balancer')
@@ -540,6 +589,7 @@
                 fips_to_delete[ip.id] = ip.floating_ip_address
     _log_resources_count(len(fips_to_delete), 'floating ip(s)')
     if args.dry_run:
+        _log_resources_details_at_dry_run(fips_to_delete, 'floating ip(s)')
         return
     for id_ in fips_to_delete:
         _log_resource_delete(id_, fips_to_delete[id_], 'floating ip')
@@ -609,5 +659,8 @@
     if args.projects:
         cleanup_projects()
 
-    msg = "Cleanup is FINISHED"
+    if args.dry_run:
+        msg = "DRY-RUN mode, no cleanup is done"
+    else:
+        msg = "Cleanup is FINISHED"
     log.info(f"\n{'=' * len(msg)}\n{msg}")
diff --git a/cleanup.sh b/cleanup.sh
index 415d53c..604f3e7 100644
--- a/cleanup.sh
+++ b/cleanup.sh
@@ -1,343 +1,347 @@
-#!/bin/bash
-export OS_INTERFACE='admin'
-mask='cvp\|s_rally\|rally_\|tempest_\|tempest-\|spt-'
-exclude='manual\|-static-'
-stack_alt=false
-stack_regex='api-[0-9]+-[a-z]+'
-dry_run=false
-clean_projects=false
-make_servers_active=false
-serial=false
-batch_size=10
-# granularity values: days,hours,minutes,seconds
-stack_granularity=days
-stack_granularity_value=1
+echo ""
+echo "This script is obsolete. Please use the following script instead: cd /opt/res-files; python3 cleanup.py"
+echo ""
 
-function show_help {
-    printf "Resource cleaning script\nMask is: %s\n\t-h, -?\tShow this help\n" ${mask}
-    printf "\t-t\tDry run mode, no cleaning done\n"
-    printf "\t-P\tForce cleaning of projects\n"
-    printf "\t-s\tUse single thread of 'openstack' client for cleanup\n"
-    printf "\t-S\tSet servers to ACTIVE before deletion (bare metal reqiured)\n"
-    printf "\t-f\tForce stack cleanup with an additional mask of '%s'\n" ${stack_regex}
-    printf "\t-F\tForce purge deleted stacks. Batch size: %s, >%s %s\n" ${batch_size} ${stack_granularity_value} ${stack_granularity}
-}
-
-OPTIND=1 # Reset in case getopts has been used previously in the shell.
-while getopts "h?:tsSPfF" opt; do
-    case "$opt" in
-    h|\?)
-        show_help
-        exit 0
-        ;;
-    t)  dry_run=true
-        printf "Running in dry-run mode\n"
-        ;;
-    s)  serial=true
-        printf "Single threaded mode enabled\n"
-        ;;
-    S)  make_servers_active=true
-        printf "Servers will be set to ACTIVE before deletion\n"
-        ;;
-    P)  clean_projects=true
-        printf "Project cleanning enabled\n"
-        ;;
-    f)  stack_alt=true
-        printf "Cleaning stacks using additional mask '%s'\n" ${stack_regex}
-        ;;
-    F)  purge_deleted_stacks=true
-        printf "Purging stacks deleted >$stack_granularity_value $stack_granularity ago enabled, batch size %s\n" $stack_batch_size
-        ;;
-    esac
-done
-
-shift $((OPTIND-1))
-[ "${1:-}" = "--" ] && shift
-
-### Execute collected commands and flush the temp file
-function _clean_and_flush {
-    if [ "$dry_run" = true ] ; then
-        return 0
-    fi
-    if [ -s ${cmds} ]; then
-        if [ "${serial}" = false ] ; then
-            echo "... processing $(cat ${cmds} | wc -l) commands, worker threads ${batch_size}"
-            cat ${cmds} | tr '\n' '\0' | xargs -P ${batch_size} -n 1 -0 echo | openstack
-            #cat ${cmds} | openstack
-            truncate -s 0 ${cmds}
-        else
-            echo "... processing $(cat ${cmds} | wc -l) commands"
-            cat ${cmds} | tr '\n' '\0' | xargs -P 1 -n 1 -0 echo | openstack
-            truncate -s 0 ${cmds}
-        fi
-    fi
-}
-
-function _clean_and_flush_cinder {
-    if [ "$dry_run" = true ] ; then
-        return 0
-    fi
-    if [ -s ${cmds} ]; then
-        if [ "${serial}" = false ] ; then
-            echo "... processing $(cat ${cmds} | wc -l) commands, worker threads ${batch_size}"
-            cat ${cmds} | tr '\n' '\0' | xargs -I{} -P ${batch_size} -n 1 -0 /bin/bash -c 'cinder --os-volume-api-version 3.43 {}'
-            #cat ${cmds} | cinder --os-volume-api-version 3.43
-            truncate -s 0 ${cmds}
-        else
-            echo "... processing $(cat ${cmds} | wc -l) commands"
-            cat ${cmds} | tr '\n' '\0' | xargs -I{} -P 1 -n 1 -0 /bin/bash -c 'cinder --os-volume-api-version 3.43 {}'
-            truncate -s 0 ${cmds}
-        fi
-    fi
-}
-
-### Users
-function _clean_users {
-    users=( $(openstack user list -c ID -c Name -f value | grep ${mask} | grep -v ${exclude} | cut -d' ' -f1) )
-    echo "-> ${#users[@]} users containing '${mask}' found"
-    printf "%s\n" ${users[@]} | xargs -I{} echo user delete {} >>${cmds}
-    _clean_and_flush
-}
-
-### Roles
-function _clean_roles {
-    roles=( $(openstack role list -c ID -c Name -f value | grep ${mask} | grep -v ${exclude} | cut -d' ' -f1) )
-    echo "-> ${#roles[@]} roles containing '${mask}' found"
-    printf "%s\n" ${roles[@]} | xargs -I{} echo role delete {} >>${cmds}
-    _clean_and_flush
-}
-
-### Projects
-function _clean_projects {
-    projects=( $(openstack project list -c ID -c Name -f value | grep ${mask} | grep -v ${exclude} | cut -d' ' -f1) )
-    echo "-> ${#projects[@]} projects containing '${mask}' found"
-    printf "%s\n" ${projects[@]} | xargs -I{} echo project delete {} >>${cmds}
-    _clean_and_flush
-}
-
-### Servers
-function _clean_servers {
-    servers=( $(openstack server list -c ID -c Name -f value --all | grep "${mask}" | grep -v ${exclude} | cut -d' ' -f1) )
-    echo "-> ${#servers[@]} servers containing '${mask}' found"
-    if [ "$make_servers_active" = true ]; then
-        printf "%s\n" ${servers[@]} | xargs -I{} echo server set --state active {} >>${cmds}
-    fi
-    printf "%s\n" ${servers[@]} | xargs -I{} echo server delete {} >>${cmds}
-    _clean_and_flush
-}
-
-### Reset snapshot state and delete
-function _clean_snapshots {
-    snapshots=( $(openstack volume snapshot list --all -c ID -c Name -f value | grep ${mask} | grep -v ${exclude} | cut -d' ' -f1) )
-    echo "-> ${#snapshots[@]} snapshots containing '${mask}' found"
-    printf "%s\n" ${snapshots[@]} | xargs -I{} echo volume snapshot set --state available {} >>${cmds}
-    printf "%s\n" ${snapshots[@]} | xargs -I{} echo volume snapshot delete {} >>${cmds}
-    _clean_and_flush
-}
-
-function _clean_volumes {
-    volumes=( $(openstack volume list --all -c ID -c Name -c Type -f value | grep ${mask} | grep -v ${exclude} | cut -d' ' -f1) )
-    echo "-> ${#volumes[@]} volumes containing '${mask}' found"
-    printf "%s\n" ${volumes[@]} | xargs -I{} echo volume set --state available {} >>${cmds}
-    printf "%s\n" ${volumes[@]} | xargs -I{} echo volume delete {} >>${cmds}
-    _clean_and_flush
-}
-
-function _clean_volume_groups {
-    groups=( $(cinder --os-volume-api-version 3.43 group-list --all-tenants 1 | grep ${mask} | grep -v ${exclude} | awk '{print $2}') )
-    echo "-> ${#groups[@]} groups containing '${mask}' found"
-    printf "%s\n" ${groups[@]} | xargs -I{} echo group-delete {} >>${cmds}
-    _clean_and_flush_cinder
-}
-
-function _clean_volume_group_types {
-    group_types=( $(cinder --os-volume-api-version 3.43 group-type-list | grep ${mask} | grep -v ${exclude} | awk '{print $2}') )
-    echo "-> ${#group_types[@]} group types containing '${mask}' found"
-    printf "%s\n" ${group_types[@]} | xargs -I{} echo group-type-delete {} >>${cmds}
-    _clean_and_flush_cinder
-}
-
-### Volume types
-function _clean_volume_types {
-    vtypes=( $(openstack volume type list -c ID -c Name -f value | grep ${mask} | grep -v ${exclude} | cut -d' ' -f1) )
-    echo "-> ${#vtypes[@]} volume types containing '${mask}' found"
-    printf "%s\n" ${vtypes[@]} | xargs -I{} echo volume type delete {} >>${cmds}
-    _clean_and_flush
-}
-
-### Images
-function _clean_images {
-    images=( $(openstack image list -c ID -c Name -f value | grep ${mask} | grep -v ${exclude} | cut -d' ' -f1) )
-    echo "-> ${#images[@]} images containing '${mask}' found"
-    printf "%s\n" ${images[@]} | xargs -I{} echo image delete {} >>${cmds}
-    _clean_and_flush
-}
-
-### Sec groups
-function _clean_sec_groups {
-    projects=( $(openstack project list -c ID -c Name -f value | grep ${mask} | grep -v ${exclude} | cut -d' ' -f1) )
-    sgroups=( $(printf "%s\n" ${projects[@]} | xargs -I{} /bin/bash -c "openstack security group list -c ID -c Project -f value | grep {} | cut -d' ' -f1") )
-    echo "-> ${#sgroups[@]} security groups for project containing '${mask}' found"
-    printf "%s\n" ${sgroups[@]} | xargs -I{} echo security group delete {} >>${cmds}
-    _clean_and_flush
-
-    # Additional step to cleanup 'hanged' groups
-    sgroups_raw=( $(openstack security group list -c ID -c Name -f value | grep ${mask} | grep -v ${exclude} | cut -d' ' -f1) )
-    echo "-> ${#sgroups_raw[@]} security groups for '${mask}' found"
-    printf "%s\n" ${sgroups_raw[@]} | xargs -I{} echo security group delete {} >>${cmds}
-    _clean_and_flush
-}
-
-### Keypairs
-function _clean_keypairs {
-    keypairs=( $(openstack keypair list -c Name -f value | grep ${mask} | grep -v ${exclude}) )
-    echo "-> ${#keypairs[@]} keypairs containing '${mask}' found"
-    printf "%s\n" ${keypairs[@]} | xargs -I{} echo keypair delete {} >>${cmds}
-    _clean_and_flush
-}
-
-### Routers and Networks
-function _clean_routers_and_networks {
-    routers=( $(openstack router list -c ID -c Name -f value | grep ${mask} | grep -v ${exclude} | cut -d ' ' -f1) )
-    if [ ${#routers[@]} -eq 0 ]; then
-        echo "-> No routers containing '${mask}' found"
-    else
-        echo "-> ${#routers[@]} routers containing '${mask}' found"
-        echo "... unsetting gateways"
-        printf "%s\n" ${routers[@]} | xargs -I{} echo router unset --external-gateway {} >>${cmds}
-        _clean_and_flush
-
-        echo "... removing ports"
-        for router in ${routers[@]}; do
-            r_ports=( $(openstack port list --router ${router} -f value -c ID) )
-            if [ ${#r_ports[@]} -eq 0 ]; then
-                echo "... no ports to unplug for ${router}"
-            else
-                for r_port in ${r_ports[@]}; do
-                    echo "... queued removal of port '${r_port}' from router '${router}'"
-                    echo "router remove port ${router} ${r_port}" >>${cmds}
-                done
-            fi
-        done
-        _clean_and_flush
-
-        echo "... deleting routers"
-        printf "%s\n" ${routers[@]} | xargs -I{} echo router delete {} >>${cmds}
-        _clean_and_flush
-    fi
-
-    networks=( $(openstack network list | grep "${mask}" | grep -v ${exclude} | cut -d' ' -f2) )
-    if [ ${#networks[@]} -eq 0 ]; then
-        echo "-> No networks containing '${mask}' found"
-    else
-        ports=()
-        subnets=()
-        for((idx=0;idx<${#networks[@]};idx++)) do
-            ports+=( $(openstack port list --network ${networks[idx]} -c ID -f value) )
-            subnets+=( $(openstack subnet list --network ${networks[idx]} -c ID -f value) )
-            echo "-> $((${idx}+1)) of ${#networks[@]}, total ${#ports[@]} ports, ${#subnets[@]} subnets"
-        done
-        printf "%s\n" ${ports[@]} | xargs -I{} echo port delete {} >>${cmds}
-        printf "%s\n" ${subnets[@]} | xargs -I{} echo subnet delete {} >>${cmds}
-        echo network delete ${networks[@]} >>${cmds}
-        echo "-> ${#routers[@]} routers, ${#ports[@]} ports, ${#subnets[@]} subnets, ${#networks[@]} networks"
-    fi
-    _clean_and_flush
-}
-
-### Regions
-function _clean_regions {
-    regions=( $(openstack region list -c Region -f value | grep ${mask} | grep -v ${exclude}) )
-    echo "-> ${#regions[@]} regions containing '${mask}' found"
-    printf "%s\n" ${regions[@]} | xargs -I{} echo region delete {} >>${cmds}
-    _clean_and_flush
-}
-
-### Services
-function _clean_services {
-    services=( $(openstack service list -c Name -f value | grep ${mask} | grep -v ${exclude}) )
-    echo "-> ${#services[@]} services containing '${mask}' found"
-    printf "%s\n" ${services[@]} | xargs -I{} echo service delete {} >>${cmds}
-    _clean_and_flush
-}
-
-### Stacks
-function _clean_stacks {
-    # By default openstack denies use of global_index for everyone.
-    # In case you want to have handy cleanup, consider updating policy.json here:
-    # root@ctl0x:~# cat -n /etc/heat/policy.json | grep global_index
-    # 48      "stacks:global_index": "rule:deny_everybody",
-    # 73      "software_configs:global_index": "rule:deny_everybody",
-    # After this you will be able to use --all option
-
-    stacks=( $(openstack stack list --nested --hidden -c ID -c "Stack Name" -f value | grep ${mask} | grep -v ${exclude} | cut -d' ' -f1) )
-    echo "-> ${#stacks[@]} stacks containing '${mask}' found"
-    printf "%s\n" ${stacks[@]} | xargs -I{} echo stack check {} >>${cmds}
-    printf "%s\n" ${stacks[@]} | xargs -I{} echo stack delete -y {} >>${cmds}
-    _clean_and_flush
-
-    if [ "$stack_alt" = true ]; then
-        stacks=( $(openstack stack list --nested --hidden -c ID -c "Stack Name" -f value | grep -E ${stack_regex} | cut -d' ' -f1) )
-        echo "-> ${#stacks[@]} stacks containing '${stack_regex}' found"
-        printf "%s\n" ${stacks[@]} | xargs -I{} echo stack check {} >>${cmds}
-        printf "%s\n" ${stacks[@]} | xargs -I{} echo stack delete -y {} >>${cmds}
-        _clean_and_flush
-    fi
-
-    if [ "$purge_deleted_stacks" = true ]; then
-        heat-manage purge_deleted -g ${stack_granularity} -b ${batch_size} ${stack_granularity_value} | wc -l | xargs -I{} echo "-> Purged {} stacks"
-    fi
-}
-
-### Containers
-function _clean_containers {
-    containers=( $(openstack container list --all -c ID -c Name -f value | grep ${mask} | grep -v ${exclude} | cut -d' ' -f1) )
-    echo "-> ${#containers[@]} containers containing '${mask}' found"
-    printf "%s\n" ${containers[@]} | xargs -I{} echo container delete {} >>${cmds}
-    _clean_and_flush
-}
-
-function _clean_flavors {
-    flavors=( $(openstack flavor list --all -c ID -c Name -f value | grep ${mask} | grep -v ${exclude} | cut -d' ' -f1) )
-    echo "-> ${#flavors[@]} flavors containing '${mask}' found"
-    printf "%s\n" ${flavors[@]} | xargs -I{} echo flavor delete {} >>${cmds}
-    _clean_and_flush
-}
-
-###################
-### Main
-###################
-# temp file for commands
-cmds=$(mktemp)
-trap "rm -f ${cmds}" EXIT
-echo "Using tempfile: '${cmds}'"
-
-# Consider cleaning contrail resources carefully
-# ...and only after that - clean projects
-
-_clean_stacks
-_clean_servers
-_clean_flavors
-_clean_users
-_clean_roles
-_clean_snapshots
-_clean_volumes
-_clean_volume_groups
-_clean_volume_group_types
-_clean_volume_types
-_clean_images
-_clean_sec_groups
-_clean_keypairs
-_clean_routers_and_networks
-_clean_regions
-_clean_services
-_clean_containers
-
-# project cleaning disabled by default
-# Coz cleaning Contrail with no projects is a hard task
-if [ "$clean_projects" = true ]; then
-    _clean_projects
-fi
-
-# remove temp file
-rm ${cmds}
+##!/bin/bash
+#export OS_INTERFACE='admin'
+#mask='cvp\|s_rally\|rally_\|tempest_\|tempest-\|spt-'
+#exclude='manual\|-static-'
+#stack_alt=false
+#stack_regex='api-[0-9]+-[a-z]+'
+#dry_run=false
+#clean_projects=false
+#make_servers_active=false
+#serial=false
+#batch_size=10
+## granularity values: days,hours,minutes,seconds
+#stack_granularity=days
+#stack_granularity_value=1
+#
+#function show_help {
+#    printf "Resource cleaning script\nMask is: %s\n\t-h, -?\tShow this help\n" ${mask}
+#    printf "\t-t\tDry run mode, no cleaning done\n"
+#    printf "\t-P\tForce cleaning of projects\n"
+#    printf "\t-s\tUse single thread of 'openstack' client for cleanup\n"
+#    printf "\t-S\tSet servers to ACTIVE before deletion (bare metal reqiured)\n"
+#    printf "\t-f\tForce stack cleanup with an additional mask of '%s'\n" ${stack_regex}
+#    printf "\t-F\tForce purge deleted stacks. Batch size: %s, >%s %s\n" ${batch_size} ${stack_granularity_value} ${stack_granularity}
+#}
+#
+#OPTIND=1 # Reset in case getopts has been used previously in the shell.
+#while getopts "h?:tsSPfF" opt; do
+#    case "$opt" in
+#    h|\?)
+#        show_help
+#        exit 0
+#        ;;
+#    t)  dry_run=true
+#        printf "Running in dry-run mode\n"
+#        ;;
+#    s)  serial=true
+#        printf "Single threaded mode enabled\n"
+#        ;;
+#    S)  make_servers_active=true
+#        printf "Servers will be set to ACTIVE before deletion\n"
+#        ;;
+#    P)  clean_projects=true
+#        printf "Project cleanning enabled\n"
+#        ;;
+#    f)  stack_alt=true
+#        printf "Cleaning stacks using additional mask '%s'\n" ${stack_regex}
+#        ;;
+#    F)  purge_deleted_stacks=true
+#        printf "Purging stacks deleted >$stack_granularity_value $stack_granularity ago enabled, batch size %s\n" $stack_batch_size
+#        ;;
+#    esac
+#done
+#
+#shift $((OPTIND-1))
+#[ "${1:-}" = "--" ] && shift
+#
+#### Execute collected commands and flush the temp file
+#function _clean_and_flush {
+#    if [ "$dry_run" = true ] ; then
+#        return 0
+#    fi
+#    if [ -s ${cmds} ]; then
+#        if [ "${serial}" = false ] ; then
+#            echo "... processing $(cat ${cmds} | wc -l) commands, worker threads ${batch_size}"
+#            cat ${cmds} | tr '\n' '\0' | xargs -P ${batch_size} -n 1 -0 echo | openstack
+#            #cat ${cmds} | openstack
+#            truncate -s 0 ${cmds}
+#        else
+#            echo "... processing $(cat ${cmds} | wc -l) commands"
+#            cat ${cmds} | tr '\n' '\0' | xargs -P 1 -n 1 -0 echo | openstack
+#            truncate -s 0 ${cmds}
+#        fi
+#    fi
+#}
+#
+#function _clean_and_flush_cinder {
+#    if [ "$dry_run" = true ] ; then
+#        return 0
+#    fi
+#    if [ -s ${cmds} ]; then
+#        if [ "${serial}" = false ] ; then
+#            echo "... processing $(cat ${cmds} | wc -l) commands, worker threads ${batch_size}"
+#            cat ${cmds} | tr '\n' '\0' | xargs -I{} -P ${batch_size} -n 1 -0 /bin/bash -c 'cinder --os-volume-api-version 3.43 {}'
+#            #cat ${cmds} | cinder --os-volume-api-version 3.43
+#            truncate -s 0 ${cmds}
+#        else
+#            echo "... processing $(cat ${cmds} | wc -l) commands"
+#            cat ${cmds} | tr '\n' '\0' | xargs -I{} -P 1 -n 1 -0 /bin/bash -c 'cinder --os-volume-api-version 3.43 {}'
+#            truncate -s 0 ${cmds}
+#        fi
+#    fi
+#}
+#
+#### Users
+#function _clean_users {
+#    users=( $(openstack user list -c ID -c Name -f value | grep ${mask} | grep -v ${exclude} | cut -d' ' -f1) )
+#    echo "-> ${#users[@]} users containing '${mask}' found"
+#    printf "%s\n" ${users[@]} | xargs -I{} echo user delete {} >>${cmds}
+#    _clean_and_flush
+#}
+#
+#### Roles
+#function _clean_roles {
+#    roles=( $(openstack role list -c ID -c Name -f value | grep ${mask} | grep -v ${exclude} | cut -d' ' -f1) )
+#    echo "-> ${#roles[@]} roles containing '${mask}' found"
+#    printf "%s\n" ${roles[@]} | xargs -I{} echo role delete {} >>${cmds}
+#    _clean_and_flush
+#}
+#
+#### Projects
+#function _clean_projects {
+#    projects=( $(openstack project list -c ID -c Name -f value | grep ${mask} | grep -v ${exclude} | cut -d' ' -f1) )
+#    echo "-> ${#projects[@]} projects containing '${mask}' found"
+#    printf "%s\n" ${projects[@]} | xargs -I{} echo project delete {} >>${cmds}
+#    _clean_and_flush
+#}
+#
+#### Servers
+#function _clean_servers {
+#    servers=( $(openstack server list -c ID -c Name -f value --all | grep "${mask}" | grep -v ${exclude} | cut -d' ' -f1) )
+#    echo "-> ${#servers[@]} servers containing '${mask}' found"
+#    if [ "$make_servers_active" = true ]; then
+#        printf "%s\n" ${servers[@]} | xargs -I{} echo server set --state active {} >>${cmds}
+#    fi
+#    printf "%s\n" ${servers[@]} | xargs -I{} echo server delete {} >>${cmds}
+#    _clean_and_flush
+#}
+#
+#### Reset snapshot state and delete
+#function _clean_snapshots {
+#    snapshots=( $(openstack volume snapshot list --all -c ID -c Name -f value | grep ${mask} | grep -v ${exclude} | cut -d' ' -f1) )
+#    echo "-> ${#snapshots[@]} snapshots containing '${mask}' found"
+#    printf "%s\n" ${snapshots[@]} | xargs -I{} echo volume snapshot set --state available {} >>${cmds}
+#    printf "%s\n" ${snapshots[@]} | xargs -I{} echo volume snapshot delete {} >>${cmds}
+#    _clean_and_flush
+#}
+#
+#function _clean_volumes {
+#    volumes=( $(openstack volume list --all -c ID -c Name -c Type -f value | grep ${mask} | grep -v ${exclude} | cut -d' ' -f1) )
+#    echo "-> ${#volumes[@]} volumes containing '${mask}' found"
+#    printf "%s\n" ${volumes[@]} | xargs -I{} echo volume set --state available {} >>${cmds}
+#    printf "%s\n" ${volumes[@]} | xargs -I{} echo volume delete {} >>${cmds}
+#    _clean_and_flush
+#}
+#
+#function _clean_volume_groups {
+#    groups=( $(cinder --os-volume-api-version 3.43 group-list --all-tenants 1 | grep ${mask} | grep -v ${exclude} | awk '{print $2}') )
+#    echo "-> ${#groups[@]} groups containing '${mask}' found"
+#    printf "%s\n" ${groups[@]} | xargs -I{} echo group-delete {} >>${cmds}
+#    _clean_and_flush_cinder
+#}
+#
+#function _clean_volume_group_types {
+#    group_types=( $(cinder --os-volume-api-version 3.43 group-type-list | grep ${mask} | grep -v ${exclude} | awk '{print $2}') )
+#    echo "-> ${#group_types[@]} group types containing '${mask}' found"
+#    printf "%s\n" ${group_types[@]} | xargs -I{} echo group-type-delete {} >>${cmds}
+#    _clean_and_flush_cinder
+#}
+#
+#### Volume types
+#function _clean_volume_types {
+#    vtypes=( $(openstack volume type list -c ID -c Name -f value | grep ${mask} | grep -v ${exclude} | cut -d' ' -f1) )
+#    echo "-> ${#vtypes[@]} volume types containing '${mask}' found"
+#    printf "%s\n" ${vtypes[@]} | xargs -I{} echo volume type delete {} >>${cmds}
+#    _clean_and_flush
+#}
+#
+#### Images
+#function _clean_images {
+#    images=( $(openstack image list -c ID -c Name -f value | grep ${mask} | grep -v ${exclude} | cut -d' ' -f1) )
+#    echo "-> ${#images[@]} images containing '${mask}' found"
+#    printf "%s\n" ${images[@]} | xargs -I{} echo image delete {} >>${cmds}
+#    _clean_and_flush
+#}
+#
+#### Sec groups
+#function _clean_sec_groups {
+#    projects=( $(openstack project list -c ID -c Name -f value | grep ${mask} | grep -v ${exclude} | cut -d' ' -f1) )
+#    sgroups=( $(printf "%s\n" ${projects[@]} | xargs -I{} /bin/bash -c "openstack security group list -c ID -c Project -f value | grep {} | cut -d' ' -f1") )
+#    echo "-> ${#sgroups[@]} security groups for project containing '${mask}' found"
+#    printf "%s\n" ${sgroups[@]} | xargs -I{} echo security group delete {} >>${cmds}
+#    _clean_and_flush
+#
+#    # Additional step to cleanup 'hanged' groups
+#    sgroups_raw=( $(openstack security group list -c ID -c Name -f value | grep ${mask} | grep -v ${exclude} | cut -d' ' -f1) )
+#    echo "-> ${#sgroups_raw[@]} security groups for '${mask}' found"
+#    printf "%s\n" ${sgroups_raw[@]} | xargs -I{} echo security group delete {} >>${cmds}
+#    _clean_and_flush
+#}
+#
+#### Keypairs
+#function _clean_keypairs {
+#    keypairs=( $(openstack keypair list -c Name -f value | grep ${mask} | grep -v ${exclude}) )
+#    echo "-> ${#keypairs[@]} keypairs containing '${mask}' found"
+#    printf "%s\n" ${keypairs[@]} | xargs -I{} echo keypair delete {} >>${cmds}
+#    _clean_and_flush
+#}
+#
+#### Routers and Networks
+#function _clean_routers_and_networks {
+#    routers=( $(openstack router list -c ID -c Name -f value | grep ${mask} | grep -v ${exclude} | cut -d ' ' -f1) )
+#    if [ ${#routers[@]} -eq 0 ]; then
+#        echo "-> No routers containing '${mask}' found"
+#    else
+#        echo "-> ${#routers[@]} routers containing '${mask}' found"
+#        echo "... unsetting gateways"
+#        printf "%s\n" ${routers[@]} | xargs -I{} echo router unset --external-gateway {} >>${cmds}
+#        _clean_and_flush
+#
+#        echo "... removing ports"
+#        for router in ${routers[@]}; do
+#            r_ports=( $(openstack port list --router ${router} -f value -c ID) )
+#            if [ ${#r_ports[@]} -eq 0 ]; then
+#                echo "... no ports to unplug for ${router}"
+#            else
+#                for r_port in ${r_ports[@]}; do
+#                    echo "... queued removal of port '${r_port}' from router '${router}'"
+#                    echo "router remove port ${router} ${r_port}" >>${cmds}
+#                done
+#            fi
+#        done
+#        _clean_and_flush
+#
+#        echo "... deleting routers"
+#        printf "%s\n" ${routers[@]} | xargs -I{} echo router delete {} >>${cmds}
+#        _clean_and_flush
+#    fi
+#
+#    networks=( $(openstack network list | grep "${mask}" | grep -v ${exclude} | cut -d' ' -f2) )
+#    if [ ${#networks[@]} -eq 0 ]; then
+#        echo "-> No networks containing '${mask}' found"
+#    else
+#        ports=()
+#        subnets=()
+#        for((idx=0;idx<${#networks[@]};idx++)) do
+#            ports+=( $(openstack port list --network ${networks[idx]} -c ID -f value) )
+#            subnets+=( $(openstack subnet list --network ${networks[idx]} -c ID -f value) )
+#            echo "-> $((${idx}+1)) of ${#networks[@]}, total ${#ports[@]} ports, ${#subnets[@]} subnets"
+#        done
+#        printf "%s\n" ${ports[@]} | xargs -I{} echo port delete {} >>${cmds}
+#        printf "%s\n" ${subnets[@]} | xargs -I{} echo subnet delete {} >>${cmds}
+#        echo network delete ${networks[@]} >>${cmds}
+#        echo "-> ${#routers[@]} routers, ${#ports[@]} ports, ${#subnets[@]} subnets, ${#networks[@]} networks"
+#    fi
+#    _clean_and_flush
+#}
+#
+#### Regions
+#function _clean_regions {
+#    regions=( $(openstack region list -c Region -f value | grep ${mask} | grep -v ${exclude}) )
+#    echo "-> ${#regions[@]} regions containing '${mask}' found"
+#    printf "%s\n" ${regions[@]} | xargs -I{} echo region delete {} >>${cmds}
+#    _clean_and_flush
+#}
+#
+#### Services
+#function _clean_services {
+#    services=( $(openstack service list -c Name -f value | grep ${mask} | grep -v ${exclude}) )
+#    echo "-> ${#services[@]} services containing '${mask}' found"
+#    printf "%s\n" ${services[@]} | xargs -I{} echo service delete {} >>${cmds}
+#    _clean_and_flush
+#}
+#
+#### Stacks
+#function _clean_stacks {
+#    # By default openstack denies use of global_index for everyone.
+#    # In case you want to have handy cleanup, consider updating policy.json here:
+#    # root@ctl0x:~# cat -n /etc/heat/policy.json | grep global_index
+#    # 48      "stacks:global_index": "rule:deny_everybody",
+#    # 73      "software_configs:global_index": "rule:deny_everybody",
+#    # After this you will be able to use --all option
+#
+#    stacks=( $(openstack stack list --nested --hidden -c ID -c "Stack Name" -f value | grep ${mask} | grep -v ${exclude} | cut -d' ' -f1) )
+#    echo "-> ${#stacks[@]} stacks containing '${mask}' found"
+#    printf "%s\n" ${stacks[@]} | xargs -I{} echo stack check {} >>${cmds}
+#    printf "%s\n" ${stacks[@]} | xargs -I{} echo stack delete -y {} >>${cmds}
+#    _clean_and_flush
+#
+#    if [ "$stack_alt" = true ]; then
+#        stacks=( $(openstack stack list --nested --hidden -c ID -c "Stack Name" -f value | grep -E ${stack_regex} | cut -d' ' -f1) )
+#        echo "-> ${#stacks[@]} stacks containing '${stack_regex}' found"
+#        printf "%s\n" ${stacks[@]} | xargs -I{} echo stack check {} >>${cmds}
+#        printf "%s\n" ${stacks[@]} | xargs -I{} echo stack delete -y {} >>${cmds}
+#        _clean_and_flush
+#    fi
+#
+#    if [ "$purge_deleted_stacks" = true ]; then
+#        heat-manage purge_deleted -g ${stack_granularity} -b ${batch_size} ${stack_granularity_value} | wc -l | xargs -I{} echo "-> Purged {} stacks"
+#    fi
+#}
+#
+#### Containers
+#function _clean_containers {
+#    containers=( $(openstack container list --all -c ID -c Name -f value | grep ${mask} | grep -v ${exclude} | cut -d' ' -f1) )
+#    echo "-> ${#containers[@]} containers containing '${mask}' found"
+#    printf "%s\n" ${containers[@]} | xargs -I{} echo container delete {} >>${cmds}
+#    _clean_and_flush
+#}
+#
+#function _clean_flavors {
+#    flavors=( $(openstack flavor list --all -c ID -c Name -f value | grep ${mask} | grep -v ${exclude} | cut -d' ' -f1) )
+#    echo "-> ${#flavors[@]} flavors containing '${mask}' found"
+#    printf "%s\n" ${flavors[@]} | xargs -I{} echo flavor delete {} >>${cmds}
+#    _clean_and_flush
+#}
+#
+####################
+#### Main
+####################
+## temp file for commands
+#cmds=$(mktemp)
+#trap "rm -f ${cmds}" EXIT
+#echo "Using tempfile: '${cmds}'"
+#
+## Consider cleaning contrail resources carefully
+## ...and only after that - clean projects
+#
+#_clean_stacks
+#_clean_servers
+#_clean_flavors
+#_clean_users
+#_clean_roles
+#_clean_snapshots
+#_clean_volumes
+#_clean_volume_groups
+#_clean_volume_group_types
+#_clean_volume_types
+#_clean_images
+#_clean_sec_groups
+#_clean_keypairs
+#_clean_routers_and_networks
+#_clean_regions
+#_clean_services
+#_clean_containers
+#
+## project cleaning disabled by default
+## Coz cleaning Contrail with no projects is a hard task
+#if [ "$clean_projects" = true ]; then
+#    _clean_projects
+#fi
+#
+## remove temp file
+#rm ${cmds}
diff --git a/k8s/docker-mos-toolset-full b/k8s/docker-mos-toolset-full
index cc6cb6d..9fef71f 100644
--- a/k8s/docker-mos-toolset-full
+++ b/k8s/docker-mos-toolset-full
@@ -10,7 +10,10 @@
     apt-get download $(apt-cache depends --recurse --no-recommends --no-suggests --no-conflicts --no-breaks --no-replaces --no-enhances iperf3 iperf fio nfs-utils | grep "^\w" | sort -u) || true && \
     rm -rf /var/lib/apt/lists/*
 
-FROM mirantis.azurecr.io/openstack/heat:epoxy-noble-20251017061956
+# TODO: in the future, switch to the tag epoxy-noble-20251017061956;
+# this may lead to dependency issues in some test repos (e.g. cfg-checker, mos-spt),
+# so we need time to test it and move to Noble inside the toolset docker container and k8s pod
+FROM mirantis.azurecr.io/openstack/heat:caracal-jammy-20250314195424
 
 LABEL maintainer="qa-ps@mirantis.com"
 
@@ -22,7 +25,7 @@
 COPY --from=download-packages-1804 /opt/packages-1804 /opt/packages-1804
 
 RUN apt-get clean && apt-get update && \
-    apt-get install -y make gcc g++ git libaio-dev libaio1 zlib1g-dev fio && \
+    apt-get install -y make gcc g++ git libaio-dev zlib1g-dev fio && \
     mkdir /opt/density && \
     rm -rf /var/lib/apt/lists/*
 
@@ -33,11 +36,11 @@
     rm -rf /var/lib/apt/lists/*
 
 RUN apt-get update && \
-    curl https://baltocdn.com/helm/signing.asc | apt-key add - && \
     apt-get install apt-transport-https --yes && \
-    echo "deb https://baltocdn.com/helm/stable/debian/ all main" | tee /etc/apt/sources.list.d/helm-stable-debian.list && \
-    apt-get update && \
-    apt-get install -y helm && \
+    curl -fsSL https://packages.buildkite.com/helm-linux/helm-debian/gpgkey | gpg --dearmor | tee /usr/share/keyrings/helm.gpg > /dev/null && \
+    echo "deb [signed-by=/usr/share/keyrings/helm.gpg] https://packages.buildkite.com/helm-linux/helm-debian/any/ any main" | tee /etc/apt/sources.list.d/helm-stable-debian.list && \
+    apt-get update && \
+    apt-get install -y helm && \
     rm -rf /var/lib/apt/lists/*
 
 RUN pip3 install --no-cache-dir pyghmi tempestparser
diff --git a/k8s/src/mos-spt.tgz b/k8s/src/mos-spt.tgz
index 2ec054b..9081002 100644
--- a/k8s/src/mos-spt.tgz
+++ b/k8s/src/mos-spt.tgz
Binary files differ
diff --git a/k8s/workspace/run-ceph-info-mos.sh b/k8s/workspace/run-ceph-info-mos.sh
index 462b05b..275a571 100644
--- a/k8s/workspace/run-ceph-info-mos.sh
+++ b/k8s/workspace/run-ceph-info-mos.sh
@@ -3,7 +3,16 @@
 . "$(dirname "$0")/functions.sh"
 . /opt/cfg-checker/.checkervenv/bin/activate
 
+current_dir=$(pwd)
+cd /artifacts/reports
+
 fname="$MY_PROJFOLDER/reports/$MY_CLIENTSHORTNAME-mos-ceph-info-$(get_timestamp).html"
 mos-checker ceph info --client-name $MY_CLIENTNAME --project-name $MY_PROJNAME --html "${fname}"
 update_latest_report_to "${fname}"
 deactivate
+
+cd "${current_dir}"
+echo ""
+echo "The reports are saved to:"
+ls -art /artifacts/reports/ | tail -n2 | sed 's|^|/artifacts/reports/|'
+echo ""
\ No newline at end of file
diff --git a/k8s/workspace/run-openstack-spt.sh b/k8s/workspace/run-openstack-spt.sh
index 13a0e2b..30052ae 100644
--- a/k8s/workspace/run-openstack-spt.sh
+++ b/k8s/workspace/run-openstack-spt.sh
@@ -1,5 +1,6 @@
 #!/bin/bash
 tenv=mos
+. /artifacts/env.sh
 . $MY_PROJFOLDER/envs/${tenv}rc
 ##
 echo "### Checking openstack resources"
@@ -29,4 +30,11 @@
 echo "# Copying SPT HTML test report"
 mkdir -p /artifacts/reports/mos-spt
 kubectl exec toolset --stdin --tty -n qa-space -- bash -c "mkdir -p /opt/mos-spt/html_reports && cp /opt/mos-spt/*.html /opt/mos-spt/html_reports"
-kubectl cp qa-space/toolset:/opt/mos-spt/html_reports/ /artifacts/reports/mos-spt/
\ No newline at end of file
+kubectl cp qa-space/toolset:/opt/mos-spt/html_reports/ /artifacts/reports/mos-spt/
+
+# copy and rename the test report
+latest_file=$(ls /artifacts/reports/mos-spt/ | sort | tail -n1)
+new_name="${MY_CLIENTSHORTNAME}-${latest_file}"
+cp "/artifacts/reports/mos-spt/$latest_file" "/artifacts/reports/$new_name"
+echo ""
+echo "The report is saved to /artifacts/reports/${new_name}"
\ No newline at end of file