Updated packages for MOSK 25.1 release, reworked ceph collect script

Related-PROD: PROD-37028
Change-Id: I863ffc55a0a8ead0562cc3c104a5f209ac320fb2
diff --git a/cfg_checker/common/const.py b/cfg_checker/common/const.py
index a0e62c1..05e20cb 100644
--- a/cfg_checker/common/const.py
+++ b/cfg_checker/common/const.py
@@ -89,13 +89,13 @@
 
 ubuntu_releases = ["trusty", "xenial", "ubuntu", "bionic", "focal", "jammy", "noble"]
 kaas_ubuntu_active = [
+    "ubuntu-1.1.0",
     "ubuntu-1.0.0",
     "ubuntu-0.0.9",
     "ubuntu-0.0.8",
-    "ubuntu-2025-01-08-003900",
-    "ubuntu-2025-01-27-003900",
-    "ubuntu-2025-02-17-003900",
-    "ubuntu-2025-03-05-003900"
+    "ubuntu-2025-05-26-003900",
+    "ubuntu-2025-06-23-003900",
+    "ubuntu-2025-07-16-003900"
 ]
 mcp_active_tags = [
     "2019.2.0",
@@ -152,6 +152,8 @@
 }
 
 nova_openstack_versions = {
+    "32": "Flamingo",
+    "31": "Epoxy",
     "30": "Dalmatian",
     "29": "Caracal",
     "28": "Bobcat",
diff --git a/scripts/ceph_collect.sh b/scripts/ceph_collect.sh
index fe2ef6c..1285aa9 100644
--- a/scripts/ceph_collect.sh
+++ b/scripts/ceph_collect.sh
@@ -1,9 +1,58 @@
 #!/bin/bash
+
+# Collect relevant data from a Ceph cluster for troubleshooting and assessment
+# (C) Christian Huebner chuebner@mirantis.com 2015
+# Run with: ./ceph_collect.sh [options] <customername> <clustername>  (see -h for options)
+
 echo "Collecting Ceph cluster data."
 
-if [ "$#" -lt 2 ]; then echo "Usage: ./ceph_collect.sh <CUSTOMER> <CLUSTERNAME>"; exit; fi
-export CUSTOMER=$1
-export CLUSTERNAME=$2
+help () {
+   echo "Data collector for Ceph analytics"
+   echo "Usage: ceph_collect.sh [-nb|--nobench] [-vol|--volumes] [-m|--healthmetrics] <customername> <clustername>"
+   echo "-m only works with Nautilus and up"
+}
+
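+# Option defaults; the OSD id list is collected once up front and reused by the bluefs stats and bench loops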
+POSITIONAL=()
+BENCH="true"
+VOLUMES="false"
+HEALTHMETRICS="false"
+OSDS=(`ceph osd ls`)
+
+if [[ $# -eq 0 ]]; then
+	help
+	exit
+fi
+
+while [[ $# -gt 0 ]]; do
+  key="$1"
+
+  case $key in
+    -h|--help)
+      help
+      exit
+      ;;
+    -vol|--volumes)
+      VOLUMES="true"
+      shift # past argument
+      ;;
+    -nb|--nobench)
+      BENCH="false"
+      shift # past argument
+      ;;
+    -m|--healthmetrics)
+      HEALTHMETRICS="true"
+      shift # past argument
+      ;;
+    *)    # unknown option
+      POSITIONAL+=("$1") # save it in an array for later
+      shift # past argument
+      ;;
+  esac
+done
+
+if [ ${#POSITIONAL[*]} -lt 2 ]; then help; exit; fi
+export CUSTOMER=${POSITIONAL[0]}
+export CLUSTERNAME=${POSITIONAL[1]}
 
 if ! which ceph >/dev/null; then echo "ERROR: This script must be run on a ceph monitor or admin node"; exit; fi
 
@@ -25,7 +74,7 @@
 echo "Collecting cluster status"
 ceph -s -f json -o ceph_s.json
 echo "Collecting health detail"
-ceph -f json health detail -o ceph_healt_detail.json
+ceph -f json health detail -o ceph_health_detail.json
 echo "Collecting monmap"
 ceph mon dump -f json -o monmap.json
 echo "Collecting ceph df"
@@ -42,13 +91,81 @@
 ceph auth list -f json |sed 's/AQ[^=]*==/KEY/g' > ceph_auth_ls.json
 echo "Collecting ceph pg dump"
 ceph pg dump -f json -o ceph_pg_dump.json
-echo "Collecting health metrics"
-mkdir ceph-health
-IFS=$'\n'; for device in `ceph device ls|grep -v DEVICE`; do osd=$(echo $device|awk '{print $3}'); dev=$(echo $device|awk '{print $1}'); ceph device get-health-metrics $dev >ceph-health/$osd-$dev.json ; done
-echo "Collecting ceph osd perf"
-for i in {0..9}; do echo $i; ceph osd perf -f json -o ceph_osd_perf_$i.json; sleep 4; done
+echo "Collecting ceph pg autoscale"
+ceph osd pool autoscale-status -f json -o ceph_pg_autoscale_status.json
 echo "Collecting ceph running configuration"
 ceph config dump -f json >ceph_config_dump.json
+echo "Collecting ceph erasure code profiles"
+ceph -f json osd erasure-code-profile ls >ceph_osd_erasure-code-profiles.json
+
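+# NOTE: "rbd ls -l" here lists the default pool in plain text; the sed only rewrites the last character, so the resulting file is not strict JSON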
+echo "Collecting rbd ls -l"
+rbd ls -l | sed '$ s/.$/}/' >rbd_ls_l.json
+
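+# bluefs stats per OSD reports block DB/WAL device usage; one "osd.N": <stats> line is appended per OSD (newline-delimited, not a single JSON document)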
+echo "Collecting block DB/WAL stats"
+for i in ${OSDS[@]}; do
+	echo \"osd.$i\"\: `ceph -f json tell osd.$i bluefs stats` >>ceph_db_wal_stats.json
+done
+
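+# Dump each erasure code profile into its own ceph_osd_erasure-code-profile_<name>.json file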
+for prof in `ceph osd erasure-code-profile ls`; do
+	ceph -f json -o ceph_osd_erasure-code-profile_$prof.json osd erasure-code-profile get $prof
+done
+
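+# -vol option: per-pool volume/snapshot/clone counts from "rbd ls -l"; only pools whose "ceph osd pool ls detail" line mentions rbd are included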
+if [[ $VOLUMES = "true" ]]; then
+	echo "Collecting ceph volumes and CoW clones per rbd pool"
+	for pool in `ceph osd pool ls detail |grep rbd |awk '{print $3}'|sed s/\'//g`; do \
+		echo '{' \
+			'"name" : ' $pool ',' \
+			'"volumes" : ' `rbd ls -l $pool | grep -v '@'| wc -l |grep -v ' 0$'` ',' \
+			'"snapshots" : ' `rbd ls -l $pool | awk '{print $1}' |grep '@' | wc -l` ',' \
+			'"clones" : ' `rbd ls -l $pool | awk '{print $4}' |grep '@' | wc -l` ',' \
+			'}' ; \
+	done >> volumes_per_pool.json
+else
+	echo "Volume collection disabled."
+fi
+
+echo "Collecting ceph osd perf"
+for i in {0..9}; do echo $i; ceph osd perf -f json -o ceph_osd_perf_$i.json; sleep 4; done
+
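+# "ceph device ls" is only available on Nautilus and newer; skip device health collection on older releases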
+if ceph device ls &>/dev/null; then
+	echo "Collecting device health information"
+	ceph device ls|grep -v DEVICE|awk '{print $1}'|xargs --max-args=1 ceph device get-health-metrics > ceph_device_get_health_metrics.json
+else
+	echo "Device health check not supported"
+fi
+
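+# -m option: per-device SMART health metrics, one JSON file per OSD device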
+if [[ $HEALTHMETRICS = "true" ]]; then
+	echo "Collecting Ceph Health Metrics (-m option)"
+	IFS=$'\n'
+	for device in `ceph device ls|grep -v DEVICE`; do
+		osd=$(echo $device|awk '{print $3}')
+		dev=$(echo $device|awk '{print $1}')
+		ceph device get-health-metrics $dev >ceph_health_${osd}_${dev}.json
+	done
+	unset IFS
+fi
+
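+# ceph tell osd.N bench writes 12000000 bytes in 4096-byte blocks on each OSD; results are assembled into a single JSON object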
+if [[ $BENCH = "true" ]]; then
+	echo "Collecting Ceph Benchmark"
+	echo "{ " > ceph_tell_bench.json
+	for i in ${OSDS[@]}; do
+		if [ $i -ne ${OSDS[-1]} ]; then
+			echo \"osd.$i\"\: `ceph tell osd.$i bench -f json 12000000 4096` ',' >>ceph_tell_bench.json
+		else
+			echo \"osd.$i\"\: `ceph tell osd.$i bench -f json 12000000 4096` >>ceph_tell_bench.json
+		fi
+	done
+	echo "}" >> ceph_tell_bench.json
+fi
+
+# Grab Ceph logs
+echo "Collecting Ceph Logs"
+ceph log last 10000 cluster >ceph_cluster_log.json || true
+ceph log last 10000 audit >ceph_audit_log.json || true
 
 tar czf "../"$ARCHNAME *
+cd ..
 
diff --git a/versions/repo.info.tgz b/versions/repo.info.tgz
index 858a9e0..b055cd5 100644
--- a/versions/repo.info.tgz
+++ b/versions/repo.info.tgz
Binary files differ
diff --git a/versions/repo.versions.tgz b/versions/repo.versions.tgz
index 43bd20c..b8acdd9 100644
--- a/versions/repo.versions.tgz
+++ b/versions/repo.versions.tgz
Binary files differ