Offline procedure preparations, p3
Related-PROD: PROD-36856
Change-Id: Id27983047f5a71cc459df4a50ac9429f999e47cf
diff --git a/k8s/rally-files/openstack-mos-scn-i100.json b/k8s/rally-files/openstack-mos-scn-i100.json
index 20a388a..169dfcd 100644
--- a/k8s/rally-files/openstack-mos-scn-i100.json
+++ b/k8s/rally-files/openstack-mos-scn-i100.json
@@ -9,7 +9,7 @@
{% set availability_zone = "nova" %}
{% set rbd_image = "http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img" %}
{% set heat_template = "/rally/rally-files/default.yaml.template" %}
-{% set fixed_net = "id" %}
+{% set fixed_net = "fixed-net-id" %}
{
"KeystoneBasic.authenticate_user_and_validate_token": [
diff --git a/k8s/rally-files/openstack-mos-scn.json.clean b/k8s/rally-files/openstack-mos-scn.json.clean
new file mode 100644
index 0000000..dd5a4f1
--- /dev/null
+++ b/k8s/rally-files/openstack-mos-scn.json.clean
@@ -0,0 +1,455 @@
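+{# 'concurrent-threads', 'run-times-number' and 'fixed-net-id' below are placeholders; run-openstack-perf.sh replaces them via sed before the task is started #}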
+{% set concurrency = concurrent-threads %}
+{% set volume_size = 1 %}
+{% set times = run-times-number %}
+{% set users = 3 %}
+{% set tenants = 3 %}
+
+{% set flavor_name = flavor_name or "cvp.tiny" %}
+{% set image_name = image_name or "cvp.cirros.51" %}
+{% set availability_zone = "nova" %}
+{% set rbd_image = "http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img" %}
+{% set heat_template = "/rally/rally-files/default.yaml.template" %}
+{% set fixed_net = "fixed-net-id" %}
+
+{
+ "KeystoneBasic.authenticate_user_and_validate_token": [
+ {
+ "args": {},
+ "runner": {
+ "type": "constant",
+ "times": {{times}},
+ "concurrency": {{concurrency}}
+ },
+ "sla": { "failure_rate": { "max": 0 } }
+ }
+ ],
+ "KeystoneBasic.create_tenant_with_users": [
+ {
+ "args": { "users_per_tenant": 10 },
+ "runner": {
+ "type": "constant",
+ "concurrency": {{concurrency}},
+ "times": {{times}}
+ },
+ "sla": { "failure_rate": { "max": 0 } }
+ }
+ ],
+ "Authenticate.validate_cinder": [
+ {
+ "args": { "repetitions": 2 },
+ "runner": {
+ "type": "constant",
+ "times": {{times}},
+ "concurrency": {{concurrency}}
+ },
+ "context": {
+ "users": {
+ "users_per_tenant": {{users}},
+ "tenants": {{tenants}}
+ },
+ "api_versions@openstack": { "cinder": {"service_type": "volumev3"} }
+ },
+ "sla": { "failure_rate": { "max": 0 } }
+ }
+ ],
+ "Authenticate.validate_glance": [
+ {
+ "args": { "repetitions": 2 },
+ "runner": {
+ "type": "constant",
+ "times": {{times}},
+ "concurrency": {{concurrency}}
+ },
+ "context": {
+ "users": {
+ "users_per_tenant": {{users}},
+ "tenants": {{tenants}}
+ }
+ },
+ "sla": { "failure_rate": { "max": 0 } }
+ }
+ ],
+ "Authenticate.validate_heat": [
+ {
+ "args": { "repetitions": 2 },
+ "runner": {
+ "type": "constant",
+ "times": {{times}},
+ "concurrency": {{concurrency}}
+ },
+ "context": {
+ "users": {
+ "users_per_tenant": {{users}},
+ "tenants": {{tenants}}
+ }
+ },
+ "sla": { "failure_rate": { "max": 0 } }
+ }
+ ],
+ "Authenticate.validate_nova": [
+ {
+ "args": { "repetitions": 2 },
+ "runner": {
+ "type": "constant",
+ "times": {{times}},
+ "concurrency": {{concurrency}}
+ },
+ "context": {
+ "users": {
+ "users_per_tenant": {{users}},
+ "tenants": {{tenants}}
+ }
+ },
+ "sla": { "failure_rate": { "max": 0 } }
+ }
+ ],
+ "Authenticate.validate_neutron": [
+ {
+ "args": { "repetitions": 2 },
+ "runner": {
+ "type": "constant",
+ "times": {{times}},
+ "concurrency": {{concurrency}}
+ },
+ "context": {
+ "users": {
+ "users_per_tenant": {{users}},
+ "tenants": {{tenants}}
+ }
+ },
+ "sla": { "failure_rate": { "max": 0 } }
+ }
+ ],
+ "NovaServers.boot_and_migrate_server": [
+ {
+ "args": {
+ "flavor": {
+ "name": "{{flavor_name}}"
+ },
+ "image": {
+ "name": "{{image_name}}"
+ },
+ "nics": [
+ {"net-id": "{{fixed_net}}"}
+ ]
+ },
+ "runner": {
+ "type": "constant",
+ "times": {{times}},
+ "concurrency": {{concurrency}}
+ },
+ "context": {
+ "users": {
+ "users_per_tenant": {{users}},
+ "tenants": {{tenants}}
+ },
+ "quotas": {
+ "nova": {
+ "instances": -1,
+ "cores": -1,
+ "ram": -1,
+ "key_pairs": -1,
+ "security_groups": -1,
+ "security_group_rules": -1
+ }
+ }
+ },
+ "sla": { "failure_rate": { "max": 0 } }
+ }
+ ],
+ "NovaServers.boot_and_delete_server": [
+ {
+ "args": {
+ "flavor": {
+ "name": "{{flavor_name}}"
+ },
+ "image": {
+ "name": "{{image_name}}"
+ },
+ "nics": [
+ {"net-id": "{{fixed_net}}"}
+ ],
+ "detailed": true,
+ "availability_zone": "{{availability_zone}}"
+ },
+ "runner": {
+ "type": "constant",
+ "concurrency": {{concurrency}},
+ "times": {{times}}
+ },
+ "context": {
+ "users": {
+                "tenants": {{tenants}},
+                "users_per_tenant": {{users}}
+ },
+ "quotas": {
+ "nova": {
+ "instances": -1,
+ "cores": -1,
+ "ram": -1,
+ "key_pairs": -1,
+ "security_groups": -1,
+ "security_group_rules": -1
+ }
+ }
+ },
+ "sla": { "failure_rate": { "max": 0 } }
+ }
+ ],
+ "NovaServers.boot_server_from_volume_and_delete": [
+ {
+ "args": {
+ "flavor": {
+ "name": "{{flavor_name}}"
+ },
+ "image": {
+ "name": "{{image_name}}"
+ },
+ "nics": [
+ {"net-id": "{{fixed_net}}"}
+ ],
+ "volume_size": {{volume_size}},
+ "force_delete": false,
+ "availability_zone": "{{availability_zone}}"
+ },
+ "runner": {
+ "type": "constant",
+ "concurrency": {{concurrency}},
+ "times": {{times}}
+ },
+ "context": {
+ "users": {
+                "tenants": {{tenants}},
+                "users_per_tenant": {{users}}
+ },
+ "api_versions@openstack": { "cinder": {"service_type": "volumev3"} },
+ "quotas": {
+ "nova": {
+ "instances": -1,
+ "cores": -1,
+ "ram": -1,
+ "key_pairs": -1,
+ "security_groups": -1,
+ "security_group_rules": -1
+ },
+ "cinder": { "volumes": -1 }
+ }
+ },
+ "sla": { "failure_rate": { "max": 0 } }
+ }
+ ],
+ "NeutronNetworks.create_and_delete_networks": [
+ {
+ "runner": {
+ "type": "constant",
+ "concurrency": {{concurrency}},
+ "times": {{times}}
+ },
+ "context": {
+ "users": {
+                "tenants": {{tenants}},
+                "users_per_tenant": {{users}}
+ },
+ "quotas": {
+ "neutron": { "network": -1 }
+ }
+ },
+ "sla": { "failure_rate": { "max": 0 } }
+ }
+ ],
+ "NeutronNetworks.create_and_delete_ports": [
+ {
+ "args": {
+ "network_create_args": {},
+ "port_create_args": {},
+ "ports_per_network": 10
+ },
+ "runner": {
+ "type": "constant",
+ "concurrency": {{concurrency}},
+ "times": {{times}}
+ },
+ "context": {
+ "users": {
+                "tenants": {{tenants}},
+                "users_per_tenant": {{users}}
+ },
+ "quotas": {
+ "neutron": {
+ "network": -1,
+ "port": -1
+ }
+ }
+ },
+ "sla": { "failure_rate": { "max": 0 } }
+ }
+ ],
+ "NeutronNetworks.create_and_delete_routers": [
+ {
+ "args": {
+ "network_create_args": {},
+ "subnet_create_args": {},
+ "subnet_cidr_start": "1.1.0.0/30",
+ "subnets_per_network": 2,
+ "router_create_args": {}
+ },
+ "runner": {
+ "type": "constant",
+ "concurrency": {{concurrency}},
+ "times": {{times}}
+ },
+ "context": {
+ "users": {
+                "tenants": {{tenants}},
+                "users_per_tenant": {{users}}
+ },
+ "quotas": {
+ "neutron": {
+ "network": -1,
+ "subnet": -1,
+ "router": -1
+ }
+ }
+ },
+ "sla": { "failure_rate": { "max": 0 } }
+ }
+ ],
+ "CinderVolumes.create_and_attach_volume": [
+ {
+ "args": {
+ "size": {{volume_size}},
+ "image": {
+ "name": "{{image_name}}"
+ },
+ "flavor": {
+ "name": "{{flavor_name}}"
+ },
+ "nics": [
+ {"net-id": "{{fixed_net}}"}
+ ],
+ "create_volume_params": {},
+ "availability_zone": "{{availability_zone}}"
+ },
+ "runner": {
+ "type": "constant",
+ "concurrency": {{concurrency}},
+ "times": {{times}}
+ },
+ "context": {
+ "users": {
+                "tenants": {{tenants}},
+                "users_per_tenant": {{users}}
+ },
+ "api_versions@openstack": { "cinder": {"service_type": "volumev3"} },
+ "quotas": {
+ "nova": {
+ "instances": -1,
+ "cores": -1,
+ "ram": -1,
+ "key_pairs": -1,
+ "security_groups": -1,
+ "security_group_rules": -1
+ },
+ "cinder": { "volumes": -1 }
+ }
+ },
+ "sla": { "failure_rate": { "max": 0 } }
+ }
+ ],
+ "CinderVolumes.create_and_delete_snapshot": [
+ {
+ "args": { "force": false },
+ "runner": {
+ "type": "constant",
+ "concurrency": {{concurrency}},
+ "times": {{times}}
+ },
+ "context": {
+ "users": {
+                "tenants": {{tenants}},
+                "users_per_tenant": {{users}}
+ },
+ "volumes": { "size": {{volume_size}} },
+ "quotas": {
+ "cinder": {
+ "snapshots": -1,
+ "volumes": -1
+ }
+ },
+ "api_versions@openstack": { "cinder": {"service_type": "volumev3"} }
+ },
+ "sla": { "failure_rate": { "max": 0 } }
+ }
+ ],
+ "CinderVolumes.create_and_upload_volume_to_image": [
+ {
+ "args": {
+ "size": {{volume_size}},
+ "force": false,
+ "container_format": "bare",
+ "disk_format": "raw",
+ "do_delete": true,
+ "image": {
+ "name": "{{image_name}}"
+ }
+ },
+ "runner": {
+ "type": "constant",
+ "concurrency": {{concurrency}},
+ "times": {{times}}
+ },
+ "context": {
+ "users": {
+                "tenants": {{tenants}},
+                "users_per_tenant": {{users}}
+ },
+ "quotas": {
+ "cinder": {
+ "snapshots": -1,
+ "volumes": -1
+ }
+ },
+ "api_versions@openstack": { "cinder": {"service_type": "volumev3"} }
+ },
+ "sla": { "failure_rate": { "max": 0 } }
+ }
+ ],
+ "GlanceImages.create_and_delete_image": [
+ {
+ "args": {
+ "image_location": "{{rbd_image}}",
+ "container_format": "bare",
+ "disk_format": "qcow2"
+ },
+ "runner": {
+ "type": "constant",
+ "concurrency": {{concurrency}},
+ "times": {{times}}
+ },
+ "context": {
+ "users": {
+                "tenants": {{tenants}},
+                "users_per_tenant": {{users}}
+ }
+ },
+ "sla": { "failure_rate": { "max": 0 } }
+ }
+ ],
+ "HeatStacks.create_and_delete_stack": [
+ {
+ "args": { "template_path": "{{heat_template}}" },
+ "runner": {
+ "type": "constant",
+ "concurrency": {{concurrency}},
+ "times": {{times}}
+ },
+ "context": {
+ "users": {
+                "tenants": {{tenants}},
+                "users_per_tenant": {{users}}
+ }
+ },
+ "sla": { "failure_rate": { "max": 0 } }
+ }
+ ]
+}
diff --git a/k8s/workspace/create-rally-deployments.sh b/k8s/workspace/create-rally-deployments.sh
index 942c81f..71a4041 100644
--- a/k8s/workspace/create-rally-deployments.sh
+++ b/k8s/workspace/create-rally-deployments.sh
@@ -7,6 +7,10 @@
exit 1
fi
+# Updating folder and file permissions for the rally user
+kubectl exec -n qa-space --stdin rally -- sudo chown rally /artifacts
+kubectl exec -n qa-space --stdin rally -- sudo chown rally /rally/rally-files/*
+
###
if [ ! -z $(kubectl exec -n qa-space --stdin rally -- rally env list | grep openstack | cut -d' ' -f2) ]; then
echo "# Openstack env already created"
@@ -22,7 +26,6 @@
kubectl exec -n qa-space --stdin rally -- rally env list
else
echo "# Creating kubernetes env"
- kubectl exec -n qa-space --stdin rally -- sudo chown rally /artifacts
kubectl cp $MY_PROJFOLDER/envs/mos-kubeconfig.yaml qa-space/rally:/artifacts/mos-kubeconfig.yaml
kubectl exec -n qa-space --stdin rally -- bash -c "bash /rally/rally-files/init-rally-kube.sh"
fi
diff --git a/k8s/workspace/init-workspace.sh b/k8s/workspace/init-workspace.sh
index 6b889c2..0e546aa 100644
--- a/k8s/workspace/init-workspace.sh
+++ b/k8s/workspace/init-workspace.sh
@@ -11,17 +11,21 @@
export MY_PROJFOLDER=/artifacts
echo "# Using folder '$MY_PROJFOLDER'"
cd $MY_PROJFOLDER
-[ -f envs ] && mkdir envs
-[ -f yamls ] && mkdir yamls
-[ -f reports ] && mkdir reports
-[ -f tmp ] && mkdir tmp
+[ ! -d envs ] && mkdir envs
+[ ! -d yamls ] && mkdir yamls
+[ ! -d reports ] && mkdir reports
+[ ! -d tmp ] && mkdir tmp
# move mcc konfig to default place
if [ -f $MY_PROJFOLDER/mcc-kubeconfig.yaml ]; then
mv $MY_PROJFOLDER/mcc-kubeconfig.yaml $MY_PROJFOLDER/envs/mcc-kubeconfig.yaml
fi
+if [ -f $MY_PROJFOLDER/node.key ]; then
+ mv $MY_PROJFOLDER/node.key $MY_PROJFOLDER/envs/node.key
+fi
if [ ! -f $MY_PROJFOLDER/envs/mcc-kubeconfig.yaml ]; then
echo "ERROR: MCC kubeconfig not found either at '$MY_PROJFOLDER/mcc-kubeconfig.yaml' or '$MY_PROJFOLDER/envs/mcc-kubeconfig.yaml'"
+ exit 1
fi
echo " "
@@ -70,6 +74,12 @@
cp -v /opt/res-files/k8s/workspace/* $MY_PROJFOLDER/envs/
[ ! -d $MY_PROJFOLDER/scripts ] && mkdir $MY_PROJFOLDER/scripts
mv -v $MY_PROJFOLDER/envs/*.sh $MY_PROJFOLDER/scripts/
+
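+# Copy the pod and resource manifests needed by later steps into the workspace yamls folder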
+cp -v /opt/res-files/k8s/yamls/qa-rally.yaml $MY_PROJFOLDER/yamls
+cp -v /opt/res-files/k8s/yamls/qa-res.yaml $MY_PROJFOLDER/yamls
+cp -v /opt/res-files/k8s/yamls/qa-toolset-bare.yaml $MY_PROJFOLDER/yamls
+cp -v /opt/res-files/k8s/yamls/qa-toolset.yaml $MY_PROJFOLDER/yamls
+
# remove duplicate init
rm -v $MY_PROJFOLDER/scripts/init-workspace.sh
# update IP Addresses
diff --git a/k8s/workspace/run-perf-k8s.sh b/k8s/workspace/run-k8s-perf.sh
similarity index 100%
rename from k8s/workspace/run-perf-k8s.sh
rename to k8s/workspace/run-k8s-perf.sh
diff --git a/k8s/workspace/run-openstack-func-full.sh b/k8s/workspace/run-openstack-func-full.sh
new file mode 100644
index 0000000..a11dbe0
--- /dev/null
+++ b/k8s/workspace/run-openstack-func-full.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
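+# Run the full (non-smoke) Tempest suite against the MOS OpenStack cluster via si-tests
+# and collect the resulting XML files into an HTML report under $MY_PROJFOLDER/reports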
+tenv=mos
+. /opt/si-tests/.sivenv/bin/activate
+cd $MY_PROJFOLDER/tmp
+. $MY_PROJFOLDER/env.sh
+. $MY_PROJFOLDER/envs/${tenv}rc
+# Just in case
+unset TARGET_CLUSTER
+unset TARGET_NAMESPACE
+
+#
+echo "# Creating schema"
+[ -f "/artifacts/tmp/artifacts/test_scheme.yaml" ] && rm -v $MY_PROJFOLDER/tmp/artifacts/test_scheme.yaml
+cat <<'EOF' >artifacts/test_scheme.yaml
+---
+smoke: false
+concurrency: 8
+blacklist-file: /etc/tempest/test-blacklist
+enabled: true
+fail_on_test: true
+type: tempest
+# regex: test
+EOF
+cat artifacts/test_scheme.yaml
+echo " "
+env | grep TEMPEST_
+echo " "
+#
+echo "# Checking auto-allocation"
+cmd="openstack network auto allocated topology create --check-resources"
+kubectl -n qa-space exec toolset --stdin -- $cmd
+if [ $? -ne 0 ]; then
+ cmd="openstack network set --default --external ${TEMPEST_CUSTOM_PUBLIC_NET}"
+ echo "# Trying to set network: '${cmd}'"
+ kubectl -n qa-space exec toolset --stdin -- $cmd
+ echo "# Checking again"
+ cmd="openstack network auto allocated topology create --check-resources"
+ kubectl -n qa-space exec toolset --stdin -- $cmd
+ [ $? -ne 0 ] && printf "\n\n# WARNING: Check functional tests pod for errors on test init\n\n"
+fi
+
+# run tests
+pytest -vv /opt/si-tests/si_tests/tests/lcm/test_run_tempest.py
+deactivate
+
+# report
+if [ -d $MY_PROJFOLDER/reports/${tenv}-func ]; then
+    echo "# Generating reports"
+ yes | rm $MY_PROJFOLDER/reports/${tenv}-func/*
+else
+ mkdir $MY_PROJFOLDER/reports/${tenv}-func
+fi
+cp ./artifacts/*.xml $MY_PROJFOLDER/reports/${tenv}-func/
+cd $MY_PROJFOLDER/reports/
+tparser -f r_xml -d -r $MY_CLIENTSHORTNAME-${tenv}-openstack-func-full-latest.html $MY_PROJFOLDER/reports/${tenv}-func/
+cd $MY_PROJFOLDER
diff --git a/k8s/workspace/run-openstack-func-smoke.sh b/k8s/workspace/run-openstack-func-smoke.sh
index 3d044e5..340a771 100644
--- a/k8s/workspace/run-openstack-func-smoke.sh
+++ b/k8s/workspace/run-openstack-func-smoke.sh
@@ -23,6 +23,8 @@
EOF
cat artifacts/test_scheme.yaml
echo " "
+env | grep TEMPEST_
+echo " "
#
echo "# Checking auto-allocation"
cmd="openstack network auto allocated topology create --check-resources"
@@ -38,7 +40,7 @@
fi
# run tests
-pytest -v /opt/si-tests/si_tests/tests/lcm/test_run_tempest.py
+pytest -vv /opt/si-tests/si_tests/tests/lcm/test_run_tempest.py
deactivate
# report
diff --git a/k8s/workspace/run-openstack-perf.sh b/k8s/workspace/run-openstack-perf.sh
new file mode 100644
index 0000000..c66b5a0
--- /dev/null
+++ b/k8s/workspace/run-openstack-perf.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
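+# Run the Rally OpenStack performance scenario inside the 'rally' pod: copy the scenario
+# template, substitute its placeholders (network UUID, concurrency, run times) and start the task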
+function kexec() {
+ kubectl exec -n qa-space --tty --stdin rally -- bash -c "${1}"
+}
+
+# Scenario parameters substituted for the 'concurrent-threads' and 'run-times-number' placeholders
+concurrency=10
+run_times=200
+
+tenv=mos
+. /opt/si-tests/.sivenv/bin/activate
+cd $MY_PROJFOLDER/tmp
+. $MY_PROJFOLDER/env.sh
+. $MY_PROJFOLDER/envs/${tenv}rc
+# Just in case
+unset TARGET_CLUSTER
+unset TARGET_NAMESPACE
+dryrun=0
+#
+if [ ! -z ${1+x} ]; then
+ echo "# Using Dry-run mode"
+ dryrun=1
+fi
+
+##
+echo "### Checking rally environments"
+status=$(kubectl -n qa-space get pod | grep rally | tr -s " " | cut -d' ' -f3)
+if [ "${status}" != "Running" ]; then
+ echo "# 'rally' container is not Running"
+ echo "# Consider creating resources and/or creating environments"
+ exit 1
+fi
+
+###
+uuid=$(kubectl exec -n qa-space --stdin rally -- rally env list | grep openstack | cut -d' ' -f2)
+if [ -z "${uuid}" ]; then
+ echo "# Openstack env not found. Please, run 'create-rally-deployments.sh'"
+ kubectl exec -n qa-space --stdin rally -- rally env list
+else
+ echo "# Running Openstack performance tests"
+ if [ ${dryrun} == 1 ]; then
+ scenario=/rally/rally-files/openstack-mos-scn-i1.json
+ else
+ scenario=/rally/rally-files/openstack-mos-scn.json.clean
+ fi
+ task_scn=/artifacts/openstack-scenario.json
+ # prepare scenario
+ kexec "cp -v ${scenario} ${task_scn}"
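+    # Import key=value pairs from cvp.manifest (provides fixed_net_left_id used below)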
+ declare $(kubectl exec toolset --stdin -n qa-space -- bash -c "cat /artifacts/cmp-check/cvp.manifest")
+ echo "# Updating network UUID to ${fixed_net_left_id}"
+ kexec "sed -i \"s/fixed-net-id/${fixed_net_left_id}/g\" ${task_scn}"
+ echo "# Updating concurrency to ${concurrency}"
+ kexec "sed -i \"s/concurrent-threads/${concurrency}/g\" ${task_scn}"
+ echo "# Updating running times to ${run_times}"
+ kexec "sed -i \"s/run-times-number/${run_times}/g\" ${task_scn}"
+ # run
+ kexec "rally env use ${uuid}; rally task start ${task_scn}"
+ # generate report
+ echo "# Generating report"
+ fname=$MY_CLIENTSHORTNAME-mos-openstack-perf-latest.html
+ kubectl exec -n qa-space --stdin rally -- rally task report $(kubectl exec -n qa-space --stdin rally -- rally task list | grep openstack | cut -d' ' -f2 | tail -1) --html-static --out ${fname}
+ kubectl cp qa-space/rally:/rally/${fname} $MY_PROJFOLDER/reports/${fname}
+fi
diff --git a/k8s/workspace/run-openstack-spt.sh b/k8s/workspace/run-openstack-spt.sh
new file mode 100644
index 0000000..156fd1d
--- /dev/null
+++ b/k8s/workspace/run-openstack-spt.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
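+# Run the mos-spt VM-to-VM network checks from the 'toolset' pod using the generated global_config.yaml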
+tenv=mos
+. $MY_PROJFOLDER/envs/${tenv}rc
+##
+echo "### Checking openstack resources"
+status=$(kubectl -n qa-space get pod | grep toolset | tr -s " " | cut -d' ' -f3)
+if [ "${status}" != "Running" ]; then
+ echo "# 'toolset' container is not Running"
+ exit 1
+fi
+admin_uuid=$(kubectl exec toolset --stdin -n qa-space -- bash -c "openstack user show cvp.admin -c id -f value")
+if [ ! -z ${TEMPEST_CUSTOM_PUBLIC_NET+x} ]; then
+ echo "# Copying global_config.yaml"
+ kubectl cp $MY_PROJFOLDER/yamls/global_config.yaml qa-space/toolset:/opt/mos-spt/global_config.yaml
+ echo " "
+ echo "# Running spt checks"
+ echo " "
+ kubectl exec toolset --stdin --tty -n qa-space -- bash -c "cd /opt/mos-spt; . .venv/bin/activate; pytest -rs -o log_cli=true --tb=short tests/test_vm2vm.py"
+else
+ echo "# Public network not set: TEMPEST_CUSTOM_PUBLIC_NET=${TEMPEST_CUSTOM_PUBLIC_NET}"
+ exit 1
+fi
diff --git a/k8s/workspace/update-openstack-resources.sh b/k8s/workspace/update-openstack-resources.sh
index 9c9b36c..f566676 100644
--- a/k8s/workspace/update-openstack-resources.sh
+++ b/k8s/workspace/update-openstack-resources.sh
@@ -20,7 +20,7 @@
echo "# Creating openstack resources"
echo " "
kubectl exec toolset --stdin -n qa-space -- bash -c "mkdir /artifacts/cmp-check"
- kubectl exec toolset --stdin -n qa-space -- bash -c "cd /artifacts/cmp-check; bash /opt/cmp-check/prepare.sh"
+ kubectl exec toolset --tty --stdin -n qa-space -- bash -c "cd /artifacts/cmp-check; bash /opt/cmp-check/prepare.sh -w \$(pwd)"
fi
#
@@ -41,6 +41,13 @@
sed -i "s/public_net_uuid/${netid}/g" $MY_PROJFOLDER/yamls/tempest_custom.yaml
echo "# s/public_net_name/ -> ${TEMPEST_CUSTOM_PUBLIC_NET}"
sed -i "s/public_net_name/${TEMPEST_CUSTOM_PUBLIC_NET}/g" $MY_PROJFOLDER/yamls/tempest_custom.yaml
+echo " "
+echo "# Updating SPT global_config.yaml"
+cp -v /opt/res-files/k8s/yamls/spt_global_config.yaml.clean $MY_PROJFOLDER/yamls/global_config.yaml
+echo "# s/public-network-name/ -> ${TEMPEST_CUSTOM_PUBLIC_NET}"
+sed -i "s/public-network-name/${TEMPEST_CUSTOM_PUBLIC_NET}/g" $MY_PROJFOLDER/yamls/global_config.yaml
+echo "# s/mtu-value/ -> default"
+sed -i "s/mtu-value/default/g" $MY_PROJFOLDER/yamls/global_config.yaml
#
echo "# Done!"
diff --git a/k8s/yamls/spt_global_config.yaml.clean b/k8s/yamls/spt_global_config.yaml.clean
new file mode 100644
index 0000000..751ef62
--- /dev/null
+++ b/k8s/yamls/spt_global_config.yaml.clean
@@ -0,0 +1,22 @@
+---
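+# 'public-network-name' and 'mtu-value' are placeholders filled in by update-openstack-resources.sh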
+# parameters for glance image test
+IMAGE_SIZE_MB: 9000
+
+# parameters for vm2vm test
+CMP_HOSTS: []
+image_name: "cvp.ubuntu.2004" # Use Ubuntu 20.04 LTS image
+flavor_name: 'spt-test'
+flavor_ram: 1536
+flavor_vcpus: 1
+flavor_disk: 5
+nova_timeout: 300
+external_network: 'public-network-name'
+custom_mtu: 'mtu-value' # 'default' or some value like 8950
+iperf_prep_string: "sudo /bin/bash -c 'echo \"91.189.88.161 archive.ubuntu.com\" >> /etc/hosts'"
+internet_at_vms: 'false' # whether Internet is present at OpenStack VMs and iperf can be installed with apt
+iperf_deb_package_dir_path: '/opt/packages/'
+iperf_time: 60 # time in seconds to transmit for (iperf -t option)
+multiple_threads_number: 10
+multiple_threads_iperf_utility: "iperf3" # set "iperf" for v2, "iperf3" for v3
+ssh_timeout: 500
+skipped_nodes: []