Various updates and refreshed repository archives; see the list below

* new archives of mos-spt, cfg-checker, si-tests
* run the full Tempest suite with concurrency 4
* updated the mos-spt configuration
* fixed the manual_create_signed_images.sh script
* improved the prepare.sh script

Related-PROD: PROD-37187
Change-Id: I41a34026f7cd593145173ff330e19c59bb8325d3
diff --git a/fio/clouds.yaml b/fio/clouds.yaml
index d373e5a..085e0df 100644
--- a/fio/clouds.yaml
+++ b/fio/clouds.yaml
@@ -43,5 +43,5 @@
       fio_vol_mountpoint: "/dev/vdc"
       mtu_size: 8000
       hv_suffix: "kaas-kubernetes-XXX"
-      cloud_name: "cloud-lon-XXX"
-      concurrency: 5
+      cloud_name: "cloud-XXX"
+      concurrency: 1
diff --git a/k8s/src/mos-checker.tgz b/k8s/src/mos-checker.tgz
index ed4cd36..fa7f30c 100644
--- a/k8s/src/mos-checker.tgz
+++ b/k8s/src/mos-checker.tgz
Binary files differ
diff --git a/k8s/src/mos-spt.tgz b/k8s/src/mos-spt.tgz
index e81fe06..2ec054b 100644
--- a/k8s/src/mos-spt.tgz
+++ b/k8s/src/mos-spt.tgz
Binary files differ
diff --git a/k8s/src/si-tests.tgz b/k8s/src/si-tests.tgz
index b82be0c..60838c3 100644
--- a/k8s/src/si-tests.tgz
+++ b/k8s/src/si-tests.tgz
Binary files differ
diff --git a/k8s/workspace/run-openstack-func-full.sh b/k8s/workspace/run-openstack-func-full.sh
index 52bec02..a567ab2 100644
--- a/k8s/workspace/run-openstack-func-full.sh
+++ b/k8s/workspace/run-openstack-func-full.sh
@@ -27,7 +27,7 @@
 cat <<'EOF' >artifacts/test_scheme.yaml
 ---
 smoke: false
-concurrency: 8
+concurrency: 4
 blacklist-file: /etc/tempest/test-blacklist
 enabled: true
 fail_on_test: true
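
Because the heredoc delimiter above is quoted (<<'EOF'), the YAML body is written verbatim with no shell expansion. If the concurrency value ever needed to come from the environment instead of being hard-coded, an unquoted-delimiter variant could look roughly like this (TEMPEST_CONCURRENCY is a hypothetical variable, not used by the patch):

# Hypothetical: allow overriding Tempest concurrency via the environment,
# defaulting to the value hard-coded in the patch above.
cat <<EOF >artifacts/test_scheme.yaml
---
smoke: false
concurrency: ${TEMPEST_CONCURRENCY:-4}
blacklist-file: /etc/tempest/test-blacklist
enabled: true
fail_on_test: true
EOF
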
diff --git a/k8s/workspace/update-openstack-resources.sh b/k8s/workspace/update-openstack-resources.sh
index e467f91..dbbfd14 100644
--- a/k8s/workspace/update-openstack-resources.sh
+++ b/k8s/workspace/update-openstack-resources.sh
@@ -93,5 +93,12 @@
 sed -i "s/image_ref_name/${ubuntu20_name}/g" $MY_PROJFOLDER/yamls/global_config.yaml
 echo "# s/public-network-name/ -> ${TEMPEST_CUSTOM_PUBLIC_NET}"
 sed -i "s/public-network-name/${TEMPEST_CUSTOM_PUBLIC_NET}/g" $MY_PROJFOLDER/yamls/global_config.yaml
-# 
+#
+
+echo "# Copying files"
+kubectl exec toolset --tty --stdin -n qa-space -- bash -c "cp /artifacts/cmp-check/cvp.manifest /opt/cmp-check/cvp.manifest"
+kubectl exec toolset --tty --stdin -n qa-space -- bash -c "cp /artifacts/cmp-check/cvprc /opt/cmp-check/cvprc"
+kubectl exec toolset --tty --stdin -n qa-space -- bash -c "cp /artifacts/cmp-check/cvprc /opt/cvprc"
+kubectl exec toolset --tty --stdin -n qa-space -- bash -c "cp /artifacts/cmp-check/cvp_testkey /opt/cmp-check/cvp_testkey"
+
 echo "# Done!"
diff --git a/k8s/yamls/spt_global_config.yaml.clean b/k8s/yamls/spt_global_config.yaml.clean
index c1945ba..5fe3f2d 100644
--- a/k8s/yamls/spt_global_config.yaml.clean
+++ b/k8s/yamls/spt_global_config.yaml.clean
@@ -4,11 +4,12 @@
 
 # parameters for vm2vm tests
 CMP_HOSTS: []
-image_name: "image_ref_name" # Use Ubuntu 20.04 LTS image
+image_name: "image_ref_name" # Use Ubuntu 20.04 LTS image with hw_vif_multiqueue_enabled='True' property
+add_hw_vif_multiqueue_enabled_to_image: "true"
 flavor_name: "spt-test"
-flavor_ram: 1536
-flavor_vcpus: 1
-flavor_disk: 5
+flavor_ram: 4096
+flavor_vcpus: 10
+flavor_disk: 20
 nova_timeout: 300
 external_network: "public-network-name"
 custom_mtu: 'default' # 'default' or some value like 8950
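
For context, the hw_vif_multiqueue_enabled referenced in the new comment is a standard Glance image property enabling multiqueue virtio NICs. If it were not applied automatically via the new add_hw_vif_multiqueue_enabled_to_image flag, setting it by hand would look roughly like this (image name assumed):

# Enable multiqueue virtio NICs for guests booted from this image.
openstack image set --property hw_vif_multiqueue_enabled=true <ubuntu-20.04-image>
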
diff --git a/scripts/manual_create_signed_images.sh b/scripts/manual_create_signed_images.sh
index ddd3f6d..bd378c5 100644
--- a/scripts/manual_create_signed_images.sh
+++ b/scripts/manual_create_signed_images.sh
@@ -1,6 +1,28 @@
 #!/bin/bash
 
 echo "Preparing certs"
+cat <<EOF > image_crt.cnf
+[ req ]
+default_bits       = 1024
+prompt             = no
+default_md         = sha256
+req_extensions     = req_ext
+distinguished_name = dn
+
+[ dn ]
+C = US
+ST = TestState
+L = TestCity
+O = TestOrg
+OU = TestUnit
+CN = test.example.com
+
+[ req_ext ]
+subjectAltName = @alt_names
+
+[ alt_names ]
+DNS.1 = test.example.com
+EOF
 openssl genrsa -out image_key.pem 1024
 openssl rsa -pubout -in image_key.pem -out image_key.pem.pub
 openssl req -new -key image_key.pem -out image_req.crt -config image_crt.cnf
@@ -14,10 +36,10 @@
 echo "Exported '$s_uuid'"

 echo "Converting images to Raw"
-qemu-img convert -f qcow2 -O raw -p cvp.ubuntu.2004 /var/tmp/cvp.ubuntu.2004.raw
-qemu-img convert -f qcow2 -O raw -p cvp.ubuntu.2204 /var/tmp/cvp.ubuntu.2204.raw
-qemu-img convert -f qcow2 -O raw -p cvp.cirros.61 /var/tmp/cvp.cirros.61.raw
-qemu-img convert -f qcow2 -O raw -p cvp.cirros.62 /var/tmp/cvp.cirros.62.raw
+qemu-img convert -f qcow2 -O raw -p /artifacts/cmp-check/cvp.ubuntu.2004 /var/tmp/cvp.ubuntu.2004.raw
+qemu-img convert -f qcow2 -O raw -p /artifacts/cmp-check/cvp.ubuntu.2204 /var/tmp/cvp.ubuntu.2204.raw
+qemu-img convert -f qcow2 -O raw -p /artifacts/cmp-check/cvp.cirros.61 /var/tmp/cvp.cirros.61.raw
+qemu-img convert -f qcow2 -O raw -p /artifacts/cmp-check/cvp.cirros.62 /var/tmp/cvp.cirros.62.raw

 echo "Signing images"
 openssl dgst -sha256 -sign image_key.pem -sigopt rsa_padding_mode:pss -out cvp.cirros.61.raw.signature /var/tmp/cvp.cirros.61.raw
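
As a sanity check for the signing step above (not part of the script), each signature can be verified against the public key exported earlier, e.g.:

# Verify the RSA-PSS signature produced by 'openssl dgst -sign' above.
openssl dgst -sha256 -sigopt rsa_padding_mode:pss \
    -verify image_key.pem.pub \
    -signature cvp.cirros.61.raw.signature /var/tmp/cvp.cirros.61.raw
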
diff --git a/scripts/prepare.sh b/scripts/prepare.sh
index 0170630..9b34291 100644
--- a/scripts/prepare.sh
+++ b/scripts/prepare.sh
@@ -276,15 +276,15 @@
 function _flavors() {
     # huge paged flavors
     if [ "$huge_pages" = true ]; then
-        echo flavor create --id 1 --ram 256 --disk 5 --vcpus 1 ${flavor_t} --property hw:mem_page_size=large >>${cmds}
-        echo flavor create --id 2 --ram 512 --disk 10 --vcpus 2 ${flavor_s} --property hw:mem_page_size=large >>${cmds}
-        echo flavor create --id 3 --ram 2048 --disk 20 --vcpus 4 ${flavor_m} --property hw:mem_page_size=large >>${cmds}
-        echo flavor create --id 4 --ram 4096 --disk 30 --vcpus 6 ${flavor_h} --property hw:mem_page_size=large >>${cmds}
+        echo flavor create --ram 256 --disk 5 --vcpus 1 ${flavor_t} --property hw:mem_page_size=large >>${cmds}
+        echo flavor create --ram 512 --disk 10 --vcpus 2 ${flavor_s} --property hw:mem_page_size=large >>${cmds}
+        echo flavor create --ram 2048 --disk 20 --vcpus 4 ${flavor_m} --property hw:mem_page_size=large >>${cmds}
+        echo flavor create --ram 4096 --disk 30 --vcpus 6 ${flavor_h} --property hw:mem_page_size=large >>${cmds}
     else
-        echo flavor create --id 1 --ram 256 --disk 5 --vcpus 1 ${flavor_t} >>${cmds}
-        echo flavor create --id 2 --ram 512 --disk 10 --vcpus 2 ${flavor_s} >>${cmds}
-        echo flavor create --id 3 --ram 2048 --disk 20 --vcpus 4 ${flavor_m} >>${cmds}
-        echo flavor create --id 4 --ram 4096 --disk 30 --vcpus 6 ${flavor_h} >>${cmds}
+        echo flavor create --ram 256 --disk 5 --vcpus 1 ${flavor_t} >>${cmds}
+        echo flavor create --ram 512 --disk 10 --vcpus 2 ${flavor_s} >>${cmds}
+        echo flavor create --ram 2048 --disk 20 --vcpus 4 ${flavor_m} >>${cmds}
+        echo flavor create --ram 4096 --disk 30 --vcpus 6 ${flavor_h} >>${cmds}
     fi
 }
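
The prepare.sh change above drops the hard-coded --id values so that Nova assigns flavor IDs automatically, avoiding conflicts when IDs 1-4 already exist on the target cloud. Going one step further, creation could be made idempotent; a sketch assuming the same ${flavor_*} name variables, run directly rather than echoed into ${cmds} (not part of the patch):

# Hypothetical helper: create a flavor only if no flavor with that name exists.
create_flavor_if_missing() {
    local name="$1"; shift
    openstack flavor show "${name}" >/dev/null 2>&1 || \
        openstack flavor create "$@" "${name}"
}
create_flavor_if_missing "${flavor_t}" --ram 256 --disk 5 --vcpus 1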
 
diff --git a/tempest/test_extension.py b/tempest/test_extension.py
index 205543c..6658201 100644
--- a/tempest/test_extension.py
+++ b/tempest/test_extension.py
@@ -1,137 +1,137 @@
-
-
-    import testtools
-    @test.attr(type='full')
-    @test.services('compute', 'network')
-    @testtools.skipUnless(CONF.compute_feature_enabled.live_migration,
-                          'Live migration must be enabled in tempest.conf')
-    def test_live_migrate_to_all_nodes(self):
-        # collect all active hosts in all az
-        if not CONF.compute_feature_enabled.live_migration:
-            raise cls.skipException(
-                "Live migration is disabled")
-        available_zone = \
-            self.os_adm.availability_zone_client.list_availability_zones(
-                detail=True)['availabilityZoneInfo']
-        hosts = []
-        for zone in available_zone:
-            if zone['zoneState']['available']:
-                for host in zone['hosts']:
-                    if 'nova-compute' in zone['hosts'][host] and \
-                        zone['hosts'][host]['nova-compute']['available']:
-                        hosts.append({'zone': zone['zoneName'],
-                                      'host_name': host})
-
-        # ensure we have at least as many compute hosts as we expect
-        if len(hosts) < CONF.compute.min_compute_nodes:
-            raise exceptions.InvalidConfiguration(
-                "Host list %s is shorter than min_compute_nodes. "
-                "Did a compute worker not boot correctly?" % hosts)
-
-        # Create 1 VM
-        servers = []
-        first_last_host = hosts[0]
-        inst = self.create_server(
-            availability_zone='%(zone)s:%(host_name)s' % hosts[0],
-            wait_until='ACTIVE')
-        server = self.servers_client.show_server(inst['id'])['server']
-        # ensure server is located on the requested host
-        self.assertEqual(hosts[0]['host_name'], server['OS-EXT-SRV-ATTR:host'])
-        hosts.remove(first_last_host)
-        hosts.append(first_last_host)
-
-        # Live migrate to every host
-        for host in hosts[:CONF.compute.min_compute_nodes]:
-            self.servers_client.live_migrate_server(server_id=inst["id"],host=host['host_name'],block_migration=CONF.compute_feature_enabled.block_migration_for_live_migration,disk_over_commit=False)
-            waiters.wait_for_server_status(self.servers_client, inst["id"], 'ACTIVE')
-            server = self.servers_client.show_server(inst['id'])['server']
-            # ensure server is located on the requested host
-            self.assertEqual(host['host_name'], server['OS-EXT-SRV-ATTR:host'])
-
-
-from tempest.lib.common.utils import test_utils
-class TestServerSshAllComputes(manager.NetworkScenarioTest):
-    credentials = ['primary', 'admin']
-
-
-    @classmethod
-    def setup_clients(cls):
-        super(TestServerSshAllComputes, cls).setup_clients()
-        # Use admin client by default
-        cls.manager = cls.admin_manager
-        # this is needed so that we can use the availability_zone:host
-        # scheduler hint, which is admin_only by default
-        cls.servers_client = cls.admin_manager.servers_client
-
-    @test.attr(type='full')
-    @test.services('compute', 'network')
-    def test_ssh_to_all_nodes(self):
-        available_zone = \
-            self.os_adm.availability_zone_client.list_availability_zones(
-                detail=True)['availabilityZoneInfo']
-        hosts = []
-        for zone in available_zone:
-            if zone['zoneState']['available']:
-                for host in zone['hosts']:
-                    if 'nova-compute' in zone['hosts'][host] and \
-                        zone['hosts'][host]['nova-compute']['available']:
-                        hosts.append({'zone': zone['zoneName'],
-                                      'host_name': host})
-
-        # ensure we have at least as many compute hosts as we expect
-        if len(hosts) < CONF.compute.min_compute_nodes:
-            raise exceptions.InvalidConfiguration(
-                "Host list %s is shorter than min_compute_nodes. "
-                "Did a compute worker not boot correctly?" % hosts)
-
-        servers = []
-
-        # prepare key pair and sec group
-        keypair = self.os_adm.keypairs_client.create_keypair(name="tempest-live")
-        secgroup = self._create_security_group(security_groups_client=self.os_adm.security_groups_client, security_group_rules_client=self.os_adm.security_group_rules_client, tenant_id=self.os_adm.security_groups_client.tenant_id)
-
-        # create 1 compute for each node, up to the min_compute_nodes
-        # threshold (so that things don't get crazy if you have 1000
-        # compute nodes but set min to 3).
-
-        for host in hosts[:CONF.compute.min_compute_nodes]:
-            inst = self.create_server(
-                availability_zone='%(zone)s:%(host_name)s' % host,
-                key_name=keypair['keypair']['name'])
-            server = self.os_adm.servers_client.show_server(inst['id'])['server']
-            # TODO we may create server with sec group instead of adding it
-            self.os_adm.servers_client.add_security_group(server['id'],
-                                                 name=secgroup['name'])
-            
-            # ensure server is located on the requested host
-            self.assertEqual(host['host_name'], server['OS-EXT-SRV-ATTR:host'])
-            # TODO maybe check validate = True?
-            if CONF.network.public_network_id:
-                # Check VM via ssh
-                floating_ip = self.os_adm.compute_floating_ips_client.create_floating_ip(pool=CONF.network.floating_network_name)['floating_ip']
-                self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        self.os_adm.compute_floating_ips_client.delete_floating_ip,
-                        floating_ip['id'])
-                self.os_adm.compute_floating_ips_client.associate_floating_ip_to_server(floating_ip['ip'], inst['id'])
-
-                #   TODO maybe add this
-                #    "Failed to find floating IP '%s' in server addresses: %s" %
-                #   (floating_ip['ip'], server['addresses']))
-
-                # check that we can SSH to the server
-                self.linux_client = self.get_remote_client(
-                    floating_ip['ip'], private_key=keypair['keypair']['private_key'])
-
-            servers.append(server)
-
-        # make sure we really have the number of servers we think we should
-        self.assertEqual(
-            len(servers), CONF.compute.min_compute_nodes,
-            "Incorrect number of servers built %s" % servers)
-
-        # ensure that every server ended up on a different host
-        host_ids = [x['hostId'] for x in servers]
-        self.assertEqual(
-            len(set(host_ids)), len(servers),
-            "Incorrect number of distinct host_ids scheduled to %s" % servers)
-        self.os_adm.keypairs_client.delete_keypair(keypair['keypair']['name'])
+# import testtools
+# @test.attr(type='full')
+# @test.services('compute', 'network')
+# @testtools.skipUnless(CONF.compute_feature_enabled.live_migration,
+#                       'Live migration must be enabled in tempest.conf')
+#
+# def test_live_migrate_to_all_nodes(self):
+#     # collect all active hosts in all az
+#     if not CONF.compute_feature_enabled.live_migration:
+#         raise cls.skipException(
+#             "Live migration is disabled")
+#     available_zone = \
+#         self.os_adm.availability_zone_client.list_availability_zones(
+#             detail=True)['availabilityZoneInfo']
+#     hosts = []
+#     for zone in available_zone:
+#         if zone['zoneState']['available']:
+#             for host in zone['hosts']:
+#                 if 'nova-compute' in zone['hosts'][host] and \
+#                     zone['hosts'][host]['nova-compute']['available']:
+#                     hosts.append({'zone': zone['zoneName'],
+#                                   'host_name': host})
+#
+#     # ensure we have at least as many compute hosts as we expect
+#     if len(hosts) < CONF.compute.min_compute_nodes:
+#         raise exceptions.InvalidConfiguration(
+#             "Host list %s is shorter than min_compute_nodes. "
+#             "Did a compute worker not boot correctly?" % hosts)
+#
+#     # Create 1 VM
+#     servers = []
+#     first_last_host = hosts[0]
+#     inst = self.create_server(
+#         availability_zone='%(zone)s:%(host_name)s' % hosts[0],
+#         wait_until='ACTIVE')
+#     server = self.servers_client.show_server(inst['id'])['server']
+#     # ensure server is located on the requested host
+#     self.assertEqual(hosts[0]['host_name'], server['OS-EXT-SRV-ATTR:host'])
+#     hosts.remove(first_last_host)
+#     hosts.append(first_last_host)
+#
+#     # Live migrate to every host
+#     for host in hosts[:CONF.compute.min_compute_nodes]:
+#         self.servers_client.live_migrate_server(server_id=inst["id"],host=host['host_name'],block_migration=CONF.compute_feature_enabled.block_migration_for_live_migration,disk_over_commit=False)
+#         waiters.wait_for_server_status(self.servers_client, inst["id"], 'ACTIVE')
+#         server = self.servers_client.show_server(inst['id'])['server']
+#         # ensure server is located on the requested host
+#         self.assertEqual(host['host_name'], server['OS-EXT-SRV-ATTR:host'])
+#
+#
+# from tempest.lib.common.utils import test_utils
+# class TestServerSshAllComputes(manager.NetworkScenarioTest):
+# credentials = ['primary', 'admin']
+#
+#
+# @classmethod
+# def setup_clients(cls):
+#     super(TestServerSshAllComputes, cls).setup_clients()
+#     # Use admin client by default
+#     cls.manager = cls.admin_manager
+#     # this is needed so that we can use the availability_zone:host
+#     # scheduler hint, which is admin_only by default
+#     cls.servers_client = cls.admin_manager.servers_client
+#
+# @test.attr(type='full')
+# @test.services('compute', 'network')
+# def test_ssh_to_all_nodes(self):
+#     available_zone = \
+#         self.os_adm.availability_zone_client.list_availability_zones(
+#             detail=True)['availabilityZoneInfo']
+#     hosts = []
+#     for zone in available_zone:
+#         if zone['zoneState']['available']:
+#             for host in zone['hosts']:
+#                 if 'nova-compute' in zone['hosts'][host] and \
+#                     zone['hosts'][host]['nova-compute']['available']:
+#                     hosts.append({'zone': zone['zoneName'],
+#                                   'host_name': host})
+#
+#     # ensure we have at least as many compute hosts as we expect
+#     if len(hosts) < CONF.compute.min_compute_nodes:
+#         raise exceptions.InvalidConfiguration(
+#             "Host list %s is shorter than min_compute_nodes. "
+#             "Did a compute worker not boot correctly?" % hosts)
+#
+#     servers = []
+#
+#     # prepare key pair and sec group
+#     keypair = self.os_adm.keypairs_client.create_keypair(name="tempest-live")
+#     secgroup = self._create_security_group(security_groups_client=self.os_adm.security_groups_client, security_group_rules_client=self.os_adm.security_group_rules_client, tenant_id=self.os_adm.security_groups_client.tenant_id)
+#
+#     # create 1 compute for each node, up to the min_compute_nodes
+#     # threshold (so that things don't get crazy if you have 1000
+#     # compute nodes but set min to 3).
+#
+#     for host in hosts[:CONF.compute.min_compute_nodes]:
+#         inst = self.create_server(
+#             availability_zone='%(zone)s:%(host_name)s' % host,
+#             key_name=keypair['keypair']['name'])
+#         server = self.os_adm.servers_client.show_server(inst['id'])['server']
+#         # TODO we may create server with sec group instead of adding it
+#         self.os_adm.servers_client.add_security_group(server['id'],
+#                                              name=secgroup['name'])
+#
+#         # ensure server is located on the requested host
+#         self.assertEqual(host['host_name'], server['OS-EXT-SRV-ATTR:host'])
+#         # TODO maybe check validate = True?
+#         if CONF.network.public_network_id:
+#             # Check VM via ssh
+#             floating_ip = self.os_adm.compute_floating_ips_client.create_floating_ip(pool=CONF.network.floating_network_name)['floating_ip']
+#             self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+#                     self.os_adm.compute_floating_ips_client.delete_floating_ip,
+#                     floating_ip['id'])
+#             self.os_adm.compute_floating_ips_client.associate_floating_ip_to_server(floating_ip['ip'], inst['id'])
+#
+#             #   TODO maybe add this
+#             #    "Failed to find floating IP '%s' in server addresses: %s" %
+#             #   (floating_ip['ip'], server['addresses']))
+#
+#             # check that we can SSH to the server
+#             self.linux_client = self.get_remote_client(
+#                 floating_ip['ip'], private_key=keypair['keypair']['private_key'])
+#
+#         servers.append(server)
+#
+#     # make sure we really have the number of servers we think we should
+#     self.assertEqual(
+#         len(servers), CONF.compute.min_compute_nodes,
+#         "Incorrect number of servers built %s" % servers)
+#
+#     # ensure that every server ended up on a different host
+#     host_ids = [x['hostId'] for x in servers]
+#     self.assertEqual(
+#         len(set(host_ids)), len(servers),
+#         "Incorrect number of distinct host_ids scheduled to %s" % servers)
+#     self.os_adm.keypairs_client.delete_keypair(keypair['keypair']['name'])
+#