Added VM2VM test between different routers in 2 projects

Added the test:
 VM to VM test in different projects, different networks,
 different routers, measured via Floating IPs (common floating net):

 * a separate project is created, admin user is added to it
 * VMs are in separate projects (admin and newly created),
   separate networks, each project has its own router.
   VMs have access to each other only by the common floating net.
 * VMs are created on the same and different compute nodes.
 * Verification is done via Floating IPs in 1 and multiple threads.

Other enhancements:
* renamed "tenant" variables and values to "project"
* more detailed logging (e.g. in which project resource is created)
* some refactoring
* fixed picking existing spt flavor if any
* check the project is empty before deleting it
* added stderr/stdout output for the case when the VM has no Internet
  access and iperf could not be installed

Related-PROD: PROD-36943
Change-Id: I165ed41259336e586ad16ed9eb27ea59619db4c8
diff --git a/fixtures/base.py b/fixtures/base.py
index 721d772..d1434dd 100644
--- a/fixtures/base.py
+++ b/fixtures/base.py
@@ -16,11 +16,11 @@
     return os_client.OfficialClientManager(
         username=os.environ['OS_USERNAME'],
         password=os.environ['OS_PASSWORD'],
-        tenant_name=os.environ['OS_PROJECT_NAME'],
+        project_name=os.environ['OS_PROJECT_NAME'],
         auth_url=os.environ['OS_AUTH_URL'],
         cert=False,
         domain=os.environ['OS_PROJECT_DOMAIN_NAME'],
-        )
+    )
 
 
 nodes = utils.get_pairs()
@@ -34,6 +34,7 @@
 
 @pytest.fixture(scope='session')
 def os_resources(openstack_clients):
+    logger.info("Setting up resources in admin project...")
     os_actions = os_client.OSCliActions(openstack_clients)
     os_resource = {}
     config = utils.get_configuration()
@@ -50,19 +51,16 @@
     if os_images_list.__len__() == 0:
         pytest.skip("No images with name {}. This name can be redefined "
                     "with 'image_name' env var ".format(image_name))
-
     os_resource['image_id'] = str(os_images_list[0])
-
-    os_resource['flavor_id'] = [flavor.id for flavor in
-                                openstack_clients.compute.flavors.list()
-                                if flavor.name == flavor_name]
-    flavor_is_created = False
-    if not os_resource['flavor_id']:
+    flavor_id = os_actions.get_flavor_id_by_name(flavor_name)
+    if flavor_id is None:
+        flavor_is_created = True
         os_resource['flavor_id'] = os_actions.create_flavor(
             flavor_name, flavor_ram, flavor_vcpus, flavor_disk).id
-        flavor_is_created = True
     else:
-        os_resource['flavor_id'] = str(os_resource['flavor_id'][0])
+        flavor_is_created = False
+        logger.info("Flavor {} already exists".format(flavor_name))
+        os_resource['flavor_id'] = flavor_id
 
     os_resource['sec_group'] = os_actions.create_sec_group()
     os_resource['keypair'] = openstack_clients.compute.keypairs.create(
@@ -70,12 +68,12 @@
     )
     os_resource['net1'] = os_actions.create_network_resources()
     os_resource['ext_net'] = os_actions.get_external_network()
-    adm_tenant = os_actions.get_admin_tenant()
+    adm_project = os_actions.get_project_by_name("admin")
     os_resource['router'] = os_actions.create_router(
-        os_resource['ext_net'], adm_tenant.id)
-    os_resource['net2'] = os_actions.create_network(adm_tenant.id)
+        os_resource['ext_net'], adm_project.id)
+    os_resource['net2'] = os_actions.create_network(adm_project.id)
     os_resource['subnet2'] = os_actions.create_subnet(
-        os_resource['net2'], adm_tenant.id, '10.2.7.0/24')
+        os_resource['net2'], adm_project.id, '10.2.7.0/24')
     for subnet in openstack_clients.network.list_subnets()['subnets']:
         if subnet['network_id'] == os_resource['net1']['id']:
             os_resource['subnet1'] = subnet['id']
@@ -88,7 +86,8 @@
     yield os_resource
 
     # cleanup created resources
-    logger.info("Deleting routers, networks, SG, key pair, flavor...")
+    logger.info("Deleting routers, networks, SG, key pair, flavor in {}..."
+                "".format(adm_project.name))
     openstack_clients.network.remove_interface_router(
         os_resource['router']['id'], {'subnet_id': os_resource['subnet1']})
     openstack_clients.network.remove_interface_router(
@@ -109,3 +108,104 @@
         openstack_clients.compute.flavors.delete(os_resource['flavor_id'])
     if os_actions.create_fake_ext_net:
         openstack_clients.network.delete_network(os_resource['ext_net']['id'])
+
+
+@pytest.fixture(scope='session')
+def openstack_alt_clients(openstack_clients):
+    # create alt project with regular admin user
+    tmp_os_actions = os_client.OSCliActions(openstack_clients)
+    alt_project = tmp_os_actions.create_project()
+    tmp_os_actions.add_roles_to_user_in_project(alt_project.id)
+
+    # create a client using alt project with admin user in it
+    return os_client.OfficialClientManager(
+        username=os.environ['OS_USERNAME'],
+        password=os.environ['OS_PASSWORD'],
+        project_name=alt_project.name,
+        auth_url=os.environ['OS_AUTH_URL'],
+        cert=False,
+        domain=os.environ['OS_PROJECT_DOMAIN_NAME'],
+    )
+
+
+@pytest.fixture(scope='session')
+def os_resources_alt_project(openstack_alt_clients):
+    logger.info("Setting up resources in the project {}..."
+                "".format(openstack_alt_clients.project_name))
+    alt_os_actions = os_client.OSCliActions(openstack_alt_clients)
+    os_resource_alt_project = {}
+    config = utils.get_configuration()
+    image_name = config.get('image_name', 'Ubuntu-18.04')
+    flavor_name = config.get('flavor_name', 'spt-test')
+    flavor_ram = config.get('flavor_ram', 1536)
+    flavor_vcpus = config.get('flavor_vcpus', 1)
+    flavor_disk = config.get('flavor_disk', 3)
+    os_images_list = [image.id for image in
+                      openstack_alt_clients.image.images.list(
+                          filters={'name': image_name})]
+
+    if not os_images_list:
+        pytest.skip("No images with name {}. This name can be redefined "
+                    "with 'image_name' env var ".format(image_name))
+    os_resource_alt_project['image_id'] = str(os_images_list[0])
+
+    flavor_id = alt_os_actions.get_flavor_id_by_name(flavor_name)
+    if flavor_id is None:
+        flavor_is_created = True
+        os_resource_alt_project['flavor_id'] = alt_os_actions.create_flavor(
+            flavor_name, flavor_ram, flavor_vcpus, flavor_disk).id
+    else:
+        flavor_is_created = False
+        logger.info("Flavor {} already exists".format(flavor_name))
+        os_resource_alt_project['flavor_id'] = flavor_id
+
+    os_resource_alt_project['sec_group'] = alt_os_actions.create_sec_group()
+
+    alt_project = alt_os_actions.get_project_by_name(
+        openstack_alt_clients.project_name)
+    os_resource_alt_project['net1'] = alt_os_actions.create_network_resources(
+        project=alt_project.name, cidr='10.3.7.0/24')
+    os_resource_alt_project['ext_net'] = alt_os_actions.get_external_network()
+
+    os_resource_alt_project['router'] = alt_os_actions.create_router(
+        os_resource_alt_project['ext_net'], alt_project.id)
+
+    os_resource_alt_project['subnet1'] = \
+        openstack_alt_clients.network.show_network(
+            os_resource_alt_project['net1']['id'])['network']['subnets'][0]
+    openstack_alt_clients.network.add_interface_router(
+        os_resource_alt_project['router']['id'],
+        {'subnet_id': os_resource_alt_project['subnet1']})
+    yield os_resource_alt_project
+
+    # cleanup created resources
+    logger.info("Deleting routers, networks, SG, flavor in {} "
+                "project...".format(alt_project.name))
+
+    openstack_alt_clients.network.remove_interface_router(
+        os_resource_alt_project['router']['id'],
+        {'subnet_id': os_resource_alt_project['subnet1']})
+    openstack_alt_clients.network.remove_gateway_router(
+        os_resource_alt_project['router']['id'])
+    time.sleep(5)
+    openstack_alt_clients.network.delete_router(
+        os_resource_alt_project['router']['id'])
+    time.sleep(5)
+    openstack_alt_clients.network.delete_network(
+        os_resource_alt_project['net1']['id'])
+
+    openstack_alt_clients.network.delete_security_group(
+        os_resource_alt_project['sec_group']['id'])
+    if flavor_is_created:
+        openstack_alt_clients.compute.flavors.delete(
+            os_resource_alt_project['flavor_id'])
+    if alt_os_actions.create_fake_ext_net:
+        openstack_alt_clients.network.delete_network(
+            os_resource_alt_project['ext_net']['id'])
+
+    if alt_os_actions.is_project_empty(alt_project.id):
+        openstack_alt_clients.auth.projects.delete(alt_project.id)
+        logger.info("Deleted project {}".format(alt_project.name))
+    else:
+        logger.info("Project {} is not empty, skip deleting".format(
+            alt_project.name))
diff --git a/tests/test_vm2vm.py b/tests/test_vm2vm.py
index 0a89f1e..44b584e 100644
--- a/tests/test_vm2vm.py
+++ b/tests/test_vm2vm.py
@@ -118,39 +118,40 @@
             'Test Case', 'Host 1', 'Host 2', 'MTU at VMs', 'Result'
         ]]
         # Do iperf3 measurement #1
-        logger.info("Doing 'VM to VM in same tenant on same node via Private "
-                    "IP, 1 thread' measurement...")
+        measurement1 = ("VM to VM in same project on same node via Private "
+                        "IP, 1 thread; iperf3")
+        logger.info("Doing '{}' measurement...".format(measurement1))
         result1 = transport1.exec_command(
             'iperf3 -c {} -t {} | grep sender | tail -n 1'.format(
                 vm_info[1]['private_address'], iperf_time))
         res1 = (b" ".join(result1.split()[-4:-2:])).decode('utf-8')
         logger.info("Result #1 is {}".format(res1))
-        table_rows.append(['VM to VM in same tenant on same node via '
-                           'Private IP, 1 thread; iperf3',
+        table_rows.append([measurement1,
                            "{}".format(pair[0]),
                            "{}".format(pair[0]),
                            "{}, {}".format(mtus[0], mtus[1]),
                            "{}".format(res1)])
 
         # Do iperf3 measurement #2
-        logger.info("Doing 'VM to VM in same tenant on different HW nodes "
-                    "via Private IP, 1 thread' measurement...")
+        measurement2 = ("VM to VM in same project on different HW nodes via "
+                        "Private IP, 1 thread; iperf3")
+        logger.info("Doing '{}' measurement...".format(measurement2))
         result2 = transport1.exec_command(
             'iperf3 -c {} -t {} | grep sender | tail -n 1'.format(
                 vm_info[2]['private_address'], iperf_time))
         res2 = (b" ".join(result2.split()[-4:-2:])).decode('utf-8')
         logger.info("Result #2 is {}".format(res2))
-        table_rows.append(['VM to VM in same tenant on different HW nodes '
-                           'via Private IP, 1 thread; iperf3',
+        table_rows.append([measurement2,
                            "{}".format(pair[0]),
                            "{}".format(pair[1]),
                            "{}, {}".format(mtus[0], mtus[2]),
                            "{}".format(res2)])
 
-        # Do iperf3 measurement #3
-        logger.info("Doing 'VM to VM in same tenant on different HW nodes "
-                    "via Private IP, {} threads' measurement..."
-                    "".format(threads))
+        # Do iperf/iperf3 measurement #3
+        measurement3 = ("VM to VM in same project on different HW nodes via "
+                        "Private IP, {} threads; {}"
+                        "".format(threads, iperf_utility))
+        logger.info("Doing '{}' measurement...".format(measurement3))
         if iperf_utility == "iperf3":
             result3 = transport1.exec_command(
                 '{} -c {} -P {} -t {} | grep sender | tail -n 1'
@@ -161,47 +162,43 @@
             iperf_utility = "iperf"
             result3 = transport1.exec_command(
                 '{} -c {} -P {} -t {} | tail -n 1'.format(
-                    iperf_utility, vm_info[2]['private_address'],
-                    threads, iperf_time))
+                    iperf_utility, vm_info[2]['private_address'], threads,
+                    iperf_time))
             res3 = (b" ".join(result3.split()[-2::])).decode('utf-8')
         logger.info("Result #3 is {}".format(res3))
-        table_rows.append(['VM to VM in same tenant on different HW nodes '
-                           'via Private IP, {} threads; {}'
-                           ''.format(threads, iperf_utility),
+        table_rows.append([measurement3,
                            "{}".format(pair[0]),
                            "{}".format(pair[1]),
                            "{}, {}".format(mtus[0], mtus[2]),
                            "{}".format(res3)])
 
-        # Do iperf (v2) measurement #4
-        logger.info("Doing 'VM to VM in same tenant via Floating IP and VMs "
-                    "are on different nodes, 1 thread' measurement...")
+        # Do iperf3 measurement #4
+        measurement4 = ("VM to VM in same project via Floating IP and VMs "
+                        "are on different nodes, 1 thread; iperf3")
+        logger.info("Doing '{}' measurement...".format(measurement4))
         result4 = transport1.exec_command(
             'iperf3 -c {} -t {} | grep sender | tail -n 1'.format(
                 vm_info[2]['fip'], iperf_time))
         res4 = (b" ".join(result4.split()[-4:-2:])).decode('utf-8')
         logger.info("Result #4 is {}".format(res4))
-        table_rows.append(['VM to VM in same tenant via Floating IP and VMs '
-                           'are on different nodes, 1 thread; iperf3',
+        table_rows.append([measurement4,
                            "{}".format(pair[0]),
                            "{}".format(pair[1]),
                            "{}, {}".format(mtus[0], mtus[2]),
                            "{}".format(res4)])
 
         # Do iperf3 measurement #5
-        logger.info("Doing 'VM to VM in same tenant, different HW nodes and "
-                    "each VM is connected to separate network which are "
-                    " connected using Router via Private IP, 1 thread' "
-                    "measurement...")
+        measurement5 = ("VM to VM in same project, different HW nodes and "
+                        "each VM is connected to separate network which are "
+                        "connected using Router via Private IP, 1 thread; "
+                        "iperf3")
+        logger.info("Doing '{}' measurement...".format(measurement5))
         result5 = transport1.exec_command(
             'iperf3 -c {} -t {} | grep sender | tail -n 1'.format(
                 vm_info[3]['private_address'], iperf_time))
         res5 = (b" ".join(result5.split()[-4:-2:])).decode('utf-8')
         logger.info("Result #5 is {}".format(res5))
-        table_rows.append(['VM to VM in same tenant, different HW nodes and '
-                           'each VM is connected to separate network which are'
-                           ' connected using Router via Private IP, 1 thread; '
-                           'iperf3',
+        table_rows.append([measurement5,
                            "{}".format(pair[0]),
                            "{}".format(pair[1]),
                            "{}, {}".format(mtus[0], mtus[3]),
diff --git a/tests/test_vm2vm_different_routers.py b/tests/test_vm2vm_different_routers.py
new file mode 100644
index 0000000..8d20090
--- /dev/null
+++ b/tests/test_vm2vm_different_routers.py
@@ -0,0 +1,248 @@
+import logging
+import sys
+import time
+
+import pytest
+from texttable import Texttable
+
+import utils
+from utils import os_client
+from utils import ssh
+
+
+logger = logging.getLogger(__name__)
+
+
+def test_vm2vm_different_project_different_routers(
+        openstack_clients, openstack_alt_clients, pair,
+        os_resources, os_resources_alt_project,
+        record_property):
+    """
+    Simplified Performance Tests VM to VM test in different projects, different
+    networks, different routers, measured via Floating IPs (common floating net):
+    1. Create a new project
+    2. Create a network, router, VM in admin project
+    3. Create a network, router, 2 VMs in the newly created project (on
+       different nodes)
+    4. Associate floating IPs to all 3 VMs
+    5. Connect to each VM via SSH and install iperf3
+    6. Measure VM to VM on same node, in different projects, different network,
+       router, via Floating IP, 1 thread
+    7. Measure VM to VM on same node, in different projects, different network,
+       router, via Floating IP, multiple threads (10 by default)
+    8. Measure VM to VM on different nodes, in different projects, different
+       network, router, via Floating IP, 1 thread
+    9. Measure VM to VM on different nodes, in different projects, different
+       network, router, via Floating IP, multiple threads (10 by default)
+    10. Draw the table with all pairs and results
+    """
+    os_actions = os_client.OSCliActions(openstack_clients)
+    alt_os_actions = os_client.OSCliActions(openstack_alt_clients)
+    config = utils.get_configuration()
+    timeout = int(config.get('nova_timeout', 30))
+    iperf_time = int(config.get('iperf_time', 60))
+    private_key = os_resources['keypair'].private_key
+    ssh_timeout = int(config.get('ssh_timeout', 500))
+    threads = int(config.get('multiple_threads_number', 10))
+    iperf_utility = config.get('multiple_threads_iperf_utility', 'iperf3')
+    custom_mtu = config.get('custom_mtu') or 'default'
+    utils.check_iperf_utility(iperf_utility)
+    result_table = Texttable(max_width=120)
+
+    try:
+        zone1 = [service.zone for service in
+                 openstack_clients.compute.services.list() if
+                 service.host == pair[0]]
+        zone2 = [service.zone for service in
+                 openstack_clients.compute.services.list()
+                 if service.host == pair[1]]
+
+        # create 3 VMs: 1 VM in admin project (zone1), 2 VMs in a separate
+        # project (zone1, zone2)
+        logger.info("Creating 3 VMs...")
+        vm1 = os_actions.create_basic_server(
+            os_resources['image_id'], os_resources['flavor_id'],
+            os_resources['net1'], '{0}:{1}'.format(zone1[0], pair[0]),
+            [os_resources['sec_group']['name']], os_resources['keypair'].name)
+        logger.info("Created VM {} in {} project.".format(
+            vm1.id, openstack_clients.project_name))
+
+        vm2 = alt_os_actions.create_basic_server(
+            os_resources_alt_project['image_id'],
+            os_resources_alt_project['flavor_id'],
+            os_resources_alt_project['net1'],
+            '{0}:{1}'.format(zone1[0], pair[0]),
+            [os_resources_alt_project['sec_group']['name']],
+            os_resources['keypair'].name)
+        logger.info("Created VM {} in {} project.".format(
+            vm2.id, openstack_alt_clients.project_name))
+
+        vm3 = alt_os_actions.create_basic_server(
+            os_resources_alt_project['image_id'],
+            os_resources_alt_project['flavor_id'],
+            os_resources_alt_project['net1'],
+            '{0}:{1}'.format(zone2[0], pair[1]),
+            [os_resources_alt_project['sec_group']['name']],
+            os_resources['keypair'].name)
+        logger.info("Created VM {} in {} project.".format(
+            vm3.id, openstack_alt_clients.project_name))
+
+        vm_info = []
+        vms = []
+        vms.extend([vm1, vm2, vm3])
+        fips = []
+        time.sleep(5)
+
+        # Associate FIPs and check VMs are Active
+        logger.info("Creating Floating IPs and associating them...")
+        fip0 = os_actions.create_floating_ip(os_resources['ext_net']['id'])
+        fip1 = alt_os_actions.create_floating_ip(os_resources['ext_net']['id'])
+        fip2 = alt_os_actions.create_floating_ip(os_resources['ext_net']['id'])
+        fips.extend([fip0, fip1, fip2])
+        os_actions.check_vm_is_active(vms[0].id, timeout=timeout)
+        alt_os_actions.check_vm_is_active(vms[1].id, timeout=timeout)
+        alt_os_actions.check_vm_is_active(vms[2].id, timeout=timeout)
+        vms[0].add_floating_ip(fip0['floating_ip_address'])
+        vms[1].add_floating_ip(fip1['floating_ip_address'])
+        vms[2].add_floating_ip(fip2['floating_ip_address'])
+        for vm, fip in zip(vms, fips):
+            vm_info.append({'vm': vm,
+                            'fip': fip['floating_ip_address']})
+
+        # Set custom MTU if required
+        if os_actions.is_cloud_tf() and (custom_mtu != "default"):
+            logger.info("Setting up custom MTU at network ports...")
+            for vm in vms:
+                os_actions.update_network_port_with_custom_mtu(vm.id,
+                                                               custom_mtu)
+
+        # Check VMs are reachable and prepare iperf3
+        logger.info("Checking VMs are reachable via SSH, getting MTU...")
+        mtus = []
+        transport1 = ssh.SSHTransport(vm_info[0]['fip'], 'ubuntu',
+                                      password='dd', private_key=private_key)
+        logger.info("Checking VMs are reachable via SSH...")
+        for i in range(len(vms)):
+            if transport1.check_vm_is_reachable_ssh(
+                    floating_ip=vm_info[i]['fip'], timeout=ssh_timeout):
+                ssh.prepare_iperf(vm_info[i]['fip'], private_key=private_key)
+                mtus.append(transport1.get_mtu_from_vm(
+                    vm_info[i]['fip'], private_key=private_key))
+        logger.info(
+            "MTU at networks: {}, {}".format(
+                os_resources['net1']['mtu'],
+                os_resources_alt_project['net1']['mtu']))
+        logger.info("MTU at VMs: {}".format(", ".join(mtus)))
+
+        # Prepare the result table and run iperf3
+        table_rows = []
+        table_rows.append(['Test Case', 'Host 1', 'Host 2',
+                           'Project 1', 'Project 2', 'MTU at VMs', 'Result'])
+        # Do iperf3 measurement #1
+        measurement1 = ("VM to VM in different projects, nets, routers on "
+                        "same node via Floating IP, 1 thread; iperf3")
+        logger.info("Doing '{}' measurement...".format(measurement1))
+        result1 = transport1.exec_command(
+            'iperf3 -c {} -t {} | grep sender | tail -n 1'.format(
+                vm_info[1]['fip'], iperf_time))
+        res1 = (b" ".join(result1.split()[-4:-2:])).decode('utf-8')
+        logger.info("Result #1 is {}".format(res1))
+        table_rows.append([measurement1,
+                           "{}".format(pair[0]),
+                           "{}".format(pair[0]),
+                           "{}".format(openstack_clients.project_name),
+                           "{}".format(openstack_alt_clients.project_name),
+                           "{}, {}".format(mtus[0], mtus[1]),
+                           "{}".format(res1)])
+
+        # Do iperf/iperf3 measurement #2
+        measurement2 = ("VM to VM in different projects, nets, routers on "
+                        "same node via Floating IP, {} threads; {}"
+                        "".format(threads, iperf_utility))
+        logger.info("Doing '{}' measurement...".format(measurement2))
+        if iperf_utility == "iperf3":
+            result2 = transport1.exec_command(
+                '{} -c {} -P {} -t {} | grep sender | tail -n 1'.format(
+                    iperf_utility, vm_info[1]['fip'], threads, iperf_time))
+            res2 = (b" ".join(result2.split()[-4:-2:])).decode('utf-8')
+        else:
+            iperf_utility = "iperf"
+            result2 = transport1.exec_command(
+                '{} -c {} -P {} -t {} | tail -n 1'.format(
+                    iperf_utility, vm_info[1]['fip'], threads, iperf_time))
+            res2 = (b" ".join(result2.split()[-2::])).decode('utf-8')
+        logger.info("Result #2 is {}".format(res2))
+        table_rows.append([measurement2,
+                           "{}".format(pair[0]),
+                           "{}".format(pair[0]),
+                           "{}".format(openstack_clients.project_name),
+                           "{}".format(openstack_alt_clients.project_name),
+                           "{}, {}".format(mtus[0], mtus[1]),
+                           "{}".format(res2)])
+
+        # Do iperf3 measurement #3
+        measurement3 = ("VM to VM in different projects, nets, routers on "
+                        "different nodes via Floating IP, 1 thread; iperf3")
+        logger.info("Doing '{}' measurement...".format(measurement3))
+        result3 = transport1.exec_command(
+            'iperf3 -c {} -t {} | grep sender | tail -n 1'.format(
+                vm_info[2]['fip'], iperf_time))
+        res3 = (b" ".join(result3.split()[-4:-2:])).decode('utf-8')
+        logger.info("Result #3 is {}".format(res3))
+        table_rows.append([measurement3,
+                           "{}".format(pair[0]),
+                           "{}".format(pair[1]),
+                           "{}".format(openstack_clients.project_name),
+                           "{}".format(openstack_alt_clients.project_name),
+                           "{}, {}".format(mtus[0], mtus[1]),
+                           "{}".format(res3)])
+
+        # Do iperf/iperf3 measurement #4
+        measurement4 = ("VM to VM in different projects, nets, routers on "
+                        "different nodes via Floating IP, {} threads; {}"
+                        "".format(threads, iperf_utility))
+        logger.info("Doing '{}' measurement...".format(measurement4))
+        if iperf_utility == "iperf3":
+            result4 = transport1.exec_command(
+                '{} -c {} -P {} -t {} | grep sender | tail -n 1'.format(
+                    iperf_utility, vm_info[2]['fip'], threads, iperf_time))
+            res4 = (b" ".join(result4.split()[-4:-2:])).decode('utf-8')
+        else:
+            iperf_utility = "iperf"
+            result4 = transport1.exec_command(
+                '{} -c {} -P {} -t {} | tail -n 1'.format(
+                    iperf_utility, vm_info[2]['fip'], threads, iperf_time))
+            res4 = (b" ".join(result4.split()[-2::])).decode('utf-8')
+        logger.info("Result #4 is {}".format(res4))
+        table_rows.append([measurement4,
+                           "{}".format(pair[0]),
+                           "{}".format(pair[1]),
+                           "{}".format(openstack_clients.project_name),
+                           "{}".format(openstack_alt_clients.project_name),
+                           "{}, {}".format(mtus[0], mtus[1]),
+                           "{}".format(res4)])
+
+        logger.info("Drawing the table with iperf results...")
+        result_table.add_rows(table_rows)
+        sys.stdout.write('\n{}\n'.format(result_table.draw()))
+
+        logger.info("Removing VMs...")
+        for vm in vms:
+            openstack_clients.compute.servers.delete(vm)
+        logger.info("Removing FIPs...")
+        for fip in fips:
+            os_actions.delete_floating_ip(fip['id'])
+    except Exception as e:
+        sys.stdout.write("\n{}".format(e))
+        sys.stdout.write("\nSomething went wrong\n")
+        if 'vms' in locals():
+            logger.info("Removing VMs...")
+            for vm in vms:
+                openstack_clients.compute.servers.delete(vm)
+            if 'fips' in locals():
+                logger.info("Removing FIPs...")
+                for fip in fips:
+                    os_actions.delete_floating_ip(fip['id'])
+        else:
+            sys.stdout.write("\nSkipping cleaning, VMs were not created")
+        pytest.fail("Something went wrong")
diff --git a/utils/__init__.py b/utils/__init__.py
index 8cfe897..190dbd7 100644
--- a/utils/__init__.py
+++ b/utils/__init__.py
@@ -30,7 +30,7 @@
         openstack_clients = os_client.OfficialClientManager(
             username=os.environ['OS_USERNAME'],
             password=os.environ['OS_PASSWORD'],
-            tenant_name=os.environ['OS_PROJECT_NAME'],
+            project_name=os.environ['OS_PROJECT_NAME'],
             auth_url=os.environ['OS_AUTH_URL'],
             cert=False,
             domain=os.environ['OS_PROJECT_DOMAIN_NAME']
diff --git a/utils/os_client.py b/utils/os_client.py
index 9b128c5..aa51732 100644
--- a/utils/os_client.py
+++ b/utils/os_client.py
@@ -33,7 +33,7 @@
         INTERFACE = os.environ["OS_ENDPOINT_TYPE"]
 
     def __init__(self, username=None, password=None,
-                 tenant_name=None, auth_url=None, endpoint_type="internalURL",
+                 project_name=None, auth_url=None, endpoint_type="internalURL",
                  cert=False, domain="Default", **kwargs):
         self.traceback = ""
 
@@ -46,8 +46,7 @@
         ]
         self.username = username
         self.password = password
-        self.tenant_name = tenant_name
-        self.project_name = tenant_name
+        self.project_name = project_name
         self.auth_url = auth_url
         self.endpoint_type = endpoint_type
         self.cert = cert
@@ -63,16 +62,16 @@
 
     @classmethod
     def _get_auth_session(cls, username=None, password=None,
-                          tenant_name=None, auth_url=None, cert=None,
+                          project_name=None, auth_url=None, cert=None,
                           domain='Default'):
-        if None in (username, password, tenant_name):
-            sys.stdout.write((username, password, tenant_name))
+        if None in (username, password, project_name):
+            sys.stdout.write(str((username, password, project_name)))
             msg = ("Missing required credentials for identity client. "
                    "username: {username}, password: {password}, "
-                   "tenant_name: {tenant_name}").format(
+                   "project_name: {project_name}").format(
                 username=username,
                 password=password,
-                tenant_name=tenant_name
+                project_name=project_name
             )
             raise msg
 
@@ -93,7 +92,7 @@
                 username=username,
                 password=password,
                 project_domain_name=domain,
-                project_name=tenant_name)
+                project_name=project_name)
 
         auth_session = keystone_session.Session(auth=auth, verify=cert)
         # auth_session.get_auth_headers()
@@ -101,12 +100,12 @@
 
     @classmethod
     def get_auth_client(cls, username=None, password=None,
-                        tenant_name=None, auth_url=None, cert=None,
+                        project_name=None, auth_url=None, cert=None,
                         domain='Default', **kwargs):
         session = cls._get_auth_session(
             username=username,
             password=password,
-            tenant_name=tenant_name,
+            project_name=project_name,
             auth_url=auth_url,
             cert=cert,
             domain=domain)
@@ -117,10 +116,10 @@
 
     @classmethod
     def get_compute_client(cls, username=None, password=None,
-                           tenant_name=None, auth_url=None, cert=None,
+                           project_name=None, auth_url=None, cert=None,
                            domain='Default', **kwargs):
         session = cls._get_auth_session(
-            username=username, password=password, tenant_name=tenant_name,
+            username=username, password=password, project_name=project_name,
             auth_url=auth_url, cert=cert, domain=domain)
         service_type = 'compute'
         compute_client = novaclient.Client(
@@ -130,10 +129,10 @@
 
     @classmethod
     def get_network_client(cls, username=None, password=None,
-                           tenant_name=None, auth_url=None, cert=None,
+                           project_name=None, auth_url=None, cert=None,
                            domain='Default', **kwargs):
         session = cls._get_auth_session(
-            username=username, password=password, tenant_name=tenant_name,
+            username=username, password=password, project_name=project_name,
             auth_url=auth_url, cert=cert, domain=domain)
         service_type = 'network'
         return neutron_client.Client(
@@ -142,10 +141,10 @@
 
     @classmethod
     def get_volume_client(cls, username=None, password=None,
-                          tenant_name=None, auth_url=None, cert=None,
+                          project_name=None, auth_url=None, cert=None,
                           domain='Default', **kwargs):
         session = cls._get_auth_session(
-            username=username, password=password, tenant_name=tenant_name,
+            username=username, password=password, project_name=project_name,
             auth_url=auth_url, cert=cert, domain=domain)
         service_type = 'volume'
         return cinder_client.Client(
@@ -156,10 +155,10 @@
 
     @classmethod
     def get_image_client(cls, username=None, password=None,
-                         tenant_name=None, auth_url=None, cert=None,
+                         project_name=None, auth_url=None, cert=None,
                          domain='Default', **kwargs):
         session = cls._get_auth_session(
-            username=username, password=password, tenant_name=tenant_name,
+            username=username, password=password, project_name=project_name,
             auth_url=auth_url, cert=cert, domain=domain)
         service_type = 'image'
         return glance_client.Client(
@@ -172,7 +171,7 @@
     def auth(self):
         if self._auth is None:
             self._auth = self.get_auth_client(
-                self.username, self.password, self.tenant_name, self.auth_url,
+                self.username, self.password, self.project_name, self.auth_url,
                 self.cert, self.domain, endpoint_type=self.endpoint_type
             )
         return self._auth
@@ -181,7 +180,7 @@
     def compute(self):
         if self._compute is None:
             self._compute = self.get_compute_client(
-                self.username, self.password, self.tenant_name, self.auth_url,
+                self.username, self.password, self.project_name, self.auth_url,
                 self.cert, self.domain, endpoint_type=self.endpoint_type
             )
         return self._compute
@@ -190,7 +189,7 @@
     def network(self):
         if self._network is None:
             self._network = self.get_network_client(
-                self.username, self.password, self.tenant_name, self.auth_url,
+                self.username, self.password, self.project_name, self.auth_url,
                 self.cert, self.domain, endpoint_type=self.endpoint_type
             )
         return self._network
@@ -199,7 +198,7 @@
     def volume(self):
         if self._volume is None:
             self._volume = self.get_volume_client(
-                self.username, self.password, self.tenant_name, self.auth_url,
+                self.username, self.password, self.project_name, self.auth_url,
                 self.cert, self.domain, endpoint_type=self.endpoint_type
             )
         return self._volume
@@ -209,7 +208,7 @@
 
         if self._image is None:
             self._image = self.get_image_client(
-                self.username, self.password, self.tenant_name, self.auth_url,
+                self.username, self.password, self.project_name, self.auth_url,
                 self.cert, self.domain
             )
         return self._image
@@ -220,16 +219,15 @@
         self.os_clients = os_clients
         self.create_fake_ext_net = False
 
-    def get_admin_tenant(self):
-        # TODO Keystone v3 doesnt have tenants attribute
-        return self.os_clients.auth.projects.find(name="admin")
+    def get_project_by_name(self, name):
+        return self.os_clients.auth.projects.find(name=name)
 
     def get_internal_network(self):
         networks = [
             net for net in self.os_clients.network.list_networks()["networks"]
             if net["admin_state_up"] and not net["router:external"] and
             len(net["subnets"])
-            ]
+        ]
         if networks:
             net = networks[0]
         else:
@@ -276,7 +274,7 @@
                 self.os_clients.network.list_networks()["networks"]
                 if net["admin_state_up"] and net["router:external"] and
                 len(net["subnets"])
-                ]
+            ]
         else:
             networks = [net for net in
                         self.os_clients.network.list_networks()["networks"]
@@ -284,7 +282,7 @@
 
         if networks:
             ext_net = networks[0]
-            logger.info("Using external net '{}'.".format(ext_net["name"]))
+            logger.info("Using external net '{}'".format(ext_net["name"]))
         else:
             ext_net = self.create_fake_external_network()
         return ext_net
@@ -390,14 +388,14 @@
                     vm_uuid=vm_uuid, expected_state=expected_state,
                     actual=vm.status))
 
-    def create_network(self, tenant_id):
+    def create_network(self, project_id):
         net_name = "spt-test-net-{}".format(random.randrange(100, 999))
         config = utils.get_configuration()
         mtu = config.get('custom_mtu') or 'default'
         net_body = {
             'network': {
                 'name': net_name,
-                'tenant_id': tenant_id
+                'project_id': project_id
             }
         }
         if mtu != 'default':
@@ -412,10 +410,11 @@
         # default, so this blocked running tests at TF envs with default MTU
         if 'mtu' not in net:
             net['mtu'] = None
-        logger.info("Created internal network {}".format(net_name))
+        logger.info("Created internal network {} in {} project".format(
+            net_name, project_id))
         return net
 
-    def create_subnet(self, net, tenant_id, cidr=None):
+    def create_subnet(self, net, project_id, cidr=None):
         subnet_name = "spt-test-subnet-{}".format(random.randrange(100, 999))
         subnet_body = {
             'subnet': {
@@ -423,14 +422,15 @@
                 'network_id': net['id'],
                 'ip_version': 4,
                 'cidr': cidr if cidr else '10.1.7.0/24',
-                'tenant_id': tenant_id
+                'project_id': project_id
             }
         }
         subnet = self.os_clients.network.create_subnet(subnet_body)['subnet']
-        logger.info("Created subnet {}".format(subnet_name))
+        logger.info("Created subnet {} in {} project".format(
+            subnet_name, project_id))
         return subnet
 
-    def create_router(self, ext_net, tenant_id):
+    def create_router(self, ext_net, project_id):
         name = 'spt-test-router-{}'.format(random.randrange(100, 999))
         router_body = {
             'router': {
@@ -438,18 +438,19 @@
                 'external_gateway_info': {
                     'network_id': ext_net['id']
                 },
-                'tenant_id': tenant_id
+                'project_id': project_id
             }
         }
-        logger.info("Created a router {}".format(name))
+        logger.info("Creating a router {} in {} project".format(
+            name, project_id))
         router = self.os_clients.network.create_router(router_body)['router']
         return router
 
-    def create_network_resources(self):
-        tenant_id = self.get_admin_tenant().id
+    def create_network_resources(self, project="admin", cidr=None):
+        project_id = self.get_project_by_name(project).id
         self.get_external_network()
-        net = self.create_network(tenant_id)
-        self.create_subnet(net, tenant_id)
+        net = self.create_network(project_id)
+        self.create_subnet(net, project_id, cidr)
         return net
 
     def list_nova_computes(self):
@@ -470,6 +471,47 @@
                   "".format(floatingip_id, e)
             logger.info(msg)
 
+    def create_project(self):
+        project_name = "spt-test-project-{}".format(random.randrange(100, 999))
+        project = self.os_clients.auth.projects.create(
+            name=project_name, domain=self.os_clients.domain,
+            description="Mirantis SPT test project")
+        logger.info("Created a project {}, uuid: {}".format(
+            project.name, project.id))
+        return project
+
+    def add_roles_to_user_in_project(self, project_id, username='admin',
+                                     domain='default', roles=None):
+        user_id = [
+            user.id for user in self.os_clients.auth.users.list()
+            if (user.name == username) and (user.domain_id == domain)][0]
+        if roles is None:
+            roles = ["admin", "member", "creator"]
+        for role in roles:
+            try:
+                role_id = self.os_clients.auth.roles.list(name=role)[0].id
+                self.os_clients.auth.roles.grant(
+                    role=role_id, user=user_id, project=project_id)
+            except Exception as e:
+                logger.warning("Could not grant role '{}': {}".format(role, e))
+        logger.info("Added admin user to {} project".format(project_id))
+
+    def is_project_empty(self, project_id):
+        sec_groups = [i for i in self.os_clients.network.list_security_groups(
+            tenant_id=project_id)['security_groups'] if i['name'] != 'default']
+        servers = self.os_clients.compute.servers.list(
+            search_opts={'project_id': project_id})
+        nets = self.os_clients.network.list_networks(
+            project_id=project_id)["networks"]
+        subnets = self.os_clients.network.list_subnets(
+            project_id=project_id)["subnets"]
+        ports = self.os_clients.network.list_ports(
+            project_id=project_id)["ports"]
+        routers = self.os_clients.network.list_routers(
+            project_id=project_id)["routers"]
+        resources = [*sec_groups, *servers, *nets, *subnets, *ports, *routers]
+        return not resources
+
     def is_cloud_tf(self):
         # Detect the TF cloud by assuming it does not have any neutron
         # agents (404 in response)
@@ -492,3 +534,10 @@
                             "See detailed error: {}".format(e))
         logger.info("The port {} is updated with custom MTU {}."
                     "".format(port_uuid, custom_mtu))
+
+    def get_flavor_id_by_name(self, name):
+        flavors = self.os_clients.compute.flavors.list()
+        flavor_id = [f.id for f in flavors if f.name == name]
+        if not flavor_id:
+            return None
+        return str(flavor_id[0])
diff --git a/utils/ssh.py b/utils/ssh.py
index abbd89f..e9e5f3a 100644
--- a/utils/ssh.py
+++ b/utils/ssh.py
@@ -204,13 +204,14 @@
             home_ubuntu = "/home/ubuntu/"
             transport.put_iperf3_deb_packages_at_vms(path_to_iperf_deb,
                                                      home_ubuntu)
-            transport.exec_command('sudo dpkg -i {}*.deb'.format(home_ubuntu))
+            exit_status, stdout, stderr = transport.exec_sync(
+                'sudo dpkg -i {}*.deb'.format(home_ubuntu))
         else:
             logger.info("Installing iperf, iperf3 using apt")
             preparation_cmd = config.get('iperf_prep_string') or ['']
             transport.exec_command(preparation_cmd)
-            transport.exec_command('sudo apt-get update;'
-                                   'sudo apt-get install -y iperf3 iperf')
+            exit_status, stdout, stderr = transport.exec_sync(
+                'sudo apt-get update && sudo apt-get install -y iperf3 iperf')
 
         # Log whether iperf is installed with version
         check = transport.exec_command('dpkg -l | grep ii | grep iperf3')
@@ -221,8 +222,9 @@
             else:
                 info = "Could not put offline iperf packages from {} to the " \
                        "VM.".format(path_to_iperf_deb)
-            raise BaseException("iperf3 is not installed at VM with FIP {}. {}"
-                                "".format(fip, info))
+            raise BaseException("iperf3 is not installed at VM with FIP {}. "
+                                "{}.\nStdout, stderr at VM:\n{}\n{}"
+                                "".format(fip, info, stdout, stderr))
         # Staring iperf server
         transport.exec_command('nohup iperf3 -s > file 2>&1 &')
         transport.exec_command('nohup iperf -s > file 2>&1 &')