import logging
import sys
import time

import pytest
from texttable import Texttable

import utils
from utils import helpers
from utils import os_client
from utils import ssh


logger = logging.getLogger(__name__)


def test_vm2vm_different_project_different_routers(
        openstack_clients, openstack_alt_clients, pair,
        os_resources, os_resources_alt_project, request, html_report):
| """ |
| Simplified Performance Tests VM to VM test in different projects, different |
| networks, different routers, measure by Floating IPs (common floating net): |
| 1. Create a new project |
| 2. Create a network, router, VM in admin project |
| 3. Create a network, router, 2 VMs in the newly created project (on |
| different nodes) |
| 4. Associate floating IPs to all 3 VMs |
| 5. Connect to each VM via SSH and install iperf3 |
| 6. Measure VM to VM on same node, in different projects, different network, |
| router, via Floating IP, 1 thread |
| 7. Measure VM to VM on same node, in different projects, different network, |
| router, via Floating IP, multiple threads (10 by default) |
| 8. Measure VM to VM on different nodes, in different projects, different |
| network, router, via Floating IP, 1 thread |
| 9. Measure VM to VM on different nodes, in different projects, different |
| network, router, via Floating IP, multiple threads (10 by default) |
| 10. Draw the table with all pairs and results |
| """ |
    os_actions = os_client.OSCliActions(openstack_clients)
    alt_os_actions = os_client.OSCliActions(openstack_alt_clients)
    config = utils.get_configuration()
    timeout = int(config.get('nova_timeout', 30))
    iperf_time = int(config.get('iperf_time', 60))
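    # The keypair from the admin project resources is used for all three VMs
    # below, so this single private key gives SSH access to each of them.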
    private_key = os_resources['keypair'].private_key
    ssh_timeout = int(config.get('ssh_timeout', 500))
    threads = int(config.get('multiple_threads_number', 10))
    iperf_utility = config.get('multiple_threads_iperf_utility', 'iperf3')
    custom_mtu = config.get('custom_mtu') or 'default'
    utils.check_iperf_utility(iperf_utility)
    result_table = Texttable(max_width=120)

    try:
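        # Look up the availability zone of each compute host in the pair;
        # the zone:host strings are used below to pin VMs to specific nodes.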
        zone1 = [service.zone for service in
                 openstack_clients.compute.services.list()
                 if service.host == pair[0]]
        zone2 = [service.zone for service in
                 openstack_clients.compute.services.list()
                 if service.host == pair[1]]

        # create 3 VMs: 1 VM in admin project (zone1), 2 VMs in a separate
        # project (zone1, zone2)
        logger.info("Creating 3 VMs...")
        vm1 = os_actions.create_basic_server(
            os_resources['image_id'], os_resources['flavor_id'],
            os_resources['net1'], '{0}:{1}'.format(zone1[0], pair[0]),
            [os_resources['sec_group']['name']], os_resources['keypair'].name)
        logger.info("Created VM {} in {} project.".format(
            vm1.id, openstack_clients.project_name))

        vm2 = alt_os_actions.create_basic_server(
            os_resources_alt_project['image_id'],
            os_resources_alt_project['flavor_id'],
            os_resources_alt_project['net1'],
            '{0}:{1}'.format(zone1[0], pair[0]),
            [os_resources_alt_project['sec_group']['name']],
            os_resources['keypair'].name)
        logger.info("Created VM {} in {} project.".format(
            vm2.id, openstack_alt_clients.project_name))

        vm3 = alt_os_actions.create_basic_server(
            os_resources_alt_project['image_id'],
            os_resources_alt_project['flavor_id'],
            os_resources_alt_project['net1'],
            '{0}:{1}'.format(zone2[0], pair[1]),
            [os_resources_alt_project['sec_group']['name']],
            os_resources['keypair'].name)
        logger.info("Created VM {} in {} project.".format(
            vm3.id, openstack_alt_clients.project_name))

        vm_info = []
        vms = []
        vms.extend([vm1, vm2, vm3])
        fips = []
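        # Short pause after server creation before the floating IPs are
        # created, the servers are checked for active state and the FIPs
        # are associated.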
        time.sleep(5)

        # Associate FIPs and check VMs are Active
        logger.info("Creating Floating IPs and associating them...")
        fip0 = os_actions.create_floating_ip(os_resources['ext_net']['id'])
        fip1 = alt_os_actions.create_floating_ip(os_resources['ext_net']['id'])
        fip2 = alt_os_actions.create_floating_ip(os_resources['ext_net']['id'])
        fips.extend([fip0, fip1, fip2])
        os_actions.check_vm_is_active(vms[0].id, timeout=timeout)
        alt_os_actions.check_vm_is_active(vms[1].id, timeout=timeout)
        alt_os_actions.check_vm_is_active(vms[2].id, timeout=timeout)
        vms[0].add_floating_ip(fip0['floating_ip_address'])
        vms[1].add_floating_ip(fip1['floating_ip_address'])
        vms[2].add_floating_ip(fip2['floating_ip_address'])
        for i in range(len(vms)):
            vm_info.append({'vm': vms[i],
                            'fip': fips[i]['floating_ip_address']})

        # Set custom MTU if required
        if os_actions.is_cloud_tf() and (custom_mtu != "default"):
            logger.info("Setting up custom MTU at network ports...")
            for vm in vms:
                os_actions.update_network_port_with_custom_mtu(vm.id,
                                                               custom_mtu)

        # Check VMs are reachable and prepare iperf3
        logger.info("Checking VMs are reachable via SSH, getting MTU...")
        mtus = []
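        # A single SSH transport to the first VM is opened here; its helpers
        # are then used to check reachability and MTU of every VM by floating
        # IP, and the iperf client runs are executed from this VM.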
        transport1 = ssh.SSHTransport(vm_info[0]['fip'], 'ubuntu',
                                      password='dd', private_key=private_key)
        for i in range(len(vms)):
            if transport1.check_vm_is_reachable_ssh(
                    floating_ip=vm_info[i]['fip'], timeout=ssh_timeout):
                ssh.IperfAtVM(
                    vm_info[i]['fip'], private_key=private_key)
                mtus.append(transport1.get_mtu_from_vm(
                    vm_info[i]['fip'], private_key=private_key))
        logger.info(
            "MTU at networks: {}, {}".format(
                os_resources['net1']['mtu'],
                os_resources_alt_project['net1']['mtu']))
        logger.info("MTU at VMs: {}".format(", ".join(mtus)))

        # Prepare the result table and run iperf3
        table_rows = []
        table_rows.append(['Test Case', 'Host 1', 'Host 2',
                           'Project 1', 'Project 2', 'MTU at VMs', 'Result'])
        # Do iperf3 measurement #1
        measurement1 = ("VM to VM in different projects, nets, routers on "
                        "same node via Floating IP, 1 thread; iperf3")
        logger.info("Doing '{}' measurement...".format(measurement1))
        result1 = transport1.exec_command(
            'iperf3 -c {} -t {} | grep sender | tail -n 1'.format(
                vm_info[1]['fip'], iperf_time))
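        # The piped command keeps only the iperf3 "sender" summary line; the
        # slice below picks the bandwidth value and its unit out of that line.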
        res1 = (b" ".join(result1.split()[-4:-2])).decode('utf-8')
        logger.info("Result #1 is {}".format(res1))
        table_rows.append([measurement1,
                           "{}".format(pair[0]),
                           "{}".format(pair[0]),
                           "{}".format(openstack_clients.project_name),
                           "{}".format(openstack_alt_clients.project_name),
                           "{}, {}".format(mtus[0], mtus[1]),
                           "{}".format(res1)])

        # Do iperf/iperf3 measurement #2
        measurement2 = ("VM to VM in different projects, nets, routers on "
                        "same node via Floating IP, {} threads; {}"
                        "".format(threads, iperf_utility))
        logger.info("Doing '{}' measurement...".format(measurement2))
        if iperf_utility == "iperf3":
            result2 = transport1.exec_command(
                '{} -c {} -P {} -t {} | grep sender | tail -n 1'.format(
                    iperf_utility, vm_info[1]['fip'], threads, iperf_time))
            res2 = (b" ".join(result2.split()[-4:-2])).decode('utf-8')
        else:
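            # Plain iperf output is parsed differently: only the last line is
            # kept and its final two fields (bandwidth value and unit) taken.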
            iperf_utility = "iperf"
            result2 = transport1.exec_command(
                '{} -c {} -P {} -t {} | tail -n 1'.format(
                    iperf_utility, vm_info[1]['fip'], threads, iperf_time))
            res2 = (b" ".join(result2.split()[-2:])).decode('utf-8')
        logger.info("Result #2 is {}".format(res2))
        table_rows.append([measurement2,
                           "{}".format(pair[0]),
                           "{}".format(pair[0]),
                           "{}".format(openstack_clients.project_name),
                           "{}".format(openstack_alt_clients.project_name),
                           "{}, {}".format(mtus[0], mtus[1]),
                           "{}".format(res2)])

        # Do iperf3 measurement #3
        measurement3 = ("VM to VM in different projects, nets, routers on "
                        "different nodes via Floating IP, 1 thread; iperf3")
        logger.info("Doing '{}' measurement...".format(measurement3))
        result3 = transport1.exec_command(
            'iperf3 -c {} -t {} | grep sender | tail -n 1'.format(
                vm_info[2]['fip'], iperf_time))
        res3 = (b" ".join(result3.split()[-4:-2])).decode('utf-8')
        logger.info("Result #3 is {}".format(res3))
        table_rows.append([measurement3,
                           "{}".format(pair[0]),
                           "{}".format(pair[1]),
                           "{}".format(openstack_clients.project_name),
                           "{}".format(openstack_alt_clients.project_name),
                           "{}, {}".format(mtus[0], mtus[1]),
                           "{}".format(res3)])

        # Do iperf/iperf3 measurement #4
        measurement4 = ("VM to VM in different projects, nets, routers on "
                        "different nodes via Floating IP, {} threads; {}"
                        "".format(threads, iperf_utility))
        logger.info("Doing '{}' measurement...".format(measurement4))
        if iperf_utility == "iperf3":
            result4 = transport1.exec_command(
                '{} -c {} -P {} -t {} | grep sender | tail -n 1'.format(
                    iperf_utility, vm_info[2]['fip'], threads, iperf_time))
            res4 = (b" ".join(result4.split()[-4:-2])).decode('utf-8')
        else:
            iperf_utility = "iperf"
            result4 = transport1.exec_command(
                '{} -c {} -P {} -t {} | tail -n 1'.format(
                    iperf_utility, vm_info[2]['fip'], threads, iperf_time))
            res4 = (b" ".join(result4.split()[-2:])).decode('utf-8')
        logger.info("Result #4 is {}".format(res4))
        table_rows.append([measurement4,
                           "{}".format(pair[0]),
                           "{}".format(pair[1]),
                           "{}".format(openstack_clients.project_name),
                           "{}".format(openstack_alt_clients.project_name),
                           "{}, {}".format(mtus[0], mtus[1]),
                           "{}".format(res4)])

        logger.info("Drawing the table with iperf results...")
        result_table.add_rows(table_rows)
        sys.stdout.write('\n{}\n'.format(result_table.draw()))

        # Send the results to a CSV file in the reports/ directory
        helpers.create_test_result_table_csv_file(
            table_rows, request.node.name)

        logger.info("Removing VMs...")
        for vm in vms:
            openstack_clients.compute.servers.delete(vm)
        logger.info("Removing FIPs...")
        for fip in fips:
            os_actions.delete_floating_ip(fip['id'])
    except Exception as e:
        sys.stdout.write("\n{}".format(e))
        sys.stdout.write("\nSomething went wrong\n")
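        # Best-effort cleanup: delete any servers and floating IPs that were
        # created before the failure, then fail the test.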
        if 'vms' in locals():
            logger.info("Removing VMs...")
            for vm in vms:
                openstack_clients.compute.servers.delete(vm)
            if 'fips' in locals():
                logger.info("Removing FIPs...")
                for fip in fips:
                    os_actions.delete_floating_ip(fip['id'])
        else:
            sys.stdout.write("\nSkipping cleanup, VMs were not created")
        pytest.fail("Something went wrong")