Reworked SPT tests for running at MOS inside rally pod

These SPT tests are taken from the CVP-SPT, but reworked
to test MOS inside rally pod.

Here is the list of changes since CVP-SPT:
* Switched to Python3
* Removed all Salt related code
* Removed HW2HW test
* Default global_config.yaml is suitable for MOS
* Switched to iperf3
* Added smart waiters for VMs to be Active, VMs to be reachable by FIPs
* Extended pytest.ini file with logging settings
* Added lots of loggers at info level to understand what happens during the test run
* Extended & fixed README with the actual instructions
* Ability to use iperf3 even if there is no Internet access on the VMs
* Fixed the coding style according to PEP 8
* Various small fixes, enhancements

Change-Id: I31a1b8c8c827133d144377031c6f546d8c82a47d
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/__init__.py
diff --git a/tests/test_glance.py b/tests/test_glance.py
new file mode 100644
index 0000000..460d900
--- /dev/null
+++ b/tests/test_glance.py
@@ -0,0 +1,119 @@
+import logging
+import random
+import subprocess
+import time
+
+import pytest
+
+import utils
+
+logger = logging.getLogger(__name__)
+
+
+def is_parsable(value, to):
+    """
+    Check if value can be converted into some type
+    :param value:  input value that should be converted
+    :param to: type of output value like int, float. It's not a string!
+    :return: bool
+    """
+    try:
+        to(value)
+    except (TypeError, ValueError):
+        # Only conversion errors mean "not parsable"; a bare except
+        # would also swallow KeyboardInterrupt/SystemExit.
+        return False
+    return True
+
+
+@pytest.fixture
+def create_image():
+    """Create a zero-filled local file to be uploaded as a Glance image.
+
+    Yields True if the file was created successfully, False otherwise.
+    Teardown removes the file and the downloaded artifact.
+    """
+    image_size_megabytes = utils.get_configuration().get("IMAGE_SIZE_MB", 9000)
+    create_file_cmdline = 'dd if=/dev/zero of=/tmp/image_mk_framework.dd ' \
+                          'bs=1M count={} 2>/dev/null' \
+                          ''.format(image_size_megabytes)
+    is_cmd_successful = subprocess.call(create_file_cmdline, shell=True) == 0
+    # Log the real outcome instead of claiming success unconditionally
+    if is_cmd_successful:
+        logger.info("Created local image file /tmp/image_mk_framework.dd")
+    else:
+        logger.info("Failed to create local image file "
+                    "/tmp/image_mk_framework.dd")
+    yield is_cmd_successful
+
+    # teardown
+    logger.info("Deleting /tmp/image_mk_framework.dd file")
+    subprocess.call('rm -f /tmp/image_mk_framework.dd', shell=True)
+    subprocess.call('rm -f /tmp/image_mk_framework.download', shell=True)
+
+
+def test_speed_glance(create_image, openstack_clients, record_property):
+    """
+    Simplified Performance Tests Download / upload Glance
+    1. Create file with random data (dd)
+    2. Upload data as image to glance.
+    3. Download image.
+    4. Measure download/upload speed and print them into stdout
+    """
+    # Same default as in the create_image fixture, so both stay
+    # consistent when IMAGE_SIZE_MB is not set in the configuration
+    image_size_megabytes = utils.get_configuration().get("IMAGE_SIZE_MB", 9000)
+    if not is_parsable(image_size_megabytes, int):
+        pytest.fail("Can't convert IMAGE_SIZE_MB={} to 'int'".format(
+            image_size_megabytes))
+    image_size_megabytes = int(image_size_megabytes)
+    if not create_image:
+        pytest.skip("Can't create image, maybe there is lack of disk "
+                    "space to create file {}MB".
+                    format(image_size_megabytes))
+    image_name = "spt-test-image-{}".format(random.randrange(100, 999))
+    try:
+        image = openstack_clients.image.images.create(
+            name=image_name,
+            disk_format='iso',
+            container_format='bare')
+        logger.info("Created an image {} in Glance.".format(image_name))
+    except Exception as e:
+        # Exception (not BaseException) keeps Ctrl-C and SystemExit alive
+        logger.info("Could not create image in Glance. See details: {}"
+                    "".format(e))
+        pytest.fail("Can't create image in Glance. Occurred error: {}"
+                    "".format(e))
+
+    logger.info("Testing upload file speed...")
+    start_time = time.time()
+    try:
+        # 'with' guarantees the file handle is closed even if upload fails
+        with open("/tmp/image_mk_framework.dd", 'rb') as image_data:
+            openstack_clients.image.images.upload(
+                image.id, image_data=image_data)
+    except Exception as e:
+        pytest.fail("Can't upload image in Glance. "
+                    "Occurred error: {}".format(e))
+    end_time = time.time()
+
+    speed_upload = image_size_megabytes / (end_time - start_time)
+
+    logger.info("Testing download file speed...")
+    start_time = time.time()
+    with open("/tmp/image_mk_framework.download", 'wb') as image_file:
+        for item in openstack_clients.image.images.data(image.id):
+            image_file.write(item)
+    end_time = time.time()
+
+    speed_download = image_size_megabytes / (end_time - start_time)
+    openstack_clients.image.images.delete(image.id)
+    # Log after the deletion actually happened
+    logger.info("Deleted image {}.".format(image.id))
+    record_property("Upload", speed_upload)
+    record_property("Download", speed_download)
+
+    print("++++++++++++++++++++++++++++++++++++++++")
+    print(('upload - {} MB/s'.format(speed_upload)))
+    print(('download - {} MB/s'.format(speed_download)))
+    print("++++++++++++++++++++++++++++++++++++++++")
diff --git a/tests/test_vm2vm.py b/tests/test_vm2vm.py
new file mode 100644
index 0000000..19b6abf
--- /dev/null
+++ b/tests/test_vm2vm.py
@@ -0,0 +1,157 @@
+import logging
+import time
+
+import pytest
+from texttable import Texttable
+
+import utils
+from utils import os_client
+from utils import ssh
+
+
+logger = logging.getLogger(__name__)
+
+
+def _cleanup(openstack_clients, vms, fips):
+    """Best-effort removal of the VMs and Floating IPs from the test."""
+    if not vms and not fips:
+        logger.info("Skipping cleaning, no resources were created")
+        return
+    logger.info("Removing VMs and FIPs...")
+    for vm in vms:
+        openstack_clients.compute.servers.delete(vm)
+    logger.info("Removing FIPs...")
+    for fip in fips:
+        openstack_clients.compute.floating_ips.delete(fip)
+
+
+def _measure_iperf(transport, address, iperf_time, threads=1):
+    """Run iperf3 towards *address* and return the sender bandwidth
+    as a '<value> <units>' string taken from the iperf3 summary line.
+    """
+    threads_arg = '-P {} '.format(threads) if threads > 1 else ''
+    raw = transport.exec_command(
+        'iperf3 -c {} {}-t {} | grep sender | tail -n 1'.format(
+            address, threads_arg, iperf_time))
+    return (b" ".join(raw.split()[-4:-2:])).decode('utf-8')
+
+
+def test_vm2vm(openstack_clients, pair, os_resources, record_property):
+    """
+    Simplified Performance Tests VM to VM test in different topologies
+    1. Create 4 VMs admin project
+    2. Associate floating IPs to the VMs
+    3. Connect to each VM via SSH and install iperf3
+    4. Measure VM to VM on same node via Private IP, 1 thread
+    5. Measure VM to VM on different HW nodes via Private IP, 1 thread
+    6. Measure VM to VM on different HW nodes via Private IP, 10 threads
+    7. Measure VM to VM on different HW nodes via Floating IP, 1 thread
+    8. Measure VM to VM on different HW nodes, each VM is in separate network,
+       the networks are connected using Router via Private IP, 1 thread
+    9. Draw the table with all pairs and results
+    """
+    os_actions = os_client.OSCliActions(openstack_clients)
+    config = utils.get_configuration()
+    timeout = int(config.get('nova_timeout', 30))
+    iperf_time = int(config.get('iperf_time', 60))
+    private_key = os_resources['keypair'].private_key
+    ssh_timeout = int(config.get('ssh_timeout', 500))
+    result_table = Texttable()
+
+    # Track created resources here so the 'finally' block can always
+    # clean up exactly what exists, without duplicated deletion code.
+    vms = []
+    fips = []
+    try:
+        zone1 = [service.zone for service in
+                 openstack_clients.compute.services.list() if
+                 service.host == pair[0]]
+        zone2 = [service.zone for service in
+                 openstack_clients.compute.services.list()
+                 if service.host == pair[1]]
+
+        # create 4 VMs: two on the first node in net1, one on the
+        # second node in net1, one on the second node in separate net2
+        logger.info("Creating 4 VMs...")
+        placements = [
+            (os_resources['net1'], zone1[0], pair[0]),
+            (os_resources['net1'], zone1[0], pair[0]),
+            (os_resources['net1'], zone2[0], pair[1]),
+            (os_resources['net2'], zone2[0], pair[1]),
+        ]
+        for net, zone, host in placements:
+            vm = os_actions.create_basic_server(
+                os_resources['image_id'], os_resources['flavor_id'],
+                net, '{0}:{1}'.format(zone, host),
+                [os_resources['sec_group'].name],
+                os_resources['keypair'].name)
+            logger.info("Created VM {}.".format(vm.id))
+            vms.append(vm)
+
+        vm_info = []
+        time.sleep(5)
+
+        # Associate FIPs and check VMs are Active
+        logger.info("Creating Floating IPs and associating them...")
+        for i in range(4):
+            fip = openstack_clients.compute.floating_ips.create(
+                os_resources['ext_net']['name'])
+            fips.append(fip.id)
+            os_actions.check_vm_is_active(vms[i].id, timeout=timeout)
+            vms[i].add_floating_ip(fip)
+            private_address = vms[i].addresses[
+                list(vms[i].addresses.keys())[0]][0]['addr']
+            vm_info.append({'vm': vms[i], 'fip': fip.ip,
+                            'private_address': private_address})
+
+        # Check VMs are reachable and prepare iperf3
+        transport1 = ssh.SSHTransport(vm_info[0]['fip'], 'ubuntu',
+                                      password='dd',
+                                      private_key=private_key)
+        logger.info("Checking VMs are reachable via SSH...")
+        for i in range(4):
+            if transport1.check_vm_is_reachable_ssh(
+                    floating_ip=vm_info[i]['fip'], timeout=ssh_timeout):
+                ssh.prepare_iperf(vm_info[i]['fip'],
+                                  private_key=private_key)
+
+        # Scenario: (description, iperf3 target, threads, second host)
+        scenarios = [
+            ('VM to VM in same tenant on same node via Private IP, '
+             '1 thread', vm_info[1]['private_address'], 1, pair[0]),
+            ('VM to VM in same tenant on different HW nodes via '
+             'Private IP, 1 thread',
+             vm_info[2]['private_address'], 1, pair[1]),
+            ('VM to VM in same tenant on different HW nodes via '
+             'Private IP, 10 threads',
+             vm_info[2]['private_address'], 10, pair[1]),
+            ('VM to VM in same tenant via Floating IP and VMs are on '
+             'different nodes, 1 thread', vm_info[2]['fip'], 1, pair[1]),
+            ('VM to VM in same tenant, different HW nodes and each VM '
+             'is connected to separate network which are connected '
+             'using Router via Private IP, 1 thread',
+             vm_info[3]['private_address'], 1, pair[1]),
+        ]
+
+        table_rows = [['Test Case', 'Host 1', 'Host 2', 'Result']]
+        for num, (descr, address, threads, host2) in enumerate(
+                scenarios, start=1):
+            logger.info("Doing '{}' measurement...".format(descr))
+            result = _measure_iperf(transport1, address, iperf_time,
+                                    threads)
+            logger.info("Result #{} is {}".format(num, result))
+            table_rows.append([descr,
+                               "{}".format(pair[0]),
+                               "{}".format(host2),
+                               "{}".format(result)])
+
+        logger.info("Drawing the table with iperf results...")
+        result_table.add_rows(table_rows)
+        print((result_table.draw()))
+    except Exception as e:
+        # logger.exception keeps the traceback which the original
+        # bare print(e) discarded
+        logger.exception("Something went wrong: {}".format(e))
+        pytest.fail("Something went wrong")
+    finally:
+        _cleanup(openstack_clients, vms, fips)