Common Dockerfile for CVP-Sanity and CVP-SPT

Related-Task: #PROD-26312(PROD:26312)

Change-Id: I457a8d5c6ff73d944518f6b0c2c568f8286728a9
diff --git a/test_set/cvp_spt/README.md b/test_set/cvp_spt/README.md
new file mode 100644
index 0000000..00a200a
--- /dev/null
+++ b/test_set/cvp_spt/README.md
@@ -0,0 +1,5 @@
+# cvp-spt
+Environment variables
+--
+* Set *keystone_api_version* to the required Keystone API version (e.g. keystone_api_version=2).
+  If it is not set, version '3' is used by default.
diff --git a/test_set/cvp_spt/__init__.py b/test_set/cvp_spt/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test_set/cvp_spt/__init__.py
diff --git a/test_set/cvp_spt/fixtures/__init__.py b/test_set/cvp_spt/fixtures/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test_set/cvp_spt/fixtures/__init__.py
diff --git a/test_set/cvp_spt/fixtures/base.py b/test_set/cvp_spt/fixtures/base.py
new file mode 100644
index 0000000..b4c07ac
--- /dev/null
+++ b/test_set/cvp_spt/fixtures/base.py
@@ -0,0 +1,99 @@
+import pytest
+import cvp_spt.utils as utils
+import random
+import time
+from cvp_spt.utils import os_client
+
+@pytest.fixture(scope='session')
+def local_salt_client():
+    return utils.init_salt_client()
+
+
+# TODO: fix - this runs at import time, so the node pairs are collected
+# for every test run, even those that do not need them
+nodes = utils.get_pairs()
+hw_nodes = utils.get_hw_pairs()
+
+
+@pytest.fixture(scope='session', params=nodes.values(), ids=nodes.keys())
+def pair(request):
+    return request.param
+
+
+@pytest.fixture(scope='session', params=hw_nodes.values(), ids=hw_nodes.keys())
+def hw_pair(request):
+    return request.param
+
+
+@pytest.fixture(scope='session')
+def openstack_clients(local_salt_client):
+    nodes_info = local_salt_client.cmd(
+        'keystone:server', 'pillar.get',
+        ['keystone:server'],
+        expr_form='pillar')
+    if len(nodes_info) < 1:
+        pytest.skip("No keystone server found")
+    keystone = nodes_info[nodes_info.keys()[0]]
+    url = 'http://{ip}:{port}/'.format(ip=keystone['bind']['public_address'],
+                                       port=keystone['bind']['public_port'])
+    return os_client.OfficialClientManager(
+        username=keystone['admin_name'],
+        password=keystone['admin_password'],
+        tenant_name=keystone['admin_tenant'],
+        auth_url=url,
+        cert=False,
+        domain='Default',
+        )
+
+
+@pytest.fixture(scope='session')
+def os_resources(openstack_clients):
+    os_actions = os_client.OSCliActions(openstack_clients)
+    os_resource = {}
+    config = utils.get_configuration()
+    image_name = config.get('image_name') or 'Ubuntu'
+
+    os_images_list = [image.id for image in openstack_clients.image.images.list(filters={'name': image_name})]
+    if len(os_images_list) == 0:
+        print "No images with name {}. This name can be redefined with the 'image_name' env var".format(image_name)
+        exit()
+
+    os_resource['image_id'] = str(os_images_list[0])
+
+    os_resource['flavor_id'] = [flavor.id for flavor in openstack_clients.compute.flavors.list() if flavor.name == 'spt-test']
+    if not os_resource['flavor_id']:
+        os_resource['flavor_id'] = os_actions.create_flavor('spt-test', 1536, 1, 3).id
+    else:
+        os_resource['flavor_id'] = str(os_resource['flavor_id'][0])
+
+    os_resource['sec_group'] = os_actions.create_sec_group()
+    os_resource['keypair'] = openstack_clients.compute.keypairs.create('spt-test-{}'.format(random.randrange(100, 999)))
+    os_resource['net1'] = os_actions.create_network_resources()
+    os_resource['ext_net'] = os_actions.get_external_network()
+    adm_tenant = os_actions.get_admin_tenant()
+    os_resource['router'] = os_actions.create_router(os_resource['ext_net'], adm_tenant.id)
+    os_resource['net2'] = os_actions.create_network(adm_tenant.id)
+    os_resource['subnet2'] = os_actions.create_subnet(os_resource['net2'], adm_tenant.id, '10.2.7.0/24')
+    for subnet in openstack_clients.network.list_subnets()['subnets']:
+        if subnet['network_id'] == os_resource['net1']['id']:
+            os_resource['subnet1'] = subnet['id']
+
+    openstack_clients.network.add_interface_router(os_resource['router']['id'], {'subnet_id': os_resource['subnet1']})
+    openstack_clients.network.add_interface_router(os_resource['router']['id'], {'subnet_id': os_resource['subnet2']['id']})
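+    # Everything above is setup; the code after the yield is teardown:
+    # detach the router interfaces, then delete the router, networks,
+    # security group, keypair and flavor.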
+    yield os_resource
+    # time.sleep(5)
+    openstack_clients.network.remove_interface_router(os_resource['router']['id'], {'subnet_id': os_resource['subnet1']})
+    openstack_clients.network.remove_interface_router(os_resource['router']['id'], {'subnet_id': os_resource['subnet2']['id']})
+    openstack_clients.network.remove_gateway_router(os_resource['router']['id'])
+    time.sleep(5)
+    openstack_clients.network.delete_router(os_resource['router']['id'])
+    time.sleep(5)
+    # openstack_clients.network.delete_subnet(subnet1['id'])
+    openstack_clients.network.delete_network(os_resource['net1']['id'])
+    openstack_clients.network.delete_network(os_resource['net2']['id'])
+
+    openstack_clients.compute.security_groups.delete(os_resource['sec_group'].id)
+    openstack_clients.compute.keypairs.delete(os_resource['keypair'].name)
+
+    openstack_clients.compute.flavors.delete(os_resource['flavor_id'])
diff --git a/test_set/cvp_spt/global_config.yaml b/test_set/cvp_spt/global_config.yaml
new file mode 100644
index 0000000..51af102
--- /dev/null
+++ b/test_set/cvp_spt/global_config.yaml
@@ -0,0 +1,29 @@
+---
+# MANDATORY: Credentials for Salt Master
+# SALT_URL should consist of url and port.
+# For example: http://10.0.0.1:6969
+# 6969 - default Salt Master port to listen
+# Can be found on cfg* node using
+# "salt-call pillar.get _param:salt_master_host"
+# and "salt-call pillar.get _param:salt_master_port"
+# or "salt-call pillar.get _param:jenkins_salt_api_url"
+# SALT_USERNAME by default: salt
+# It can be verified with "salt-call shadow.info salt"
+# SALT_PASSWORD you can find on cfg* node using
+# "salt-call pillar.get _param:salt_api_password"
+# or "grep -r salt_api_password /srv/salt/reclass/classes"
+SALT_URL: <URL>
+SALT_USERNAME: <USERNAME>
+SALT_PASSWORD: <PASSWORD>
+
+# How many seconds to wait for salt-minion to respond
+salt_timeout: 1
+
+image_name: "Ubuntu"
+skipped_nodes: []
+# example for Jenkins: networks=10.101.0.0/24,10.102.0.0/24
+networks: "10.101.0.0/24"
+HW_NODES: []
+CMP_HOSTS: []
+nova_timeout: 30
+iperf_prep_string: "sudo /bin/bash -c 'echo \"91.189.88.161        archive.ubuntu.com\" >> /etc/hosts'"
diff --git a/test_set/cvp_spt/requirements.txt b/test_set/cvp_spt/requirements.txt
new file mode 100644
index 0000000..55011ce
--- /dev/null
+++ b/test_set/cvp_spt/requirements.txt
@@ -0,0 +1,10 @@
+paramiko==2.0.0 # LGPLv2.1+
+pytest>=3.0.4  # MIT
+python-cinderclient>=1.6.0,!=1.7.0,!=1.7.1  # Apache-2.0
+python-glanceclient>=2.5.0  # Apache-2.0
+python-keystoneclient>=3.8.0  # Apache-2.0
+python-neutronclient>=5.1.0  # Apache-2.0
+python-novaclient==7.1.0
+PyYAML>=3.12  # MIT
+requests>=2.10.0,!=2.12.2 # Apache-2.0
+texttable==1.2.0
diff --git a/test_set/cvp_spt/tests/__init__.py b/test_set/cvp_spt/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test_set/cvp_spt/tests/__init__.py
diff --git a/test_set/cvp_spt/tests/conftest.py b/test_set/cvp_spt/tests/conftest.py
new file mode 100644
index 0000000..a0636ac
--- /dev/null
+++ b/test_set/cvp_spt/tests/conftest.py
@@ -0,0 +1 @@
+from cvp_spt.fixtures.base import *
diff --git a/test_set/cvp_spt/tests/test_glance.py b/test_set/cvp_spt/tests/test_glance.py
new file mode 100644
index 0000000..fd6681f
--- /dev/null
+++ b/test_set/cvp_spt/tests/test_glance.py
@@ -0,0 +1,54 @@
+import pytest
+import time
+import subprocess
+import cvp_spt.utils as utils
+
+
+@pytest.fixture
+def create_image():
+    line = 'echo "Executing dd on $(hostname -f)"; ' \
+           'dd if=/dev/zero of=/tmp/image_mk_framework.dd bs=1M count=9000 ;' \
+           'echo "Free space :" ; ' \
+           'df -H / '
+
+    subprocess.call(line.split())
+    yield
+    # teardown
+    subprocess.call('rm /tmp/image_mk_framework.dd'.split())
+    subprocess.call('rm /tmp/image_mk_framework.download'.split())
+
+
+def test_speed_glance(create_image, openstack_clients, record_property):
+    """
+    Simplified Glance upload/download performance test:
+    1. Upload a ~9 GB image to Glance and measure the speed
+    2. Download the same image back and measure the speed
+    """
+    image = openstack_clients.image.images.create(
+        name="test_image",
+        disk_format='iso',
+        container_format='bare')
+
+    start_time = time.time()
+    openstack_clients.image.images.upload(
+        image.id,
+        image_data=open("/tmp/image_mk_framework.dd", 'rb'))
+    end_time = time.time()
+
+    speed_upload = 9000 / (end_time - start_time)
+
+    start_time = time.time()
+    with open("/tmp/image_mk_framework.download", 'wb') as image_file:
+        for item in openstack_clients.image.images.data(image.id):
+            image_file.write(item)
+    end_time = time.time()
+
+    speed_download = 9000 / (end_time - start_time)
+
+    openstack_clients.image.images.delete(image.id)
+    record_property("Upload", speed_upload)
+    record_property("Download", speed_download)
+
+    print "++++++++++++++++++++++++++++++++++++++++"
+    print 'upload - {} MB/s'.format(speed_upload)
+    print 'download - {} MB/s'.format(speed_download)
diff --git a/test_set/cvp_spt/tests/test_hw2hw.py b/test_set/cvp_spt/tests/test_hw2hw.py
new file mode 100644
index 0000000..629be42
--- /dev/null
+++ b/test_set/cvp_spt/tests/test_hw2hw.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+import itertools
+import re
+import os
+import yaml
+import requests
+from cvp_spt import utils
+from cvp_spt.utils import helpers
+from netaddr import IPNetwork, IPAddress
+
+
+def test_hw2hw(local_salt_client, hw_pair, record_property):
+    helpp = helpers.helpers(local_salt_client)
+    config = utils.get_configuration()
+    nodes = local_salt_client.cmd(expr_form='compound', tgt=str(hw_pair[0]+' or '+hw_pair[1]),
+                                  fun='network.interfaces')
+    short_name = []
+    short_name.append(hw_pair[0].split('.')[0])
+    short_name.append(hw_pair[1].split('.')[0])
+    nets = config.get('networks').split(',')
+    local_salt_client.cmd(expr_form='compound', tgt=str(hw_pair[0]+' or '+hw_pair[1]),
+                          fun='cmd.run', param=['nohup iperf -s > file 2>&1 &'])
+    global_results = []
+    for net in nets:
+        for interf in nodes[hw_pair[0]]:
+            if 'inet' not in nodes[hw_pair[0]][interf].keys():
+                continue
+            ip = nodes[hw_pair[0]][interf]['inet'][0]['address']
+            if (IPAddress(ip) in IPNetwork(net)) and (nodes[hw_pair[0]][interf]['inet'][0]['broadcast']):
+                for interf2 in nodes[hw_pair[1]]:
+                    if 'inet' not in nodes[hw_pair[1]][interf2].keys():
+                        continue
+                    ip2 = nodes[hw_pair[1]][interf2]['inet'][0]['address']
+                    if (IPAddress(ip2) in IPNetwork(net)) and (nodes[hw_pair[1]][interf2]['inet'][0]['broadcast']):
+                        print "Will IPERF between {0} and {1}".format(ip,ip2)
+                        try:
+                            res = helpp.start_iperf_between_hosts(global_results, hw_pair[0], hw_pair[1],
+                                                                      ip, ip2, net)
+                            record_property("1-worst {0}-{1}".format(short_name[0],short_name[1]), res[0] if res[0] < res[2] else res[2])
+                            record_property("1-best {0}-{1}".format(short_name[0],short_name[1]), res[0] if res[0] > res[2] else res[2])
+                            record_property("10-best {0}-{1}".format(short_name[0],short_name[1]), res[1] if res[1] > res[3] else res[3])
+                            record_property("10-best {0}-{1}".format(short_name[0],short_name[1]), res[1] if res[1] > res[3] else res[3])
+                            print "Measurement between {} and {} " \
+                                   "has been finished".format(hw_pair[0],
+                                                              hw_pair[1])
+                        except Exception as e:
+                                print "Failed for {0} {1}".format(
+                                              hw_pair[0], hw_pair[1])
+                                print e
+    local_salt_client.cmd(expr_form='compound', tgt=str(hw_pair[0]+' or '+hw_pair[1]),
+                          fun='cmd.run', param=['killall -9 iperf'])
+    helpp.draw_table_with_results(global_results)
diff --git a/test_set/cvp_spt/tests/test_vm2vm.py b/test_set/cvp_spt/tests/test_vm2vm.py
new file mode 100644
index 0000000..9e1d5d7
--- /dev/null
+++ b/test_set/cvp_spt/tests/test_vm2vm.py
@@ -0,0 +1,103 @@
+import os
+import random
+import time
+import pytest
+from cvp_spt import utils
+from cvp_spt.utils import os_client
+from cvp_spt.utils import ssh
+
+
+def test_vm2vm(openstack_clients, pair, os_resources, record_property):
+    os_actions = os_client.OSCliActions(openstack_clients)
+    config = utils.get_configuration()
+    timeout = int(config.get('nova_timeout', 30))
+    vms = []
+    fips = []
+    try:
+        zone1 = [service.zone for service in openstack_clients.compute.services.list() if service.host == pair[0]]
+        zone2 = [service.zone for service in openstack_clients.compute.services.list() if service.host == pair[1]]
+        vm1 = os_actions.create_basic_server(os_resources['image_id'],
+                                             os_resources['flavor_id'],
+                                             os_resources['net1'],
+                                             '{0}:{1}'.format(zone1[0],pair[0]),
+                                             [os_resources['sec_group'].name],
+                                             os_resources['keypair'].name)
+
+        vm2 = os_actions.create_basic_server(os_resources['image_id'],
+                                             os_resources['flavor_id'],
+                                             os_resources['net1'],
+                                             '{0}:{1}'.format(zone1[0],pair[0]),
+                                             [os_resources['sec_group'].name],
+                                             os_resources['keypair'].name)
+
+        vm3 = os_actions.create_basic_server(os_resources['image_id'],
+                                             os_resources['flavor_id'],
+                                             os_resources['net1'],
+                                             '{0}:{1}'.format(zone2[0],pair[1]),
+                                             [os_resources['sec_group'].name],
+                                             os_resources['keypair'].name)
+
+        vm4 = os_actions.create_basic_server(os_resources['image_id'],
+                                             os_resources['flavor_id'],
+                                             os_resources['net2'],
+                                             '{0}:{1}'.format(zone2[0],pair[1]),
+                                             [os_resources['sec_group'].name],
+                                             os_resources['keypair'].name)
+
+        vm_info = []
+        vms.extend([vm1, vm2, vm3, vm4])
+        time.sleep(5)
+        for i in range(4):
+            fip = openstack_clients.compute.floating_ips.create(os_resources['ext_net']['name'])
+            fips.append(fip.id)
+            status = openstack_clients.compute.servers.get(vms[i]).status
+            if status != 'ACTIVE':
+                print "VM #{0} {1} is not ready. Status {2}".format(i,vms[i].id,status)
+                time.sleep(timeout)
+                status = openstack_clients.compute.servers.get(vms[i]).status
+            if status != 'ACTIVE':
+                raise Exception('VM is not ready')
+            vms[i].add_floating_ip(fip)
+            private_address = vms[i].addresses[vms[i].addresses.keys()[0]][0]['addr']
+            time.sleep(5)
+            try:
+                ssh.prepare_iperf(fip.ip,private_key=os_resources['keypair'].private_key)
+            except Exception as e:
+                print e
+                print "ssh.prepare_iperf was not successful, retry after {} sec".format(timeout)
+                time.sleep(timeout)
+                ssh.prepare_iperf(fip.ip,private_key=os_resources['keypair'].private_key)
+            vm_info.append({'vm': vms[i], 'fip': fip.ip, 'private_address': private_address})
+
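+        # Measurements: 1) same host / same net, 2) different hosts / same net,
+        # 3) different hosts / same net with 10 parallel threads,
+        # 4) different hosts via floating IP, 5) different hosts / different nets.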
+        transport1 = ssh.SSHTransport(vm_info[0]['fip'], 'ubuntu', password='dd', private_key=os_resources['keypair'].private_key)
+
+        result1 = transport1.exec_command('iperf -c {} | tail -n 1'.format(vm_info[1]['private_address']))
+        print ' '.join(result1.split()[-2::])
+        record_property("same {0}-{1}".format(zone1[0],zone2[0]), ' '.join(result1.split()[-2::]))
+        result2 = transport1.exec_command('iperf -c {} | tail -n 1'.format(vm_info[2]['private_address']))
+        print ' '.join(result2.split()[-2::])
+        record_property("diff host {0}-{1}".format(zone1[0],zone2[0]), ' '.join(result2.split()[-2::]))
+        result3 = transport1.exec_command('iperf -c {} -P 10 | tail -n 1'.format(vm_info[2]['private_address']))
+        print ' '.join(result3.split()[-2::])
+        record_property("dif host 10 threads {0}-{1}".format(zone1[0],zone2[0]), ' '.join(result3.split()[-2::]))
+        result4 = transport1.exec_command('iperf -c {} | tail -n 1'.format(vm_info[2]['fip']))
+        print ' '.join(result4.split()[-2::])
+        record_property("diff host fip {0}-{1}".format(zone1[0],zone2[0]), ' '.join(result4.split()[-2::]))
+        result5 = transport1.exec_command('iperf -c {} | tail -n 1'.format(vm_info[3]['private_address']))
+        print ' '.join(result5.split()[-2::])
+        record_property("diff host, diff net {0}-{1}".format(zone1[0],zone2[0]), ' '.join(result5.split()[-2::]))
+
+        print "Remove VMs"
+        for vm in vms:
+            openstack_clients.compute.servers.delete(vm)
+        print "Remove FIPs"
+        for fip in fips:
+            openstack_clients.compute.floating_ips.delete(fip)
+    except Exception as e:
+        print e
+        print "Something went wrong"
+        for vm in vms:
+            openstack_clients.compute.servers.delete(vm)
+        for fip in fips:
+            openstack_clients.compute.floating_ips.delete(fip)
+        pytest.fail("Something went wrong")
diff --git a/test_set/cvp_spt/utils/__init__.py b/test_set/cvp_spt/utils/__init__.py
new file mode 100644
index 0000000..c53dd69
--- /dev/null
+++ b/test_set/cvp_spt/utils/__init__.py
@@ -0,0 +1,111 @@
+import os
+import yaml
+import requests
+import re
+import sys, traceback
+import itertools
+import helpers
+from cvp_spt.utils import os_client
+
+
+class salt_remote:
+    def cmd(self, tgt, fun, param=None, expr_form=None, tgt_type=None):
+        config = get_configuration()
+        url = config['SALT_URL']
+        proxies = {"http": None, "https": None}
+        headers = {'Accept': 'application/json'}
+        login_payload = {'username': config['SALT_USERNAME'],
+                         'password': config['SALT_PASSWORD'], 'eauth': 'pam'}
+        accept_key_payload = {'fun': fun, 'tgt': tgt, 'client': 'local',
+                              'expr_form': expr_form, 'tgt_type': tgt_type,
+                              'timeout': config['salt_timeout']}
+        if param:
+            accept_key_payload['arg'] = param
+
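+        # Salt API flow: authenticate against <url>/login to obtain a session
+        # cookie, then POST the command payload to <url> re-using that cookie.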
+        try:
+            login_request = requests.post(os.path.join(url, 'login'),
+                                          headers=headers, data=login_payload,
+                                          proxies=proxies)
+            if login_request.ok:
+                request = requests.post(url, headers=headers,
+                                        data=accept_key_payload,
+                                        cookies=login_request.cookies,
+                                        proxies=proxies)
+                return request.json()['return'][0]
+        except Exception:
+            print "\033[91m\nConnection to SaltMaster " \
+                  "was not established.\n" \
+                  "Please make sure that you " \
+                  "provided correct credentials.\033[0m\n"
+            traceback.print_exc(file=sys.stdout)
+            sys.exit()
+
+
+def init_salt_client():
+    local = salt_remote()
+    return local
+
+
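+# Illustration (hypothetical minion names):
+# compile_pairs(['cmp1', 'cmp2', 'cmp3', 'cmp4']) returns
+# {'cmp1<>cmp2': ('cmp1', 'cmp2'), 'cmp3<>cmp4': ('cmp3', 'cmp4')};
+# with an odd number of nodes the second element is dropped first.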
+def compile_pairs(nodes):
+    result = {}
+    if len(nodes) % 2 != 0:
+        nodes.pop(1)
+    pairs = zip(*[iter(nodes)]*2)
+    for pair in pairs:
+        result[pair[0]+'<>'+pair[1]] = pair
+    return result
+
+
+def get_pairs():
+    # TODO
+    # maybe collect cmp from nova service-list
+    config = get_configuration()
+    local_salt_client = init_salt_client()
+    cmp_hosts = config.get('CMP_HOSTS') or []
+    skipped_nodes = config.get('skipped_nodes') or []
+    if skipped_nodes:
+        print "Notice: {0} nodes will be skipped for vm2vm test".format(skipped_nodes)
+    if not cmp_hosts:
+        nodes = local_salt_client.cmd(
+                'I@nova:compute',
+                'test.ping',
+                expr_form='compound')
+        cmp_hosts = [node.split('.')[0] for node in nodes.keys() if node not in skipped_nodes]
+    return compile_pairs(cmp_hosts)
+
+
+def get_hw_pairs():
+    config = get_configuration()
+    local_salt_client = init_salt_client()
+    hw_nodes = config.get('HW_NODES') or []
+    skipped_nodes = config.get('skipped_nodes') or []
+    if skipped_nodes:
+        print "Notice: {0} nodes will be skipped for hw2hw test".format(skipped_nodes)
+    if not hw_nodes:
+        nodes = local_salt_client.cmd(
+                'I@salt:control or I@nova:compute',
+                'test.ping',
+                expr_form='compound')
+        hw_nodes = [node for node in nodes.keys() if node not in skipped_nodes]
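+    # Make sure iperf is installed on every hw node before the pairs are handed to the tests.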
+    print local_salt_client.cmd(expr_form='compound', tgt="L@"+','.join(hw_nodes),
+                                fun='pkg.install', param=['iperf'])
+    return compile_pairs(hw_nodes)
+
+def get_configuration():
+    """function returns configuration for environment
+    and for test if it's specified"""
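+    # Every key present in global_config.yaml can be overridden by an environment
+    # variable of the same name; comma-separated values become lists,
+    # e.g. (hypothetical) CMP_HOSTS=cmp001,cmp002 -> ['cmp001', 'cmp002'].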
+
+    global_config_file = os.path.join(
+        os.path.dirname(os.path.abspath(__file__)), "../global_config.yaml")
+    with open(global_config_file, 'r') as global_config_fh:
+        global_config = yaml.safe_load(global_config_fh)
+    for param in global_config.keys():
+        if param in os.environ.keys():
+            if ',' in os.environ[param]:
+                global_config[param] = []
+                for item in os.environ[param].split(','):
+                    global_config[param].append(item)
+            else:
+                global_config[param] = os.environ[param]
+
+    return global_config
diff --git a/test_set/cvp_spt/utils/helpers.py b/test_set/cvp_spt/utils/helpers.py
new file mode 100644
index 0000000..97a3bae
--- /dev/null
+++ b/test_set/cvp_spt/utils/helpers.py
@@ -0,0 +1,79 @@
+import texttable as tt
+
+class helpers(object):
+    def __init__(self, local_salt_client):
+        self.local_salt_client = local_salt_client
+
+    def start_iperf_between_hosts(self, global_results, node_i, node_j, ip_i, ip_j, net_name):
+        result = []
+        direct_raw_results = self.start_iperf_client(node_i, ip_j)
+        result.append(direct_raw_results)
+        print "1 forward"
+        forward = "1 thread:\n"
+        forward += direct_raw_results + " Gbits/sec"
+
+        direct_raw_results = self.start_iperf_client(node_i, ip_j, 10)
+        result.append(direct_raw_results)
+        print "10 forward"
+        forward += "\n\n10 thread:\n"
+        forward += direct_raw_results + " Gbits/sec"
+
+        reverse_raw_results = self.start_iperf_client(node_j, ip_i)
+        result.append(reverse_raw_results)
+        print "1 backward"
+        backward = "1 thread:\n"
+        backward += reverse_raw_results + " Gbits/sec"
+
+        reverse_raw_results = self.start_iperf_client(node_j, ip_i, 10)
+        result.append(reverse_raw_results)
+        print "10 backward"
+        backward += "\n\n10 thread:\n"
+        backward += reverse_raw_results + " Gbits/sec"
+        global_results.append([node_i, node_j,
+                               net_name, forward, backward])
+
+        self.kill_iperf_processes(node_i)
+        self.kill_iperf_processes(node_j)
+        return result
+
+    def draw_table_with_results(self, global_results):
+        tab = tt.Texttable()
+        header = [
+            'node name 1',
+            'node name 2',
+            'network',
+            'bandwidth forward',
+            'bandwidth backward',
+        ]
+        tab.set_cols_align(['l', 'l', 'l', 'l', 'l'])
+        tab.set_cols_width([27, 27, 15, 20, 20])
+        tab.header(header)
+        for row in global_results:
+            tab.add_row(row)
+        s = tab.draw()
+        print s
+
+    def start_iperf_client(self, minion_name, target_ip, thread_count=None):
+        iperf_command = 'timeout --kill-after=20 19 iperf -c {0}'.format(target_ip)
+        if thread_count:
+            iperf_command += ' -P {0}'.format(thread_count)
+        output = self.local_salt_client.cmd(tgt=minion_name,
+                                            fun='cmd.run',
+                                            param=[iperf_command])
+        # self.kill_iperf_processes(minion_name)
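+        # The last line of iperf output ends with '<value> <unit>'; Mbits/sec
+        # results are converted to Gbits/sec and anything else is reported as "0".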
+        try:
+            result = output.values()[0].split('\n')[-1].split(' ')[-2:]
+            if result[1] == 'Mbits/sec':
+                return str(float(result[0])*0.001)
+            if result[1] != 'Gbits/sec':
+                return "0"
+            return result[0]
+        except Exception:
+            print "No iperf result between {} and {} (maybe they don't have connectivity)".format(minion_name, target_ip)
+
+
+    def kill_iperf_processes(self, minion_name):
+        kill_command = "for pid in $(pgrep  iperf); do kill $pid; done"
+        output = self.local_salt_client.cmd(tgt=minion_name,
+                                            fun='cmd.run',
+                                            param=[kill_command])
diff --git a/test_set/cvp_spt/utils/os_client.py b/test_set/cvp_spt/utils/os_client.py
new file mode 100644
index 0000000..c17617f
--- /dev/null
+++ b/test_set/cvp_spt/utils/os_client.py
@@ -0,0 +1,396 @@
+from cinderclient import client as cinder_client
+from glanceclient import client as glance_client
+from keystoneauth1 import identity as keystone_identity
+from keystoneauth1 import session as keystone_session
+from keystoneclient.v3 import client as keystone_client
+from neutronclient.v2_0 import client as neutron_client
+from novaclient import client as novaclient
+
+import os
+import random
+import time
+
+
+class OfficialClientManager(object):
+    """Manager that provides access to the official python clients for
+    calling various OpenStack APIs.
+    """
+
+    CINDERCLIENT_VERSION = 3
+    GLANCECLIENT_VERSION = 2
+    KEYSTONECLIENT_VERSION = 3
+    NEUTRONCLIENT_VERSION = 2
+    NOVACLIENT_VERSION = 2
+    INTERFACE = 'admin'
+    if "OS_ENDPOINT_TYPE" in os.environ.keys():
+        INTERFACE = os.environ["OS_ENDPOINT_TYPE"]
+
+    def __init__(self, username=None, password=None,
+                 tenant_name=None, auth_url=None, endpoint_type="internalURL",
+                 cert=False, domain="Default", **kwargs):
+        self.traceback = ""
+
+        self.client_attr_names = [
+            "auth",
+            "compute",
+            "network",
+            "volume",
+            "image",
+        ]
+        self.username = username
+        self.password = password
+        self.tenant_name = tenant_name
+        self.project_name = tenant_name
+        self.auth_url = auth_url
+        self.endpoint_type = endpoint_type
+        self.cert = cert
+        self.domain = domain
+        self.kwargs = kwargs
+
+        # Lazy clients
+        self._auth = None
+        self._compute = None
+        self._network = None
+        self._volume = None
+        self._image = None
+
+    @classmethod
+    def _get_auth_session(cls, username=None, password=None,
+                          tenant_name=None, auth_url=None, cert=None,
+                          domain='Default'):
+        if None in (username, password, tenant_name):
+            print(username, password, tenant_name)
+            msg = ("Missing required credentials for identity client. "
+                   "username: {username}, password: {password}, "
+                   "tenant_name: {tenant_name}").format(
+                       username=username,
+                       password=password,
+                       tenant_name=tenant_name, )
+            raise RuntimeError(msg)
+
+        if cert and "https" not in auth_url:
+            auth_url = auth_url.replace("http", "https")
+
+        if cls.KEYSTONECLIENT_VERSION == (2, 0):
+            # auth_url = "{}{}".format(auth_url, "v2.0/")
+            auth = keystone_identity.v2.Password(
+                username=username,
+                password=password,
+                auth_url=auth_url,
+                tenant_name=tenant_name)
+        else:
+            auth_url = "{}{}".format(auth_url, "/v3")
+            auth = keystone_identity.v3.Password(
+                auth_url=auth_url,
+                user_domain_name=domain,
+                username=username,
+                password=password,
+                project_domain_name=domain,
+                project_name=tenant_name)
+
+        auth_session = keystone_session.Session(auth=auth, verify=cert)
+        # auth_session.get_auth_headers()
+        return auth_session
+
+    @classmethod
+    def get_auth_client(cls, username=None, password=None,
+                        tenant_name=None, auth_url=None, cert=None,
+                        domain='Default', **kwargs):
+        session = cls._get_auth_session(
+            username=username,
+            password=password,
+            tenant_name=tenant_name,
+            auth_url=auth_url,
+            cert=cert,
+            domain=domain)
+        keystone = keystone_client.Client(version=cls.KEYSTONECLIENT_VERSION,
+                                          session=session, **kwargs)
+        keystone.management_url = auth_url
+        return keystone
+
+    @classmethod
+    def get_compute_client(cls, username=None, password=None,
+                           tenant_name=None, auth_url=None, cert=None,
+                           domain='Default', **kwargs):
+        session = cls._get_auth_session(
+            username=username, password=password, tenant_name=tenant_name,
+            auth_url=auth_url, cert=cert, domain=domain)
+        service_type = 'compute'
+        compute_client = novaclient.Client(
+            version=cls.NOVACLIENT_VERSION, session=session,
+            service_type=service_type, os_cache=False, **kwargs)
+        return compute_client
+
+    @classmethod
+    def get_network_client(cls, username=None, password=None,
+                           tenant_name=None, auth_url=None, cert=None,
+                           domain='Default', **kwargs):
+        session = cls._get_auth_session(
+            username=username, password=password, tenant_name=tenant_name,
+            auth_url=auth_url, cert=cert, domain=domain)
+        service_type = 'network'
+        return neutron_client.Client(
+            service_type=service_type, session=session, interface=cls.INTERFACE, **kwargs)
+
+    @classmethod
+    def get_volume_client(cls, username=None, password=None,
+                          tenant_name=None, auth_url=None, cert=None,
+                          domain='Default', **kwargs):
+        session = cls._get_auth_session(
+            username=username, password=password, tenant_name=tenant_name,
+            auth_url=auth_url, cert=cert, domain=domain)
+        service_type = 'volume'
+        return cinder_client.Client(
+            version=cls.CINDERCLIENT_VERSION,
+            service_type=service_type,
+            interface=cls.INTERFACE,
+            session=session, **kwargs)
+
+    @classmethod
+    def get_image_client(cls, username=None, password=None,
+                         tenant_name=None, auth_url=None, cert=None,
+                         domain='Default', **kwargs):
+        session = cls._get_auth_session(
+            username=username, password=password, tenant_name=tenant_name,
+            auth_url=auth_url, cert=cert, domain=domain)
+        service_type = 'image'
+        return glance_client.Client(
+            version=cls.GLANCECLIENT_VERSION,
+            service_type=service_type,
+            session=session, interface=cls.INTERFACE,
+            **kwargs)
+
+    @property
+    def auth(self):
+        if self._auth is None:
+            self._auth = self.get_auth_client(
+                self.username, self.password, self.tenant_name, self.auth_url,
+                self.cert, self.domain, endpoint_type=self.endpoint_type
+            )
+        return self._auth
+
+    @property
+    def compute(self):
+        if self._compute is None:
+            self._compute = self.get_compute_client(
+                self.username, self.password, self.tenant_name, self.auth_url,
+                self.cert, self.domain, endpoint_type=self.endpoint_type
+            )
+        return self._compute
+
+    @property
+    def network(self):
+        if self._network is None:
+            self._network = self.get_network_client(
+                self.username, self.password, self.tenant_name, self.auth_url,
+                self.cert, self.domain, endpoint_type=self.endpoint_type
+            )
+        return self._network
+
+    @property
+    def volume(self):
+        if self._volume is None:
+            self._volume = self.get_volume_client(
+                self.username, self.password, self.tenant_name, self.auth_url,
+                self.cert, self.domain, endpoint_type=self.endpoint_type
+            )
+        return self._volume
+
+    @property
+    def image(self):
+        if self._image is None:
+            self._image = self.get_image_client(
+                self.username, self.password, self.tenant_name, self.auth_url,
+                self.cert, self.domain
+            )
+        return self._image
+
+
+class OSCliActions(object):
+    def __init__(self, os_clients):
+        self.os_clients = os_clients
+
+    def get_admin_tenant(self):
+        # TODO: Keystone v3 doesn't have a tenants attribute
+        return self.os_clients.auth.projects.find(name="admin")
+
+    # TODO: refactor
+    def get_cirros_image(self):
+        images_list = list(self.os_clients.image.images.list(name='TestVM'))
+        if images_list:
+            image = images_list[0]
+        else:
+            image = self.os_clients.image.images.create(
+                name="TestVM",
+                disk_format='qcow2',
+                container_format='bare')
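+            # NOTE: file_cache and settings are not defined in this module;
+            # this fallback branch only runs when no 'TestVM' image exists yet.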
+            with file_cache.get_file(settings.CIRROS_QCOW2_URL) as f:
+                self.os_clients.image.images.upload(image.id, f)
+        return image
+
+    def get_internal_network(self):
+        networks = [
+            net for net in self.os_clients.network.list_networks()["networks"]
+            if net["admin_state_up"] and not net["router:external"] and
+            len(net["subnets"])
+        ]
+        if networks:
+            net = networks[0]
+        else:
+            net = self.create_network_resources()
+        return net
+
+    def get_external_network(self):
+        networks = [
+            net for net in self.os_clients.network.list_networks()["networks"]
+            if net["admin_state_up"] and net["router:external"] and
+            len(net["subnets"])
+        ]
+        if networks:
+            ext_net = networks[0]
+        else:
+            ext_net = self.create_fake_external_network()
+        return ext_net
+
+    def create_flavor(self, name, ram=256, vcpus=1, disk=2):
+        return self.os_clients.compute.flavors.create(name, ram, vcpus, disk)
+
+    def create_sec_group(self, rulesets=None):
+        if rulesets is None:
+            rulesets = [
+                {
+                    # ssh
+                    'ip_protocol': 'tcp',
+                    'from_port': 22,
+                    'to_port': 22,
+                    'cidr': '0.0.0.0/0',
+                },
+                {
+                    # iperf
+                    'ip_protocol': 'tcp',
+                    'from_port': 5001,
+                    'to_port': 5001,
+                    'cidr': '0.0.0.0/0',
+                },
+                {
+                    # ping
+                    'ip_protocol': 'icmp',
+                    'from_port': -1,
+                    'to_port': -1,
+                    'cidr': '0.0.0.0/0',
+                }
+            ]
+        sg_name = "spt-test-secgroup-{}".format(random.randrange(100, 999))
+        sg_desc = sg_name + " SPT"
+        secgroup = self.os_clients.compute.security_groups.create(
+            sg_name, sg_desc)
+        for ruleset in rulesets:
+            self.os_clients.compute.security_group_rules.create(
+                secgroup.id, **ruleset)
+        return secgroup
+
+
+    @staticmethod
+    def wait(predicate, interval=5, timeout=60, timeout_msg="Waiting timed out"):
+        start_time = time.time()
+        if not timeout:
+            return predicate()
+        while not predicate():
+            if start_time + timeout < time.time():
+                raise RuntimeError(timeout_msg)
+
+            seconds_to_sleep = max(
+                0,
+                min(interval, start_time + timeout - time.time()))
+            time.sleep(seconds_to_sleep)
+
+        return timeout + start_time - time.time()
+
+    def create_basic_server(self, image=None, flavor=None, net=None,
+                            availability_zone=None, sec_groups=(),
+                            keypair=None,
+                            wait_timeout=3 * 60):
+        os_conn = self.os_clients
+        image = image or self.get_cirros_image()
+        flavor = flavor or self.get_micro_flavor()
+        net = net or self.get_internal_network()
+        kwargs = {}
+        if sec_groups:
+            kwargs['security_groups'] = sec_groups
+        server = os_conn.compute.servers.create(
+            "spt-test-server-{}".format(random.randrange(100, 999)),
+            image, flavor, nics=[{"net-id": net["id"]}],
+            availability_zone=availability_zone, key_name=keypair, **kwargs)
+        # TODO
+        #if wait_timeout:
+        #    self.wait(
+        #        lambda: os_conn.compute.servers.get(server).status == "ACTIVE",
+        #        timeout=wait_timeout,
+        #        timeout_msg=(
+        #            "Create server {!r} failed by timeout. "
+        #            "Please, take a look at OpenStack logs".format(server.id)))
+        return server
+
+    def create_network(self, tenant_id):
+        net_name = "spt-test-net-{}".format(random.randrange(100, 999))
+        net_body = {
+            'network': {
+                'name': net_name,
+                'tenant_id': tenant_id
+            }
+        }
+        net = self.os_clients.network.create_network(net_body)['network']
+        return net
+        #yield net
+        #self.os_clients.network.delete_network(net['id'])
+
+    def create_subnet(self, net, tenant_id, cidr=None):
+        subnet_name = "spt-test-subnet-{}".format(random.randrange(100, 999))
+        subnet_body = {
+            'subnet': {
+                "name": subnet_name,
+                'network_id': net['id'],
+                'ip_version': 4,
+                'cidr': cidr if cidr else '10.1.7.0/24',
+                'tenant_id': tenant_id
+            }
+        }
+        subnet = self.os_clients.network.create_subnet(subnet_body)['subnet']
+        return subnet
+        #yield subnet
+        #self.os_clients.network.delete_subnet(subnet['id'])
+
+    def create_router(self, ext_net, tenant_id):
+        name = 'spt-test-router-{}'.format(random.randrange(100, 999))
+        router_body = {
+            'router': {
+                'name': name,
+                'external_gateway_info': {
+                    'network_id': ext_net['id']
+                },
+                'tenant_id': tenant_id
+            }
+        }
+        router = self.os_clients.network.create_router(router_body)['router']
+        return router
+        #yield router
+        #self.os_clients.network.delete_router(router['id'])
+
+    def create_network_resources(self):
+        tenant_id = self.get_admin_tenant().id
+        ext_net = self.get_external_network()
+        net = self.create_network(tenant_id)
+        subnet = self.create_subnet(net, tenant_id)
+        #router = self.create_router(ext_net, tenant_id)
+        #self.os_clients.network.add_interface_router(
+        #    router['id'], {'subnet_id': subnet['id']})
+
+        private_net_id = net['id']
+        # floating_ip_pool = ext_net['id']
+
+        return net
+        #yield private_net_id, floating_ip_pool
+        #yield private_net_id
+        #
+        #self.os_clients.network.remove_interface_router(
+        #     router['id'], {'subnet_id': subnet['id']})
+        #self.os_clients.network.remove_gateway_router(router['id'])
diff --git a/test_set/cvp_spt/utils/ssh.py b/test_set/cvp_spt/utils/ssh.py
new file mode 100644
index 0000000..8e78963
--- /dev/null
+++ b/test_set/cvp_spt/utils/ssh.py
@@ -0,0 +1,140 @@
+import cStringIO
+import logging
+import select
+from cvp_spt import utils
+import paramiko
+
+
+logger = logging.getLogger(__name__)
+
+# Suppress paramiko logging
+logging.getLogger("paramiko").setLevel(logging.WARNING)
+
+
+class SSHTransport(object):
+    def __init__(self, address, username, password=None,
+                 private_key=None, look_for_keys=False, *args, **kwargs):
+
+        self.address = address
+        self.username = username
+        self.password = password
+        if private_key is not None:
+            self.private_key = paramiko.RSAKey.from_private_key(
+                cStringIO.StringIO(private_key))
+        else:
+            self.private_key = None
+
+        self.look_for_keys = look_for_keys
+        self.buf_size = 1024
+        self.channel_timeout = 10.0
+
+    def _get_ssh_connection(self):
+        ssh = paramiko.SSHClient()
+        ssh.set_missing_host_key_policy(
+            paramiko.AutoAddPolicy())
+        ssh.connect(self.address, username=self.username,
+                    password=self.password, pkey=self.private_key,
+                    timeout=self.channel_timeout)
+        logger.debug("Successfully connected to: {0}".format(self.address))
+        return ssh
+
+    def _get_sftp_connection(self):
+        transport = paramiko.Transport((self.address, 22))
+        transport.connect(username=self.username,
+                          password=self.password,
+                          pkey=self.private_key)
+
+        return paramiko.SFTPClient.from_transport(transport)
+
+    def exec_sync(self, cmd):
+        logger.debug("Executing {0} on host {1}".format(cmd, self.address))
+        ssh = self._get_ssh_connection()
+        transport = ssh.get_transport()
+        channel = transport.open_session()
+        channel.fileno()
+        channel.exec_command(cmd)
+        channel.shutdown_write()
+        out_data = []
+        err_data = []
+        poll = select.poll()
+        poll.register(channel, select.POLLIN)
+
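+        # Drain stdout/stderr via poll() until the channel is closed and no
+        # more data is pending, then collect the exit status.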
+        while True:
+            ready = poll.poll(self.channel_timeout)
+            if not any(ready):
+                continue
+            if not ready[0]:
+                continue
+            out_chunk = err_chunk = None
+            if channel.recv_ready():
+                out_chunk = channel.recv(self.buf_size)
+                out_data += out_chunk,
+            if channel.recv_stderr_ready():
+                err_chunk = channel.recv_stderr(self.buf_size)
+                err_data += err_chunk,
+            if channel.closed and not err_chunk and not out_chunk:
+                break
+        exit_status = channel.recv_exit_status()
+        logger.debug("Command {0} executed with status: {1}"
+                     .format(cmd, exit_status))
+        return (
+            exit_status, ''.join(out_data).strip(), ''.join(err_data).strip())
+
+    def exec_command(self, cmd):
+        exit_status, stdout, stderr = self.exec_sync(cmd)
+        return stdout
+
+    def check_call(self, command, error_info=None, expected=None,
+                   raise_on_err=True):
+        """Execute command and check for return code
+        :type command: str
+        :type error_info: str
+        :type expected: list
+        :type raise_on_err: bool
+        :rtype: tuple (exit_code, stdout, stderr)
+        :raises: SystemExit if the exit code is not expected and raise_on_err is True
+        """
+        if expected is None:
+            expected = [0]
+        ret = self.exec_sync(command)
+        exit_code, stdout_str, stderr_str = ret
+        if exit_code not in expected:
+            message = (
+                "{append}Command '{cmd}' returned exit code {code} while "
+                "expected {expected}\n"
+                "\tSTDOUT:\n"
+                "{stdout}"
+                "\n\tSTDERR:\n"
+                "{stderr}".format(
+                    append=error_info + '\n' if error_info else '',
+                    cmd=command,
+                    code=exit_code,
+                    expected=expected,
+                    stdout=stdout_str,
+                    stderr=stderr_str
+                ))
+            logger.error(message)
+            if raise_on_err:
+                exit()
+        return ret
+
+    def put_file(self, source_path, destination_path):
+        sftp = self._get_sftp_connection()
+        sftp.put(source_path, destination_path)
+        sftp.close()
+
+    def get_file(self, source_path, destination_path):
+        sftp = self._get_sftp_connection()
+        sftp.get(source_path, destination_path)
+        sftp.close()
+
+
+class prepare_iperf(object):
+
+    def __init__(self, fip, user='ubuntu', password='password', private_key=None):
+        transport = SSHTransport(fip, user, password, private_key)
+        config = utils.get_configuration()
+        preparation_cmd = config.get('iperf_prep_string') or ''
+        transport.exec_command(preparation_cmd)
+        transport.exec_command('sudo apt-get update; sudo apt-get install -y iperf')
+        transport.exec_command('nohup iperf -s > file 2>&1 &')