Common Dockerfile for CVP-Sanity and CVP-SPT

Related-Task: #PROD-26312(PROD:26312)

Change-Id: I457a8d5c6ff73d944518f6b0c2c568f8286728a9
diff --git a/test_set/cvp_sanity/README.md b/test_set/cvp_sanity/README.md
new file mode 100644
index 0000000..13acd7e
--- /dev/null
+++ b/test_set/cvp_sanity/README.md
@@ -0,0 +1,65 @@
+MCP sanity checks
+========================
+
+This is a Salt-based set of tests for basic verification of MCP deployments.
+
+How to start
+=======================
+
+1) Clone the repo to any node (the node must have HTTP access to the Salt Master):
+```bash
+   # root@cfg-01:~/# git clone https://github.com/Mirantis/cvp-sanity-checks
+   # cd cvp-sanity-checks
+```
+Use `git config --global http.proxy http://proxyuser:proxypwd@proxy.server.com:8080`
+if needed.
+
+2) Install virtualenv:
+```bash
+   # curl -O https://pypi.python.org/packages/source/v/virtualenv/virtualenv-X.X.tar.gz
+   # tar xvfz virtualenv-X.X.tar.gz
+   # cd virtualenv-X.X
+   # sudo python setup.py install
+```
+or
+```bash
+   # apt-get install python-virtualenv
+```
+
+3) Create a virtualenv and install the requirements and the package:
+
+```bash
+   # virtualenv --system-site-packages .venv
+   # source .venv/bin/activate
+   # pip install --proxy http://$PROXY:8678 -r requirements.txt
+   # python setup.py install
+   # python setup.py develop
+```
+
+4) Configure:
+```bash
+   # vim cvp_checks/global_config.yaml
+```
+Salt credentials are mandatory for the tests.
+
+
+Other settings are optional (please keep them uncommented with their default values).
+
+
+Alternatively, you can specify these settings via env variables:
+```bash
+export SALT_URL=http://10.0.0.1:6969
+```
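+Presumably the Salt credentials can be overridden the same way (illustrative
+values only):
+```bash
+export SALT_USERNAME=salt
+export SALT_PASSWORD=pswd
+```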
+For array-type settings, use a comma-separated list:
+```bash
+export skipped_nodes='ctl01.example.com,ctl02.example.com'
+```
+
+5) Run the tests:
+```bash
+   # pytest --tb=short -sv cvp_checks/tests/
+```
+or
+```bash
+   # pytest -sv cvp_checks/tests/ --ignore cvp_checks/tests/test_mtu.py
+```
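+
+To run a single test module, pass its path to pytest as usual:
+```bash
+   # pytest --tb=short -sv cvp_checks/tests/test_ntp_sync.py
+```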
diff --git a/test_set/cvp_sanity/__init__.py b/test_set/cvp_sanity/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test_set/cvp_sanity/__init__.py
diff --git a/test_set/cvp_sanity/fixtures/__init__.py b/test_set/cvp_sanity/fixtures/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test_set/cvp_sanity/fixtures/__init__.py
diff --git a/test_set/cvp_sanity/fixtures/base.py b/test_set/cvp_sanity/fixtures/base.py
new file mode 100644
index 0000000..f6ad22a
--- /dev/null
+++ b/test_set/cvp_sanity/fixtures/base.py
@@ -0,0 +1,111 @@
+import pytest
+import atexit
+import cvp_checks.utils as utils
+
+
+@pytest.fixture(scope='session')
+def local_salt_client():
+    return utils.init_salt_client()
+
+nodes = utils.calculate_groups()
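+# calculate_groups() presumably returns a mapping of group name to node list,
+# e.g. (hypothetical): {'ctl': ['ctl01.local', ...], 'cmp': ['cmp001.local', ...]}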
+
+
+@pytest.fixture(scope='session', params=nodes.values(), ids=nodes.keys())
+def nodes_in_group(request):
+    return request.param
+
+
+@pytest.fixture(scope='session')
+def check_prometheus(local_salt_client):
+    salt_output = local_salt_client.cmd(
+        'prometheus:server',
+        'test.ping',
+        expr_form='pillar')
+    if not salt_output:
+        pytest.skip("Prometheus service or prometheus:server pillar \
+          are not found on this environment.")
+
+
+@pytest.fixture(scope='session')
+def check_kibana(local_salt_client):
+    salt_output = local_salt_client.cmd(
+        'kibana:server',
+        'test.ping',
+        expr_form='pillar')
+    if not salt_output:
+        pytest.skip("Kibana service or kibana:server pillar \
+          are not found on this environment.")
+
+
+@pytest.fixture(scope='session')
+def check_grafana(local_salt_client):
+    salt_output = local_salt_client.cmd(
+        'grafana:client',
+        'test.ping',
+        expr_form='pillar')
+    if not salt_output:
+        pytest.skip("Grafana service or grafana:client pillar \
+          are not found on this environment.")
+
+
+def pytest_namespace():
+    return {'contrail': None}
+
+
+@pytest.fixture(scope='module')
+def contrail(local_salt_client):
+    probe = local_salt_client.cmd(
+        'opencontrail:control',
+        'pillar.get',
+        'opencontrail:control:version',
+        expr_form='pillar')
+    if not probe:
+        pytest.skip("Contrail is not found on this environment")
+    versions = set(probe.values())
+    if len(versions) != 1:
+        pytest.fail('Contrail versions are not the same: {}'.format(probe))
+    pytest.contrail = str(versions.pop())[:1]
+
+
+@pytest.fixture(autouse=True, scope='session')
+def print_node_version(local_salt_client):
+    """
+        Gets info about each node using salt command, info is represented as a dictionary with :
+        {node_name1: output1, node_name2: ...}
+
+        :print to output the table with results after completing all tests if nodes and salt output exist.
+                Prints nothing otherwise
+        :return None
+    """
+    try:
+        filename_with_versions = "/etc/image_version"
+        cat_image_version_file = "if [ -f '{name}' ]; then \
+                                        cat {name}; \
+                                    else \
+                                        echo BUILD_TIMESTAMP='no {name}'; \
+                                        echo BUILD_TIMESTAMP_RFC='no {name}'; \
+                                    fi ".format(name=filename_with_versions)
+
+        list_version = local_salt_client.cmd(
+            '*',
+            'cmd.run',
+            'echo "NODE_INFO=$(uname -sr)" && ' + cat_image_version_file,
+            expr_form='compound')
+        if not list_version:
+            yield
+            return
+        parsed = {k: v.split('\n') for k, v in list_version.items()}
+        columns = [name.split('=')[0] for name in parsed.values()[0]]
+
+        template = "{:<40} | {:<25} | {:<25} | {:<25}\n"
+
+        report_text = template.format("NODE", *columns)
+        for node, data in sorted(parsed.items()):
+            report_text += template.format(node, *[item.split("=")[1] for item in data])
+
+        def write_report():
+            print(report_text)
+        atexit.register(write_report)
+        yield
+    except Exception as e:
+        print("print_node_version:: some error occurred: {}".format(e))
+        yield
diff --git a/test_set/cvp_sanity/global_config.yaml b/test_set/cvp_sanity/global_config.yaml
new file mode 100644
index 0000000..4faddaa
--- /dev/null
+++ b/test_set/cvp_sanity/global_config.yaml
@@ -0,0 +1,79 @@
+---
+# MANDATORY: Credentials for Salt Master
+# SALT_URL should consist of url and port.
+# For example: http://10.0.0.1:6969
+# 6969 - default Salt Master port to listen
+# Can be found on cfg* node using
+# "salt-call pillar.get _param:salt_master_host"
+# and "salt-call pillar.get _param:salt_master_port"
+# or "salt-call pillar.get _param:jenkins_salt_api_url"
+# SALT_USERNAME by default: salt
+# It can be verified with "salt-call shadow.info salt"
+# SALT_PASSWORD you can find on cfg* node using
+# "salt-call pillar.get _param:salt_api_password"
+# or "grep -r salt_api_password /srv/salt/reclass/classes"
+SALT_URL: <salt_url>
+SALT_USERNAME: <salt_usr>
+SALT_PASSWORD: <salt_pwd>
+
+# How many seconds to wait for salt-minion to respond
+salt_timeout: 1
+
+# List of nodes (full fqdn) to skip in ALL tests
+# Use as env variable as
+# export skipped_nodes=mtr01.local,log02.local
+# TEMPORARY: please do not comment this setting.
+skipped_nodes: [""]
+
+# List of groups (short name, e.g. dbs) to skip in group tests
+# Use as env variable as
+# export skipped_groups=mtr,log
+# TEMPORARY: please do not comment this setting.
+skipped_groups: [""]
+
+# Groups can be defined using pillars.
+# Uncomment this section to enable this.
+# Otherwise groups will be discovered automatically
+# Tips:
+# 1) you don't need to separate kvm and kvm_glusterfs nodes
+# 2) Use I@pillar or a mask like ctl* for targeting nodes
+
+groups: {
+         cmp: 'I@nova:compute',
+         ctl: 'I@keystone:server',
+         msg: 'I@rabbitmq:server',
+         dbs: 'I@galera:*',
+         prx: 'I@nginx:server',
+         mon: 'I@prometheus:server and not I@influxdb:server',
+         log: 'I@kibana:server',
+         mtr: 'I@influxdb:server',
+         kvm: 'I@salt:control',
+         cid: 'I@docker:host and not I@prometheus:server and not I@kubernetes:*',
+         ntw: 'I@opencontrail:database',
+         ceph_mon: 'I@ceph:mon',
+         ceph_osd: 'I@ceph:osd',
+         k8-ctl: 'I@etcd:server',
+         k8-cmp: 'I@kubernetes:* and not I@etcd:*',
+         cfg: 'I@salt:master',
+         gtw: 'I@neutron:gateway'
+}
+
+# mtu test setting
+# this test may skip groups (see example)
+test_mtu:
+  { #"skipped_groups": ["dbs"]
+    "skipped_ifaces": ["bonding_masters", "lo", "veth", "tap", "cali", "qv", "qb", "br-int", "vxlan"]}
+# mask for interfaces to skip
+
+# specify what mcp version (tag) is deployed
+drivetrain_version: ''
+
+# jenkins job to run during the test
+jenkins_test_job: 'DT-test-job'
+jenkins_cvp_job: 'cvp-sanity'
+
+# ntp test setting
+# this test may skip specific node (use fqdn)
+test_ntp_sync:
+  { #"skipped_nodes": [""],
+    "time_deviation": 1}
diff --git a/test_set/cvp_sanity/requirements.txt b/test_set/cvp_sanity/requirements.txt
new file mode 100644
index 0000000..eea162a
--- /dev/null
+++ b/test_set/cvp_sanity/requirements.txt
@@ -0,0 +1,8 @@
+pytest==3.0.6
+requests==2.10.0
+flake8
+PyYAML
+python-jenkins==0.4.11
+pygerrit2==2.0.6
+gitpython
+python-ldap
diff --git a/test_set/cvp_sanity/tests/__init__.py b/test_set/cvp_sanity/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test_set/cvp_sanity/tests/__init__.py
diff --git a/test_set/cvp_sanity/tests/ceph/test_ceph_haproxy.py b/test_set/cvp_sanity/tests/ceph/test_ceph_haproxy.py
new file mode 100644
index 0000000..d6c8e49
--- /dev/null
+++ b/test_set/cvp_sanity/tests/ceph/test_ceph_haproxy.py
@@ -0,0 +1,23 @@
+import pytest
+
+
+def test_ceph_haproxy(local_salt_client):
+    pytest.skip("This test doesn't work. Skipped")
+    fail = {}
+
+    monitor_info = local_salt_client.cmd(
+        'ceph:mon',
+        'cmd.run',
+        ["echo 'show stat' | nc -U "
+         "/var/run/haproxy/admin.sock | "
+         "grep ceph_mon_radosgw_cluster"],
+        expr_form='pillar')
+    if not monitor_info:
+        pytest.skip("Ceph is not found on this environment")
+
+    for name, info in monitor_info.iteritems():
+        if "OPEN" and "UP" in info:
+            continue
+        else:
+            fail[name] = info
+    assert not fail, "Failed monitors: {}".format(fail)
diff --git a/test_set/cvp_sanity/tests/ceph/test_ceph_pg_count.py b/test_set/cvp_sanity/tests/ceph/test_ceph_pg_count.py
new file mode 100644
index 0000000..28783e8
--- /dev/null
+++ b/test_set/cvp_sanity/tests/ceph/test_ceph_pg_count.py
@@ -0,0 +1,94 @@
+import pytest
+import math
+
+def __next_power_of2(total_pg):
+    count = 0
+    if total_pg and not (total_pg & (total_pg - 1)):
+        return total_pg
+    while total_pg != 0:
+        total_pg >>= 1
+        count += 1
+    return 1 << count
+
+
+def test_ceph_pg_count(local_salt_client):
+    """
+    Test aimed to calculate placement groups for Ceph cluster
+    according formula below.
+    Formula to calculate PG num:
+    Total PGs = 
+    (Total_number_of_OSD * 100) / max_replication_count / pool count
+    pg_num and pgp_num should be the same and 
+    set according formula to higher value of powered 2
+    """
+    pytest.skip("This test needs redesign. Skipped for now")
+    ceph_monitors = local_salt_client.cmd(
+        'ceph:mon', 
+        'test.ping', 
+        expr_form='pillar')
+    
+    if not ceph_monitors:
+        pytest.skip("Ceph is not found on this environment")
+
+    monitor = ceph_monitors.keys()[0]
+    pools = local_salt_client.cmd(
+        monitor, 'cmd.run', 
+        ["rados lspools"], 
+        expr_form='glob').get(
+            ceph_monitors.keys()[0]).split('\n')
+    
+    total_osds = int(local_salt_client.cmd(
+        monitor, 
+        'cmd.run', 
+        ['ceph osd tree | grep osd | grep "up\|down" | wc -l'], 
+        expr_form='glob').get(ceph_monitors.keys()[0]))
+    
+    raw_pool_replications = local_salt_client.cmd(
+        monitor, 
+        'cmd.run', 
+        ["ceph osd dump | grep size | awk '{print $3, $6}'"], 
+        expr_form='glob').get(ceph_monitors.keys()[0]).split('\n')
+    
+    pool_replications = {}
+    for replication in raw_pool_replications:
+        pool_replications[replication.split()[0]] = int(replication.split()[1])
+    
+    max_replication_value = 0
+    for repl_value in pool_replications.values():
+        if repl_value > max_replication_value:
+            max_replication_value = repl_value
+
+    total_pg = (total_osds * 100) / max_replication_value / len(pools)
+    correct_pg_num = __next_power_of2(total_pg)
+    
+    pools_pg_num = {}
+    pools_pgp_num = {}
+    for pool in pools:
+        pg_num = int(local_salt_client.cmd(
+            monitor, 
+            'cmd.run', 
+            ["ceph osd pool get {} pg_num".format(pool)], 
+            expr_form='glob').get(ceph_monitors.keys()[0]).split()[1])
+        pools_pg_num[pool] = pg_num
+        pgp_num = int(local_salt_client.cmd(
+            monitor, 
+            'cmd.run', 
+            ["ceph osd pool get {} pgp_num".format(pool)], 
+            expr_form='glob').get(ceph_monitors.keys()[0]).split()[1])
+        pools_pgp_num[pool] = pgp_num
+
+    wrong_pg_num_pools = [] 
+    pg_pgp_not_equal_pools = []
+    for pool in pools:
+        if pools_pg_num[pool] != pools_pgp_num[pool]:
+            pg_pgp_not_equal_pools.append(pool)
+        if pools_pg_num[pool] < correct_pg_num:
+            wrong_pg_num_pools.append(pool)
+
+    assert not pg_pgp_not_equal_pools, \
+    "For pools {} PG and PGP are not equal " \
+    "but should be".format(pg_pgp_not_equal_pools)
+    assert not wrong_pg_num_pools, "For pools {} " \
+    "PG number lower than Correct PG number, " \
+    "but should be equal or higher".format(wrong_pg_num_pools)
diff --git a/test_set/cvp_sanity/tests/ceph/test_ceph_replicas.py b/test_set/cvp_sanity/tests/ceph/test_ceph_replicas.py
new file mode 100644
index 0000000..62af49d
--- /dev/null
+++ b/test_set/cvp_sanity/tests/ceph/test_ceph_replicas.py
@@ -0,0 +1,49 @@
+import pytest
+
+
+def test_ceph_replicas(local_salt_client):
+    """
+    Test aimed to check number of replicas
+    for most of deployments if there is no
+    special requirement for that.
+    """
+
+    ceph_monitors = local_salt_client.cmd(
+        'ceph:mon', 
+        'test.ping', 
+        expr_form='pillar')
+
+    if not ceph_monitors:
+        pytest.skip("Ceph is not found on this environment")
+
+    monitor = ceph_monitors.keys()[0]
+
+    raw_pool_replicas = local_salt_client.cmd(
+        monitor, 
+        'cmd.run', 
+        ["ceph osd dump | grep size | " \
+        "awk '{print $3, $5, $6, $7, $8}'"], 
+        expr_form='glob').get(
+        ceph_monitors.keys()[0]).split('\n')
+
+    pools_replicas = {}
+    for pool in raw_pool_replicas:
+        # Each line looks like: "<pool_name> size 3 min_size 2"
+        pool_name = pool.split(" ", 1)[0]
+        pool_replicas = {}
+        raw_replicas = pool.split(" ", 1)[1].split()
+        for i in range(0, len(raw_replicas), 2):
+            pool_replicas[raw_replicas[i]] = int(raw_replicas[i + 1])
+        pools_replicas[pool_name] = pool_replicas
+    
+    error = []
+    for pool, replicas in pools_replicas.items():
+        for replica, value in replicas.items():
+            if replica == 'min_size' and value < 2:
+                error.append(pool + " " + replica + " "
+                             + str(value) + " must be at least 2")
+            if replica == 'size' and value < 3:
+                error.append(pool + " " + replica + " "
+                             + str(value) + " must be at least 3")
+    
+    assert not error, "Wrong pool replicas found\n{}".format(error)
diff --git a/test_set/cvp_sanity/tests/ceph/test_ceph_status.py b/test_set/cvp_sanity/tests/ceph/test_ceph_status.py
new file mode 100644
index 0000000..ffd7bed
--- /dev/null
+++ b/test_set/cvp_sanity/tests/ceph/test_ceph_status.py
@@ -0,0 +1,39 @@
+import json
+
+import pytest
+
+
+def test_ceph_osd(local_salt_client):
+    osd_fail = local_salt_client.cmd(
+        'ceph:osd',
+        'cmd.run',
+        ['ceph osd tree | grep down'],
+        expr_form='pillar')
+    if not osd_fail:
+        pytest.skip("Ceph is not found on this environment")
+    assert not osd_fail.values()[0], \
+        "Some OSDs are in down state:\n{}".format(
+        osd_fail.values()[0])
+
+
+def test_ceph_health(local_salt_client):
+    get_status = local_salt_client.cmd(
+        'ceph:mon',
+        'cmd.run',
+        ['ceph -s -f json'],
+        expr_form='pillar')
+    if not get_status:
+        pytest.skip("Ceph is not found on this environment")
+    status = json.loads(get_status.values()[0])["health"]
+    health = status["status"] if 'status' in status \
+        else status["overall_status"]
+
+    # Health structure depends on Ceph version, so condition is needed:
+    if 'checks' in status:
+        summary = "Summary: {}".format(
+            [i["summary"]["message"] for i in status["checks"].values()])
+    else:
+        summary = status["summary"]
+
+    assert health == "HEALTH_OK",\
+        "Ceph status is not expected. {}".format(summary)
diff --git a/test_set/cvp_sanity/tests/ceph/test_ceph_tell_bench.py b/test_set/cvp_sanity/tests/ceph/test_ceph_tell_bench.py
new file mode 100644
index 0000000..b275022
--- /dev/null
+++ b/test_set/cvp_sanity/tests/ceph/test_ceph_tell_bench.py
@@ -0,0 +1,56 @@
+import pytest
+import json
+import math
+
+
+def test_ceph_tell_bench(local_salt_client):
+    """
+    Test checks that each OSD MB per second speed 
+    is not lower than 10 MB comparing with AVG. 
+    Bench command by default writes 1Gb on each OSD 
+    with the default values of 4M 
+    and gives the "bytes_per_sec" speed for each OSD.
+
+    """
+    pytest.skip("This test needs redesign. Skipped for now")
+    ceph_monitors = local_salt_client.cmd(
+        'ceph:mon', 
+        'test.ping', 
+        expr_form='pillar')
+
+    if not ceph_monitors:
+        pytest.skip("Ceph is not found on this environment")
+
+    cmd_result = local_salt_client.cmd(
+        ceph_monitors.keys()[0], 
+        'cmd.run', ["ceph tell osd.* bench -f json"], 
+        expr_form='glob').get(
+            ceph_monitors.keys()[0]).split('\n')
+
+    cmd_result = filter(None, cmd_result)
+
+    osd_pool = {}
+    for osd in cmd_result:
+        # Each line is expected to look like "osd.N <json payload>"
+        osd_ = osd.split(" ", 1)
+        osd_pool[osd_[0]] = osd_[1]
+
+    mbps_sum = 0
+    osd_count = 0
+    for osd in osd_pool:
+        osd_count += 1
+        mbps_sum += json.loads(
+            osd_pool[osd])['bytes_per_sec'] / 1000000
+
+    mbps_avg = mbps_sum / osd_count
+    result = {}
+    for osd in osd_pool:
+        mbps = json.loads(
+            osd_pool[osd])['bytes_per_sec'] / 1000000
+        if math.fabs(mbps_avg - mbps) > 10:
+            result[osd] = osd_pool[osd]
+
+    assert len(result) == 0, \
+    "Performance of {0} OSD(s) lower " \
+    "than AVG performance ({1} mbps), " \
+    "please check Ceph for possible problems".format(
+        json.dumps(result, indent=4), mbps_avg)
diff --git a/test_set/cvp_sanity/tests/conftest.py b/test_set/cvp_sanity/tests/conftest.py
new file mode 100644
index 0000000..21af490
--- /dev/null
+++ b/test_set/cvp_sanity/tests/conftest.py
@@ -0,0 +1 @@
+from cvp_checks.fixtures.base import *
diff --git a/test_set/cvp_sanity/tests/test_cinder_services.py b/test_set/cvp_sanity/tests/test_cinder_services.py
new file mode 100644
index 0000000..e6b8c8e
--- /dev/null
+++ b/test_set/cvp_sanity/tests/test_cinder_services.py
@@ -0,0 +1,30 @@
+import pytest
+
+
+def test_cinder_services(local_salt_client):
+    cinder_backends_info = local_salt_client.cmd(
+        'cinder:controller',
+        'pillar.get',
+        ['cinder:controller:backend'],
+        expr_form='pillar')
+    if not cinder_backends_info:
+        pytest.skip("Cinder service or cinder:controller pillar \
+        are not found on this environment.")
+    service_down = local_salt_client.cmd(
+        'keystone:server',
+        'cmd.run',
+        ['. /root/keystonercv3; cinder service-list | grep "down\|disabled"'],
+        expr_form='pillar')
+    cinder_volume = local_salt_client.cmd(
+        'keystone:server',
+        'cmd.run',
+        ['. /root/keystonercv3; cinder service-list | grep -c "volume"'],
+        expr_form='pillar')
+    backends_cinder = cinder_backends_info[cinder_backends_info.keys()[0]]
+    backends_num = len(backends_cinder.keys())
+    assert service_down[service_down.keys()[0]] == '', \
+        '''Some cinder services are in the wrong state'''
+    assert cinder_volume[cinder_volume.keys()[0]] == str(backends_num), \
+        'Number of cinder-volume services ({0}) does not match ' \
+        'number of volume backends ({1})'.format(
+        cinder_volume[cinder_volume.keys()[0]], str(backends_num))
diff --git a/test_set/cvp_sanity/tests/test_contrail.py b/test_set/cvp_sanity/tests/test_contrail.py
new file mode 100644
index 0000000..5e7e108
--- /dev/null
+++ b/test_set/cvp_sanity/tests/test_contrail.py
@@ -0,0 +1,87 @@
+import pytest
+import json
+
+pytestmark = pytest.mark.usefixtures("contrail")
+
+STATUS_FILTER = r'grep -Pv "(==|^$|Disk|unix|support|boot|\*\*|FOR NODE)"'
+STATUS_COMMAND = "contrail-status -t 10"
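+# After filtering, each remaining line of contrail-status output is expected to
+# look like "<service-name> <status>", e.g. "contrail-vrouter-agent active".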
+
+def get_contrail_status(salt_client, pillar, command, processor):
+    return salt_client.cmd(
+        pillar, 'cmd.run',
+        ['{} | {}'.format(command, processor)],
+        expr_form='pillar'
+    )
+
+def test_contrail_compute_status(local_salt_client):
+    cs = get_contrail_status(local_salt_client, 'nova:compute',
+                             STATUS_COMMAND, STATUS_FILTER)
+    broken_services = []
+
+    for node in cs:
+        for line in cs[node].split('\n'):
+            line = line.strip()
+            if len(line.split(None, 1)) == 1:
+                err_msg = "{0}: {1}".format(
+                    node, line)
+                broken_services.append(err_msg)
+                continue
+            name, status = line.split(None, 1)
+            if status not in {'active'}:
+                err_msg = "{node}:{service} - {status}".format(
+                    node=node, service=name, status=status)
+                broken_services.append(err_msg)
+
+    assert not broken_services, 'Broken services: {}'.format(json.dumps(
+                                                             broken_services,
+                                                             indent=4))
+
+
+def test_contrail_node_status(local_salt_client):
+    command = STATUS_COMMAND
+
+    # TODO: what will be in OpenContrail 5?
+    if pytest.contrail == '4':
+        command = "doctrail all " + command
+    cs = get_contrail_status(local_salt_client,
+                             'opencontrail:client:analytics_node',
+                             command, STATUS_FILTER)
+    cs.update(get_contrail_status(local_salt_client, 'opencontrail:control',
+                                  command, STATUS_FILTER)
+    )
+    broken_services = []
+    for node in cs:
+        for line in cs[node].split('\n'):
+            line = line.strip()
+            if 'crashes/core.java.' not in line:
+                name, status = line.split(None, 1)
+            else:
+                name, status = line, 'FATAL'
+            if status not in {'active', 'backup'}:
+                err_msg = "{node}:{service} - {status}".format(
+                    node=node, service=name, status=status)
+                broken_services.append(err_msg)
+
+    assert not broken_services, 'Broken services: {}'.format(json.dumps(
+                                                             broken_services,
+                                                             indent=4))
+
+
+def test_contrail_vrouter_count(local_salt_client):
+    cs = get_contrail_status(local_salt_client, 'nova:compute',
+                             STATUS_COMMAND, STATUS_FILTER)
+
+    # TODO: what if compute lacks these service unintentionally?
+    if not cs:
+        pytest.skip("Contrail services were not found on compute nodes")
+
+    actual_vrouter_count = 0
+    for node in cs:
+        for line in cs[node].split('\n'):
+            if 'contrail-vrouter-nodemgr' in line:
+                actual_vrouter_count += 1
+
+    assert actual_vrouter_count == len(cs.keys()),\
+        'The number of vRouters ({}) differs' \
+        ' from the number of compute nodes ({})'.format(actual_vrouter_count,
+                                                        len(cs.keys()))
diff --git a/test_set/cvp_sanity/tests/test_default_gateway.py b/test_set/cvp_sanity/tests/test_default_gateway.py
new file mode 100644
index 0000000..69fd116
--- /dev/null
+++ b/test_set/cvp_sanity/tests/test_default_gateway.py
@@ -0,0 +1,23 @@
+import json
+import pytest
+import os
+from cvp_checks import utils
+
+
+def test_check_default_gateways(local_salt_client, nodes_in_group):
+    netstat_info = local_salt_client.cmd(
+        "L@"+','.join(nodes_in_group), 'cmd.run', ['ip r | sed -n 1p'], expr_form='compound')
+
+    gateways = {}
+    nodes = netstat_info.keys()
+
+    for node in nodes:
+        if netstat_info[node] not in gateways:
+            gateways[netstat_info[node]] = [node]
+        else:
+            gateways[netstat_info[node]].append(node)
+
+    assert len(gateways.keys()) == 1, \
+        "There were found few gateways: {gw}".format(
+        gw=json.dumps(gateways, indent=4)
+    )
diff --git a/test_set/cvp_sanity/tests/test_drivetrain.py b/test_set/cvp_sanity/tests/test_drivetrain.py
new file mode 100644
index 0000000..468cdea
--- /dev/null
+++ b/test_set/cvp_sanity/tests/test_drivetrain.py
@@ -0,0 +1,330 @@
+import jenkins
+from xml.dom import minidom
+from cvp_checks import utils
+import json
+import pytest
+import time
+import os
+from pygerrit2 import GerritRestAPI, HTTPBasicAuth
+from requests import HTTPError
+import git
+import ldap
+import ldap.modlist as modlist
+
+def join_to_gerrit(local_salt_client, gerrit_user, gerrit_password):
+    gerrit_port = local_salt_client.cmd(
+        'I@gerrit:client and not I@salt:master',
+        'pillar.get',
+        ['_param:haproxy_gerrit_bind_port'],
+        expr_form='compound').values()[0]
+    gerrit_address = local_salt_client.cmd(
+        'I@gerrit:client and not I@salt:master',
+        'pillar.get',
+        ['_param:haproxy_gerrit_bind_host'],
+        expr_form='compound').values()[0]
+    url = 'http://{0}:{1}'.format(gerrit_address,gerrit_port)
+    auth = HTTPBasicAuth(gerrit_user, gerrit_password)
+    rest = GerritRestAPI(url=url, auth=auth)
+    return rest
+
+def join_to_jenkins(local_salt_client, jenkins_user, jenkins_password):
+    jenkins_port = local_salt_client.cmd(
+        'I@jenkins:client and not I@salt:master',
+        'pillar.get',
+        ['_param:haproxy_jenkins_bind_port'],
+        expr_form='compound').values()[0]
+    jenkins_address = local_salt_client.cmd(
+        'I@jenkins:client and not I@salt:master',
+        'pillar.get',
+        ['_param:haproxy_jenkins_bind_host'],
+        expr_form='compound').values()[0]
+    jenkins_url = 'http://{0}:{1}'.format(jenkins_address,jenkins_port)
+    server = jenkins.Jenkins(jenkins_url, username=jenkins_user, password=jenkins_password)
+    return server
+
+def get_password(local_salt_client,service):
+    password = local_salt_client.cmd(
+        service,
+        'pillar.get',
+        ['_param:openldap_admin_password'],
+        expr_form='pillar').values()[0]
+    return password
+
+def test_drivetrain_gerrit(local_salt_client):
+    gerrit_password = get_password(local_salt_client,'gerrit:client')
+    gerrit_error = ''
+    current_date = time.strftime("%Y%m%d-%H.%M.%S", time.localtime())
+    test_proj_name = "test-dt-{0}".format(current_date)
+    gerrit_port = local_salt_client.cmd(
+        'I@gerrit:client and not I@salt:master',
+        'pillar.get',
+        ['_param:haproxy_gerrit_bind_port'],
+        expr_form='compound').values()[0]
+    gerrit_address = local_salt_client.cmd(
+        'I@gerrit:client and not I@salt:master',
+        'pillar.get',
+        ['_param:haproxy_gerrit_bind_host'],
+        expr_form='compound').values()[0]
+    try:
+        #Connecting to gerrit and check connection
+        server = join_to_gerrit(local_salt_client,'admin',gerrit_password)
+        gerrit_check = server.get("/changes/?q=owner:self%20status:open")
+        #Check deleteproject plugin and skip test if the plugin is not installed
+        gerrit_plugins = server.get("/plugins/?all")
+        if 'deleteproject' not in gerrit_plugins:
+            pytest.skip("Delete-project plugin is not installed")
+        #Create test project and add description
+        server.put("/projects/"+test_proj_name)
+        server.put("/projects/"+test_proj_name+"/description",json={"description":"Test DriveTrain project","commit_message": "Update the project description"})
+    except HTTPError, e:
+        gerrit_error = e
+    try:
+        #Create test folder and init git
+        repo_dir = os.path.join(os.getcwd(),test_proj_name)
+        file_name = os.path.join(repo_dir, current_date)
+        repo = git.Repo.init(repo_dir)
+        #Add remote url for this git repo
+        origin = repo.create_remote('origin', 'http://admin:{1}@{2}:{3}/{0}.git'.format(test_proj_name,gerrit_password,gerrit_address,gerrit_port))
+        #Add commit-msg hook to automatically add Change-Id to our commit
+        os.system("curl -Lo {0}/.git/hooks/commit-msg 'http://admin:{1}@{2}:{3}/tools/hooks/commit-msg' > /dev/null 2>&1".format(repo_dir,gerrit_password,gerrit_address,gerrit_port))
+        os.system("chmod u+x {0}/.git/hooks/commit-msg".format(repo_dir))
+        #Create a test file
+        f = open(file_name, 'w+')
+        f.write("This is a test file for DriveTrain test")
+        f.close()
+        #Add file to git and commit it to Gerrit for review
+        repo.index.add([file_name])
+        repo.index.commit("This is a test commit for DriveTrain test")
+        repo.git.push("origin", "HEAD:refs/for/master")
+        #Get change id from Gerrit. Set Code-Review +2 and submit this change
+        changes = server.get("/changes/?q=project:{0}".format(test_proj_name))
+        last_change = changes[0].get('change_id')
+        server.post("/changes/{0}/revisions/1/review".format(last_change),json={"message":"All is good","labels":{"Code-Review":"+2"}})
+        server.post("/changes/{0}/submit".format(last_change))
+    except HTTPError, e:
+        gerrit_error = e
+    finally:
+        #Delete test project
+        server.post("/projects/"+test_proj_name+"/deleteproject~delete")
+    assert gerrit_error == '',\
+        'Something is wrong with Gerrit: {}'.format(gerrit_error)
+
+def test_drivetrain_openldap(local_salt_client):
+    '''Create a test user 'DT_test_user' in openldap,
+    add the user to admin group, login using the user to Jenkins.
+    Add the user to devops group in Gerrit and then login to Gerrit,
+    using test_user credentials. Finally, delete the user from admin
+    group and openldap
+    '''
+    ldap_password = get_password(local_salt_client,'openldap:client')
+    #Check that ldap_password exists, otherwise skip the test
+    if not ldap_password:
+        pytest.skip("Openldap service or openldap:client pillar \
+        are not found on this environment.")
+    ldap_port = local_salt_client.cmd(
+        'I@openldap:client and not I@salt:master',
+        'pillar.get',
+        ['_param:haproxy_openldap_bind_port'],
+        expr_form='compound').values()[0]
+    ldap_address = local_salt_client.cmd(
+        'I@openldap:client and not I@salt:master',
+        'pillar.get',
+        ['_param:haproxy_openldap_bind_host'],
+        expr_form='compound').values()[0]
+    ldap_dc = local_salt_client.cmd(
+        'openldap:client',
+        'pillar.get',
+        ['_param:openldap_dn'],
+        expr_form='pillar').values()[0]
+    ldap_con_admin = local_salt_client.cmd(
+        'openldap:client',
+        'pillar.get',
+        ['openldap:client:server:auth:user'],
+        expr_form='pillar').values()[0]
+    ldap_url = 'ldap://{0}:{1}'.format(ldap_address,ldap_port)
+    ldap_error = ''
+    ldap_result = ''
+    gerrit_result = ''
+    gerrit_error = ''
+    jenkins_error = ''
+    #Test user's CN
+    test_user_name = 'DT_test_user'
+    test_user = 'cn={0},ou=people,{1}'.format(test_user_name,ldap_dc)
+    #Admins group CN
+    admin_gr_dn = 'cn=admins,ou=groups,{0}'.format(ldap_dc)
+    #List of attributes for test user
+    attrs = {}
+    attrs['objectclass'] = ['organizationalRole','simpleSecurityObject','shadowAccount']
+    attrs['cn'] = test_user_name
+    attrs['uid'] = test_user_name
+    attrs['userPassword'] = 'aSecretPassw'
+    attrs['description'] = 'Test user for CVP DT test'
+    searchFilter = 'cn={0}'.format(test_user_name)
+    #Get a test job name from config
+    config = utils.get_configuration()
+    jenkins_cvp_job = config['jenkins_cvp_job']
+    #Open connection to ldap and creating test user in admins group
+    try:
+        ldap_server = ldap.initialize(ldap_url)
+        ldap_server.simple_bind_s(ldap_con_admin,ldap_password)
+        ldif = modlist.addModlist(attrs)
+        ldap_server.add_s(test_user,ldif)
+        ldap_server.modify_s(admin_gr_dn,[(ldap.MOD_ADD, 'memberUid', [test_user_name],)],)
+        #Check search test user in LDAP
+        searchScope = ldap.SCOPE_SUBTREE
+        ldap_result = ldap_server.search_s(ldap_dc, searchScope, searchFilter)
+    except ldap.LDAPError, e:
+        ldap_error = e
+    try:
+        #Check connection between Jenkins and LDAP
+        jenkins_server = join_to_jenkins(local_salt_client,test_user_name,'aSecretPassw')
+        jenkins_version = jenkins_server.get_job_name(jenkins_cvp_job)
+        #Check connection between Gerrit and LDAP
+        gerrit_server = join_to_gerrit(local_salt_client,'admin',ldap_password)
+        gerrit_check = gerrit_server.get("/changes/?q=owner:self%20status:open")
+        #Add test user to devops-contrib group in Gerrit and check login
+        _link = "/groups/devops-contrib/members/{0}".format(test_user_name)
+        gerrit_add_user = gerrit_server.put(_link)
+        gerrit_server = join_to_gerrit(local_salt_client,test_user_name,'aSecretPassw')
+        gerrit_result = gerrit_server.get("/changes/?q=owner:self%20status:open")
+    except HTTPError, e:
+        gerrit_error = e
+    except jenkins.JenkinsException, e:
+        jenkins_error = e
+    finally:
+        ldap_server.modify_s(admin_gr_dn,[(ldap.MOD_DELETE, 'memberUid', [test_user_name],)],)
+        ldap_server.delete_s(test_user)
+        ldap_server.unbind_s()
+    assert ldap_error == '', \
+        '''Something is wrong with connection to LDAP:
+            {0}'''.format(ldap_error)
+    assert jenkins_error == '', \
+        '''Connection to Jenkins was not established:
+            {0}'''.format(jenkins_error)
+    assert gerrit_error == '', \
+        '''Connection to Gerrit was not established:
+            {0}'''.format(gerrit_error)
+    assert ldap_result != [], \
+        '''Test user was not found'''
+
+def test_drivetrain_jenkins_job(local_salt_client):
+    jenkins_password = get_password(local_salt_client,'jenkins:client')
+    server = join_to_jenkins(local_salt_client,'admin',jenkins_password)
+    #Getting Jenkins test job name from configuration
+    config = utils.get_configuration()
+    jenkins_test_job = config['jenkins_test_job']
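+    # jenkins.EMPTY_CONFIG_XML (from python-jenkins) creates a bare placeholder
+    # job if the configured test job does not exist yet.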
+    if not server.get_job_name(jenkins_test_job):
+        server.create_job(jenkins_test_job, jenkins.EMPTY_CONFIG_XML)
+    if server.get_job_name(jenkins_test_job):
+        next_build_num = server.get_job_info(jenkins_test_job)['nextBuildNumber']
+        #If this is first build number skip building check
+        if next_build_num != 1:
+            #Check that test job is not running at this moment,
+            #Otherwise skip the test
+            last_build_num = server.get_job_info(jenkins_test_job)['lastBuild'].get('number')
+            last_build_status = server.get_build_info(jenkins_test_job,last_build_num)['building']
+            if last_build_status:
+                pytest.skip("Test job {0} is already running").format(jenkins_test_job)
+        server.build_job(jenkins_test_job)
+        timeout = 0
+        #Use job status True by default to exclude timeout between build job and start job.
+        job_status = True
+        while job_status and ( timeout < 180 ):
+            time.sleep(10)
+            timeout += 10
+            job_status = server.get_build_info(jenkins_test_job,next_build_num)['building']
+        job_result = server.get_build_info(jenkins_test_job,next_build_num)['result']
+    else:
+        pytest.skip("The job {0} was not found").format(test_job_name)
+    assert job_result == 'SUCCESS', \
+        '''Test job '{0}' build was not successful or the timeout is too small
+         '''.format(jenkins_test_job)
+
+def test_drivetrain_services_replicas(local_salt_client):
+    salt_output = local_salt_client.cmd(
+        'I@gerrit:client',
+        'cmd.run',
+        ['docker service ls'],
+        expr_form='compound')
+    wrong_items = []
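+    # The REPLICAS column of 'docker service ls' looks like "3/3"; the check
+    # below compares the single characters around '/' (current vs desired
+    # replicas), which assumes single-digit replica counts.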
+    for line in salt_output[salt_output.keys()[0]].split('\n'):
+        if line[line.find('/') - 1] != line[line.find('/') + 1] \
+           and 'replicated' in line:
+            wrong_items.append(line)
+    assert len(wrong_items) == 0, \
+        '''Some DriveTrain services don't have the expected number of replicas:
+              {}'''.format(json.dumps(wrong_items, indent=4))
+
+
+def test_drivetrain_components_and_versions(local_salt_client):
+    config = utils.get_configuration()
+    if not config['drivetrain_version']:
+        version = \
+            local_salt_client.cmd(
+                'I@salt:master',
+                'pillar.get',
+                ['_param:mcp_version'],
+                expr_form='compound').values()[0] or \
+            local_salt_client.cmd(
+                'I@salt:master',
+                'pillar.get',
+                ['_param:apt_mk_version'],
+                expr_form='compound').values()[0]
+        if not version:
+            pytest.skip("drivetrain_version is not defined. Skipping")
+    else:
+        version = config['drivetrain_version']
+    salt_output = local_salt_client.cmd(
+        'I@gerrit:client',
+        'cmd.run',
+        ['docker service ls'],
+        expr_form='compound')
+    #  'ldap_server' is removed because it is an external component now (since v 1.1.8)
+    not_found_services = ['gerrit_db', 'gerrit_server', 'jenkins_master',
+                          'jenkins_slave01', 'jenkins_slave02',
+                          'jenkins_slave03', 'ldap_admin', 'docker_registry',
+                          'docker_visualizer']
+    version_mismatch = []
+    for line in salt_output[salt_output.keys()[0]].split('\n'):
+        # iterate over a copy since matched services are removed from the list
+        for service in not_found_services[:]:
+            if service in line:
+                not_found_services.remove(service)
+                if version != line.split()[4].split(':')[1]:
+                    version_mismatch.append("{0}: expected "
+                        "version is {1}, actual - {2}".format(service,version,
+                                                              line.split()[4].split(':')[1]))
+                continue
+    assert len(not_found_services) == 0, \
+        '''Some DriveTrain components are not found:
+              {}'''.format(json.dumps(not_found_services, indent=4))
+    assert len(version_mismatch) == 0, \
+        '''Version mismatch found:
+              {}'''.format(json.dumps(version_mismatch, indent=4))
+
+
+def test_jenkins_jobs_branch(local_salt_client):
+    config = utils.get_configuration()
+    expected_version = config['drivetrain_version'] or []
+    if not expected_version or expected_version == '':
+        pytest.skip("drivetrain_version is not defined. Skipping")
+    jenkins_password = get_password(local_salt_client,'jenkins:client')
+    version_mismatch = []
+    server = join_to_jenkins(local_salt_client,'admin',jenkins_password)
+    for job_instance in server.get_jobs():
+        job_name = job_instance.get('name')
+        job_config = server.get_job_config(job_name)
+        xml_data = minidom.parseString(job_config)
+        BranchSpec = xml_data.getElementsByTagName('hudson.plugins.git.BranchSpec')
+        #We use the master branch for pipeline-library in case of 'testing', 'stable', 'nightly' versions
+        if expected_version in ['testing','nightly','stable']:
+            expected_version = 'master'
+        if BranchSpec:
+            actual_version = BranchSpec[0].getElementsByTagName('name')[0].childNodes[0].data
+            if ( actual_version != expected_version ) and ( job_name not in ['upgrade-mcp-release'] ) :
+                version_mismatch.append("Job {0} has {1} branch. "
+                                        "Expected {2}".format(job_name,
+                                                              actual_version,
+                                                              expected_version))
+    assert len(version_mismatch) == 0, \
+        '''Some DriveTrain jobs have version/branch mismatch:
+              {}'''.format(json.dumps(version_mismatch, indent=4))
diff --git a/test_set/cvp_sanity/tests/test_etc_hosts.py b/test_set/cvp_sanity/tests/test_etc_hosts.py
new file mode 100644
index 0000000..1db29c8
--- /dev/null
+++ b/test_set/cvp_sanity/tests/test_etc_hosts.py
@@ -0,0 +1,23 @@
+import pytest
+import json
+import os
+from cvp_checks import utils
+
+
+def test_etc_hosts(local_salt_client):
+    active_nodes = utils.get_active_nodes()
+    nodes_info = local_salt_client.cmd(
+        utils.list_to_target_string(active_nodes, 'or'), 'cmd.run',
+        ['cat /etc/hosts'],
+        expr_form='compound')
+    result = {}
+    for node in nodes_info.keys():
+        for nd in nodes_info.keys():
+            if node not in nodes_info[nd]:
+                if node in result:
+                    result[node] += ',' + nd
+                else:
+                    result[node] = nd
+    assert len(result) <= 1, \
+        "Some hosts are not present in /etc/hosts: {0}".format(
+         json.dumps(result, indent=4))
diff --git a/test_set/cvp_sanity/tests/test_galera_cluster.py b/test_set/cvp_sanity/tests/test_galera_cluster.py
new file mode 100644
index 0000000..676f09b
--- /dev/null
+++ b/test_set/cvp_sanity/tests/test_galera_cluster.py
@@ -0,0 +1,24 @@
+import pytest
+
+
+def test_galera_cluster_status(local_salt_client):
+    gs = local_salt_client.cmd(
+        'galera:*',
+        'cmd.run',
+        ['salt-call mysql.status | grep -A1 wsrep_cluster_size | tail -n1'],
+        expr_form='pillar')
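+    # Each value is expected to be the wsrep_cluster_size value line from
+    # 'salt-call mysql.status', e.g. "        3" for a three-node cluster.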
+
+    if not gs:
+        pytest.skip("Galera service or galera:* pillar \
+        are not found on this environment.")
+
+    size_cluster = []
+    amount = len(gs)
+
+    for item in gs.values():
+        size_cluster.append(item.split('\n')[-1].strip())
+
+    assert all(item == str(amount) for item in size_cluster), \
+        '''An inconsistency was found within the cloud. The MySQL Galera cluster
+              is probably broken; the cluster size gathered from the nodes:
+              {}'''.format(gs)
diff --git a/test_set/cvp_sanity/tests/test_k8s.py b/test_set/cvp_sanity/tests/test_k8s.py
new file mode 100644
index 0000000..c3a5ff9
--- /dev/null
+++ b/test_set/cvp_sanity/tests/test_k8s.py
@@ -0,0 +1,142 @@
+import pytest
+import json
+import os
+
+
+def test_k8s_get_cs_status(local_salt_client):
+    result = local_salt_client.cmd(
+        'etcd:server', 'cmd.run',
+        ['kubectl get cs'],
+        expr_form='pillar'
+    )
+    errors = []
+    if not result:
+        pytest.skip("k8s is not found on this environment")
+    for node in result:
+        for line in result[node].split('\n'):
+            line = line.strip()
+            if 'MESSAGE' in line or 'proto' in line:
+                continue
+            else:
+                if 'Healthy' not in line:
+                    errors.append(line)
+        break
+    assert not errors, 'k8s is not healthy: {}'.format(json.dumps(
+                                                       errors,
+                                                       indent=4))
+
+
+def test_k8s_get_nodes_status(local_salt_client):
+    result = local_salt_client.cmd(
+        'etcd:server', 'cmd.run',
+        ['kubectl get nodes'],
+        expr_form='pillar'
+    )
+    errors = []
+    if not result:
+        pytest.skip("k8s is not found on this environment")
+    for node in result:
+        for line in result[node].split('\n'):
+            line = line.strip()
+            if 'STATUS' in line or 'proto' in line:
+                continue
+            else:
+                if 'Ready' != line.split()[1]:
+                    errors.append(line)
+        break
+    assert not errors, 'k8s is not healthy: {}'.format(json.dumps(
+                                                       errors,
+                                                       indent=4))
+
+
+def test_k8s_get_calico_status(local_salt_client):
+    result = local_salt_client.cmd(
+        'kubernetes:pool', 'cmd.run',
+        ['calicoctl node status'],
+        expr_form='pillar'
+    )
+    errors = []
+    if not result:
+        pytest.skip("k8s is not found on this environment")
+    for node in result:
+        for line in result[node].split('\n'):
+            line = line.strip('|')
+            if 'STATE' in line or '| ' not in line:
+                continue
+            else:
+                if 'up' not in line or 'Established' not in line:
+                    errors.append(line)
+    assert not errors, 'Calico node status is not good: {}'.format(json.dumps(
+                                                                   errors,
+                                                                   indent=4))
+
+
+def test_k8s_cluster_status(local_salt_client):
+    result = local_salt_client.cmd(
+        'kubernetes:pool', 'cmd.run',
+        ['kubectl cluster-info'],
+        expr_form='pillar'
+    )
+    errors = []
+    if not result:
+        pytest.skip("k8s is not found on this environment")
+    for node in result:
+        for line in result[node].split('\n'):
+            if 'proto' in line or 'further' in line or line == '':
+                continue
+            else:
+                if 'is running' not in line:
+                    errors.append(line)
+        break
+    assert not errors, 'k8s cluster info is not good: {}'.format(json.dumps(
+                                                                 errors,
+                                                                 indent=4))
+
+
+def test_k8s_kubelet_status(local_salt_client):
+    result = local_salt_client.cmd(
+        'kubernetes:pool', 'service.status',
+        ['kubelet'],
+        expr_form='pillar'
+    )
+    errors = []
+    if not result:
+        pytest.skip("k8s is not found on this environment")
+    for node in result:
+        if not result[node]:
+            errors.append(node)
+    assert not errors, 'Kubelet is not running on these nodes: {}'.format(
+                       errors)
+
+
+def test_k8s_check_system_pods_status(local_salt_client):
+    result = local_salt_client.cmd(
+        'etcd:server', 'cmd.run',
+        ['kubectl --namespace="kube-system" get pods'],
+        expr_form='pillar'
+    )
+    errors = []
+    if not result:
+        pytest.skip("k8s is not found on this environment")
+    for node in result:
+        for line in result[node].split('\n'):
+            line = line.strip('|')
+            if 'STATUS' in line or 'proto' in line:
+                continue
+            else:
+                if 'Running' not in line:
+                    errors.append(line)
+        break
+    assert not errors, 'Some system pods are not running: {}'.format(json.dumps(
+                                                                   errors,
+                                                                   indent=4))
+
+
+def test_check_k8s_image_availability(local_salt_client):
+    # not a test actually
+    hostname = 'https://docker-dev-virtual.docker.mirantis.net/artifactory/webapp/'
+    response = os.system('curl -s --insecure {} > /dev/null'.format(hostname))
+    if response == 0:
+        print '{} is AVAILABLE'.format(hostname)
+    else:
+        print '{} IS NOT AVAILABLE'.format(hostname)
diff --git a/test_set/cvp_sanity/tests/test_mtu.py b/test_set/cvp_sanity/tests/test_mtu.py
new file mode 100644
index 0000000..9054ba3
--- /dev/null
+++ b/test_set/cvp_sanity/tests/test_mtu.py
@@ -0,0 +1,66 @@
+import pytest
+import json
+from cvp_checks import utils
+import os
+
+
+def test_mtu(local_salt_client, nodes_in_group):
+    testname = os.path.basename(__file__).split('.')[0]
+    config = utils.get_configuration()
+    skipped_ifaces = config.get(testname)["skipped_ifaces"] or \
+        ["bonding_masters", "lo", "veth", "tap", "cali", "qv", "qb", "br-int", "vxlan"]
+    total = {}
+    network_info = local_salt_client.cmd(
+        "L@"+','.join(nodes_in_group), 'cmd.run', ['ls /sys/class/net/'], expr_form='compound')
+
+    kvm_nodes = local_salt_client.cmd(
+        'salt:control', 'test.ping', expr_form='pillar').keys()
+
+    if len(network_info.keys()) < 2:
+        pytest.skip("Nothing to compare - only 1 node")
+
+    for node, ifaces_info in network_info.iteritems():
+        if node in kvm_nodes:
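+            # On KVM hosts, compare the MTU of the VM bridge interfaces
+            # reported by virsh instead of the host's /sys/class/net list.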
+            kvm_info = local_salt_client.cmd(node, 'cmd.run',
+                                             ["virsh list | "
+                                              "awk '{print $2}' | "
+                                              "xargs -n1 virsh domiflist | "
+                                              "grep -v br-pxe | grep br- | "
+                                              "awk '{print $1}'"])
+            ifaces_info = kvm_info.get(node)
+        node_ifaces = ifaces_info.split('\n')
+        ifaces = {}
+        for iface in node_ifaces:
+            for skipped_iface in skipped_ifaces:
+                if skipped_iface in iface:
+                    break
+            else:
+                iface_mtu = local_salt_client.cmd(node, 'cmd.run',
+                                                  ['cat /sys/class/'
+                                                   'net/{}/mtu'.format(iface)])
+                ifaces[iface] = iface_mtu.get(node)
+        total[node] = ifaces
+
+    nodes = []
+    mtu_data = []
+    my_set = set()
+
+    for node in total:
+        nodes.append(node)
+        my_set.update(total[node].keys())
+    for interf in my_set:
+        diff = []
+        row = []
+        for node in nodes:
+            if interf in total[node].keys():
+                diff.append(total[node][interf])
+                row.append("{}: {}".format(node, total[node][interf]))
+            else:
+                row.append("{}: No interface".format(node))
+        if diff.count(diff[0]) < len(nodes):
+            row.sort()
+            row.insert(0, interf)
+            mtu_data.append(row)
+    assert len(mtu_data) == 0, \
+        "Several problems found: {0}".format(
+        json.dumps(mtu_data, indent=4))
diff --git a/test_set/cvp_sanity/tests/test_nova_services.py b/test_set/cvp_sanity/tests/test_nova_services.py
new file mode 100644
index 0000000..8fdadd6
--- /dev/null
+++ b/test_set/cvp_sanity/tests/test_nova_services.py
@@ -0,0 +1,16 @@
+import pytest
+
+
+def test_nova_services_status(local_salt_client):
+    result = local_salt_client.cmd(
+        'keystone:server',
+        'cmd.run',
+        ['. /root/keystonercv3; nova service-list | grep "down\|disabled" | grep -v "Forced down"'],
+        expr_form='pillar')
+
+    if not result:
+        pytest.skip("Nova service or keystone:server pillar \
+        are not found on this environment.")
+
+    assert result[result.keys()[0]] == '', \
+        '''Some nova services are in the wrong state'''
diff --git a/test_set/cvp_sanity/tests/test_ntp_sync.py b/test_set/cvp_sanity/tests/test_ntp_sync.py
new file mode 100644
index 0000000..09b323f
--- /dev/null
+++ b/test_set/cvp_sanity/tests/test_ntp_sync.py
@@ -0,0 +1,28 @@
+from cvp_checks import utils
+import os
+
+
+def test_ntp_sync(local_salt_client):
+    testname = os.path.basename(__file__).split('.')[0]
+    active_nodes = utils.get_active_nodes(os.path.basename(__file__))
+    config = utils.get_configuration()
+    fail = {}
+    saltmaster_time = int(local_salt_client.cmd(
+        'salt:master',
+        'cmd.run',
+        ['date +%s'],
+        expr_form='pillar').values()[0])
+    nodes_time = local_salt_client.cmd(
+        utils.list_to_target_string(active_nodes, 'or'),
+        'cmd.run',
+        ['date +%s'],
+        expr_form='compound')
+    diff = config.get(testname)["time_deviation"] or 30
+    for node, time in nodes_time.iteritems():
+        if (int(time) - saltmaster_time) > diff or \
+                (int(time) - saltmaster_time) < -diff:
+            fail[node] = time
+
+    assert not fail, 'SaltMaster time: {}\n' \
+                     'Nodes with time mismatch:\n {}'.format(saltmaster_time,
+                                                             fail)
diff --git a/test_set/cvp_sanity/tests/test_oss.py b/test_set/cvp_sanity/tests/test_oss.py
new file mode 100644
index 0000000..58a4151
--- /dev/null
+++ b/test_set/cvp_sanity/tests/test_oss.py
@@ -0,0 +1,39 @@
+import requests
+import csv
+import json
+
+
+def test_oss_status(local_salt_client):
+    result = local_salt_client.cmd(
+        'docker:swarm:role:master',
+        'pillar.fetch',
+        ['haproxy:proxy:listen:stats:binds:address'],
+        expr_form='pillar')
+    HAPROXY_STATS_IP = [node for node in result if result[node]]
+    proxies = {"http": None, "https": None}
+    csv_result = requests.get('http://{}:9600/haproxy?stats;csv'.format(
+                              result[HAPROXY_STATS_IP[0]]),
+                              proxies=proxies).content
+    data = csv_result.lstrip('# ')
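+    # The haproxy stats CSV starts with a "# pxname,svname,..." header row;
+    # stripping the leading "# " lets csv.DictReader pick up the field names.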
+    wrong_data = []
+    list_of_services = ['aptly', 'openldap', 'gerrit', 'jenkins', 'postgresql',
+                        'pushkin', 'rundeck', 'elasticsearch']
+    for service in list_of_services:
+        check = local_salt_client.cmd(
+            '{}:client'.format(service),
+            'test.ping',
+            expr_form='pillar')
+        if check:
+            lines = [row for row in csv.DictReader(data.splitlines())
+                     if service in row['pxname']]
+            for row in lines:
+                info = "Service {0} with svname {1} and status {2}".format(
+                    row['pxname'], row['svname'], row['status'])
+                if row['svname'] == 'FRONTEND' and row['status'] != 'OPEN':
+                        wrong_data.append(info)
+                if row['svname'] != 'FRONTEND' and row['status'] != 'UP':
+                        wrong_data.append(info)
+
+    assert len(wrong_data) == 0, \
+        '''Some haproxy services are in the wrong state
+              {}'''.format(json.dumps(wrong_data, indent=4))
diff --git a/test_set/cvp_sanity/tests/test_packet_checker.py b/test_set/cvp_sanity/tests/test_packet_checker.py
new file mode 100644
index 0000000..f76c339
--- /dev/null
+++ b/test_set/cvp_sanity/tests/test_packet_checker.py
@@ -0,0 +1,87 @@
+import pytest
+import json
+
+
+def test_check_package_versions(local_salt_client, nodes_in_group):
+    output = local_salt_client.cmd("L@"+','.join(nodes_in_group),
+                                   'lowpkg.list_pkgs',
+                                   expr_form='compound')
+    # Let's exclude cid01 and dbs01 nodes from this check
+    exclude_nodes = local_salt_client.cmd("I@galera:master or I@gerrit:client",
+                                          'test.ping',
+                                          expr_form='compound').keys()
+    total_nodes = [i for i in output.keys() if i not in exclude_nodes]
+    if len(total_nodes) < 2:
+        pytest.skip("Nothing to compare - only 1 node")
+
+    nodes = []
+    pkts_data = []
+    my_set = set()
+
+    for node in total_nodes:
+        nodes.append(node)
+        my_set.update(output[node].keys())
+
+    for deb in my_set:
+        diff = []
+        row = []
+        for node in nodes:
+            if deb in output[node].keys():
+                diff.append(output[node][deb])
+                row.append("{}: {}".format(node, output[node][deb]))
+            else:
+                row.append("{}: No package".format(node))
+        if diff.count(diff[0]) < len(nodes):
+            row.sort()
+            row.insert(0, deb)
+            pkts_data.append(row)
+    assert len(pkts_data) <= 1, \
+        "Several problems found: {0}".format(
+        json.dumps(pkts_data, indent=4))
+
+
+def test_check_module_versions(local_salt_client, nodes_in_group):
+    pre_check = local_salt_client.cmd(
+        "L@"+','.join(nodes_in_group),
+        'cmd.run',
+        ['dpkg -l | grep "python-pip "'],
+        expr_form='compound')
+    if pre_check.values().count('') > 0:
+        pytest.skip("pip is not installed on one or more nodes")
+
+    exclude_nodes = local_salt_client.cmd("I@galera:master or I@gerrit:client",
+                                          'test.ping',
+                                          expr_form='compound').keys()
+    total_nodes = [i for i in pre_check.keys() if i not in exclude_nodes]
+
+    if len(total_nodes) < 2:
+        pytest.skip("Nothing to compare - only 1 node")
+    output = local_salt_client.cmd("L@"+','.join(nodes_in_group),
+                                   'pip.freeze', expr_form='compound')
+
+    nodes = []
+
+    pkts_data = []
+    my_set = set()
+
+    for node in total_nodes:
+        nodes.append(node)
+        my_set.update([x.split("=")[0] for x in output[node]])
+        output[node] = dict([x.split("==") for x in output[node]])
+
+    for deb in my_set:
+        diff = []
+        row = []
+        for node in nodes:
+            if deb in output[node].keys():
+                diff.append(output[node][deb])
+                row.append("{}: {}".format(node, output[node][deb]))
+            else:
+                row.append("{}: No module".format(node))
+        if diff.count(diff[0]) < len(nodes):
+            row.sort()
+            row.insert(0, deb)
+            pkts_data.append(row)
+    assert len(pkts_data) <= 1, \
+        "Several problems found: {0}".format(
+        json.dumps(pkts_data, indent=4))
diff --git a/test_set/cvp_sanity/tests/test_rabbit_cluster.py b/test_set/cvp_sanity/tests/test_rabbit_cluster.py
new file mode 100644
index 0000000..daae7ce
--- /dev/null
+++ b/test_set/cvp_sanity/tests/test_rabbit_cluster.py
@@ -0,0 +1,44 @@
+from cvp_checks import utils
+
+
+def test_checking_rabbitmq_cluster(local_salt_client):
+    # the configuration is only used here to honour the skipped_nodes setting
+    config = utils.get_configuration()
+    # request pillar data from rmq nodes
+    rabbitmq_pillar_data = local_salt_client.cmd(
+        'rabbitmq:server', 'pillar.data',
+        ['rabbitmq:cluster'], expr_form='pillar')
+    # creating dictionary {node:cluster_size_for_the_node}
+    # with required cluster size for each node
+    control_dict = {}
+    required_cluster_size_dict = {}
+    # request actual data from rmq nodes
+    rabbit_actual_data = local_salt_client.cmd(
+        'rabbitmq:server', 'cmd.run',
+        ['rabbitmqctl cluster_status'], expr_form='pillar')
+    for node in rabbitmq_pillar_data:
+        if node in (config.get('skipped_nodes') or []):
+            del rabbit_actual_data[node]
+            continue
+        cluster_size_from_the_node = len(
+            rabbitmq_pillar_data[node]['rabbitmq:cluster']['members'])
+        required_cluster_size_dict.update({node: cluster_size_from_the_node})
+
+    # find actual cluster size for each node
+    for node in rabbit_actual_data:
+        running_nodes_count = 0
+        # rabbitmqctl cluster_status output contains
+        # 3 * # of nodes 'rabbit@' entries + 1
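+        # e.g. with the assumption above, a healthy 3-node cluster gives
+        # 10 occurrences of 'rabbit@', so (10 - 1) / 3 == 3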
+        running_nodes_count = (rabbit_actual_data[node].count('rabbit@') - 1)/3
+        # update control dictionary with values
+        # {node:actual_cluster_size_for_node}
+        if required_cluster_size_dict[node] != running_nodes_count:
+            control_dict.update({node: running_nodes_count})
+
+    assert not len(control_dict), "Inconsistency found within cloud. " \
+                                  "RabbitMQ cluster is probably broken, " \
+                                  "the cluster size for each node " \
+                                  "should be: {} but the following " \
+                                  "nodes has other values: {}".format(
+        len(required_cluster_size_dict.keys()), control_dict)
diff --git a/test_set/cvp_sanity/tests/test_repo_list.py b/test_set/cvp_sanity/tests/test_repo_list.py
new file mode 100644
index 0000000..0e35f37
--- /dev/null
+++ b/test_set/cvp_sanity/tests/test_repo_list.py
@@ -0,0 +1,59 @@
+import json
+import pytest
+from cvp_checks import utils
+
+
+def test_list_of_repo_on_nodes(local_salt_client, nodes_in_group):
+    info_salt = local_salt_client.cmd('L@' + ','.join(
+                                      nodes_in_group),
+                                      'pillar.data', ['linux:system:repo'],
+                                      expr_form='compound')
+
+    # drop disabled repositories from the expected (pillar) data
+    for node in info_salt.keys():
+        repos = info_salt[node]["linux:system:repo"]
+        for repo in repos.keys():
+            repository = repos[repo]
+            if "enabled" in repository:
+                if not repository["enabled"]:
+                    repos.pop(repo)
+
+    raw_actual_info = local_salt_client.cmd(
+        'L@' + ','.join(
+        nodes_in_group),
+        'cmd.run',
+        ['cat /etc/apt/sources.list.d/*;'
+         'cat /etc/apt/sources.list|grep deb|grep -v "#"'],
+        expr_form='compound')
+    actual_repo_list = [item.replace('/ ', ' ').replace('[arch=amd64] ', '')
+                        for item in raw_actual_info.values()[0].split('\n')]
+    if info_salt.values()[0]['linux:system:repo'] == '':
+        expected_salt_data = ''
+    else:
+        expected_salt_data = [repo['source'].replace('/ ', ' ')
+                                            .replace('[arch=amd64] ', '')
+                              for repo in info_salt.values()[0]
+                              ['linux:system:repo'].values()
+                              if 'source' in repo.keys()]
+
+    diff = {}
+    my_set = set()
+    fail_counter = 0
+    my_set.update(actual_repo_list)
+    my_set.update(expected_salt_data)
+    for repo in my_set:
+        rows = []
+        if repo not in actual_repo_list:
+            rows.append("{}: {}".format("pillars", "+"))
+            rows.append("{}: No repo".format('config'))
+            diff[repo] = rows
+            fail_counter += 1
+        elif repo not in expected_salt_data:
+            rows.append("{}: {}".format("config", "+"))
+            rows.append("{}: No repo".format('pillars'))
+            diff[repo] = rows
+    assert fail_counter == 0, \
+        "Several problems found: {0}".format(
+            json.dumps(diff, indent=4))
+    if fail_counter == 0 and len(diff) > 0:
+        print "\nWarning: nodes contain more repos than reclass"
diff --git a/test_set/cvp_sanity/tests/test_salt_master.py b/test_set/cvp_sanity/tests/test_salt_master.py
new file mode 100644
index 0000000..7649767
--- /dev/null
+++ b/test_set/cvp_sanity/tests/test_salt_master.py
@@ -0,0 +1,20 @@
+def test_uncommitted_changes(local_salt_client):
+    git_status = local_salt_client.cmd(
+        'salt:master',
+        'cmd.run',
+        ['cd /srv/salt/reclass/classes/cluster/; git status'],
+        expr_form='pillar')
+    assert 'nothing to commit' in git_status.values()[0], 'Git status showed ' \
+           'some uncommitted changes: {}'.format(git_status.values()[0])
+
+
+def test_reclass_smoke(local_salt_client):
+    reclass = local_salt_client.cmd(
+        'salt:master',
+        'cmd.run',
+        ['reclass-salt --top; echo $?'],
+        expr_form='pillar')
+    result = reclass[reclass.keys()[0]][-1]
+
+    assert result == '0', 'Reclass is broken' \
+                          '\n {}'.format(reclass)
diff --git a/test_set/cvp_sanity/tests/test_services.py b/test_set/cvp_sanity/tests/test_services.py
new file mode 100644
index 0000000..89794c1
--- /dev/null
+++ b/test_set/cvp_sanity/tests/test_services.py
@@ -0,0 +1,114 @@
+import pytest
+import json
+import os
+from cvp_checks import utils
+
+# Some nodes can have services that are not applicable to other nodes in the same group.
+# For example, there are 3 nodes in the kvm group, but only kvm03 has the
+# srv-volumes-backup.mount service in service.get_all
+#                        NODE NAME          SERVICE_NAME
+inconsistency_rule = {"kvm03": ["srv-volumes-backup.mount"]}
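+# Additional node-specific exceptions can be added in the same form, e.g. (hypothetical):
+# inconsistency_rule = {"kvm03": ["srv-volumes-backup.mount"],
+#                       "ctl01": ["some-node-specific.service"]}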
+
+
+def test_check_services(local_salt_client, nodes_in_group):
+    """
+    Skips services if they are not consistent for all node.
+    Inconsistent services will be checked with another test case
+    """
+    output = local_salt_client.cmd("L@"+','.join(nodes_in_group), 'service.get_all', expr_form='compound')
+
+    if len(output.keys()) < 2:
+        pytest.skip("Nothing to compare - only 1 node")
+
+    nodes = []
+    pkts_data = []
+    my_set = set()
+
+    for node in output:
+        nodes.append(node)
+        my_set.update(output[node])
+
+    for srv in my_set:
+        diff = []
+        row = []
+        for node in nodes:
+            short_name_of_node = node.split('.')[0]
+            if inconsistency_rule.get(short_name_of_node) is not None and srv in inconsistency_rule[short_name_of_node]:
+                # Found service on node and it SHOULD be there
+                break
+            elif srv in output[node]:
+                # Found service on node
+                diff.append(srv)
+                row.append("{}: +".format(node))
+            else:
+                # Not found expected service on node
+                row.append("{}: No service".format(node))
+        if len(diff) > 0 and diff.count(diff[0]) < len(nodes):
+            row.sort()
+            row.insert(0, srv)
+            pkts_data.append(row)
+    assert len(pkts_data) == 0, \
+        "Several problems found: {0}".format(
+        json.dumps(pkts_data, indent=4))
+
+
+# TODO : remake this test to make workable https://mirantis.jira.com/browse/PROD-25958
+
+# def _check_services_on_special_node(local_salt_client, nodes_in_group):
+#     """
+#     Check that specific node has service.
+#     Nodes and proper services should be defined in inconsistency_rule dictionary
+#
+#     :print: Table with nodes which don't have required services and not existed services
+#     """
+#
+#     output = local_salt_client.cmd("L@" + ','.join(nodes_in_group), 'service.get_all', expr_form='compound')
+#     if len(output.keys()) < 2:
+#         pytest.skip("Nothing to compare - just 1 node")
+#
+#     def is_proper_service_for_node(_service, _node):
+#         """
+#         Return True if service exists on node and exists in inconsistency_rule
+#         Return True if service doesn't exist on node and doesn't exist in inconsistency_rule
+#         Return False otherwise
+#         :param _service: string
+#         :param _node: string full name of node
+#         :return: bool, read description for further details
+#         """
+#         short_name_of_node = _node.split('.')[0]
+#         if short_name_of_node not in inconsistency_rule.keys():
+#             return False
+#
+#         if _service in inconsistency_rule[short_name_of_node] and \
+#                 _service in output[_node]:
+#             # Return True if service exists on node and exists in inconsistency_rule
+#             return True
+#
+#         if _service not in inconsistency_rule[short_name_of_node] and \
+#                 _service not in output[_node]:
+#             # Return True if service doesn't exist on node and doesn't exist in inconsistency_rule
+#             return True
+#         print("return False for {} in {}".format(_service, _node))
+#         # error_text = ""
+#         return False
+#
+#     errors = list()
+#     for node, expected_services in inconsistency_rule.items():
+#         print("Check {} , {} ".format(node, expected_services))
+#         # Skip if there is no proper node. Find nodes that contains node_title (like 'kvm03') in their titles
+#         if not any([node in node_name for node_name in output.keys()]):
+#             continue
+#         for expected_service in expected_services:
+#             service_on_nodes = {_node: expected_service if expected_service in _service else None
+#                                 for _node, _service
+#                                 in output.items()}
+#             print([is_proper_service_for_node(expected_service, _node)
+#                   for _node
+#                   in output.keys()])
+#             if not all([is_proper_service_for_node(expected_service, _node)
+#                         for _node
+#                         in output.keys()]):
+#                 errors.append(service_on_nodes)
+#
+#     assert errors.__len__() == 0, json.dumps(errors, indent=4)
+#     assert False
diff --git a/test_set/cvp_sanity/tests/test_single_vip.py b/test_set/cvp_sanity/tests/test_single_vip.py
new file mode 100644
index 0000000..fe6cb5f
--- /dev/null
+++ b/test_set/cvp_sanity/tests/test_single_vip.py
@@ -0,0 +1,24 @@
+import pytest
+from cvp_checks import utils
+import os
+from collections import Counter
+
+
+def test_single_vip(local_salt_client, nodes_in_group):
+    local_salt_client.cmd("L@"+','.join(nodes_in_group), 'saltutil.sync_all', expr_form='compound')
+    nodes_list = local_salt_client.cmd(
+        "L@"+','.join(nodes_in_group), 'grains.item', ['ipv4'], expr_form='compound')
+
+    ipv4_list = []
+
+    for node in nodes_list:
+        ipv4_list.extend(nodes_list.get(node).get('ipv4'))
+
+    cnt = Counter(ipv4_list)
+
+    for ip in cnt:
+        if ip == '127.0.0.1':
+            continue
+        elif cnt[ip] > 1:
+            assert False, "VIP IP duplicate found " \
+                          "\n{}".format(ipv4_list)
diff --git a/test_set/cvp_sanity/tests/test_stacklight.py b/test_set/cvp_sanity/tests/test_stacklight.py
new file mode 100644
index 0000000..ec6ed40
--- /dev/null
+++ b/test_set/cvp_sanity/tests/test_stacklight.py
@@ -0,0 +1,200 @@
+import json
+import requests
+import datetime
+import pytest
+from cvp_checks import utils
+
+
+@pytest.mark.usefixtures('check_kibana')
+def test_elasticsearch_cluster(local_salt_client):
+    salt_output = local_salt_client.cmd(
+        'kibana:server',
+        'pillar.get',
+        ['_param:haproxy_elasticsearch_bind_host'],
+        expr_form='pillar')
+
+    proxies = {"http": None, "https": None}
+    for node in salt_output.keys():
+        IP = salt_output[node]
+        assert requests.get('http://{}:9200/'.format(IP),
+                            proxies=proxies).status_code == 200, \
+            'Cannot check elasticsearch url on {}.'.format(IP)
+        resp = requests.get('http://{}:9200/_cat/health'.format(IP),
+                            proxies=proxies).content
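+        # _cat/health returns whitespace-separated columns; the indices below
+        # assume the default column order (status at 3, node.total at 4,
+        # node.data at 5, unassign at 10, active_shards_percent at 13).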
+        assert resp.split()[3] == 'green', \
+            'elasticsearch status is not good {}'.format(
+            json.dumps(resp, indent=4))
+        assert resp.split()[4] == '3', \
+            'elasticsearch status is not good {}'.format(
+            json.dumps(resp, indent=4))
+        assert resp.split()[5] == '3', \
+            'elasticsearch status is not good {}'.format(
+            json.dumps(resp, indent=4))
+        assert resp.split()[10] == '0', \
+            'elasticsearch status is not good {}'.format(
+            json.dumps(resp, indent=4))
+        assert resp.split()[13] == '100.0%', \
+            'elasticsearch status is not good {}'.format(
+            json.dumps(resp, indent=4))
+
+
+@pytest.mark.usefixtures('check_kibana')
+def test_kibana_status(local_salt_client):
+    proxies = {"http": None, "https": None}
+    IP = utils.get_monitoring_ip('stacklight_log_address')
+    resp = requests.get('http://{}:5601/api/status'.format(IP),
+                        proxies=proxies).content
+    body = json.loads(resp)
+    assert body['status']['overall']['state'] == "green", \
+        "Kibana status is not expected: {}".format(
+        body['status']['overall'])
+    for i in body['status']['statuses']:
+        assert i['state'] == "green", \
+            "Kibana statuses are unexpected: {}".format(i)
+
+
+@pytest.mark.usefixtures('check_kibana')
+def test_elasticsearch_node_count(local_salt_client):
+    now = datetime.datetime.now()
+    today = now.strftime("%Y.%m.%d")
+    active_nodes = utils.get_active_nodes()
+    salt_output = local_salt_client.cmd(
+        'kibana:server',
+        'pillar.get',
+        ['_param:haproxy_elasticsearch_bind_host'],
+        expr_form='pillar')
+
+    IP = salt_output.values()[0]
+    headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
+    proxies = {"http": None, "https": None}
+    data = ('{"size": 0, "aggs": '
+            '{"uniq_hostname": '
+            '{"terms": {"size": 500, '
+            '"field": "Hostname.keyword"}}}}')
+    response = requests.post(
+        'http://{0}:9200/log-{1}/_search?pretty'.format(IP, today),
+        proxies=proxies,
+        headers=headers,
+        data=data)
+    assert 200 == response.status_code, 'Unexpected code {}'.format(
+        response.text)
+    resp = json.loads(response.text)
+    cluster_domain = local_salt_client.cmd('salt:control',
+                                           'pillar.get',
+                                           ['_param:cluster_domain'],
+                                           expr_form='pillar').values()[0]
+    monitored_nodes = []
+    for item_ in resp['aggregations']['uniq_hostname']['buckets']:
+        node_name = item_['key']
+        monitored_nodes.append(node_name + '.' + cluster_domain)
+    missing_nodes = []
+    for node in active_nodes.keys():
+        if node not in monitored_nodes:
+            missing_nodes.append(node)
+    assert len(missing_nodes) == 0, \
+        'Not all nodes are in Elasticsearch. Found {0} keys, ' \
+        'expected {1}. Missing nodes: \n{2}'. \
+            format(len(monitored_nodes), len(active_nodes), missing_nodes)
+
+
+def test_stacklight_services_replicas(local_salt_client):
+    # TODO
+    # change to docker:swarm:role:master ?
+    salt_output = local_salt_client.cmd(
+        'I@docker:client:stack:monitoring and I@prometheus:server',
+        'cmd.run',
+        ['docker service ls'],
+        expr_form='compound')
+
+    if not salt_output:
+        pytest.skip("docker:client:stack:monitoring or \
+        prometheus:server pillars are not found on this environment.")
+
+    wrong_items = []
+    for line in salt_output[salt_output.keys()[0]].split('\n'):
+        if line[line.find('/') - 1] != line[line.find('/') + 1] \
+           and 'replicated' in line:
+            wrong_items.append(line)
+    assert len(wrong_items) == 0, \
+        '''Some monitoring services don't have the expected number of replicas:
+              {}'''.format(json.dumps(wrong_items, indent=4))
+
+
+@pytest.mark.usefixtures('check_prometheus')
+def test_prometheus_alert_count(local_salt_client):
+    IP = utils.get_monitoring_ip('cluster_public_host')
+    # keystone:server can return 3 nodes instead of 1
+    # this will be fixed later
+    # TODO
+    nodes_info = local_salt_client.cmd(
+        'keystone:server',
+        'cmd.run',
+        ['curl -s http://{}:15010/alerts | grep icon-chevron-down | '
+         'grep -v "0 active"'.format(IP)],
+        expr_form='pillar')
+
+    result = nodes_info[nodes_info.keys()[0]].replace('</td>', '').replace(
+        '<td><i class="icon-chevron-down"></i> <b>', '').replace('</b>', '')
+    assert result == '', 'AlertManager page has some alerts! {}'.format(
+                         json.dumps(result, indent=4))
+
+
+def test_stacklight_containers_status(local_salt_client):
+    salt_output = local_salt_client.cmd(
+        'I@docker:swarm:role:master and I@prometheus:server',
+        'cmd.run',
+        ['docker service ps $(docker stack services -q monitoring)'],
+        expr_form='compound')
+
+    if not salt_output:
+        pytest.skip("docker:swarm:role:master or prometheus:server \
+        pillars are not found on this environment.")
+
+    result = {}
+    # for old reclass models, docker:swarm:role:master can return
+    # 2 nodes instead of one. This is a temporary fix.
+    # TODO
+    if len(salt_output.keys()) > 1:
+        if 'CURRENT STATE' not in salt_output[salt_output.keys()[0]]:
+            del salt_output[salt_output.keys()[0]]
+    for line in salt_output[salt_output.keys()[0]].split('\n')[1:]:
+        shift = 0
+        if line.split()[1] == '\\_':
+            shift = 1
+        if line.split()[1 + shift] not in result.keys():
+            result[line.split()[1 + shift]] = 'NOT OK'
+        if line.split()[4 + shift] == 'Running' \
+           or line.split()[4 + shift] == 'Ready':
+            result[line.split()[1 + shift]] = 'OK'
+    assert 'NOT OK' not in result.values(), \
+        '''Some containers are in incorrect state:
+              {}'''.format(json.dumps(result, indent=4))
+
+
+def test_running_telegraf_services(local_salt_client):
+    salt_output = local_salt_client.cmd('telegraf:agent',
+                                        'service.status',
+                                        'telegraf',
+                                        expr_form='pillar')
+
+    if not salt_output:
+        pytest.skip("Telegraf or telegraf:agent \
+        pillar are not found on this environment.")
+
+    result = [{node: status} for node, status
+              in salt_output.items()
+              if status is False]
+    assert result == [], 'Telegraf service is not running ' \
+                         'on the following nodes: {}'.format(result)
+
+
+def test_running_fluentd_services(local_salt_client):
+    salt_output = local_salt_client.cmd('fluentd:agent',
+                                        'service.status',
+                                        'td-agent',
+                                        expr_form='pillar')
+    result = [{node: status} for node, status
+              in salt_output.items()
+              if status is False]
+    assert result == [], 'Fluentd check failed: td-agent service is not ' \
+                         'running on the following nodes: {}'.format(result)
diff --git a/test_set/cvp_sanity/tests/test_ui_addresses.py b/test_set/cvp_sanity/tests/test_ui_addresses.py
new file mode 100644
index 0000000..82bcae1
--- /dev/null
+++ b/test_set/cvp_sanity/tests/test_ui_addresses.py
@@ -0,0 +1,73 @@
+from cvp_checks import utils
+import pytest
+
+
+def test_ui_horizon(local_salt_client):
+    salt_output = local_salt_client.cmd(
+        'horizon:server',
+        'pillar.get',
+        ['_param:cluster_public_host'],
+        expr_form='pillar')
+    if not salt_output:
+        pytest.skip("Horizon is not enabled on this environment")
+    IP = [salt_output[node] for node in salt_output
+          if salt_output[node]]
+    result = local_salt_client.cmd(
+        'keystone:server',
+        'cmd.run',
+        ['curl --insecure https://{}/auth/login/ 2>&1 | \
+         grep Login'.format(IP[0])],
+        expr_form='pillar')
+    assert len(result[result.keys()[0]]) != 0, \
+        'Horizon login page is not reachable on {} from ctl nodes'.format(
+        IP[0])
+
+
+@pytest.mark.usefixtures('check_kibana')
+def test_ui_kibana(local_salt_client):
+    IP = utils.get_monitoring_ip('stacklight_log_address')
+    result = local_salt_client.cmd(
+        'keystone:server',
+        'cmd.run',
+        ['curl http://{}:5601/app/kibana 2>&1 | \
+         grep loading'.format(IP)],
+        expr_form='pillar')
+    assert len(result[result.keys()[0]]) != 0, \
+        'Kibana login page is not reachable on {} from ctl nodes'.format(IP)
+
+
+@pytest.mark.usefixtures('check_prometheus')
+def test_ui_prometheus(local_salt_client):
+    IP = utils.get_monitoring_ip('stacklight_monitor_address')
+    result = local_salt_client.cmd(
+        'keystone:server',
+        'cmd.run',
+        ['curl http://{}:15010/graph 2>&1 | \
+         grep Prometheus'.format(IP)],
+        expr_form='pillar')
+    assert len(result[result.keys()[0]]) != 0, \
+        'Prometheus page is not reachable on {} from ctl nodes'.format(IP)
+
+
+@pytest.mark.usefixtures('check_prometheus')
+def test_ui_alert_manager(local_salt_client):
+    IP = utils.get_monitoring_ip('stacklight_monitor_address')
+    result = local_salt_client.cmd(
+        'keystone:server',
+        'cmd.run',
+        ['curl -s http://{}:15011/ | grep Alertmanager'.format(IP)],
+        expr_form='pillar')
+    assert len(result[result.keys()[0]]) != 0, \
+        'AlertManager page is not reachable on {} from ctl nodes'.format(IP)
+
+
+@pytest.mark.usefixtures('check_grafana')
+def test_ui_grafana(local_salt_client):
+    IP = utils.get_monitoring_ip('stacklight_monitor_address')
+    result = local_salt_client.cmd(
+        'keystone:server',
+        'cmd.run',
+        ['curl http://{}:15013/login 2>&1 | grep Grafana'.format(IP)],
+        expr_form='pillar')
+    assert len(result[result.keys()[0]]) != 0, \
+        'Grafana page is not reachable on {} from ctl nodes'.format(IP)
diff --git a/test_set/cvp_sanity/utils/__init__.py b/test_set/cvp_sanity/utils/__init__.py
new file mode 100644
index 0000000..aeb4cd8
--- /dev/null
+++ b/test_set/cvp_sanity/utils/__init__.py
@@ -0,0 +1,171 @@
+import os
+import yaml
+import requests
+import re
+import sys, traceback
+
+
+class AuthenticationError(Exception):
+    pass
+
+
+class salt_remote:
+    def cmd(self, tgt, fun, param=None, expr_form=None, tgt_type=None):
+        config = get_configuration()
+        url = config['SALT_URL'].strip()
+        if not re.match("^(http|https)://", url):
+            raise AuthenticationError("Salt URL should start \
+            with http or https, given - {}".format(url))
+        proxies = {"http": None, "https": None}
+        headers = {'Accept': 'application/json'}
+        login_payload = {'username': config['SALT_USERNAME'],
+                         'password': config['SALT_PASSWORD'], 'eauth': 'pam'}
+        accept_key_payload = {'fun': fun, 'tgt': tgt, 'client': 'local',
+                              'expr_form': expr_form, 'tgt_type': tgt_type,
+                              'timeout': config['salt_timeout']}
+        if param:
+            accept_key_payload['arg'] = param
+
+        try:
+            login_request = requests.post(os.path.join(url, 'login'),
+                                          headers=headers, data=login_payload,
+                                          proxies=proxies)
+            if not login_request.ok:
+                raise AuthenticationError("Authentication to SaltMaster failed")
+
+            request = requests.post(url, headers=headers,
+                                    data=accept_key_payload,
+                                    cookies=login_request.cookies,
+                                    proxies=proxies)
+
+            response = request.json()['return'][0]
+            return response
+
+        except Exception as e:
+            print ("\033[91m\nConnection to SaltMaster "
+                  "was not established.\n"
+                  "Please make sure that you "
+                  "provided correct credentials.\n"
+                  "Error message: {}\033[0m\n".format(e.message or e)
+            )
+            traceback.print_exc(file=sys.stdout)
+            sys.exit()
+
+
+def init_salt_client():
+    local = salt_remote()
+    return local
+
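+# Typical usage (mirrors the calls made by the tests above; the target and
+# function here are just examples taken from this change):
+#   local_salt_client = init_salt_client()
+#   local_salt_client.cmd('salt:master', 'test.ping', expr_form='pillar')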
+
+def list_to_target_string(node_list, separator, add_spaces=True):
+    if add_spaces:
+        separator = ' ' + separator.strip() + ' '
+    return separator.join(node_list)
+
+
+def get_monitoring_ip(param_name):
+    local_salt_client = init_salt_client()
+    salt_output = local_salt_client.cmd(
+        'salt:master',
+        'pillar.get',
+        ['_param:{}'.format(param_name)],
+        expr_form='pillar')
+    return salt_output[salt_output.keys()[0]]
+
+
+def get_active_nodes(test=None):
+    config = get_configuration()
+    local_salt_client = init_salt_client()
+
+    skipped_nodes = config.get('skipped_nodes') or []
+    if test:
+        testname = test.split('.')[0]
+        if config.get(testname) and 'skipped_nodes' in config.get(testname).keys():
+            skipped_nodes += config.get(testname)['skipped_nodes'] or []
+    if skipped_nodes != ['']:
+        print "\nNotice: {0} nodes will be skipped".format(skipped_nodes)
+        nodes = local_salt_client.cmd(
+            '* and not ' + list_to_target_string(skipped_nodes, 'and not'),
+            'test.ping',
+            expr_form='compound')
+    else:
+        nodes = local_salt_client.cmd('*', 'test.ping')
+    return nodes
+
+
+def calculate_groups():
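+    """Split all minions into groups for group-wise comparisons.
+
+    Groups either come from the 'groups' section of the config (when the
+    PB_GROUPS env variable is enabled) or are derived from node-name prefixes,
+    e.g. 'cmp001.domain' and 'cmp002.domain' both land in a 'cmp' group
+    (node names here are hypothetical). salt:control (KVM) nodes are collected
+    separately into the 'kvm' / 'kvm_gluster' groups.
+    """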
+    config = get_configuration()
+    local_salt_client = init_salt_client()
+    node_groups = {}
+    nodes_names = set()
+    expr_form = ''
+    all_nodes = set(local_salt_client.cmd('*', 'test.ping'))
+    if 'groups' in config.keys() and 'PB_GROUPS' in os.environ.keys() and \
+       os.environ['PB_GROUPS'].lower() != 'false':
+        nodes_names.update(config['groups'].keys())
+        expr_form = 'compound'
+    else:
+        for node in all_nodes:
+            index = re.search('[0-9]{1,3}$', node.split('.')[0])
+            if index:
+                nodes_names.add(node.split('.')[0][:-len(index.group(0))])
+            else:
+                nodes_names.add(node)
+        expr_form = 'pcre'
+
+    gluster_nodes = local_salt_client.cmd('I@salt:control and '
+                                          'I@glusterfs:server',
+                                          'test.ping', expr_form='compound')
+    kvm_nodes = local_salt_client.cmd('I@salt:control and not '
+                                      'I@glusterfs:server',
+                                      'test.ping', expr_form='compound')
+
+    for node_name in nodes_names:
+        skipped_groups = config.get('skipped_groups') or []
+        if node_name in skipped_groups:
+            continue
+        if expr_form == 'pcre':
+            nodes = local_salt_client.cmd('{}[0-9]{{1,3}}'.format(node_name),
+                                          'test.ping',
+                                          expr_form=expr_form)
+        else:
+            nodes = local_salt_client.cmd(config['groups'][node_name],
+                                          'test.ping',
+                                          expr_form=expr_form)
+            if nodes == {}:
+                continue
+
+        node_groups[node_name] = [x for x in nodes
+                                if x not in config['skipped_nodes']
+                                if x not in gluster_nodes.keys()
+                                if x not in kvm_nodes.keys()]
+        all_nodes = set(all_nodes - set(node_groups[node_name]))
+        if node_groups[node_name] == []:
+            del node_groups[node_name]
+            if kvm_nodes:
+                node_groups['kvm'] = kvm_nodes.keys()
+            node_groups['kvm_gluster'] = gluster_nodes.keys()
+    all_nodes = set(all_nodes - set(kvm_nodes.keys()))
+    all_nodes = set(all_nodes - set(gluster_nodes.keys()))
+    if all_nodes:
+        print ("These nodes were not collected {0}. Check config (groups section)".format(all_nodes))
+    return node_groups
+
+
+def get_configuration():
+    """function returns configuration for environment
+    and for test if it's specified"""
+    global_config_file = os.path.join(
+        os.path.dirname(os.path.abspath(__file__)), "../global_config.yaml")
+    with open(global_config_file, 'r') as file:
+        global_config = yaml.load(file)
+    for param in global_config.keys():
+        if param in os.environ.keys():
+            if ',' in os.environ[param]:
+                global_config[param] = []
+                for item in os.environ[param].split(','):
+                    global_config[param].append(item)
+            else:
+                global_config[param] = os.environ[param]
+
+    return global_config
diff --git a/test_set/cvp_spt/README.md b/test_set/cvp_spt/README.md
new file mode 100644
index 0000000..00a200a
--- /dev/null
+++ b/test_set/cvp_spt/README.md
@@ -0,0 +1,5 @@
+# cvp-spt
+Environment variables
+--
+* Set *keystone_api_version* to the required Keystone API version (e.g. keystone_api_version=2).
+  Otherwise it defaults to '3'.
diff --git a/test_set/cvp_spt/__init__.py b/test_set/cvp_spt/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test_set/cvp_spt/__init__.py
diff --git a/test_set/cvp_spt/fixtures/__init__.py b/test_set/cvp_spt/fixtures/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test_set/cvp_spt/fixtures/__init__.py
diff --git a/test_set/cvp_spt/fixtures/base.py b/test_set/cvp_spt/fixtures/base.py
new file mode 100644
index 0000000..b4c07ac
--- /dev/null
+++ b/test_set/cvp_spt/fixtures/base.py
@@ -0,0 +1,99 @@
+import pytest
+import cvp_spt.utils as utils
+import random
+import time
+from cvp_spt.utils import os_client
+
+@pytest.fixture(scope='session')
+def local_salt_client():
+    return utils.init_salt_client()
+
+
+# TODO: fix
+# should not be executed on any test run
+nodes = utils.get_pairs()
+hw_nodes = utils.get_hw_pairs()
+
+
+@pytest.fixture(scope='session', params=nodes.values(), ids=nodes.keys())
+def pair(request):
+    return request.param
+
+
+@pytest.fixture(scope='session', params=hw_nodes.values(), ids=hw_nodes.keys())
+def hw_pair(request):
+    return request.param
+
+
+@pytest.fixture(scope='session')
+def openstack_clients(local_salt_client):
+    nodes_info = local_salt_client.cmd(
+        'keystone:server', 'pillar.get',
+        ['keystone:server'],
+        expr_form='pillar')
+    if len(nodes_info) < 1:
+        pytest.skip("No keystone server found")
+    keystone = nodes_info[nodes_info.keys()[0]]
+    url = 'http://{ip}:{port}/'.format(ip=keystone['bind']['public_address'],
+                                       port=keystone['bind']['public_port'])
+    return os_client.OfficialClientManager(
+        username=keystone['admin_name'],
+        password=keystone['admin_password'],
+        tenant_name=keystone['admin_tenant'],
+        auth_url=url,
+        cert=False,
+        domain='Default',
+        )
+
+
+@pytest.fixture(scope='session')
+def os_resources(openstack_clients):
+    os_actions = os_client.OSCliActions(openstack_clients)
+    os_resource = {}
+    config = utils.get_configuration()
+    image_name = config.get('image_name') or 'Ubuntu'
+
+    os_images_list = [image.id for image in openstack_clients.image.images.list(filters={'name': image_name})]
+    if len(os_images_list) == 0:
+        print "No images with name {}. This name can be redefined with the 'image_name' env var".format(image_name)
+        exit(1)
+
+    os_resource['image_id'] = str(os_images_list[0])
+
+    os_resource['flavor_id'] = [flavor.id for flavor in openstack_clients.compute.flavors.list() if flavor.name == 'spt-test']
+    if not os_resource['flavor_id']:
+        os_resource['flavor_id'] = os_actions.create_flavor('spt-test', 1536, 1, 3).id
+    else:
+        os_resource['flavor_id'] = str(os_resource['flavor_id'][0])
+
+    os_resource['sec_group'] = os_actions.create_sec_group()
+    os_resource['keypair'] = openstack_clients.compute.keypairs.create('spt-test-{}'.format(random.randrange(100, 999)))
+    os_resource['net1'] = os_actions.create_network_resources()
+    os_resource['ext_net'] = os_actions.get_external_network()
+    adm_tenant = os_actions.get_admin_tenant()
+    os_resource['router'] = os_actions.create_router(os_resource['ext_net'], adm_tenant.id)
+    os_resource['net2'] = os_actions.create_network(adm_tenant.id)
+    os_resource['subnet2'] = os_actions.create_subnet(os_resource['net2'], adm_tenant, '10.2.7.0/24')
+    for subnet in openstack_clients.network.list_subnets()['subnets']:
+        if subnet['network_id'] == os_resource['net1']['id']:
+            os_resource['subnet1'] = subnet['id']
+
+    openstack_clients.network.add_interface_router(os_resource['router']['id'], {'subnet_id': os_resource['subnet1']})
+    openstack_clients.network.add_interface_router(os_resource['router']['id'], {'subnet_id': os_resource['subnet2']['id']})
+    yield os_resource
+    # time.sleep(5)
+    openstack_clients.network.remove_interface_router(os_resource['router']['id'], {'subnet_id': os_resource['subnet1']})
+    openstack_clients.network.remove_interface_router(os_resource['router']['id'], {'subnet_id': os_resource['subnet2']['id']})
+    openstack_clients.network.remove_gateway_router(os_resource['router']['id'])
+    time.sleep(5)
+    openstack_clients.network.delete_router(os_resource['router']['id'])
+    time.sleep(5)
+    # openstack_clients.network.delete_subnet(subnet1['id'])
+    openstack_clients.network.delete_network(os_resource['net1']['id'])
+    openstack_clients.network.delete_network(os_resource['net2']['id'])
+
+    openstack_clients.compute.security_groups.delete(os_resource['sec_group'].id)
+    openstack_clients.compute.keypairs.delete(os_resource['keypair'].name)
+
+    openstack_clients.compute.flavors.delete(os_resource['flavor_id'])
diff --git a/test_set/cvp_spt/global_config.yaml b/test_set/cvp_spt/global_config.yaml
new file mode 100644
index 0000000..51af102
--- /dev/null
+++ b/test_set/cvp_spt/global_config.yaml
@@ -0,0 +1,29 @@
+---
+# MANDATORY: Credentials for Salt Master
+# SALT_URL should consist of url and port.
+# For example: http://10.0.0.1:6969
+# 6969 - default Salt Master port to listen
+# Can be found on cfg* node using
+# "salt-call pillar.get _param:salt_master_host"
+# and "salt-call pillar.get _param:salt_master_port"
+# or "salt-call pillar.get _param:jenkins_salt_api_url"
+# SALT_USERNAME by default: salt
+# It can be verified with "salt-call shadow.info salt"
+# SALT_PASSWORD you can find on cfg* node using
+# "salt-call pillar.get _param:salt_api_password"
+# or "grep -r salt_api_password /srv/salt/reclass/classes"
+SALT_URL: <URL>
+SALT_USERNAME: <USERNAME>
+SALT_PASSWORD: <PASSWORD>
+
+# How many seconds to wait for salt-minion to respond
+salt_timeout: 1
+
+image_name: "Ubuntu"
+skipped_nodes: []
+# example for Jenkins: networks=net1,net2
+networks: "10.101.0.0/24"
+HW_NODES: []
+CMP_HOSTS: []
+nova_timeout: 30
+iperf_prep_string: "sudo /bin/bash -c 'echo \"91.189.88.161        archive.ubuntu.com\" >> /etc/hosts'"
diff --git a/test_set/cvp_spt/requirements.txt b/test_set/cvp_spt/requirements.txt
new file mode 100644
index 0000000..55011ce
--- /dev/null
+++ b/test_set/cvp_spt/requirements.txt
@@ -0,0 +1,10 @@
+paramiko==2.0.0 # LGPLv2.1+
+pytest>=3.0.4  # MIT
+python-cinderclient>=1.6.0,!=1.7.0,!=1.7.1  # Apache-2.0
+python-glanceclient>=2.5.0  # Apache-2.0
+python-keystoneclient>=3.8.0  # Apache-2.0
+python-neutronclient>=5.1.0  # Apache-2.0
+python-novaclient==7.1.0
+PyYAML>=3.12  # MIT
+requests>=2.10.0,!=2.12.2 # Apache-2.0
+texttable==1.2.0
diff --git a/test_set/cvp_spt/tests/__init__.py b/test_set/cvp_spt/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test_set/cvp_spt/tests/__init__.py
diff --git a/test_set/cvp_spt/tests/conftest.py b/test_set/cvp_spt/tests/conftest.py
new file mode 100644
index 0000000..a0636ac
--- /dev/null
+++ b/test_set/cvp_spt/tests/conftest.py
@@ -0,0 +1 @@
+from cvp_spt.fixtures.base import *
diff --git a/test_set/cvp_spt/tests/test_glance.py b/test_set/cvp_spt/tests/test_glance.py
new file mode 100644
index 0000000..fd6681f
--- /dev/null
+++ b/test_set/cvp_spt/tests/test_glance.py
@@ -0,0 +1,54 @@
+import pytest
+import time
+import subprocess
+import cvp_spt.utils as utils
+
+
+@pytest.fixture
+def create_image():
+    line = 'echo "Executing dd on $(hostname -f)"; ' \
+           'dd if=/dev/zero of=/tmp/image_mk_framework.dd bs=1M count=9000 ;' \
+           'echo "Free space :" ; ' \
+           'df -H / '
+
+    subprocess.call(line.split())
+    yield
+    # teardown
+    subprocess.call('rm /tmp/image_mk_framework.dd'.split())
+    subprocess.call('rm /tmp/image_mk_framework.download'.split())
+
+
+def test_speed_glance(create_image, openstack_clients, record_property):
+    """
+    Simplified Performance Tests Download / upload lance
+    1. Step download image
+    2. Step upload image
+    """
+    image = openstack_clients.image.images.create(
+        name="test_image",
+        disk_format='iso',
+        container_format='bare')
+
+    start_time = time.time()
+    openstack_clients.image.images.upload(
+        image.id,
+        image_data=open("/tmp/image_mk_framework.dd", 'rb'))
+    end_time = time.time()
+
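+    # the test file written by create_image is 9000 MB (bs=1M count=9000),
+    # so the figures below are roughly MB/s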
+    speed_upload = 9000 / (end_time - start_time)
+
+    start_time = time.time()
+    with open("/tmp/image_mk_framework.download", 'wb') as image_file:
+        for item in openstack_clients.image.images.data(image.id):
+            image_file.write(item)
+    end_time = time.time()
+
+    speed_download = 9000 / (end_time - start_time)
+
+    openstack_clients.image.images.delete(image.id)
+    record_property("Upload", speed_upload)
+    record_property("Download", speed_download)
+
+    print "++++++++++++++++++++++++++++++++++++++++"
+    print 'upload - {} Mb/s'.format(speed_upload)
+    print 'download - {} Mb/s'.format(speed_download)
diff --git a/test_set/cvp_spt/tests/test_hw2hw.py b/test_set/cvp_spt/tests/test_hw2hw.py
new file mode 100644
index 0000000..629be42
--- /dev/null
+++ b/test_set/cvp_spt/tests/test_hw2hw.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+import itertools
+import re
+import os
+import yaml
+import requests
+from cvp_spt import utils
+from cvp_spt.utils import helpers
+from netaddr import IPNetwork, IPAddress
+
+
+def test_hw2hw(local_salt_client, hw_pair, record_property):
+    helpp = helpers.helpers(local_salt_client)
+    config = utils.get_configuration()
+    nodes = local_salt_client.cmd(expr_form='compound', tgt=str(hw_pair[0]+' or '+hw_pair[1]),
+                                  fun='network.interfaces')
+    short_name = []
+    short_name.append(hw_pair[0].split('.')[0])
+    short_name.append(hw_pair[1].split('.')[0])
+    nets = config.get('networks').split(',')
+    local_salt_client.cmd(expr_form='compound', tgt=str(hw_pair[0]+' or '+hw_pair[1]),
+                          fun='cmd.run', param=['nohup iperf -s > file 2>&1 &'])
+    global_results = []
+    for net in nets:
+        for interf in nodes[hw_pair[0]]:
+            if 'inet' not in nodes[hw_pair[0]][interf].keys():
+                continue
+            ip = nodes[hw_pair[0]][interf]['inet'][0]['address']
+            if (IPAddress(ip) in IPNetwork(net)) and (nodes[hw_pair[0]][interf]['inet'][0]['broadcast']):
+                for interf2 in nodes[hw_pair[1]]:
+                    if 'inet' not in nodes[hw_pair[1]][interf2].keys():
+                        continue
+                    ip2 = nodes[hw_pair[1]][interf2]['inet'][0]['address']
+                    if (IPAddress(ip2) in IPNetwork(net)) and (nodes[hw_pair[1]][interf2]['inet'][0]['broadcast']):
+                        print "Will IPERF between {0} and {1}".format(ip,ip2)
+                        try:
+                            res = helpp.start_iperf_between_hosts(global_results, hw_pair[0], hw_pair[1],
+                                                                      ip, ip2, net)
+                            record_property("1-worst {0}-{1}".format(short_name[0],short_name[1]), res[0] if res[0] < res[2] else res[2])
+                            record_property("1-best {0}-{1}".format(short_name[0],short_name[1]), res[0] if res[0] > res[2] else res[2])
+                            record_property("10-best {0}-{1}".format(short_name[0],short_name[1]), res[1] if res[1] > res[3] else res[3])
+                            record_property("10-best {0}-{1}".format(short_name[0],short_name[1]), res[1] if res[1] > res[3] else res[3])
+                            print "Measurement between {} and {} " \
+                                   "has been finished".format(hw_pair[0],
+                                                              hw_pair[1])
+                        except Exception as e:
+                                print "Failed for {0} {1}".format(
+                                              hw_pair[0], hw_pair[1])
+                                print e
+    local_salt_client.cmd(expr_form='compound', tgt=str(hw_pair[0]+' or '+hw_pair[1]),
+                          fun='cmd.run', param=['killall -9 iperf'])
+    helpp.draw_table_with_results(global_results)
diff --git a/test_set/cvp_spt/tests/test_vm2vm.py b/test_set/cvp_spt/tests/test_vm2vm.py
new file mode 100644
index 0000000..9e1d5d7
--- /dev/null
+++ b/test_set/cvp_spt/tests/test_vm2vm.py
@@ -0,0 +1,103 @@
+import os
+import random
+import time
+import pytest
+from cvp_spt import utils
+from cvp_spt.utils import os_client
+from cvp_spt.utils import ssh
+
+
+def test_vm2vm(openstack_clients, pair, os_resources, record_property):
+    os_actions = os_client.OSCliActions(openstack_clients)
+    config = utils.get_configuration()
+    timeout = int(config.get('nova_timeout', 30))
+    try:
+        zone1 = [service.zone for service in openstack_clients.compute.services.list() if service.host == pair[0]]
+        zone2 = [service.zone for service in openstack_clients.compute.services.list() if service.host == pair[1]]
+        vm1 = os_actions.create_basic_server(os_resources['image_id'],
+                                             os_resources['flavor_id'],
+                                             os_resources['net1'],
+                                             '{0}:{1}'.format(zone1[0],pair[0]),
+                                             [os_resources['sec_group'].name],
+                                             os_resources['keypair'].name)
+
+        vm2 = os_actions.create_basic_server(os_resources['image_id'],
+                                             os_resources['flavor_id'],
+                                             os_resources['net1'],
+                                             '{0}:{1}'.format(zone1[0],pair[0]),
+                                             [os_resources['sec_group'].name],
+                                             os_resources['keypair'].name)
+
+        vm3 = os_actions.create_basic_server(os_resources['image_id'],
+                                             os_resources['flavor_id'],
+                                             os_resources['net1'],
+                                             '{0}:{1}'.format(zone2[0],pair[1]),
+                                             [os_resources['sec_group'].name],
+                                             os_resources['keypair'].name)
+
+        vm4 = os_actions.create_basic_server(os_resources['image_id'],
+                                             os_resources['flavor_id'],
+                                             os_resources['net2'],
+                                             '{0}:{1}'.format(zone2[0],pair[1]),
+                                             [os_resources['sec_group'].name],
+                                             os_resources['keypair'].name)
+
+        vm_info = []
+        vms = []
+        vms.extend([vm1,vm2,vm3,vm4])
+        fips = []
+        time.sleep(5)
+        for i in range(4):
+            fip = openstack_clients.compute.floating_ips.create(os_resources['ext_net']['name'])
+            fips.append(fip.id)
+            status = openstack_clients.compute.servers.get(vms[i]).status
+            if status != 'ACTIVE':
+                print "VM #{0} {1} is not ready. Status {2}".format(i,vms[i].id,status)
+                time.sleep(timeout)
+                status = openstack_clients.compute.servers.get(vms[i]).status
+            if status != 'ACTIVE':
+                raise Exception('VM is not ready')
+            vms[i].add_floating_ip(fip)
+            private_address = vms[i].addresses[vms[i].addresses.keys()[0]][0]['addr']
+            time.sleep(5)
+            try:
+                ssh.prepare_iperf(fip.ip,private_key=os_resources['keypair'].private_key)
+            except Exception as e:
+                print e
+                print "ssh.prepare_iperf was not successful, retry after {} sec".format(timeout)
+                time.sleep(timeout)
+                ssh.prepare_iperf(fip.ip,private_key=os_resources['keypair'].private_key)
+            vm_info.append({'vm': vms[i], 'fip': fip.ip, 'private_address': private_address})
+
+        transport1 = ssh.SSHTransport(vm_info[0]['fip'], 'ubuntu', password='dd', private_key=os_resources['keypair'].private_key)
+
+        result1 = transport1.exec_command('iperf -c {} | tail -n 1'.format(vm_info[1]['private_address']))
+        print ' '.join(result1.split()[-2::])
+        record_property("same {0}-{1}".format(zone1[0],zone2[0]), ' '.join(result1.split()[-2::]))
+        result2 = transport1.exec_command('iperf -c {} | tail -n 1'.format(vm_info[2]['private_address']))
+        print ' '.join(result2.split()[-2::])
+        record_property("diff host {0}-{1}".format(zone1[0],zone2[0]), ' '.join(result2.split()[-2::]))
+        result3 = transport1.exec_command('iperf -c {} -P 10 | tail -n 1'.format(vm_info[2]['private_address']))
+        print ' '.join(result3.split()[-2::])
+        record_property("dif host 10 threads {0}-{1}".format(zone1[0],zone2[0]), ' '.join(result3.split()[-2::]))
+        result4 = transport1.exec_command('iperf -c {} | tail -n 1'.format(vm_info[2]['fip']))
+        print ' '.join(result4.split()[-2::])
+        record_property("diff host fip {0}-{1}".format(zone1[0],zone2[0]), ' '.join(result4.split()[-2::]))
+        result5 = transport1.exec_command('iperf -c {} | tail -n 1'.format(vm_info[3]['private_address']))
+        print ' '.join(result5.split()[-2::])
+        record_property("diff host, diff net {0}-{1}".format(zone1[0],zone2[0]), ' '.join(result5.split()[-2::]))
+
+        print "Remove VMs"
+        for vm in vms:
+            openstack_clients.compute.servers.delete(vm)
+        print "Remove FIPs"
+        for fip in fips:
+            openstack_clients.compute.floating_ips.delete(fip)
+    except Exception as e:
+        print e
+        print "Something went wrong"
+        for vm in vms:
+            openstack_clients.compute.servers.delete(vm)
+        for fip in fips:
+            openstack_clients.compute.floating_ips.delete(fip)
+        pytest.fail("Something went wrong")
diff --git a/test_set/cvp_spt/utils/__init__.py b/test_set/cvp_spt/utils/__init__.py
new file mode 100644
index 0000000..c53dd69
--- /dev/null
+++ b/test_set/cvp_spt/utils/__init__.py
@@ -0,0 +1,111 @@
+import os
+import yaml
+import requests
+import re
+import sys, traceback
+import itertools
+import helpers
+from cvp_spt.utils import os_client
+
+
+class salt_remote:
+    def cmd(self, tgt, fun, param=None, expr_form=None, tgt_type=None):
+        config = get_configuration()
+        url = config['SALT_URL']
+        proxies = {"http": None, "https": None}
+        headers = {'Accept': 'application/json'}
+        login_payload = {'username': config['SALT_USERNAME'],
+                         'password': config['SALT_PASSWORD'], 'eauth': 'pam'}
+        accept_key_payload = {'fun': fun, 'tgt': tgt, 'client': 'local',
+                              'expr_form': expr_form, 'tgt_type': tgt_type,
+                              'timeout': config['salt_timeout']}
+        if param:
+            accept_key_payload['arg'] = param
+
+        try:
+            login_request = requests.post(os.path.join(url, 'login'),
+                                          headers=headers, data=login_payload,
+                                          proxies=proxies)
+            if login_request.ok:
+                request = requests.post(url, headers=headers,
+                                        data=accept_key_payload,
+                                        cookies=login_request.cookies,
+                                        proxies=proxies)
+                return request.json()['return'][0]
+        except Exception:
+            print "\033[91m\nConnection to SaltMaster " \
+                  "was not established.\n" \
+                  "Please make sure that you " \
+                  "provided correct credentials.\033[0m\n"
+            traceback.print_exc(file=sys.stdout)
+            sys.exit()
+
+
+def init_salt_client():
+    local = salt_remote()
+    return local
+
+
+def compile_pairs(nodes):
+    result = {}
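+    # Pairs up consecutive nodes; with an odd count the second element is dropped,
+    # e.g. ['cmp01', 'cmp02', 'cmp03', 'cmp04'] ->
+    #      {'cmp01<>cmp02': ('cmp01', 'cmp02'), 'cmp03<>cmp04': ('cmp03', 'cmp04')}
+    # (node names here are just examples)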
+    if len(nodes) % 2 != 0:
+        nodes.pop(1)
+    pairs = zip(*[iter(nodes)]*2)
+    for pair in pairs:
+        result[pair[0]+'<>'+pair[1]] = pair
+    return result
+
+
+def get_pairs():
+    # TODO
+    # maybe collect cmp from nova service-list
+    config = get_configuration()
+    local_salt_client = init_salt_client()
+    cmp_hosts = config.get('CMP_HOSTS') or []
+    skipped_nodes = config.get('skipped_nodes') or []
+    if skipped_nodes:
+        print "Notice: {0} nodes will be skipped for vm2vm test".format(skipped_nodes)
+    if not cmp_hosts:
+        nodes = local_salt_client.cmd(
+                'I@nova:compute',
+                'test.ping',
+                expr_form='compound')
+        cmp_hosts = [node.split('.')[0] for node in nodes.keys() if node not in skipped_nodes]
+    return compile_pairs(cmp_hosts)
+
+
+def get_hw_pairs():
+    config = get_configuration()
+    local_salt_client = init_salt_client()
+    hw_nodes = config.get('HW_NODES') or []
+    skipped_nodes = config.get('skipped_nodes') or []
+    if skipped_nodes:
+        print "Notice: {0} nodes will be skipped for hw2hw test".format(skipped_nodes)
+    if not hw_nodes:
+        nodes = local_salt_client.cmd(
+                'I@salt:control or I@nova:compute',
+                'test.ping',
+                expr_form='compound')
+        hw_nodes = [node for node in nodes.keys() if node not in skipped_nodes]
+    print local_salt_client.cmd(expr_form='compound', tgt="L@"+','.join(hw_nodes),
+                                fun='pkg.install', param=['iperf'])
+    return compile_pairs(hw_nodes)
+
+def get_configuration():
+    """function returns configuration for environment
+    and for test if it's specified"""
+
+    global_config_file = os.path.join(
+        os.path.dirname(os.path.abspath(__file__)), "../global_config.yaml")
+    with open(global_config_file, 'r') as file:
+        global_config = yaml.load(file)
+    for param in global_config.keys():
+        if param in os.environ.keys():
+            if ',' in os.environ[param]:
+                global_config[param] = []
+                for item in os.environ[param].split(','):
+                    global_config[param].append(item)
+            else:
+                global_config[param] = os.environ[param]
+
+    return global_config
diff --git a/test_set/cvp_spt/utils/helpers.py b/test_set/cvp_spt/utils/helpers.py
new file mode 100644
index 0000000..97a3bae
--- /dev/null
+++ b/test_set/cvp_spt/utils/helpers.py
@@ -0,0 +1,79 @@
+import texttable as tt
+
+class helpers(object):
+    def __init__(self, local_salt_client):
+        self.local_salt_client = local_salt_client
+
+    def start_iperf_between_hosts(self, global_results, node_i, node_j, ip_i, ip_j, net_name):
+        result = []
+        direct_raw_results = self.start_iperf_client(node_i, ip_j)
+        result.append(direct_raw_results)
+        print "1 forward"
+        forward = "1 thread:\n"
+        forward += direct_raw_results + " Gbits/sec"
+
+        direct_raw_results = self.start_iperf_client(node_i, ip_j, 10)
+        result.append(direct_raw_results)
+        print "10 forward"
+        forward += "\n\n10 thread:\n"
+        forward += direct_raw_results + " Gbits/sec"
+
+        reverse_raw_results = self.start_iperf_client(node_j, ip_i)
+        result.append(reverse_raw_results)
+        print "1 backward"
+        backward = "1 thread:\n"
+        backward += reverse_raw_results + " Gbits/sec"
+
+        reverse_raw_results = self.start_iperf_client(node_j, ip_i, 10)
+        result.append(reverse_raw_results)
+        print "10 backward"
+        backward += "\n\n10 thread:\n"
+        backward += reverse_raw_results + " Gbits/sec"
+        global_results.append([node_i, node_j,
+                               net_name, forward, backward])
+
+        self.kill_iperf_processes(node_i)
+        self.kill_iperf_processes(node_j)
+        return result
+
+    def draw_table_with_results(self, global_results):
+        tab = tt.Texttable()
+        header = [
+            'node name 1',
+            'node name 2',
+            'network',
+            'bandwidth >',
+            'bandwidth <',
+        ]
+        tab.set_cols_align(['l', 'l', 'l', 'l', 'l'])
+        tab.set_cols_width([27, 27, 15, 20, 20])
+        tab.header(header)
+        for row in global_results:
+            tab.add_row(row)
+        s = tab.draw()
+        print s
+
+    def start_iperf_client(self, minion_name, target_ip, thread_count=None):
+        iperf_command = 'timeout --kill-after=20 19 iperf -c {0}'.format(target_ip)
+        if thread_count:
+            iperf_command += ' -P {0}'.format(thread_count)
+        output = self.local_salt_client.cmd(tgt=minion_name,
+                                            fun='cmd.run',
+                                            param=[iperf_command])
+        # self.kill_iperf_processes(minion_name)
+        try:
+            result = output.values()[0].split('\n')[-1].split(' ')[-2:]
+            if result[1] == 'Mbits/sec':
+                return str(float(result[0])*0.001)
+            if result[1] != 'Gbits/sec':
+                return "0"
+            return result[0]
+        except Exception:
+            print "No iperf result between {} and {} (maybe they don't have connectivity)".format(minion_name, target_ip)
+            return "0"
+
+
+    def kill_iperf_processes(self, minion_name):
+        kill_command = "for pid in $(pgrep  iperf); do kill $pid; done"
+        output = self.local_salt_client.cmd(tgt=minion_name,
+                                            fun='cmd.run',
+                                            param=[kill_command])
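+
+# Usage sketch for the helpers class above (minion names, IPs and the network
+# label are placeholders, not values taken from a real environment):
+#
+#   h = helpers(local_salt_client)
+#   results = []
+#   h.start_iperf_between_hosts(results, 'cmp01', 'cmp02',
+#                               '10.1.0.11', '10.1.0.12', 'tenant-net')
+#   h.draw_table_with_results(results)  # prints a texttable with both directions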
diff --git a/test_set/cvp_spt/utils/os_client.py b/test_set/cvp_spt/utils/os_client.py
new file mode 100644
index 0000000..c17617f
--- /dev/null
+++ b/test_set/cvp_spt/utils/os_client.py
@@ -0,0 +1,396 @@
+from cinderclient import client as cinder_client
+from glanceclient import client as glance_client
+from keystoneauth1 import identity as keystone_identity
+from keystoneauth1 import session as keystone_session
+from keystoneclient.v3 import client as keystone_client
+from neutronclient.v2_0 import client as neutron_client
+from novaclient import client as novaclient
+
+import os
+import random
+import time
+
+
+class OfficialClientManager(object):
+    """Manager that provides access to the official python clients for
+    calling various OpenStack APIs.
+    """
+
+    CINDERCLIENT_VERSION = 3
+    GLANCECLIENT_VERSION = 2
+    KEYSTONECLIENT_VERSION = 3
+    NEUTRONCLIENT_VERSION = 2
+    NOVACLIENT_VERSION = 2
+    INTERFACE = 'admin'
+    if "OS_ENDPOINT_TYPE" in os.environ.keys():
+        INTERFACE = os.environ["OS_ENDPOINT_TYPE"]
+
+    def __init__(self, username=None, password=None,
+                 tenant_name=None, auth_url=None, endpoint_type="internalURL",
+                 cert=False, domain="Default", **kwargs):
+        self.traceback = ""
+
+        self.client_attr_names = [
+            "auth",
+            "compute",
+            "network",
+            "volume",
+            "image",
+        ]
+        self.username = username
+        self.password = password
+        self.tenant_name = tenant_name
+        self.project_name = tenant_name
+        self.auth_url = auth_url
+        self.endpoint_type = endpoint_type
+        self.cert = cert
+        self.domain = domain
+        self.kwargs = kwargs
+
+        # Lazy clients
+        self._auth = None
+        self._compute = None
+        self._network = None
+        self._volume = None
+        self._image = None
+
+    @classmethod
+    def _get_auth_session(cls, username=None, password=None,
+                          tenant_name=None, auth_url=None, cert=None,
+                          domain='Default'):
+        if None in (username, password, tenant_name):
+            print(username, password, tenant_name)
+            msg = ("Missing required credentials for identity client. "
+                   "username: {username}, password: {password}, "
+                   "tenant_name: {tenant_name}").format(
+                       username=username,
+                       password=password,
+                       tenant_name=tenant_name, )
+            raise ValueError(msg)
+
+        if cert and "https" not in auth_url:
+            auth_url = auth_url.replace("http", "https")
+
+        if cls.KEYSTONECLIENT_VERSION == (2, 0):
+            # auth_url = "{}{}".format(auth_url, "v2.0/")
+            auth = keystone_identity.v2.Password(
+                username=username,
+                password=password,
+                auth_url=auth_url,
+                tenant_name=tenant_name)
+        else:
+            auth_url = "{}{}".format(auth_url, "/v3")
+            auth = keystone_identity.v3.Password(
+                auth_url=auth_url,
+                user_domain_name=domain,
+                username=username,
+                password=password,
+                project_domain_name=domain,
+                project_name=tenant_name)
+
+        auth_session = keystone_session.Session(auth=auth, verify=cert)
+        # auth_session.get_auth_headers()
+        return auth_session
+
+    @classmethod
+    def get_auth_client(cls, username=None, password=None,
+                        tenant_name=None, auth_url=None, cert=None,
+                        domain='Default', **kwargs):
+        session = cls._get_auth_session(
+            username=username,
+            password=password,
+            tenant_name=tenant_name,
+            auth_url=auth_url,
+            cert=cert,
+            domain=domain)
+        keystone = keystone_client.Client(version=cls.KEYSTONECLIENT_VERSION,
+                                          session=session, **kwargs)
+        keystone.management_url = auth_url
+        return keystone
+
+    @classmethod
+    def get_compute_client(cls, username=None, password=None,
+                           tenant_name=None, auth_url=None, cert=None,
+                           domain='Default', **kwargs):
+        session = cls._get_auth_session(
+            username=username, password=password, tenant_name=tenant_name,
+            auth_url=auth_url, cert=cert, domain=domain)
+        service_type = 'compute'
+        compute_client = novaclient.Client(
+            version=cls.NOVACLIENT_VERSION, session=session,
+            service_type=service_type, os_cache=False, **kwargs)
+        return compute_client
+
+    @classmethod
+    def get_network_client(cls, username=None, password=None,
+                           tenant_name=None, auth_url=None, cert=None,
+                           domain='Default', **kwargs):
+        session = cls._get_auth_session(
+            username=username, password=password, tenant_name=tenant_name,
+            auth_url=auth_url, cert=cert, domain=domain)
+        service_type = 'network'
+        return neutron_client.Client(
+            service_type=service_type, session=session, interface=cls.INTERFACE, **kwargs)
+
+    @classmethod
+    def get_volume_client(cls, username=None, password=None,
+                          tenant_name=None, auth_url=None, cert=None,
+                          domain='Default', **kwargs):
+        session = cls._get_auth_session(
+            username=username, password=password, tenant_name=tenant_name,
+            auth_url=auth_url, cert=cert, domain=domain)
+        service_type = 'volume'
+        return cinder_client.Client(
+            version=cls.CINDERCLIENT_VERSION,
+            service_type=service_type,
+            interface=cls.INTERFACE,
+            session=session, **kwargs)
+
+    @classmethod
+    def get_image_client(cls, username=None, password=None,
+                         tenant_name=None, auth_url=None, cert=None,
+                         domain='Default', **kwargs):
+        session = cls._get_auth_session(
+            username=username, password=password, tenant_name=tenant_name,
+            auth_url=auth_url, cert=cert, domain=domain)
+        service_type = 'image'
+        return glance_client.Client(
+            version=cls.GLANCECLIENT_VERSION,
+            service_type=service_type,
+            session=session, interface=cls.INTERFACE,
+            **kwargs)
+
+    @property
+    def auth(self):
+        if self._auth is None:
+            self._auth = self.get_auth_client(
+                self.username, self.password, self.tenant_name, self.auth_url,
+                self.cert, self.domain, endpoint_type=self.endpoint_type
+            )
+        return self._auth
+
+    @property
+    def compute(self):
+        if self._compute is None:
+            self._compute = self.get_compute_client(
+                self.username, self.password, self.tenant_name, self.auth_url,
+                self.cert, self.domain, endpoint_type=self.endpoint_type
+            )
+        return self._compute
+
+    @property
+    def network(self):
+        if self._network is None:
+            self._network = self.get_network_client(
+                self.username, self.password, self.tenant_name, self.auth_url,
+                self.cert, self.domain, endpoint_type=self.endpoint_type
+            )
+        return self._network
+
+    @property
+    def volume(self):
+        if self._volume is None:
+            self._volume = self.get_volume_client(
+                self.username, self.password, self.tenant_name, self.auth_url,
+                self.cert, self.domain, endpoint_type=self.endpoint_type
+            )
+        return self._volume
+
+    @property
+    def image(self):
+        if self._image is None:
+            self._image = self.get_image_client(
+                self.username, self.password, self.tenant_name, self.auth_url,
+                self.cert, self.domain
+            )
+        return self._image
+
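+# Usage sketch for OfficialClientManager (the credentials and auth URL below are
+# placeholders; each client is created lazily on first attribute access):
+#
+#   os_clients = OfficialClientManager(username='admin', password='secret',
+#                                      tenant_name='admin',
+#                                      auth_url='http://192.168.0.10:5000')
+#   os_clients.compute.servers.list()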
+
+class OSCliActions(object):
+    def __init__(self, os_clients):
+        self.os_clients = os_clients
+
+    def get_admin_tenant(self):
+        # TODO: Keystone v3 doesn't have a tenants attribute
+        return self.os_clients.auth.projects.find(name="admin")
+
+    # TODO: refactor
+    def get_cirros_image(self):
+        images_list = list(self.os_clients.image.images.list(name='TestVM'))
+        if images_list:
+            image = images_list[0]
+        else:
+            image = self.os_clients.image.images.create(
+                name="TestVM",
+                disk_format='qcow2',
+                container_format='bare')
+            # NOTE: file_cache and settings are not imported in this module;
+            # the upload below only works when they are provided elsewhere
+            with file_cache.get_file(settings.CIRROS_QCOW2_URL) as f:
+                self.os_clients.image.images.upload(image.id, f)
+        return image
+
+    def get_internal_network(self):
+        networks = [
+            net for net in self.os_clients.network.list_networks()["networks"]
+            if net["admin_state_up"] and not net["router:external"] and
+            len(net["subnets"])
+        ]
+        if networks:
+            net = networks[0]
+        else:
+            net = self.create_network_resources()
+        return net
+
+    def get_external_network(self):
+        networks = [
+            net for net in self.os_clients.network.list_networks()["networks"]
+            if net["admin_state_up"] and net["router:external"] and
+            len(net["subnets"])
+        ]
+        if networks:
+            ext_net = networks[0]
+        else:
+            ext_net = self.create_fake_external_network()
+        return ext_net
+
+    def create_flavor(self, name, ram=256, vcpus=1, disk=2):
+        return self.os_clients.compute.flavors.create(name, ram, vcpus, disk)
+
+    def create_sec_group(self, rulesets=None):
+        if rulesets is None:
+            rulesets = [
+                {
+                    # ssh
+                    'ip_protocol': 'tcp',
+                    'from_port': 22,
+                    'to_port': 22,
+                    'cidr': '0.0.0.0/0',
+                },
+                {
+                    # iperf
+                    'ip_protocol': 'tcp',
+                    'from_port':5001,
+                    'to_port': 5001,
+                    'cidr': '0.0.0.0/0',
+                },
+                {
+                    # ping
+                    'ip_protocol': 'icmp',
+                    'from_port': -1,
+                    'to_port': -1,
+                    'cidr': '0.0.0.0/0',
+                }
+            ]
+        sg_name = "spt-test-secgroup-{}".format(random.randrange(100, 999))
+        sg_desc = sg_name + " SPT"
+        secgroup = self.os_clients.compute.security_groups.create(
+            sg_name, sg_desc)
+        for ruleset in rulesets:
+            self.os_clients.compute.security_group_rules.create(
+                secgroup.id, **ruleset)
+        return secgroup
+
+
+    def wait(self, predicate, interval=5, timeout=60,
+             timeout_msg="Waiting timed out"):
+        start_time = time.time()
+        if not timeout:
+            return predicate()
+        while not predicate():
+            if start_time + timeout < time.time():
+                raise Exception(timeout_msg)
+
+            seconds_to_sleep = max(
+                0,
+                min(interval, start_time + timeout - time.time()))
+            time.sleep(seconds_to_sleep)
+
+        return timeout + start_time - time.time()
+
+    def create_basic_server(self, image=None, flavor=None, net=None,
+                            availability_zone=None, sec_groups=(),
+                            keypair=None,
+                            wait_timeout=3 * 60):
+        os_conn = self.os_clients
+        image = image or self.get_cirros_image()
+        # NOTE: get_micro_flavor is not defined in this class; pass a flavor explicitly
+        flavor = flavor or self.get_micro_flavor()
+        net = net or self.get_internal_network()
+        kwargs = {}
+        if sec_groups:
+            kwargs['security_groups'] = sec_groups
+        server = os_conn.compute.servers.create(
+            "spt-test-server-{}".format(random.randrange(100, 999)),
+            image, flavor, nics=[{"net-id": net["id"]}],
+            availability_zone=availability_zone, key_name=keypair, **kwargs)
+        # TODO
+        #if wait_timeout:
+        #    self.wait(
+        #        lambda: os_conn.compute.servers.get(server).status == "ACTIVE",
+        #        timeout=wait_timeout,
+        #        timeout_msg=(
+        #            "Create server {!r} failed by timeout. "
+        #            "Please, take a look at OpenStack logs".format(server.id)))
+        return server
+
+    def create_network(self, tenant_id):
+        net_name = "spt-test-net-{}".format(random.randrange(100, 999))
+        net_body = {
+            'network': {
+                'name': net_name,
+                'tenant_id': tenant_id
+            }
+        }
+        net = self.os_clients.network.create_network(net_body)['network']
+        return net
+        #yield net
+        #self.os_clients.network.delete_network(net['id'])
+
+    def create_subnet(self, net, tenant_id, cidr=None):
+        subnet_name = "spt-test-subnet-{}".format(random.randrange(100, 999))
+        subnet_body = {
+            'subnet': {
+                "name": subnet_name,
+                'network_id': net['id'],
+                'ip_version': 4,
+                'cidr': cidr if cidr else '10.1.7.0/24',
+                'tenant_id': tenant_id
+            }
+        }
+        subnet = self.os_clients.network.create_subnet(subnet_body)['subnet']
+        return subnet
+        #yield subnet
+        #self.os_clients.network.delete_subnet(subnet['id'])
+
+    def create_router(self, ext_net, tenant_id):
+        name = 'spt-test-router-{}'.format(random.randrange(100, 999))
+        router_body = {
+            'router': {
+                'name': name,
+                'external_gateway_info': {
+                    'network_id': ext_net['id']
+                },
+                'tenant_id': tenant_id
+            }
+        }
+        router = self.os_clients.network.create_router(router_body)['router']
+        return router
+        #yield router
+        #self.os_clients.network.delete_router(router['id'])
+
+    def create_network_resources(self):
+        tenant_id = self.get_admin_tenant().id
+        ext_net = self.get_external_network()
+        net = self.create_network(tenant_id)
+        subnet = self.create_subnet(net, tenant_id)
+        #router = self.create_router(ext_net, tenant_id)
+        #self.os_clients.network.add_interface_router(
+        #    router['id'], {'subnet_id': subnet['id']})
+
+        private_net_id = net['id']
+        # floating_ip_pool = ext_net['id']
+
+        return net
+        #yield private_net_id, floating_ip_pool
+        #yield private_net_id
+        #
+        #self.os_clients.network.remove_interface_router(
+        #     router['id'], {'subnet_id': subnet['id']})
+        #self.os_clients.network.remove_gateway_router(router['id'])
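+
+# Usage sketch for OSCliActions (a sketch only; it assumes an already
+# authenticated OfficialClientManager instance with admin rights):
+#
+#   os_actions = OSCliActions(os_clients)
+#   net = os_actions.get_internal_network()   # falls back to create_network_resources()
+#   server = os_actions.create_basic_server(net=net)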
diff --git a/test_set/cvp_spt/utils/ssh.py b/test_set/cvp_spt/utils/ssh.py
new file mode 100644
index 0000000..8e78963
--- /dev/null
+++ b/test_set/cvp_spt/utils/ssh.py
@@ -0,0 +1,140 @@
+import cStringIO
+import logging
+import select
+from cvp_spt import utils
+import paramiko
+
+
+logger = logging.getLogger(__name__)
+
+# Suppress paramiko logging
+logging.getLogger("paramiko").setLevel(logging.WARNING)
+
+
+class SSHTransport(object):
+    def __init__(self, address, username, password=None,
+                 private_key=None, look_for_keys=False, *args, **kwargs):
+
+        self.address = address
+        self.username = username
+        self.password = password
+        if private_key is not None:
+            self.private_key = paramiko.RSAKey.from_private_key(
+                cStringIO.StringIO(private_key))
+        else:
+            self.private_key = None
+
+        self.look_for_keys = look_for_keys
+        self.buf_size = 1024
+        self.channel_timeout = 10.0
+
+    def _get_ssh_connection(self):
+        ssh = paramiko.SSHClient()
+        ssh.set_missing_host_key_policy(
+            paramiko.AutoAddPolicy())
+        ssh.connect(self.address, username=self.username,
+                    password=self.password, pkey=self.private_key,
+                    timeout=self.channel_timeout)
+        logger.debug("Successfully connected to: {0}".format(self.address))
+        return ssh
+
+    def _get_sftp_connection(self):
+        transport = paramiko.Transport((self.address, 22))
+        transport.connect(username=self.username,
+                          password=self.password,
+                          pkey=self.private_key)
+
+        return paramiko.SFTPClient.from_transport(transport)
+
+    def exec_sync(self, cmd):
+        logger.debug("Executing {0} on host {1}".format(cmd, self.address))
+        ssh = self._get_ssh_connection()
+        transport = ssh.get_transport()
+        channel = transport.open_session()
+        # fileno() allocates an OS-level pipe so the channel can be polled below
+        channel.fileno()
+        channel.exec_command(cmd)
+        channel.shutdown_write()
+        out_data = []
+        err_data = []
+        poll = select.poll()
+        poll.register(channel, select.POLLIN)
+
+        while True:
+            ready = poll.poll(self.channel_timeout)
+            if not any(ready):
+                continue
+            if not ready[0]:
+                continue
+            out_chunk = err_chunk = None
+            if channel.recv_ready():
+                out_chunk = channel.recv(self.buf_size)
+                out_data += out_chunk,
+            if channel.recv_stderr_ready():
+                err_chunk = channel.recv_stderr(self.buf_size)
+                err_data += err_chunk,
+            if channel.closed and not err_chunk and not out_chunk:
+                break
+        exit_status = channel.recv_exit_status()
+        logger.debug("Command {0} executed with status: {1}"
+                     .format(cmd, exit_status))
+        return (
+            exit_status, ''.join(out_data).strip(), ''.join(err_data).strip())
+
+    def exec_command(self, cmd):
+        exit_status, stdout, stderr = self.exec_sync(cmd)
+        return stdout
+
+    def check_call(self, command, error_info=None, expected=None,
+                   raise_on_err=True):
+        """Execute command and check for return code
+        :type command: str
+        :type error_info: str
+        :type expected: list
+        :type raise_on_err: bool
+        :rtype: tuple of (exit_code, stdout, stderr)
+        :raises: SystemExit (via exit()) when the exit code is not in expected
+            and raise_on_err is True
+        """
+        if expected is None:
+            expected = [0]
+        ret = self.exec_sync(command)
+        exit_code, stdout_str, stderr_str = ret
+        if exit_code not in expected:
+            message = (
+                "{append}Command '{cmd}' returned exit code {code} while "
+                "expected {expected}\n"
+                "\tSTDOUT:\n"
+                "{stdout}"
+                "\n\tSTDERR:\n"
+                "{stderr}".format(
+                    append=error_info + '\n' if error_info else '',
+                    cmd=command,
+                    code=exit_code,
+                    expected=expected,
+                    stdout=stdout_str,
+                    stderr=stderr_str
+                ))
+            logger.error(message)
+            if raise_on_err:
+                exit()
+        return ret
+
+    def put_file(self, source_path, destination_path):
+        sftp = self._get_sftp_connection()
+        sftp.put(source_path, destination_path)
+        sftp.close()
+
+    def get_file(self, source_path, destination_path):
+        sftp = self._get_sftp_connection()
+        sftp.get(source_path, destination_path)
+        sftp.close()
+
+
+class prepare_iperf(object):
+
+    def __init__(self, fip, user='ubuntu', password='password', private_key=None):
+        transport = SSHTransport(fip, user, password, private_key)
+        config = utils.get_configuration()
+        preparation_cmd = config.get('iperf_prep_string') or ''
+        if isinstance(preparation_cmd, list):
+            # iperf_prep_string may be defined as a list of shell commands
+            preparation_cmd = ' ; '.join(preparation_cmd)
+        transport.exec_command(preparation_cmd)
+        transport.exec_command('sudo apt-get update; sudo apt-get install -y iperf')
+        transport.exec_command('nohup iperf -s > file 2>&1 &')
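+
+# Usage sketches (the address and credentials below are placeholders):
+#
+#   transport = SSHTransport('172.16.10.101', 'ubuntu', password='password')
+#   code, out, err = transport.check_call('iperf --version', raise_on_err=False)
+#
+#   prepare_iperf('172.16.10.101')  # installs iperf on the VM and leaves
+#                                   # an iperf server running in the background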