Renamed the folder with tests to make it consistent with cvp-runner.groovy
and CVP jobs in cluster Jenkins
Returned the rsync service to inconsistency_rule

Related-Task: #PROD-23604(PROD:23604)

Change-Id: I94afe350bd1d9c184bafe8e9e270aeb4c6c24c50
diff --git a/test_set/cvp-sanity/tests/__init__.py b/test_set/cvp-sanity/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test_set/cvp-sanity/tests/__init__.py
diff --git a/test_set/cvp-sanity/tests/ceph/test_ceph_haproxy.py b/test_set/cvp-sanity/tests/ceph/test_ceph_haproxy.py
new file mode 100644
index 0000000..d6c8e49
--- /dev/null
+++ b/test_set/cvp-sanity/tests/ceph/test_ceph_haproxy.py
@@ -0,0 +1,23 @@
+import pytest
+
+
+def test_ceph_haproxy(local_salt_client):
+    pytest.skip("This test doesn't work. Skipped")
+    fail = {}
+
+    monitor_info = local_salt_client.cmd(
+        'ceph:mon',
+        'cmd.run',
+        ["echo 'show stat' | nc -U "
+         "/var/run/haproxy/admin.sock | "
+         "grep ceph_mon_radosgw_cluster"],
+        expr_form='pillar')
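+    # 'show stat' on the haproxy admin socket prints CSV statistics; only
+    # rows for the ceph_mon_radosgw_cluster backend are kept by the grep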
+    if not monitor_info:
+        pytest.skip("Ceph is not found on this environment")
+
+    for name, info in monitor_info.iteritems():
+        if "OPEN" and "UP" in info:
+            continue
+        else:
+            fail[name] = info
+    assert not fail, "Failed monitors: {}".format(fail)
diff --git a/test_set/cvp-sanity/tests/ceph/test_ceph_pg_count.py b/test_set/cvp-sanity/tests/ceph/test_ceph_pg_count.py
new file mode 100644
index 0000000..28783e8
--- /dev/null
+++ b/test_set/cvp-sanity/tests/ceph/test_ceph_pg_count.py
@@ -0,0 +1,94 @@
+import pytest
+import math
+
+def __next_power_of2(total_pg):
+    count = 0
+    if (total_pg and not(total_pg & (total_pg - 1))):
+        return total_pg
+    while (total_pg != 0):
+        total_pg >>= 1
+        count += 1
+
+    return 1 << count
+
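+# Worked example (hypothetical numbers): with 9 OSDs, a maximum replication
+# factor of 3 and 10 pools, total_pg = (9 * 100) / 3 / 10 = 30 and
+# __next_power_of2(30) == 32, so each pool should have pg_num = pgp_num >= 32.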
+
+def test_ceph_pg_count(local_salt_client):
+    """
+    Test aimed to calculate placement groups for Ceph cluster
+    according formula below.
+    Formula to calculate PG num:
+    Total PGs = 
+    (Total_number_of_OSD * 100) / max_replication_count / pool count
+    pg_num and pgp_num should be the same and 
+    set according formula to higher value of powered 2
+    """
+    pytest.skip("This test needs redesign. Skipped for now")
+    ceph_monitors = local_salt_client.cmd(
+        'ceph:mon', 
+        'test.ping', 
+        expr_form='pillar')
+    
+    if not ceph_monitors:
+        pytest.skip("Ceph is not found on this environment")
+
+    monitor = ceph_monitors.keys()[0]
+    pools = local_salt_client.cmd(
+        monitor, 'cmd.run', 
+        ["rados lspools"], 
+        expr_form='glob').get(
+            ceph_monitors.keys()[0]).split('\n')
+    
+    total_osds = int(local_salt_client.cmd(
+        monitor, 
+        'cmd.run', 
+        ['ceph osd tree | grep osd | grep "up\|down" | wc -l'], 
+        expr_form='glob').get(ceph_monitors.keys()[0]))
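+    # total_osds counts every line of 'ceph osd tree' that mentions an osd
+    # in the up or down state, i.e. all OSDs in the cluster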
+    
+    raw_pool_replications = local_salt_client.cmd(
+        monitor, 
+        'cmd.run', 
+        ["ceph osd dump | grep size | awk '{print $3, $6}'"], 
+        expr_form='glob').get(ceph_monitors.keys()[0]).split('\n')
+    
+    pool_replications = {}
+    for replication in raw_pool_replications:
+        pool_replications[replication.split()[0]] = int(replication.split()[1])
+    
+    max_replication_value = 0
+    for repl_value in pool_replications.values():
+        if repl_value > max_replication_value:
+            max_replication_value = repl_value
+
+    total_pg = (total_osds * 100) / max_replication_value / len(pools)
+    correct_pg_num = __next_power_of2(total_pg)
+    
+    pools_pg_num = {}
+    pools_pgp_num = {}
+    for pool in pools:
+        pg_num = int(local_salt_client.cmd(
+            monitor, 
+            'cmd.run', 
+            ["ceph osd pool get {} pg_num".format(pool)], 
+            expr_form='glob').get(ceph_monitors.keys()[0]).split()[1])
+        pools_pg_num[pool] = pg_num
+        pgp_num = int(local_salt_client.cmd(
+            monitor, 
+            'cmd.run', 
+            ["ceph osd pool get {} pgp_num".format(pool)], 
+            expr_form='glob').get(ceph_monitors.keys()[0]).split()[1])
+        pools_pgp_num[pool] = pgp_num
+
+    wrong_pg_num_pools = [] 
+    pg_pgp_not_equal_pools = []
+    for pool in pools:
+        if pools_pg_num[pool] != pools_pgp_num[pool]:
+            pg_pgp_not_equal_pools.append(pool)
+        if pools_pg_num[pool] < correct_pg_num:
+            wrong_pg_num_pools.append(pool)
+
+    assert not pg_pgp_not_equal_pools, \
+    "For pools {} PG and PGP are not equal " \
+    "but should be".format(pg_pgp_not_equal_pools)
+    assert not wrong_pg_num_pools, "For pools {} " \
+    "PG number lower than Correct PG number, " \
+    "but should be equal or higher".format(wrong_pg_num_pools)
diff --git a/test_set/cvp-sanity/tests/ceph/test_ceph_replicas.py b/test_set/cvp-sanity/tests/ceph/test_ceph_replicas.py
new file mode 100644
index 0000000..62af49d
--- /dev/null
+++ b/test_set/cvp-sanity/tests/ceph/test_ceph_replicas.py
@@ -0,0 +1,49 @@
+import pytest
+
+
+def test_ceph_replicas(local_salt_client):
+    """
+    Test aimed to check number of replicas
+    for most of deployments if there is no
+    special requirement for that.
+    """
+
+    ceph_monitors = local_salt_client.cmd(
+        'ceph:mon', 
+        'test.ping', 
+        expr_form='pillar')
+
+    if not ceph_monitors:
+        pytest.skip("Ceph is not found on this environment")
+
+    monitor = ceph_monitors.keys()[0]
+
+    raw_pool_replicas = local_salt_client.cmd(
+        monitor, 
+        'cmd.run', 
+        ["ceph osd dump | grep size | " \
+        "awk '{print $3, $5, $6, $7, $8}'"], 
+        expr_form='glob').get(
+        ceph_monitors.keys()[0]).split('\n')
+
+    pools_replicas = {}
+    for pool in raw_pool_replicas:
+        pool_name = pool.split(" ", 1)[0]
+        pool_replicas = {}
+        raw_replicas = pool.split(" ", 1)[1].split()
+        # raw_replicas holds four tokens: "size <N> min_size <M>"
+        pool_replicas[raw_replicas[0]] = int(raw_replicas[1])
+        pool_replicas[raw_replicas[2]] = int(raw_replicas[3])
+        pools_replicas[pool_name] = pool_replicas
+    
+    error = []
+    for pool, replicas in pools_replicas.items():
+        for replica, value in replicas.items():
+            if replica == 'min_size' and value < 2:
+                error.append("{} {} {} must be at least 2".format(
+                    pool, replica, value))
+            if replica == 'size' and value < 3:
+                error.append("{} {} {} must be at least 3".format(
+                    pool, replica, value))
+    
+    assert not error, "Wrong pool replicas found\n{}".format(error)
diff --git a/test_set/cvp-sanity/tests/ceph/test_ceph_status.py b/test_set/cvp-sanity/tests/ceph/test_ceph_status.py
new file mode 100644
index 0000000..ffd7bed
--- /dev/null
+++ b/test_set/cvp-sanity/tests/ceph/test_ceph_status.py
@@ -0,0 +1,39 @@
+import json
+
+import pytest
+
+
+def test_ceph_osd(local_salt_client):
+    osd_fail = local_salt_client.cmd(
+        'ceph:osd',
+        'cmd.run',
+        ['ceph osd tree | grep down'],
+        expr_form='pillar')
+    if not osd_fail:
+        pytest.skip("Ceph is not found on this environment")
+    assert not osd_fail.values()[0], \
+        "Some osds are in down state or ceph is not found".format(
+        osd_fail.values()[0])
+
+
+def test_ceph_health(local_salt_client):
+    get_status = local_salt_client.cmd(
+        'ceph:mon',
+        'cmd.run',
+        ['ceph -s -f json'],
+        expr_form='pillar')
+    if not get_status:
+        pytest.skip("Ceph is not found on this environment")
+    status = json.loads(get_status.values()[0])["health"]
+    health = status["status"] if 'status' in status \
+        else status["overall_status"]
+
+    # Health structure depends on Ceph version, so condition is needed:
+    if 'checks' in status:
+        summary = "Summary: {}".format(
+            [i["summary"]["message"] for i in status["checks"].values()])
+    else:
+        summary = status["summary"]
+
+    assert health == "HEALTH_OK",\
+        "Ceph status is not expected. {}".format(summary)
diff --git a/test_set/cvp-sanity/tests/ceph/test_ceph_tell_bench.py b/test_set/cvp-sanity/tests/ceph/test_ceph_tell_bench.py
new file mode 100644
index 0000000..b275022
--- /dev/null
+++ b/test_set/cvp-sanity/tests/ceph/test_ceph_tell_bench.py
@@ -0,0 +1,56 @@
+import pytest
+import json
+import math
+
+
+def test_ceph_tell_bench(local_salt_client):
+    """
+    Test checks that each OSD MB per second speed 
+    is not lower than 10 MB comparing with AVG. 
+    Bench command by default writes 1Gb on each OSD 
+    with the default values of 4M 
+    and gives the "bytes_per_sec" speed for each OSD.
+
+    """
+    pytest.skip("This test needs redesign. Skipped for now")
+    ceph_monitors = local_salt_client.cmd(
+        'ceph:mon', 
+        'test.ping', 
+        expr_form='pillar')
+
+    if not ceph_monitors:
+        pytest.skip("Ceph is not found on this environment")
+
+    cmd_result = local_salt_client.cmd(
+        ceph_monitors.keys()[0], 
+        'cmd.run', ["ceph tell osd.* bench -f json"], 
+        expr_form='glob').get(
+            ceph_monitors.keys()[0]).split('\n')
+
+    cmd_result = filter(None, cmd_result)
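+    # Each remaining line is assumed to look like 'osd.N {"bytes_per_sec": ...}':
+    # the first whitespace-separated token is the OSD name and the second is
+    # the JSON benchmark result (assumed to contain no spaces)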
+
+    osd_pool = {}
+    for osd in cmd_result:
+        osd_ = osd.split(" ")
+        osd_pool[osd_[0]] = osd_[1]
+
+    mbps_sum = 0
+    osd_count = 0
+    for osd in osd_pool:
+        osd_count += 1
+        mbps_sum += json.loads(
+            osd_pool[osd])['bytes_per_sec'] / 1000000
+
+    mbps_avg = mbps_sum / osd_count
+    result = {}
+    for osd in osd_pool:
+        mbps = json.loads(
+            osd_pool[osd])['bytes_per_sec'] / 1000000
+        if math.fabs(mbps_avg - mbps) > 10:
+            result[osd] = osd_pool[osd]
+
+    assert len(result) == 0, \
+    "Performance of {0} OSD(s) deviates from the AVG " \
+    "performance ({1} mbps) by more than 10 mbps, " \
+    "please check Ceph for possible problems".format(
+        json.dumps(result, indent=4), mbps_avg)
diff --git a/test_set/cvp-sanity/tests/test_cinder_services.py b/test_set/cvp-sanity/tests/test_cinder_services.py
new file mode 100644
index 0000000..e6b8c8e
--- /dev/null
+++ b/test_set/cvp-sanity/tests/test_cinder_services.py
@@ -0,0 +1,30 @@
+import pytest
+
+
+def test_cinder_services(local_salt_client):
+    cinder_backends_info = local_salt_client.cmd(
+        'cinder:controller',
+        'pillar.get',
+        ['cinder:controller:backend'],
+        expr_form='pillar')
+    if not cinder_backends_info:
+        pytest.skip("Cinder service or cinder:controller pillar \
+        are not found on this environment.")
+    service_down = local_salt_client.cmd(
+        'keystone:server',
+        'cmd.run',
+        ['. /root/keystonercv3; cinder service-list | grep "down\|disabled"'],
+        expr_form='pillar')
+    cinder_volume = local_salt_client.cmd(
+        'keystone:server',
+        'cmd.run',
+        ['. /root/keystonercv3; cinder service-list | grep -c "volume"'],
+        expr_form='pillar')
+    backends_cinder = cinder_backends_info[cinder_backends_info.keys()[0]]
+    backends_num = len(backends_cinder.keys())
+    assert service_down[service_down.keys()[0]] == '', \
+        '''Some cinder services are in the wrong state'''
+    assert cinder_volume[cinder_volume.keys()[0]] == str(backends_num), \
+        'Number of cinder-volume services ({0}) does not match ' \
+        'number of volume backends ({1})'.format(
+        cinder_volume[cinder_volume.keys()[0]], str(backends_num))
diff --git a/test_set/cvp-sanity/tests/test_contrail.py b/test_set/cvp-sanity/tests/test_contrail.py
new file mode 100644
index 0000000..5e7e108
--- /dev/null
+++ b/test_set/cvp-sanity/tests/test_contrail.py
@@ -0,0 +1,87 @@
+import pytest
+import json
+
+pytestmark = pytest.mark.usefixtures("contrail")
+
+STATUS_FILTER = r'grep -Pv "(==|^$|Disk|unix|support|boot|\*\*|FOR NODE)"'
+STATUS_COMMAND = "contrail-status -t 10"
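+# After STATUS_FILTER is applied, each remaining line is expected to contain
+# a service name and its state, e.g. "contrail-vrouter-agent    active"
+# (a hypothetical sample; the tests below split each line on whitespace).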
+
+def get_contrail_status(salt_client, pillar, command, processor):
+    return salt_client.cmd(
+        pillar, 'cmd.run',
+        ['{} | {}'.format(command, processor)],
+        expr_form='pillar'
+    )
+
+def test_contrail_compute_status(local_salt_client):
+    cs = get_contrail_status(local_salt_client, 'nova:compute',
+                             STATUS_COMMAND, STATUS_FILTER)
+    broken_services = []
+
+    for node in cs:
+        for line in cs[node].split('\n'):
+            line = line.strip()
+            if len(line.split(None, 1)) == 1:
+                err_msg = "{0}: {1}".format(
+                    node, line)
+                broken_services.append(err_msg)
+                continue
+            name, status = line.split(None, 1)
+            if status not in {'active'}:
+                err_msg = "{node}:{service} - {status}".format(
+                    node=node, service=name, status=status)
+                broken_services.append(err_msg)
+
+    assert not broken_services, 'Broken services: {}'.format(json.dumps(
+                                                             broken_services,
+                                                             indent=4))
+
+
+def test_contrail_node_status(local_salt_client):
+    command = STATUS_COMMAND
+
+    # TODO: what will be in OpenContrail 5?
+    if pytest.contrail == '4':
+        command = "doctrail all " + command
+    cs = get_contrail_status(local_salt_client,
+                             'opencontrail:client:analytics_node',
+                             command, STATUS_FILTER)
+    cs.update(get_contrail_status(local_salt_client, 'opencontrail:control',
+                                  command, STATUS_FILTER)
+    )
+    broken_services = []
+    for node in cs:
+        for line in cs[node].split('\n'):
+            line = line.strip()
+            if 'crashes/core.java.' not in line:
+                name, status = line.split(None, 1)
+            else:
+                name, status = line, 'FATAL'
+            if status not in {'active', 'backup'}:
+                err_msg = "{node}:{service} - {status}".format(
+                    node=node, service=name, status=status)
+                broken_services.append(err_msg)
+
+    assert not broken_services, 'Broken services: {}'.format(json.dumps(
+                                                             broken_services,
+                                                             indent=4))
+
+
+def test_contrail_vrouter_count(local_salt_client):
+    cs = get_contrail_status(local_salt_client, 'nova:compute',
+                             STATUS_COMMAND, STATUS_FILTER)
+
+    # TODO: what if a compute node lacks these services unintentionally?
+    if not cs:
+        pytest.skip("Contrail services were not found on compute nodes")
+
+    actual_vrouter_count = 0
+    for node in cs:
+        for line in cs[node].split('\n'):
+            if 'contrail-vrouter-nodemgr' in line:
+                actual_vrouter_count += 1
+
+    assert actual_vrouter_count == len(cs.keys()),\
+        'The number of vRouters ({}) differs' \
+        ' from the number of compute nodes ({})'.format(actual_vrouter_count,
+                                                        len(cs.keys()))
diff --git a/test_set/cvp-sanity/tests/test_default_gateway.py b/test_set/cvp-sanity/tests/test_default_gateway.py
new file mode 100644
index 0000000..36ca70e
--- /dev/null
+++ b/test_set/cvp-sanity/tests/test_default_gateway.py
@@ -0,0 +1,23 @@
+import json
+import pytest
+import os
+import utils
+
+
+def test_check_default_gateways(local_salt_client, nodes_in_group):
+    netstat_info = local_salt_client.cmd(
+        "L@"+','.join(nodes_in_group), 'cmd.run', ['ip r | sed -n 1p'], expr_form='compound')
+
+    gateways = {}
+    nodes = netstat_info.keys()
+
+    for node in nodes:
+        if netstat_info[node] not in gateways:
+            gateways[netstat_info[node]] = [node]
+        else:
+            gateways[netstat_info[node]].append(node)
+
+    assert len(gateways.keys()) == 1, \
+        "There were found few gateways: {gw}".format(
+        gw=json.dumps(gateways, indent=4)
+    )
diff --git a/test_set/cvp-sanity/tests/test_drivetrain.py b/test_set/cvp-sanity/tests/test_drivetrain.py
new file mode 100644
index 0000000..4628e70
--- /dev/null
+++ b/test_set/cvp-sanity/tests/test_drivetrain.py
@@ -0,0 +1,330 @@
+import jenkins
+from xml.dom import minidom
+import utils
+import json
+import pytest
+import time
+import os
+from pygerrit2 import GerritRestAPI, HTTPBasicAuth
+from requests import HTTPError
+import git
+import ldap
+import ldap.modlist as modlist
+
+def join_to_gerrit(local_salt_client, gerrit_user, gerrit_password):
+    gerrit_port = local_salt_client.cmd(
+        'I@gerrit:client and not I@salt:master',
+        'pillar.get',
+        ['_param:haproxy_gerrit_bind_port'],
+        expr_form='compound').values()[0]
+    gerrit_address = local_salt_client.cmd(
+        'I@gerrit:client and not I@salt:master',
+        'pillar.get',
+        ['_param:haproxy_gerrit_bind_host'],
+        expr_form='compound').values()[0]
+    url = 'http://{0}:{1}'.format(gerrit_address,gerrit_port)
+    auth = HTTPBasicAuth(gerrit_user, gerrit_password)
+    rest = GerritRestAPI(url=url, auth=auth)
+    return rest
+
+def join_to_jenkins(local_salt_client, jenkins_user, jenkins_password):
+    jenkins_port = local_salt_client.cmd(
+        'I@jenkins:client and not I@salt:master',
+        'pillar.get',
+        ['_param:haproxy_jenkins_bind_port'],
+        expr_form='compound').values()[0]
+    jenkins_address = local_salt_client.cmd(
+        'I@jenkins:client and not I@salt:master',
+        'pillar.get',
+        ['_param:haproxy_jenkins_bind_host'],
+        expr_form='compound').values()[0]
+    jenkins_url = 'http://{0}:{1}'.format(jenkins_address,jenkins_port)
+    server = jenkins.Jenkins(jenkins_url, username=jenkins_user, password=jenkins_password)
+    return server
+
+def get_password(local_salt_client,service):
+    password = local_salt_client.cmd(
+        service,
+        'pillar.get',
+        ['_param:openldap_admin_password'],
+        expr_form='pillar').values()[0]
+    return password
+
+def test_drivetrain_gerrit(local_salt_client):
+    gerrit_password = get_password(local_salt_client,'gerrit:client')
+    gerrit_error = ''
+    current_date = time.strftime("%Y%m%d-%H.%M.%S", time.localtime())
+    test_proj_name = "test-dt-{0}".format(current_date)
+    gerrit_port = local_salt_client.cmd(
+        'I@gerrit:client and not I@salt:master',
+        'pillar.get',
+        ['_param:haproxy_gerrit_bind_port'],
+        expr_form='compound').values()[0]
+    gerrit_address = local_salt_client.cmd(
+        'I@gerrit:client and not I@salt:master',
+        'pillar.get',
+        ['_param:haproxy_gerrit_bind_host'],
+        expr_form='compound').values()[0]
+    try:
+        #Connecting to gerrit and check connection
+        server = join_to_gerrit(local_salt_client,'admin',gerrit_password)
+        gerrit_check = server.get("/changes/?q=owner:self%20status:open")
+        #Check deleteproject plugin and skip test if the plugin is not installed
+        gerrit_plugins = server.get("/plugins/?all")
+        if 'deleteproject' not in gerrit_plugins:
+            pytest.skip("Delete-project plugin is not installed")
+        #Create test project and add description
+        server.put("/projects/"+test_proj_name)
+        server.put("/projects/"+test_proj_name+"/description",json={"description":"Test DriveTrain project","commit_message": "Update the project description"})
+    except HTTPError as e:
+        gerrit_error = e
+    try:
+        #Create test folder and init git
+        repo_dir = os.path.join(os.getcwd(),test_proj_name)
+        file_name = os.path.join(repo_dir, current_date)
+        repo = git.Repo.init(repo_dir)
+        #Add remote url for this git repo
+        origin = repo.create_remote('origin', 'http://admin:{1}@{2}:{3}/{0}.git'.format(test_proj_name,gerrit_password,gerrit_address,gerrit_port))
+        #Add commit-msg hook to automatically add Change-Id to our commit
+        os.system("curl -Lo {0}/.git/hooks/commit-msg 'http://admin:{1}@{2}:{3}/tools/hooks/commit-msg' > /dev/null 2>&1".format(repo_dir,gerrit_password,gerrit_address,gerrit_port))
+        os.system("chmod u+x {0}/.git/hooks/commit-msg".format(repo_dir))
+        #Create a test file
+        f = open(file_name, 'w+')
+        f.write("This is a test file for DriveTrain test")
+        f.close()
+        #Add file to git and commit it to Gerrit for review
+        repo.index.add([file_name])
+        repo.index.commit("This is a test commit for DriveTrain test")
+        repo.git.push("origin", "HEAD:refs/for/master")
+        #Get change id from Gerrit. Set Code-Review +2 and submit this change
+        changes = server.get("/changes/?q=project:{0}".format(test_proj_name))
+        last_change = changes[0].get('change_id')
+        server.post("/changes/{0}/revisions/1/review".format(last_change),json={"message":"All is good","labels":{"Code-Review":"+2"}})
+        server.post("/changes/{0}/submit".format(last_change))
+    except HTTPError as e:
+        gerrit_error = e
+    finally:
+        #Delete test project
+        server.post("/projects/"+test_proj_name+"/deleteproject~delete")
+    assert gerrit_error == '',\
+        'Something is wrong with Gerrit: {}'.format(gerrit_error)
+
+def test_drivetrain_openldap(local_salt_client):
+    '''Create a test user 'DT_test_user' in openldap,
+    add the user to admin group, login using the user to Jenkins.
+    Add the user to devops group in Gerrit and then login to Gerrit,
+    using test_user credentials. Finally, delete the user from admin
+    group and openldap
+    '''
+    ldap_password = get_password(local_salt_client,'openldap:client')
+    #Check that ldap_password exists, otherwise skip the test
+    if not ldap_password:
+        pytest.skip("Openldap service or openldap:client pillar \
+        are not found on this environment.")
+    ldap_port = local_salt_client.cmd(
+        'I@openldap:client and not I@salt:master',
+        'pillar.get',
+        ['_param:haproxy_openldap_bind_port'],
+        expr_form='compound').values()[0]
+    ldap_address = local_salt_client.cmd(
+        'I@openldap:client and not I@salt:master',
+        'pillar.get',
+        ['_param:haproxy_openldap_bind_host'],
+        expr_form='compound').values()[0]
+    ldap_dc = local_salt_client.cmd(
+        'openldap:client',
+        'pillar.get',
+        ['_param:openldap_dn'],
+        expr_form='pillar').values()[0]
+    ldap_con_admin = local_salt_client.cmd(
+        'openldap:client',
+        'pillar.get',
+        ['openldap:client:server:auth:user'],
+        expr_form='pillar').values()[0]
+    ldap_url = 'ldap://{0}:{1}'.format(ldap_address,ldap_port)
+    ldap_error = ''
+    ldap_result = ''
+    gerrit_result = ''
+    gerrit_error = ''
+    jenkins_error = ''
+    #Test user's CN
+    test_user_name = 'DT_test_user'
+    test_user = 'cn={0},ou=people,{1}'.format(test_user_name,ldap_dc)
+    #Admins group CN
+    admin_gr_dn = 'cn=admins,ou=groups,{0}'.format(ldap_dc)
+    #List of attributes for test user
+    attrs = {}
+    attrs['objectclass'] = ['organizationalRole','simpleSecurityObject','shadowAccount']
+    attrs['cn'] = test_user_name
+    attrs['uid'] = test_user_name
+    attrs['userPassword'] = 'aSecretPassw'
+    attrs['description'] = 'Test user for CVP DT test'
+    searchFilter = 'cn={0}'.format(test_user_name)
+    #Get a test job name from config
+    config = utils.get_configuration()
+    jenkins_cvp_job = config['jenkins_cvp_job']
+    #Open connection to ldap and creating test user in admins group
+    try:
+        ldap_server = ldap.initialize(ldap_url)
+        ldap_server.simple_bind_s(ldap_con_admin,ldap_password)
+        ldif = modlist.addModlist(attrs)
+        ldap_server.add_s(test_user,ldif)
+        ldap_server.modify_s(admin_gr_dn,[(ldap.MOD_ADD, 'memberUid', [test_user_name],)],)
+        #Check search test user in LDAP
+        searchScope = ldap.SCOPE_SUBTREE
+        ldap_result = ldap_server.search_s(ldap_dc, searchScope, searchFilter)
+    except ldap.LDAPError as e:
+        ldap_error = e
+    try:
+        #Check connection between Jenkins and LDAP
+        jenkins_server = join_to_jenkins(local_salt_client,test_user_name,'aSecretPassw')
+        jenkins_version = jenkins_server.get_job_name(jenkins_cvp_job)
+        #Check connection between Gerrit and LDAP
+        gerrit_server = join_to_gerrit(local_salt_client,'admin',ldap_password)
+        gerrit_check = gerrit_server.get("/changes/?q=owner:self%20status:open")
+        #Add test user to devops-contrib group in Gerrit and check login
+        _link = "/groups/devops-contrib/members/{0}".format(test_user_name)
+        gerrit_add_user = gerrit_server.put(_link)
+        gerrit_server = join_to_gerrit(local_salt_client,test_user_name,'aSecretPassw')
+        gerrit_result = gerrit_server.get("/changes/?q=owner:self%20status:open")
+    except HTTPError as e:
+        gerrit_error = e
+    except jenkins.JenkinsException as e:
+        jenkins_error = e
+    finally:
+        ldap_server.modify_s(admin_gr_dn,[(ldap.MOD_DELETE, 'memberUid', [test_user_name],)],)
+        ldap_server.delete_s(test_user)
+        ldap_server.unbind_s()
+    assert ldap_error == '', \
+        '''Something is wrong with the connection to LDAP:
+            {0}'''.format(ldap_error)
+    assert jenkins_error == '', \
+        '''Connection to Jenkins was not established:
+            {0}'''.format(jenkins_error)
+    assert gerrit_error == '', \
+        '''Connection to Gerrit was not established:
+            {0}'''.format(gerrit_error)
+    assert ldap_result !=[], \
+        '''Test user was not found'''
+
+def test_drivetrain_jenkins_job(local_salt_client):
+    jenkins_password = get_password(local_salt_client,'jenkins:client')
+    server = join_to_jenkins(local_salt_client,'admin',jenkins_password)
+    #Getting Jenkins test job name from configuration
+    config = utils.get_configuration()
+    jenkins_test_job = config['jenkins_test_job']
+    if not server.get_job_name(jenkins_test_job):
+        server.create_job(jenkins_test_job, jenkins.EMPTY_CONFIG_XML)
+    if server.get_job_name(jenkins_test_job):
+        next_build_num = server.get_job_info(jenkins_test_job)['nextBuildNumber']
+        #If this is first build number skip building check
+        if next_build_num != 1:
+            #Check that test job is not running at this moment,
+            #Otherwise skip the test
+            last_build_num = server.get_job_info(jenkins_test_job)['lastBuild'].get('number')
+            last_build_status = server.get_build_info(jenkins_test_job,last_build_num)['building']
+            if last_build_status:
+                pytest.skip("Test job {0} is already running").format(jenkins_test_job)
+        server.build_job(jenkins_test_job)
+        timeout = 0
+        #Start with job_status True to cover the delay between triggering the build and it actually starting.
+        job_status = True
+        while job_status and ( timeout < 180 ):
+            time.sleep(10)
+            timeout += 10
+            job_status = server.get_build_info(jenkins_test_job,next_build_num)['building']
+        job_result = server.get_build_info(jenkins_test_job,next_build_num)['result']
+    else:
+        pytest.skip("The job {0} was not found").format(test_job_name)
+    assert job_result == 'SUCCESS', \
+        '''Test job '{0}' build was not successful or the timeout is too small
+         '''.format(jenkins_test_job)
+
+def test_drivetrain_services_replicas(local_salt_client):
+    salt_output = local_salt_client.cmd(
+        'I@gerrit:client',
+        'cmd.run',
+        ['docker service ls'],
+        expr_form='compound')
+    wrong_items = []
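+    # In 'docker service ls' output the REPLICAS column looks like "3/3";
+    # comparing the characters around '/' checks that the actual replica
+    # count equals the desired one (note: this only works for single digits)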
+    for line in salt_output[salt_output.keys()[0]].split('\n'):
+        if line[line.find('/') - 1] != line[line.find('/') + 1] \
+           and 'replicated' in line:
+            wrong_items.append(line)
+    assert len(wrong_items) == 0, \
+        '''Some DriveTrain services don't have the expected number of replicas:
+              {}'''.format(json.dumps(wrong_items, indent=4))
+
+
+def test_drivetrain_components_and_versions(local_salt_client):
+    config = utils.get_configuration()
+    if not config['drivetrain_version']:
+        version = \
+            local_salt_client.cmd(
+                'I@salt:master',
+                'pillar.get',
+                ['_param:mcp_version'],
+                expr_form='compound').values()[0] or \
+            local_salt_client.cmd(
+                'I@salt:master',
+                'pillar.get',
+                ['_param:apt_mk_version'],
+                expr_form='compound').values()[0]
+        if not version:
+            pytest.skip("drivetrain_version is not defined. Skipping")
+    else:
+        version = config['drivetrain_version']
+    salt_output = local_salt_client.cmd(
+        'I@gerrit:client',
+        'cmd.run',
+        ['docker service ls'],
+        expr_form='compound')
+    #  'ldap_server' removed because it is an external component now v 1.1.8
+    not_found_services = ['gerrit_db', 'gerrit_server', 'jenkins_master',
+                          'jenkins_slave01', 'jenkins_slave02',
+                          'jenkins_slave03', 'ldap_admin', 'docker_registry',
+                          'docker_visualizer']
+    version_mismatch = []
+    for line in salt_output[salt_output.keys()[0]].split('\n'):
+        for service in not_found_services:
+            if service in line:
+                not_found_services.remove(service)
+                if version != line.split()[4].split(':')[1]:
+                    version_mismatch.append("{0}: expected "
+                        "version is {1}, actual - {2}".format(service,version,
+                                                              line.split()[4].split(':')[1]))
+                continue
+    assert len(not_found_services) == 0, \
+        '''Some DriveTrain components are not found:
+              {}'''.format(json.dumps(not_found_services, indent=4))
+    assert len(version_mismatch) == 0, \
+        '''Version mismatch found:
+              {}'''.format(json.dumps(version_mismatch, indent=4))
+
+
+def test_jenkins_jobs_branch(local_salt_client):
+    config = utils.get_configuration()
+    expected_version = config['drivetrain_version'] or []
+    if not expected_version or expected_version == '':
+        pytest.skip("drivetrain_version is not defined. Skipping")
+    jenkins_password = get_password(local_salt_client,'jenkins:client')
+    version_mismatch = []
+    server = join_to_jenkins(local_salt_client,'admin',jenkins_password)
+    for job_instance in server.get_jobs():
+        job_name = job_instance.get('name')
+        job_config = server.get_job_config(job_name)
+        xml_data = minidom.parseString(job_config)
+        BranchSpec = xml_data.getElementsByTagName('hudson.plugins.git.BranchSpec')
+        #We use the master branch for pipeline-library in case of 'testing', 'stable' and 'nightly' versions
+        if expected_version in ['testing','nightly','stable']:
+            expected_version = 'master'
+        if BranchSpec:
+            actual_version = BranchSpec[0].getElementsByTagName('name')[0].childNodes[0].data
+            if ( actual_version != expected_version ) and ( job_name not in ['upgrade-mcp-release'] ) :
+                version_mismatch.append("Job {0} has {1} branch."
+                                        "Expected {2}".format(job_name,
+                                                              actual_version,
+                                                              expected_version))
+    assert len(version_mismatch) == 0, \
+        '''Some DriveTrain jobs have version/branch mismatch:
+              {}'''.format(json.dumps(version_mismatch, indent=4))
diff --git a/test_set/cvp-sanity/tests/test_etc_hosts.py b/test_set/cvp-sanity/tests/test_etc_hosts.py
new file mode 100644
index 0000000..cd9fc35
--- /dev/null
+++ b/test_set/cvp-sanity/tests/test_etc_hosts.py
@@ -0,0 +1,23 @@
+import pytest
+import json
+import os
+import utils
+
+
+def test_etc_hosts(local_salt_client):
+    active_nodes = utils.get_active_nodes()
+    nodes_info = local_salt_client.cmd(
+        utils.list_to_target_string(active_nodes, 'or'), 'cmd.run',
+        ['cat /etc/hosts'],
+        expr_form='compound')
+    result = {}
+    for node in nodes_info.keys():
+        for nd in nodes_info.keys():
+            if node not in nodes_info[nd]:
+                if node in result:
+                    result[node] += ',' + nd
+                else:
+                    result[node] = nd
+    assert len(result) <= 1, \
+        "Some hosts are not presented in /etc/hosts: {0}".format(
+         json.dumps(result, indent=4))     
diff --git a/test_set/cvp-sanity/tests/test_galera_cluster.py b/test_set/cvp-sanity/tests/test_galera_cluster.py
new file mode 100644
index 0000000..676f09b
--- /dev/null
+++ b/test_set/cvp-sanity/tests/test_galera_cluster.py
@@ -0,0 +1,24 @@
+import pytest
+
+
+def test_galera_cluster_status(local_salt_client):
+    gs = local_salt_client.cmd(
+        'galera:*',
+        'cmd.run',
+        ['salt-call mysql.status | grep -A1 wsrep_cluster_size | tail -n1'],
+        expr_form='pillar')
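+    # the grep/tail pipeline leaves only the value line following the
+    # wsrep_cluster_size key, i.e. the cluster size as seen by that node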
+
+    if not gs:
+        pytest.skip("Galera service or galera:* pillar \
+        are not found on this environment.")
+
+    size_cluster = []
+    amount = len(gs)
+
+    for item in gs.values():
+        size_cluster.append(item.split('\n')[-1].strip())
+
+    assert all(item == str(amount) for item in size_cluster), \
+        '''Inconsistency found within the cloud. The MySQL Galera cluster
+              is probably broken; the cluster size gathered from the nodes:
+              {}'''.format(gs)
diff --git a/test_set/cvp-sanity/tests/test_k8s.py b/test_set/cvp-sanity/tests/test_k8s.py
new file mode 100644
index 0000000..022eb1c
--- /dev/null
+++ b/test_set/cvp-sanity/tests/test_k8s.py
@@ -0,0 +1,142 @@
+import pytest
+import json
+import os
+
+
+def test_k8s_get_cs_status(local_salt_client):
+    result = local_salt_client.cmd(
+        'etcd:server', 'cmd.run',
+        ['kubectl get cs'],
+        expr_form='pillar'
+    )
+    errors = []
+    if not result:
+        pytest.skip("k8s is not found on this environment")
+    for node in result:
+        for line in result[node].split('\n'):
+            line = line.strip()
+            if 'MESSAGE' in line or 'proto' in line:
+                continue
+            else:
+                if 'Healthy' not in line:
+                    errors.append(line)
+        break
+    assert not errors, 'k8s is not healthy: {}'.format(json.dumps(
+                                                       errors,
+                                                       indent=4))
+
+
+def test_k8s_get_nodes_status(local_salt_client):
+    result = local_salt_client.cmd(
+        'etcd:server', 'cmd.run',
+        ['kubectl get nodes'],
+        expr_form='pillar'
+    )
+    errors = []
+    if not result:
+        pytest.skip("k8s is not found on this environment")
+    for node in result:
+        for line in result[node].split('\n'):
+            line = line.strip()
+            if 'STATUS' in line or 'proto' in line:
+                continue
+            else:
+                if 'Ready' != line.split()[1]:
+                    errors.append(line)
+        break
+    assert not errors, 'k8s is not healthy: {}'.format(json.dumps(
+                                                       errors,
+                                                       indent=4))
+
+
+def test_k8s_get_calico_status(local_salt_client):
+    result = local_salt_client.cmd(
+        'kubernetes:pool', 'cmd.run',
+        ['calicoctl node status'],
+        expr_form='pillar'
+    )
+    errors = []
+    if not result:
+        pytest.skip("k8s is not found on this environment")
+    for node in result:
+        for line in result[node].split('\n'):
+            line = line.strip('|')
+            if 'STATE' in line or '| ' not in line:
+                continue
+            else:
+                if 'up' not in line or 'Established' not in line:
+                    errors.append(line)
+    assert not errors, 'Calico node status is not good: {}'.format(json.dumps(
+                                                                   errors,
+                                                                   indent=4))
+
+
+def test_k8s_cluster_status(local_salt_client):
+    result = local_salt_client.cmd(
+        'kubernetes:master', 'cmd.run',
+        ['kubectl cluster-info'],
+        expr_form='pillar'
+    )
+    errors = []
+    if not result:
+        pytest.skip("k8s is not found on this environment")
+    for node in result:
+        for line in result[node].split('\n'):
+            if 'proto' in line or 'further' in line or line == '':
+                continue
+            else:
+                if 'is running' not in line:
+                    errors.append(line)
+        break
+    assert not errors, 'k8s cluster info is not good: {}'.format(json.dumps(
+                                                                 errors,
+                                                                 indent=4))
+
+
+def test_k8s_kubelet_status(local_salt_client):
+    result = local_salt_client.cmd(
+        'kubernetes:pool', 'service.status',
+        ['kubelet'],
+        expr_form='pillar'
+    )
+    errors = []
+    if not result:
+        pytest.skip("k8s is not found on this environment")
+    for node in result:
+        if not result[node]:
+            errors.append(node)
+    assert not errors, 'Kubelet is not running on these nodes: {}'.format(
+                       errors)
+
+
+def test_k8s_check_system_pods_status(local_salt_client):
+    result = local_salt_client.cmd(
+        'etcd:server', 'cmd.run',
+        ['kubectl --namespace="kube-system" get pods'],
+        expr_form='pillar'
+    )
+    errors = []
+    if not result:
+        pytest.skip("k8s is not found on this environment")
+    for node in result:
+        for line in result[node].split('\n'):
+            line = line.strip('|')
+            if 'STATUS' in line or 'proto' in line:
+                continue
+            else:
+                if 'Running' not in line:
+                    errors.append(line)
+        break
+    assert not errors, 'Some system pods are not running: {}'.format(json.dumps(
+                                                                   errors,
+                                                                   indent=4))
+
+
+def test_check_k8s_image_availability(local_salt_client):
+    # not a test actually
+    hostname = 'https://docker-dev-virtual.docker.mirantis.net/artifactory/webapp/'
+    response = os.system('curl -s --insecure {} > /dev/null'.format(hostname))
+    if response == 0:
+        print '{} is AVAILABLE'.format(hostname)
+    else:
+        print '{} IS NOT AVAILABLE'.format(hostname)
diff --git a/test_set/cvp-sanity/tests/test_mtu.py b/test_set/cvp-sanity/tests/test_mtu.py
new file mode 100644
index 0000000..9930fea
--- /dev/null
+++ b/test_set/cvp-sanity/tests/test_mtu.py
@@ -0,0 +1,66 @@
+import pytest
+import json
+import utils
+import os
+
+
+def test_mtu(local_salt_client, nodes_in_group):
+    testname = os.path.basename(__file__).split('.')[0]
+    config = utils.get_configuration()
+    skipped_ifaces = config.get(testname)["skipped_ifaces"] or \
+        ["bonding_masters", "lo", "veth", "tap", "cali", "qv", "qb", "br-int", "vxlan"]
+    total = {}
+    network_info = local_salt_client.cmd(
+        "L@"+','.join(nodes_in_group), 'cmd.run', ['ls /sys/class/net/'], expr_form='compound')
+
+    kvm_nodes = local_salt_client.cmd(
+        'salt:control', 'test.ping', expr_form='pillar').keys()
+
+    if len(network_info.keys()) < 2:
+        pytest.skip("Nothing to compare - only 1 node")
+
+    for node, ifaces_info in network_info.iteritems():
+        if node in kvm_nodes:
+            kvm_info = local_salt_client.cmd(node, 'cmd.run',
+                                             ["virsh list | "
+                                              "awk '{print $2}' | "
+                                              "xargs -n1 virsh domiflist | "
+                                              "grep -v br-pxe | grep br- | "
+                                              "awk '{print $1}'"])
+            ifaces_info = kvm_info.get(node)
+        node_ifaces = ifaces_info.split('\n')
+        ifaces = {}
+        for iface in node_ifaces:
+            for skipped_iface in skipped_ifaces:
+                if skipped_iface in iface:
+                    break
+            else:
+                iface_mtu = local_salt_client.cmd(node, 'cmd.run',
+                                                  ['cat /sys/class/'
+                                                   'net/{}/mtu'.format(iface)])
+                ifaces[iface] = iface_mtu.get(node)
+        total[node] = ifaces
+
+    nodes = []
+    mtu_data = []
+    my_set = set()
+
+    for node in total:
+        nodes.append(node)
+        my_set.update(total[node].keys())
+    for interf in my_set:
+        diff = []
+        row = []
+        for node in nodes:
+            if interf in total[node].keys():
+                diff.append(total[node][interf])
+                row.append("{}: {}".format(node, total[node][interf]))
+            else:
+                row.append("{}: No interface".format(node))
+        if diff.count(diff[0]) < len(nodes):
+            row.sort()
+            row.insert(0, interf)
+            mtu_data.append(row)
+    assert len(mtu_data) == 0, \
+        "Several problems found: {0}".format(
+        json.dumps(mtu_data, indent=4))
diff --git a/test_set/cvp-sanity/tests/test_nova_services.py b/test_set/cvp-sanity/tests/test_nova_services.py
new file mode 100644
index 0000000..8fdadd6
--- /dev/null
+++ b/test_set/cvp-sanity/tests/test_nova_services.py
@@ -0,0 +1,16 @@
+import pytest
+
+
+def test_nova_services_status(local_salt_client):
+    result = local_salt_client.cmd(
+        'keystone:server',
+        'cmd.run',
+        ['. /root/keystonercv3; nova service-list | grep "down\|disabled" | grep -v "Forced down"'],
+        expr_form='pillar')
+
+    if not result:
+        pytest.skip("Nova service or keystone:server pillar \
+        are not found on this environment.")
+
+    assert result[result.keys()[0]] == '', \
+        '''Some nova services are in the wrong state'''
diff --git a/test_set/cvp-sanity/tests/test_ntp_sync.py b/test_set/cvp-sanity/tests/test_ntp_sync.py
new file mode 100644
index 0000000..6e35215
--- /dev/null
+++ b/test_set/cvp-sanity/tests/test_ntp_sync.py
@@ -0,0 +1,28 @@
+import utils
+import os
+
+
+def test_ntp_sync(local_salt_client):
+    testname = os.path.basename(__file__).split('.')[0]
+    active_nodes = utils.get_active_nodes(os.path.basename(__file__))
+    config = utils.get_configuration()
+    fail = {}
+    saltmaster_time = int(local_salt_client.cmd(
+        'salt:master',
+        'cmd.run',
+        ['date +%s'],
+        expr_form='pillar').values()[0])
+    nodes_time = local_salt_client.cmd(
+        utils.list_to_target_string(active_nodes, 'or'),
+        'cmd.run',
+        ['date +%s'],
+        expr_form='compound')
+    diff = config.get(testname)["time_deviation"] or 30
+    for node, time in nodes_time.iteritems():
+        if (int(time) - saltmaster_time) > diff or \
+                (int(time) - saltmaster_time) < -diff:
+            fail[node] = time
+
+    assert not fail, 'SaltMaster time: {}\n' \
+                     'Nodes with time mismatch:\n {}'.format(saltmaster_time,
+                                                             fail)
diff --git a/test_set/cvp-sanity/tests/test_oss.py b/test_set/cvp-sanity/tests/test_oss.py
new file mode 100644
index 0000000..58a4151
--- /dev/null
+++ b/test_set/cvp-sanity/tests/test_oss.py
@@ -0,0 +1,39 @@
+import requests
+import csv
+import json
+
+
+def test_oss_status(local_salt_client):
+    result = local_salt_client.cmd(
+        'docker:swarm:role:master',
+        'pillar.fetch',
+        ['haproxy:proxy:listen:stats:binds:address'],
+        expr_form='pillar')
+    HAPROXY_STATS_IP = [node for node in result if result[node]]
+    proxies = {"http": None, "https": None}
+    csv_result = requests.get('http://{}:9600/haproxy?stats;csv'.format(
+                              result[HAPROXY_STATS_IP[0]]),
+                              proxies=proxies).content
+    data = csv_result.lstrip('# ')
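+    # the haproxy stats CSV begins with a '# pxname,svname,...,status,...'
+    # header line; stripping the leading '# ' lets csv.DictReader below use
+    # it as the header row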
+    wrong_data = []
+    list_of_services = ['aptly', 'openldap', 'gerrit', 'jenkins', 'postgresql',
+                        'pushkin', 'rundeck', 'elasticsearch']
+    for service in list_of_services:
+        check = local_salt_client.cmd(
+            '{}:client'.format(service),
+            'test.ping',
+            expr_form='pillar')
+        if check:
+            lines = [row for row in csv.DictReader(data.splitlines())
+                     if service in row['pxname']]
+            for row in lines:
+                info = "Service {0} with svname {1} and status {2}".format(
+                    row['pxname'], row['svname'], row['status'])
+                if row['svname'] == 'FRONTEND' and row['status'] != 'OPEN':
+                        wrong_data.append(info)
+                if row['svname'] != 'FRONTEND' and row['status'] != 'UP':
+                        wrong_data.append(info)
+
+    assert len(wrong_data) == 0, \
+        '''Some haproxy services are in wrong state
+              {}'''.format(json.dumps(wrong_data, indent=4))
diff --git a/test_set/cvp-sanity/tests/test_packet_checker.py b/test_set/cvp-sanity/tests/test_packet_checker.py
new file mode 100644
index 0000000..f76c339
--- /dev/null
+++ b/test_set/cvp-sanity/tests/test_packet_checker.py
@@ -0,0 +1,87 @@
+import pytest
+import json
+
+
+def test_check_package_versions(local_salt_client, nodes_in_group):
+    output = local_salt_client.cmd("L@"+','.join(nodes_in_group),
+                                   'lowpkg.list_pkgs',
+                                   expr_form='compound')
+    # Let's exclude cid01 and dbs01 nodes from this check
+    exclude_nodes = local_salt_client.cmd("I@galera:master or I@gerrit:client",
+                                          'test.ping',
+                                          expr_form='compound').keys()
+    total_nodes = [i for i in output.keys() if i not in exclude_nodes]
+    if len(total_nodes) < 2:
+        pytest.skip("Nothing to compare - only 1 node")
+
+    nodes = []
+    pkts_data = []
+    my_set = set()
+
+    for node in total_nodes:
+        nodes.append(node)
+        my_set.update(output[node].keys())
+
+    for deb in my_set:
+        diff = []
+        row = []
+        for node in nodes:
+            if deb in output[node].keys():
+                diff.append(output[node][deb])
+                row.append("{}: {}".format(node, output[node][deb]))
+            else:
+                row.append("{}: No package".format(node))
+        if diff.count(diff[0]) < len(nodes):
+            row.sort()
+            row.insert(0, deb)
+            pkts_data.append(row)
+    assert len(pkts_data) <= 1, \
+        "Several problems found: {0}".format(
+        json.dumps(pkts_data, indent=4))
+
+
+def test_check_module_versions(local_salt_client, nodes_in_group):
+    pre_check = local_salt_client.cmd(
+        "L@"+','.join(nodes_in_group),
+        'cmd.run',
+        ['dpkg -l | grep "python-pip "'],
+        expr_form='compound')
+    if pre_check.values().count('') > 0:
+        pytest.skip("pip is not installed on one or more nodes")
+
+    exclude_nodes = local_salt_client.cmd("I@galera:master or I@gerrit:client",
+                                          'test.ping',
+                                          expr_form='compound').keys()
+    total_nodes = [i for i in pre_check.keys() if i not in exclude_nodes]
+
+    if len(total_nodes) < 2:
+        pytest.skip("Nothing to compare - only 1 node")
+    output = local_salt_client.cmd("L@"+','.join(nodes_in_group),
+                                   'pip.freeze', expr_form='compound')
+
+    nodes = []
+
+    pkts_data = []
+    my_set = set()
+
+    for node in total_nodes:
+        nodes.append(node)
+        my_set.update([x.split("=")[0] for x in output[node]])
+        output[node] = dict([x.split("==") for x in output[node]])
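+        # pip.freeze entries look like 'requests==2.18.4'; map each module
+        # name to its version for the per-node comparison below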
+
+    for deb in my_set:
+        diff = []
+        row = []
+        for node in nodes:
+            if deb in output[node].keys():
+                diff.append(output[node][deb])
+                row.append("{}: {}".format(node, output[node][deb]))
+            else:
+                row.append("{}: No module".format(node))
+        if diff.count(diff[0]) < len(nodes):
+            row.sort()
+            row.insert(0, deb)
+            pkts_data.append(row)
+    assert len(pkts_data) <= 1, \
+        "Several problems found: {0}".format(
+        json.dumps(pkts_data, indent=4))
diff --git a/test_set/cvp-sanity/tests/test_rabbit_cluster.py b/test_set/cvp-sanity/tests/test_rabbit_cluster.py
new file mode 100644
index 0000000..f3f03e1
--- /dev/null
+++ b/test_set/cvp-sanity/tests/test_rabbit_cluster.py
@@ -0,0 +1,44 @@
+import utils
+
+
+def test_checking_rabbitmq_cluster(local_salt_client):
+    # disable config for this test
+    # it may be reintroduced in future
+    config = utils.get_configuration()
+    # request pillar data from rmq nodes
+    rabbitmq_pillar_data = local_salt_client.cmd(
+        'rabbitmq:server', 'pillar.data',
+        ['rabbitmq:cluster'], expr_form='pillar')
+    # creating dictionary {node:cluster_size_for_the_node}
+    # with required cluster size for each node
+    control_dict = {}
+    required_cluster_size_dict = {}
+    # request actual data from rmq nodes
+    rabbit_actual_data = local_salt_client.cmd(
+        'rabbitmq:server', 'cmd.run',
+        ['rabbitmqctl cluster_status'], expr_form='pillar')
+    for node in rabbitmq_pillar_data:
+        if node in config.get('skipped_nodes'):
+            del rabbit_actual_data[node]
+            continue
+        cluster_size_from_the_node = len(
+            rabbitmq_pillar_data[node]['rabbitmq:cluster']['members'])
+        required_cluster_size_dict.update({node: cluster_size_from_the_node})
+
+    # find actual cluster size for each node
+    for node in rabbit_actual_data:
+        running_nodes_count = 0
+        # rabbitmqctl cluster_status output contains
+        # 3 * # of nodes 'rabbit@' entries + 1
+        running_nodes_count = (rabbit_actual_data[node].count('rabbit@') - 1)/3
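+        # e.g. with the rule above a 3-node cluster yields
+        # 3 * 3 + 1 = 10 'rabbit@' occurrences, so (10 - 1) / 3 = 3 running nodes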
+        # update control dictionary with values
+        # {node:actual_cluster_size_for_node}
+        if required_cluster_size_dict[node] != running_nodes_count:
+            control_dict.update({node: running_nodes_count})
+
+    assert not len(control_dict), "Inconsistency found within the cloud. " \
+                                  "The RabbitMQ cluster is probably broken; " \
+                                  "the cluster size for each node " \
+                                  "should be {} but the following " \
+                                  "nodes have other values: {}".format(
+        len(required_cluster_size_dict.keys()), control_dict)
diff --git a/test_set/cvp-sanity/tests/test_repo_list.py b/test_set/cvp-sanity/tests/test_repo_list.py
new file mode 100644
index 0000000..da99ee8
--- /dev/null
+++ b/test_set/cvp-sanity/tests/test_repo_list.py
@@ -0,0 +1,59 @@
+import json
+import pytest
+import utils
+
+
+def test_list_of_repo_on_nodes(local_salt_client, nodes_in_group):
+    info_salt = local_salt_client.cmd('L@' + ','.join(
+                                      nodes_in_group),
+                                      'pillar.data', ['linux:system:repo'],
+                                      expr_form='compound')
+
+    # check if some repos are disabled
+    for node in info_salt.keys():
+        repos = info_salt[node]["linux:system:repo"]
+        for repo in repos.keys():
+            repository = repos[repo]
+            if "enabled" in repository:
+                if not repository["enabled"]:
+                    repos.pop(repo)
+
+    raw_actual_info = local_salt_client.cmd(
+        'L@' + ','.join(
+        nodes_in_group),
+        'cmd.run',
+        ['cat /etc/apt/sources.list.d/*;'
+         'cat /etc/apt/sources.list|grep deb|grep -v "#"'],
+        expr_form='compound')
+    actual_repo_list = [item.replace('/ ', ' ').replace('[arch=amd64] ', '')
+                        for item in raw_actual_info.values()[0].split('\n')]
+    if info_salt.values()[0]['linux:system:repo'] == '':
+        expected_salt_data = ''
+    else:
+        expected_salt_data = [repo['source'].replace('/ ', ' ')
+                                            .replace('[arch=amd64] ', '')
+                              for repo in info_salt.values()[0]
+                              ['linux:system:repo'].values()
+                              if 'source' in repo.keys()]
+
+    diff = {}
+    my_set = set()
+    fail_counter = 0
+    my_set.update(actual_repo_list)
+    my_set.update(expected_salt_data)
+    for repo in my_set:
+        rows = []
+        if repo not in actual_repo_list:
+            rows.append("{}: {}".format("pillars", "+"))
+            rows.append("{}: No repo".format('config'))
+            diff[repo] = rows
+            fail_counter += 1
+        elif repo not in expected_salt_data:
+            rows.append("{}: {}".format("config", "+"))
+            rows.append("{}: No repo".format('pillars'))
+            diff[repo] = rows
+    assert fail_counter == 0, \
+        "Several problems found: {0}".format(
+            json.dumps(diff, indent=4))
+    if fail_counter == 0 and len(diff) > 0:
+        print "\nWarning: nodes contain more repos than reclass"
diff --git a/test_set/cvp-sanity/tests/test_salt_master.py b/test_set/cvp-sanity/tests/test_salt_master.py
new file mode 100644
index 0000000..7649767
--- /dev/null
+++ b/test_set/cvp-sanity/tests/test_salt_master.py
@@ -0,0 +1,20 @@
+def test_uncommited_changes(local_salt_client):
+    git_status = local_salt_client.cmd(
+        'salt:master',
+        'cmd.run',
+        ['cd /srv/salt/reclass/classes/cluster/; git status'],
+        expr_form='pillar')
+    assert 'nothing to commit' in git_status.values()[0], 'Git status showed' \
+           ' some uncommitted changes: {}'.format(git_status.values()[0])
+
+
+def test_reclass_smoke(local_salt_client):
+    reclass = local_salt_client.cmd(
+        'salt:master',
+        'cmd.run',
+        ['reclass-salt --top; echo $?'],
+        expr_form='pillar')
+    result = reclass[reclass.keys()[0]][-1]
+
+    assert result == '0', 'Reclass is broken' \
+                          '\n {}'.format(reclass)
diff --git a/test_set/cvp-sanity/tests/test_services.py b/test_set/cvp-sanity/tests/test_services.py
new file mode 100644
index 0000000..02c001b
--- /dev/null
+++ b/test_set/cvp-sanity/tests/test_services.py
@@ -0,0 +1,114 @@
+import pytest
+import json
+import os
+import utils
+
+# Some nodes can have services that are not applicable to other nodes in the same group.
+# For example, there are 3 nodes in the kvm group, but only the kvm03 node has the
+# srv-volumes-backup.mount service in service.get_all
+#                        NODE NAME          SERVICE_NAME
+inconsistency_rule = {"kvm03": ["srv-volumes-backup.mount", "rsync"]}
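+# A service listed here is treated as expected only on that node, so its
+# absence on the other nodes of the group is not reported as a problem.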
+
+
+def test_check_services(local_salt_client, nodes_in_group):
+    """
+    Skips services if they are not consistent for all node.
+    Inconsistent services will be checked with another test case
+    """
+    output = local_salt_client.cmd("L@"+','.join(nodes_in_group), 'service.get_all', expr_form='compound')
+
+    if len(output.keys()) < 2:
+        pytest.skip("Nothing to compare - only 1 node")
+
+    nodes = []
+    pkts_data = []
+    my_set = set()
+
+    for node in output:
+        nodes.append(node)
+        my_set.update(output[node])
+
+    for srv in my_set:
+        diff = []
+        row = []
+        for node in nodes:
+            short_name_of_node = node.split('.')[0]
+            if inconsistency_rule.get(short_name_of_node) is not None and srv in inconsistency_rule[short_name_of_node]:
+                # Found service on node and it SHOULD be there
+                break
+            elif srv in output[node]:
+                # Found service on node
+                diff.append(srv)
+                row.append("{}: +".format(node))
+            else:
+                # Not found expected service on node
+                row.append("{}: No service".format(node))
+        if len(diff) > 0 and diff.count(diff[0]) < len(nodes):
+            row.sort()
+            row.insert(0, srv)
+            pkts_data.append(row)
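+    # each pkts_data row reads like:
+    # ["<service>", "node1: +", "node2: No service", ...]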
+    assert len(pkts_data) == 0, \
+        "Several problems found: {0}".format(
+        json.dumps(pkts_data, indent=4))
+
+
+# TODO: rework this test to make it workable https://mirantis.jira.com/browse/PROD-25958
+
+# def _check_services_on_special_node(local_salt_client, nodes_in_group):
+#     """
+#     Check that specific node has service.
+#     Nodes and proper services should be defined in inconsistency_rule dictionary
+#
+#     :print: Table with nodes which don't have required services and not existed services
+#     """
+#
+#     output = local_salt_client.cmd("L@" + ','.join(nodes_in_group), 'service.get_all', expr_form='compound')
+#     if len(output.keys()) < 2:
+#         pytest.skip("Nothing to compare - just 1 node")
+#
+#     def is_proper_service_for_node(_service, _node):
+#         """
+#         Return True if service exists on node and exists in inconsistency_rule
+#         Return True if service doesn't exist on node and doesn't exist in inconsistency_rule
+#         Return False otherwise
+#         :param _service: string
+#         :param _node: string full name of node
+#         :return: bool, read description for further details
+#         """
+#         short_name_of_node = _node.split('.')[0]
+#         if short_name_of_node not in inconsistency_rule.keys():
+#             return False
+#
+#         if _service in inconsistency_rule[short_name_of_node] and \
+#                 _service in output[_node]:
+#             # Return True if service exists on node and exists in inconsistency_rule
+#             return True
+#
+#         if _service not in inconsistency_rule[short_name_of_node] and \
+#                 _service not in output[_node]:
+#             # Return True if service doesn't exist on node and doesn't exist in inconsistency_rule
+#             return True
+#         print("return False for {} in {}".format(_service, _node))
+#         return False
+#
+#     errors = list()
+#     for node, expected_services in inconsistency_rule.items():
+#         print("Check {} , {} ".format(node, expected_services))
+#         # Skip if there is no matching node. Find nodes that contain node_title (like 'kvm03') in their titles
+#         if not any([node in node_name for node_name in output.keys()]):
+#             continue
+#         for expected_service in expected_services:
+#             service_on_nodes = {_node: expected_service if expected_service in _service else None
+#                                 for _node, _service
+#                                 in output.items()}
+#             print([is_proper_service_for_node(expected_service, _node)
+#                   for _node
+#                   in output.keys()])
+#             if not all([is_proper_service_for_node(expected_service, _node)
+#                         for _node
+#                         in output.keys()]):
+#                 errors.append(service_on_nodes)
+#
+#     assert errors.__len__() == 0, json.dumps(errors, indent=4)
diff --git a/test_set/cvp-sanity/tests/test_single_vip.py b/test_set/cvp-sanity/tests/test_single_vip.py
new file mode 100644
index 0000000..29bdb88
--- /dev/null
+++ b/test_set/cvp-sanity/tests/test_single_vip.py
@@ -0,0 +1,24 @@
+from collections import Counter
+
+
+def test_single_vip(local_salt_client, nodes_in_group):
+    local_salt_client.cmd("L@"+','.join(nodes_in_group), 'saltutil.sync_all', expr_form='compound')
+    nodes_list = local_salt_client.cmd(
+        "L@"+','.join(nodes_in_group), 'grains.item', ['ipv4'], expr_form='compound')
+
+    ipv4_list = []
+
+    for node in nodes_list:
+        ipv4_list.extend(nodes_list.get(node).get('ipv4'))
+
+    cnt = Counter(ipv4_list)
+
+    for ip in cnt:
+        if ip == '127.0.0.1':
+            continue
+        # any address other than localhost must be held by exactly one node
+        assert cnt[ip] < 2, \
+            "VIP IP duplicate found " \
+            "\n{}".format(ipv4_list)
diff --git a/test_set/cvp-sanity/tests/test_stacklight.py b/test_set/cvp-sanity/tests/test_stacklight.py
new file mode 100644
index 0000000..491c1ee
--- /dev/null
+++ b/test_set/cvp-sanity/tests/test_stacklight.py
@@ -0,0 +1,200 @@
+import json
+import requests
+import datetime
+import pytest
+import utils
+
+
+@pytest.mark.usefixtures('check_kibana')
+def test_elasticsearch_cluster(local_salt_client):
+    salt_output = local_salt_client.cmd(
+        'kibana:server',
+        'pillar.get',
+        ['_param:haproxy_elasticsearch_bind_host'],
+        expr_form='pillar')
+
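+    # an empty proxy mapping forces requests to bypass any proxy configured
+    # in the environment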
+    proxies = {"http": None, "https": None}
+    for node in salt_output.keys():
+        IP = salt_output[node]
+        assert requests.get('http://{}:9200/'.format(IP),
+                            proxies=proxies).status_code == 200, \
+            'Cannot check elasticsearch url on {}.'.format(IP)
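+        # the _cat/health response is one whitespace-separated line; in the
+        # Elasticsearch versions typically shipped with StackLight the
+        # columns used below are: 3 = status, 4 = node.total, 5 = node.data,
+        # 10 = unassign, 13 = active_shards_percent (an assumption about
+        # the column order; verify against the deployed ES version)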
+        resp = requests.get('http://{}:9200/_cat/health'.format(IP),
+                            proxies=proxies).content
+        health = resp.split()
+        assert health[3] == 'green', \
+            'elasticsearch cluster status is not green: {}'.format(resp)
+        # a 3-node Elasticsearch cluster is assumed for StackLight
+        assert health[4] == '3', \
+            'elasticsearch expects 3 nodes in total: {}'.format(resp)
+        assert health[5] == '3', \
+            'elasticsearch expects 3 data nodes: {}'.format(resp)
+        assert health[10] == '0', \
+            'elasticsearch has unassigned shards: {}'.format(resp)
+        assert health[13] == '100.0%', \
+            'elasticsearch has inactive shards: {}'.format(resp)
+
+
+@pytest.mark.usefixtures('check_kibana')
+def test_kibana_status(local_salt_client):
+    proxies = {"http": None, "https": None}
+    IP = utils.get_monitoring_ip('stacklight_log_address')
+    resp = requests.get('http://{}:5601/api/status'.format(IP),
+                        proxies=proxies).content
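+    # /api/status returns JSON with an overall state plus a list of
+    # per-plugin statuses; all of them are expected to be "green"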
+    body = json.loads(resp)
+    assert body['status']['overall']['state'] == "green", \
+        "Kibana status is not expected: {}".format(
+        body['status']['overall'])
+    for i in body['status']['statuses']:
+        assert i['state'] == "green", \
+            "Kibana statuses are unexpected: {}".format(i)
+
+
+@pytest.mark.usefixtures('check_kibana')
+def test_elasticsearch_node_count(local_salt_client):
+    now = datetime.datetime.now()
+    today = now.strftime("%Y.%m.%d")
+    active_nodes = utils.get_active_nodes()
+    salt_output = local_salt_client.cmd(
+        'kibana:server',
+        'pillar.get',
+        ['_param:haproxy_elasticsearch_bind_host'],
+        expr_form='pillar')
+
+    IP = salt_output.values()[0]
+    headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
+    proxies = {"http": None, "https": None}
+    data = ('{"size": 0, "aggs": '
+            '{"uniq_hostname": '
+            '{"terms": {"size": 500, '
+            '"field": "Hostname.keyword"}}}}')
+    response = requests.post(
+        'http://{0}:9200/log-{1}/_search?pretty'.format(IP, today),
+        proxies=proxies,
+        headers=headers,
+        data=data)
+    assert 200 == response.status_code, 'Unexpected code {}'.format(
+        response.text)
+    resp = json.loads(response.text)
+    cluster_domain = local_salt_client.cmd('salt:control',
+                                           'pillar.get',
+                                           ['_param:cluster_domain'],
+                                           expr_form='pillar').values()[0]
+    monitored_nodes = []
+    for item_ in resp['aggregations']['uniq_hostname']['buckets']:
+        node_name = item_['key']
+        monitored_nodes.append(node_name + '.' + cluster_domain)
+    missing_nodes = []
+    for node in active_nodes.keys():
+        if node not in monitored_nodes:
+            missing_nodes.append(node)
+    assert len(missing_nodes) == 0, \
+        'Not all nodes are in Elasticsearch. Found {0} keys, ' \
+        'expected {1}. Missing nodes: \n{2}'.format(
+            len(monitored_nodes), len(active_nodes), missing_nodes)
+
+
+def test_stacklight_services_replicas(local_salt_client):
+    # TODO
+    # change to docker:swarm:role:master ?
+    salt_output = local_salt_client.cmd(
+        'I@docker:client:stack:monitoring and I@prometheus:server',
+        'cmd.run',
+        ['docker service ls'],
+        expr_form='compound')
+
+    if not salt_output:
+        pytest.skip("docker:client:stack:monitoring or \
+        prometheus:server pillars are not found on this environment.")
+
+    wrong_items = []
+    for line in salt_output[salt_output.keys()[0]].split('\n'):
+        if 'replicated' not in line:
+            continue
+        # the REPLICAS column of `docker service ls` looks like "3/3"
+        # (running/desired); both numbers must match
+        actual, desired = line.split()[3].split('/')
+        if actual != desired:
+            wrong_items.append(line)
+    assert len(wrong_items) == 0, \
+        '''Some monitoring services don't have the expected number of replicas:
+              {}'''.format(json.dumps(wrong_items, indent=4))
+
+
+@pytest.mark.usefixtures('check_prometheus')
+def test_prometheus_alert_count(local_salt_client):
+    IP = utils.get_monitoring_ip('cluster_public_host')
+    # keystone:server can return 3 nodes instead of 1
+    # this will be fixed later
+    # TODO
+    nodes_info = local_salt_client.cmd(
+        'keystone:server',
+        'cmd.run',
+        ['curl -s http://{}:15010/alerts | grep icon-chevron-down | '
+         'grep -v "0 active"'.format(IP)],
+        expr_form='pillar')
+
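+    # stripping the HTML wrappers leaves only the names of alert groups
+    # that have more than "0 active" alerts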
+    result = nodes_info[nodes_info.keys()[0]].replace('</td>', '').replace(
+        '<td><i class="icon-chevron-down"></i> <b>', '').replace('</b>', '')
+    assert result == '', 'AlertManager page has some alerts! {}'.format(
+        json.dumps(result, indent=4))
+
+
+def test_stacklight_containers_status(local_salt_client):
+    salt_output = local_salt_client.cmd(
+        'I@docker:swarm:role:master and I@prometheus:server',
+        'cmd.run',
+        ['docker service ps $(docker stack services -q monitoring)'],
+        expr_form='compound')
+
+    if not salt_output:
+        pytest.skip("docker:swarm:role:master or prometheus:server \
+        pillars are not found on this environment.")
+
+    result = {}
+    # for old reclass models, docker:swarm:role:master can return
+    # 2 nodes instead of one. Here is temporary fix.
+    # TODO
+    if len(salt_output.keys()) > 1:
+        if 'CURRENT STATE' not in salt_output[salt_output.keys()[0]]:
+            del salt_output[salt_output.keys()[0]]
+    for line in salt_output[salt_output.keys()[0]].split('\n')[1:]:
+        fields = line.split()
+        # historical tasks are indented with '\_', which shifts every
+        # column index by one
+        shift = 1 if fields[1] == '\\_' else 0
+        name = fields[1 + shift]
+        if name not in result.keys():
+            result[name] = 'NOT OK'
+        # a task is considered healthy if its CURRENT STATE is Running
+        # or Ready
+        if fields[4 + shift] in ('Running', 'Ready'):
+            result[name] = 'OK'
+    assert 'NOT OK' not in result.values(), \
+        '''Some containers are in incorrect state:
+              {}'''.format(json.dumps(result, indent=4))
+
+
+def test_running_telegraf_services(local_salt_client):
+    salt_output = local_salt_client.cmd('telegraf:agent',
+                                        'service.status',
+                                        'telegraf',
+                                        expr_form='pillar')
+
+    if not salt_output:
+        pytest.skip("Telegraf or telegraf:agent \
+        pillar are not found on this environment.")
+
+    result = [{node: status} for node, status
+              in salt_output.items()
+              if status is False]
+    assert result == [], 'Telegraf service is not running ' \
+                         'on following nodes: {}'.format(result)
+
+
+def test_running_fluentd_services(local_salt_client):
+    salt_output = local_salt_client.cmd('fluentd:agent',
+                                        'service.status',
+                                        'td-agent',
+                                        expr_form='pillar')
+    if not salt_output:
+        pytest.skip("fluentd:agent pillar is not found on this environment.")
+
+    result = [{node: status} for node, status
+              in salt_output.items()
+              if status is False]
+    assert result == [], 'Fluentd check failed: td-agent service is not ' \
+                         'running on following nodes: {}'.format(result)
diff --git a/test_set/cvp-sanity/tests/test_ui_addresses.py b/test_set/cvp-sanity/tests/test_ui_addresses.py
new file mode 100644
index 0000000..3f6f3a3
--- /dev/null
+++ b/test_set/cvp-sanity/tests/test_ui_addresses.py
@@ -0,0 +1,73 @@
+import utils
+import pytest
+
+
+def test_ui_horizon(local_salt_client):
+    salt_output = local_salt_client.cmd(
+        'horizon:server',
+        'pillar.get',
+        ['_param:cluster_public_host'],
+        expr_form='pillar')
+    if not salt_output:
+        pytest.skip("Horizon is not enabled on this environment")
+    IP = [salt_output[node] for node in salt_output
+          if salt_output[node]]
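+    # --insecure is used because the public endpoint may terminate TLS with
+    # a self-signed certificate (an assumption about the environment)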
+    result = local_salt_client.cmd(
+        'keystone:server',
+        'cmd.run',
+        ['curl --insecure https://{}/auth/login/ 2>&1 | \
+         grep Login'.format(IP[0])],
+        expr_form='pillar')
+    assert len(result[result.keys()[0]]) != 0, \
+        'Horizon login page is not reachable on {} from ctl nodes'.format(
+        IP[0])
+
+
+@pytest.mark.usefixtures('check_kibana')
+def test_ui_kibana(local_salt_client):
+    IP = utils.get_monitoring_ip('stacklight_log_address')
+    result = local_salt_client.cmd(
+        'keystone:server',
+        'cmd.run',
+        ['curl http://{}:5601/app/kibana 2>&1 | \
+         grep loading'.format(IP)],
+        expr_form='pillar')
+    assert len(result[result.keys()[0]]) != 0, \
+        'Kibana login page is not reachable on {} from ctl nodes'.format(IP)
+
+
+@pytest.mark.usefixtures('check_prometheus')
+def test_ui_prometheus(local_salt_client):
+    IP = utils.get_monitoring_ip('stacklight_monitor_address')
+    result = local_salt_client.cmd(
+        'keystone:server',
+        'cmd.run',
+        ['curl http://{}:15010/graph 2>&1 | \
+         grep Prometheus'.format(IP)],
+        expr_form='pillar')
+    assert len(result[result.keys()[0]]) != 0, \
+        'Prometheus page is not reachable on {} from ctl nodes'.format(IP)
+
+
+@pytest.mark.usefixtures('check_prometheus')
+def test_ui_alert_manager(local_salt_client):
+    IP = utils.get_monitoring_ip('stacklight_monitor_address')
+    result = local_salt_client.cmd(
+        'keystone:server',
+        'cmd.run',
+        ['curl -s http://{}:15011/ | grep Alertmanager'.format(IP)],
+        expr_form='pillar')
+    assert len(result[result.keys()[0]]) != 0, \
+        'AlertManager page is not reachable on {} from ctl nodes'.format(IP)
+
+
+@pytest.mark.usefixtures('check_grafana')
+def test_ui_grafana(local_salt_client):
+    IP = utils.get_monitoring_ip('stacklight_monitor_address')
+    result = local_salt_client.cmd(
+        'keystone:server',
+        'cmd.run',
+        ['curl http://{}:15013/login 2>&1 | grep Grafana'.format(IP)],
+        expr_form='pillar')
+    assert len(result[result.keys()[0]]) != 0, \
+        'Grafana page is not reachable on {} from ctl nodes'.format(IP)