[CVP] Refactor salt client class

Pass salt-api arguments by keyword (tgt, fun, param, expr_form) and
move the common call patterns into helper methods: test_ping() for
availability checks, pillar_get() for fetching a single pillar value
and cmd_any() for taking the output of any one minion. Target all
minions instead of utils.get_active_nodes() and explicitly skip
minions that do not respond (boolean returns) inside the tests.
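The refactored client class itself is outside this diff; below is a
minimal sketch of the assumed interface, reconstructed from the call
sites in this change (method names and keyword arguments are confirmed
by the diff; default values and method bodies are an assumption):

    # Hypothetical sketch; only names and keywords come from the diff.
    class SaltClient(object):
        def cmd(self, tgt, fun='cmd.run', param=None,
                expr_form='pillar', check_status=False):
            # Wraps the existing salt-api call: run `fun` with `param`
            # as its argument on minions matched by `tgt`/`expr_form`;
            # check_status=True is assumed to fail on missing minions.
            raise NotImplementedError("transport omitted in this sketch")

        def test_ping(self, tgt, expr_form='pillar'):
            # {minion: True} for every responding minion, {} if none
            return self.cmd(tgt=tgt, fun='test.ping', expr_form=expr_form)

        def pillar_get(self, tgt='salt:master', param=None,
                       expr_form='pillar'):
            # single pillar value, taken from any one responding minion
            return self.cmd(tgt=tgt, fun='pillar.get', param=param,
                            expr_form=expr_form).values()[0]

        def cmd_any(self, tgt, param=None, expr_form='pillar'):
            # cmd.run output of any one of the matched minions
            return self.cmd(tgt=tgt, param=param,
                            expr_form=expr_form).values()[0]
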
Change-Id: I91cfffe1c8d5df0224657ce9e36be9063b56f0b3
Related-PROD: PROD-28981
Related-PROD: PROD-28729
Related-PROD: PROD-28624
Related-PROD: PROD-29286
diff --git a/test_set/cvp-sanity/fixtures/base.py b/test_set/cvp-sanity/fixtures/base.py
index cb90bc5..8c5be34 100644
--- a/test_set/cvp-sanity/fixtures/base.py
+++ b/test_set/cvp-sanity/fixtures/base.py
@@ -22,27 +22,18 @@
        If no platform is installed (no OS or k8s) we need to skip
        the test (product team use case).
     '''
-    salt_output = local_salt_client.cmd(
-        'keystone:server',
-        'test.ping',
-        expr_form='pillar')
+    salt_output = local_salt_client.test_ping(tgt='keystone:server')
     if salt_output:
         return "keystone:server"
     else:
-        salt_output = local_salt_client.cmd(
-            'etcd:server',
-            'test.ping',
-            expr_form='pillar')
+        salt_output = local_salt_client.test_ping(tgt='etcd:server')
         return "etcd:server" if salt_output else pytest.skip("Neither \
             Openstack nor k8s is found. Skipping test")
 
 
 @pytest.fixture(scope='session')
 def check_openstack(local_salt_client):
-    salt_output = local_salt_client.cmd(
-        'keystone:server',
-        'test.ping',
-        expr_form='pillar')
+    salt_output = local_salt_client.test_ping(tgt='keystone:server')
     if not salt_output:
         pytest.skip("Openstack not found or keystone:server pillar \
           are not found on this environment.")
@@ -50,10 +41,8 @@
 
 @pytest.fixture(scope='session')
 def check_drivetrain(local_salt_client):
-    salt_output = local_salt_client.cmd(
-        'I@jenkins:client and not I@salt:master',
-        'test.ping',
-        expr_form='compound')
+    salt_output = local_salt_client.test_ping(tgt='I@jenkins:client and not I@salt:master',
+                                              expr_form='compound')
     if not salt_output:
         pytest.skip("Drivetrain service or jenkins:client pillar \
           are not found on this environment.")
@@ -61,10 +50,7 @@
 
 @pytest.fixture(scope='session')
 def check_prometheus(local_salt_client):
-    salt_output = local_salt_client.cmd(
-        'prometheus:server',
-        'test.ping',
-        expr_form='pillar')
+    salt_output = local_salt_client.test_ping(tgt='prometheus:server')
     if not salt_output:
         pytest.skip("Prometheus service or prometheus:server pillar \
           are not found on this environment.")
@@ -72,10 +58,7 @@
 
 @pytest.fixture(scope='session')
 def check_alerta(local_salt_client):
-    salt_output = local_salt_client.cmd(
-        'prometheus:alerta',
-        'test.ping',
-        expr_form='pillar')
+    salt_output = local_salt_client.test_ping(tgt='prometheus:alerta')
     if not salt_output:
         pytest.skip("Alerta service or prometheus:alerta pillar \
               are not found on this environment.")
@@ -83,10 +66,7 @@
 
 @pytest.fixture(scope='session')
 def check_kibana(local_salt_client):
-    salt_output = local_salt_client.cmd(
-        'kibana:server',
-        'test.ping',
-        expr_form='pillar')
+    salt_output = local_salt_client.test_ping(tgt='kibana:server')
     if not salt_output:
         pytest.skip("Kibana service or kibana:server pillar \
           are not found on this environment.")
@@ -94,10 +74,7 @@
 
 @pytest.fixture(scope='session')
 def check_grafana(local_salt_client):
-    salt_output = local_salt_client.cmd(
-        'grafana:client',
-        'test.ping',
-        expr_form='pillar')
+    salt_output = local_salt_client.test_ping(tgt='grafana:client')
     if not salt_output:
         pytest.skip("Grafana service or grafana:client pillar \
           are not found on this environment.")
@@ -110,9 +87,9 @@
 @pytest.fixture(scope='module')
 def contrail(local_salt_client):
     probe = local_salt_client.cmd(
-        'opencontrail:control',
-        'pillar.get',
-        'opencontrail:control:version',
+        tgt='opencontrail:control',
+        fun='pillar.get',
+        param='opencontrail:control:version',
         expr_form='pillar')
     if not probe:
         pytest.skip("Contrail is not found on this environment")
@@ -124,9 +101,8 @@
 
 @pytest.fixture(scope='session')
 def check_kdt(local_salt_client):
-    kdt_nodes_available = local_salt_client.cmd(
-        "I@gerrit:client and I@kubernetes:pool",
-        "test.ping",
+    kdt_nodes_available = local_salt_client.test_ping(
+        tgt="I@gerrit:client and I@kubernetes:pool",
         expr_form='compound'
     )
     if not kdt_nodes_available:
@@ -135,9 +111,8 @@
 
 @pytest.fixture(scope='session')
 def check_cicd(local_salt_client):
-    cicd_nodes_available = local_salt_client.cmd(
-        "I@gerrit:client and I@docker:swarm",
-        "test.ping",
+    cicd_nodes_available = local_salt_client.test_ping(
+        tgt="I@gerrit:client and I@docker:swarm",
         expr_form='compound'
     )
     if not cicd_nodes_available:
@@ -164,9 +139,8 @@
                                     fi ".format(name=filename_with_versions)
 
         list_version = local_salt_client.cmd(
-            '*',
-            'cmd.run',
-            'echo "NODE_INFO=$(uname -sr)" && ' + cat_image_version_file,
+            tgt='*',
+            param='echo "NODE_INFO=$(uname -sr)" && ' + cat_image_version_file,
             expr_form='compound')
         if list_version.__len__() == 0:
             yield
@@ -185,4 +159,4 @@
         yield
     except Exception as e:
         print("print_node_version:: some error occurred: {}".format(e))
-        yield
+        yield
\ No newline at end of file
diff --git a/test_set/cvp-sanity/tests/ceph/test_ceph_haproxy.py b/test_set/cvp-sanity/tests/ceph/test_ceph_haproxy.py
index d6c8e49..4d2566c 100644
--- a/test_set/cvp-sanity/tests/ceph/test_ceph_haproxy.py
+++ b/test_set/cvp-sanity/tests/ceph/test_ceph_haproxy.py
@@ -6,11 +6,10 @@
     fail = {}
 
     monitor_info = local_salt_client.cmd(
-        'ceph:mon',
-        'cmd.run',
-        ["echo 'show stat' | nc -U "
-         "/var/run/haproxy/admin.sock | "
-         "grep ceph_mon_radosgw_cluster"],
+        tgt='ceph:mon',
+        param="echo 'show stat' | nc -U "
+              "/var/run/haproxy/admin.sock | "
+              "grep ceph_mon_radosgw_cluster",
         expr_form='pillar')
     if not monitor_info:
         pytest.skip("Ceph is not found on this environment")
diff --git a/test_set/cvp-sanity/tests/ceph/test_ceph_replicas.py b/test_set/cvp-sanity/tests/ceph/test_ceph_replicas.py
index 62af49d..4c93fe6 100644
--- a/test_set/cvp-sanity/tests/ceph/test_ceph_replicas.py
+++ b/test_set/cvp-sanity/tests/ceph/test_ceph_replicas.py
@@ -8,23 +8,17 @@
     special requirement for that.
     """
 
-    ceph_monitors = local_salt_client.cmd(
-        'ceph:mon', 
-        'test.ping', 
-        expr_form='pillar')
+    ceph_monitors = local_salt_client.test_ping(tgt='ceph:mon')
 
     if not ceph_monitors:
         pytest.skip("Ceph is not found on this environment")
 
     monitor = ceph_monitors.keys()[0]
 
-    raw_pool_replicas = local_salt_client.cmd(
-        monitor, 
-        'cmd.run', 
-        ["ceph osd dump | grep size | " \
-        "awk '{print $3, $5, $6, $7, $8}'"], 
-        expr_form='glob').get(
-        ceph_monitors.keys()[0]).split('\n')
+    raw_pool_replicas = local_salt_client.cmd_any(
+        tgt='ceph:mon',
+        param="ceph osd dump | grep size | " \
+              "awk '{print $3, $5, $6, $7, $8}'").split('\n')
 
     pools_replicas = {}
     for pool in raw_pool_replicas:
diff --git a/test_set/cvp-sanity/tests/ceph/test_ceph_status.py b/test_set/cvp-sanity/tests/ceph/test_ceph_status.py
index ffd7bed..0c0ef0c 100644
--- a/test_set/cvp-sanity/tests/ceph/test_ceph_status.py
+++ b/test_set/cvp-sanity/tests/ceph/test_ceph_status.py
@@ -1,13 +1,11 @@
 import json
-
 import pytest
 
 
 def test_ceph_osd(local_salt_client):
     osd_fail = local_salt_client.cmd(
-        'ceph:osd',
-        'cmd.run',
-        ['ceph osd tree | grep down'],
+        tgt='ceph:osd',
+        param='ceph osd tree | grep down',
         expr_form='pillar')
     if not osd_fail:
         pytest.skip("Ceph is not found on this environment")
@@ -18,9 +16,8 @@
 
 def test_ceph_health(local_salt_client):
     get_status = local_salt_client.cmd(
-        'ceph:mon',
-        'cmd.run',
-        ['ceph -s -f json'],
+        tgt='ceph:mon',
+        param='ceph -s -f json',
         expr_form='pillar')
     if not get_status:
         pytest.skip("Ceph is not found on this environment")
diff --git a/test_set/cvp-sanity/tests/test_cinder_services.py b/test_set/cvp-sanity/tests/test_cinder_services.py
index 6c99602..5d783f0 100644
--- a/test_set/cvp-sanity/tests/test_cinder_services.py
+++ b/test_set/cvp-sanity/tests/test_cinder_services.py
@@ -7,29 +7,20 @@
         # Check that all services has 'Up' status in output of `cinder service-list` on keystone:server nodes
         # Check that quantity of backend in cinder:controller:backend pillar is similar to list of volumes in cinder service-list
     """
-    cinder_backends_info = local_salt_client.cmd(
-        'cinder:controller',
-        'pillar.get',
-        ['cinder:controller:backend'],
-        expr_form='pillar')
-    if not cinder_backends_info or not any(cinder_backends_info.values()):
+    backends_cinder = local_salt_client.pillar_get(tgt='cinder:controller', param='cinder:controller:backend')
+    if not backends_cinder:
         pytest.skip("Cinder service or cinder:controller:backend pillar \
         are not found on this environment.")
-    service_down = local_salt_client.cmd(
-        'keystone:server',
-        'cmd.run',
-        ['. /root/keystonercv3; cinder service-list | grep "down\|disabled"'],
-        expr_form='pillar')
-    cinder_volume = local_salt_client.cmd(
-        'keystone:server',
-        'cmd.run',
-        ['. /root/keystonercv3; cinder service-list | grep -c "volume"'],
-        expr_form='pillar')
-    backends_cinder = cinder_backends_info[cinder_backends_info.keys()[0]]
+    service_down = local_salt_client.cmd_any(
+        tgt='keystone:server',
+        param='. /root/keystonercv3; cinder service-list | grep "down\|disabled"')
+    cinder_volume = local_salt_client.cmd_any(
+        tgt='keystone:server',
+        param='. /root/keystonercv3; cinder service-list | grep -c "volume"')
     backends_num = len(backends_cinder.keys())
-    assert service_down[service_down.keys()[0]] == '', \
+    assert service_down == '', \
         '''Some cinder services are in wrong state'''
-    assert cinder_volume[cinder_volume.keys()[0]] == str(backends_num), \
+    assert cinder_volume == str(backends_num), \
         'Number of cinder-volume services ({0}) does not match ' \
         'number of volume backends ({1})'.format(
-        cinder_volume[cinder_volume.keys()[0]], str(backends_num))
+        cinder_volume, str(backends_num))
diff --git a/test_set/cvp-sanity/tests/test_contrail.py b/test_set/cvp-sanity/tests/test_contrail.py
index e5722bd..035c3b3 100644
--- a/test_set/cvp-sanity/tests/test_contrail.py
+++ b/test_set/cvp-sanity/tests/test_contrail.py
@@ -1,6 +1,5 @@
 import pytest
 import json
-import utils
 
 pytestmark = pytest.mark.usefixtures("contrail")
 
@@ -9,8 +8,8 @@
 
 def get_contrail_status(salt_client, pillar, command, processor):
     return salt_client.cmd(
-        pillar, 'cmd.run',
-        ['{} | {}'.format(command, processor)],
+        tgt=pillar,
+        param='{} | {}'.format(command, processor),
         expr_form='pillar'
     )
 
@@ -89,15 +88,13 @@
 
 
 def test_public_ui_contrail(local_salt_client, ctl_nodes_pillar):
-    IP = utils.get_monitoring_ip('cluster_public_host')
+    IP = local_salt_client.pillar_get(param='_param:cluster_public_host')
     protocol = 'https'
     port = '8143'
     url = "{}://{}:{}".format(protocol, IP, port)
-    result = local_salt_client.cmd(
-        ctl_nodes_pillar,
-        'cmd.run',
-        ['curl -k {}/ 2>&1 | \
-         grep Contrail'.format(url)],
-        expr_form='pillar')
-    assert len(result[result.keys()[0]]) != 0, \
+    result = local_salt_client.cmd_any(
+        tgt=ctl_nodes_pillar,
+        param='curl -k {}/ 2>&1 | \
+               grep Contrail'.format(url))
+    assert len(result) != 0, \
         'Public Contrail UI is not reachable on {} from ctl nodes'.format(url)
diff --git a/test_set/cvp-sanity/tests/test_default_gateway.py b/test_set/cvp-sanity/tests/test_default_gateway.py
index 36ca70e..8cea880 100644
--- a/test_set/cvp-sanity/tests/test_default_gateway.py
+++ b/test_set/cvp-sanity/tests/test_default_gateway.py
@@ -1,21 +1,22 @@
 import json
-import pytest
-import os
-import utils
 
 
 def test_check_default_gateways(local_salt_client, nodes_in_group):
     netstat_info = local_salt_client.cmd(
-        "L@"+','.join(nodes_in_group), 'cmd.run', ['ip r | sed -n 1p'], expr_form='compound')
+        tgt="L@"+','.join(nodes_in_group),
+        param='ip r | sed -n 1p',
+        expr_form='compound')
 
     gateways = {}
-    nodes = netstat_info.keys()
 
-    for node in nodes:
-        if netstat_info[node] not in gateways:
-            gateways[netstat_info[node]] = [node]
+    for node in netstat_info.keys():
+        gateway = netstat_info[node]
+        if isinstance(gateway, bool):
+            gateway = 'Cannot access node(-s)'
+        if gateway not in gateways:
+            gateways[gateway] = [node]
         else:
-            gateways[netstat_info[node]].append(node)
+            gateways[gateway].append(node)
 
     assert len(gateways.keys()) == 1, \
         "There were found few gateways: {gw}".format(
diff --git a/test_set/cvp-sanity/tests/test_drivetrain.py b/test_set/cvp-sanity/tests/test_drivetrain.py
index 94f45ff..b5beba5 100644
--- a/test_set/cvp-sanity/tests/test_drivetrain.py
+++ b/test_set/cvp-sanity/tests/test_drivetrain.py
@@ -13,16 +13,14 @@
 
 
 def join_to_gerrit(local_salt_client, gerrit_user, gerrit_password):
-    gerrit_port = local_salt_client.cmd(
-        'I@gerrit:client and not I@salt:master',
-        'pillar.get',
-        ['_param:haproxy_gerrit_bind_port'],
-        expr_form='compound').values()[0]
-    gerrit_address = local_salt_client.cmd(
-        'I@gerrit:client and not I@salt:master',
-        'pillar.get',
-        ['_param:haproxy_gerrit_bind_host'],
-        expr_form='compound').values()[0]
+    gerrit_port = local_salt_client.pillar_get(
+        tgt='I@gerrit:client and not I@salt:master',
+        param='_param:haproxy_gerrit_bind_port',
+        expr_form='compound')
+    gerrit_address = local_salt_client.pillar_get(
+        tgt='I@gerrit:client and not I@salt:master',
+        param='_param:haproxy_gerrit_bind_host',
+        expr_form='compound')
     url = 'http://{0}:{1}'.format(gerrit_address,gerrit_port)
     auth = HTTPBasicAuth(gerrit_user, gerrit_password)
     rest = GerritRestAPI(url=url, auth=auth)
@@ -30,27 +28,23 @@
 
 
 def join_to_jenkins(local_salt_client, jenkins_user, jenkins_password):
-    jenkins_port = local_salt_client.cmd(
-        'I@jenkins:client and not I@salt:master',
-        'pillar.get',
-        ['_param:haproxy_jenkins_bind_port'],
-        expr_form='compound').values()[0]
-    jenkins_address = local_salt_client.cmd(
-        'I@jenkins:client and not I@salt:master',
-        'pillar.get',
-        ['_param:haproxy_jenkins_bind_host'],
-        expr_form='compound').values()[0]
+    jenkins_port = local_salt_client.pillar_get(
+        tgt='I@jenkins:client and not I@salt:master',
+        param='_param:haproxy_jenkins_bind_port',
+        expr_form='compound')
+    jenkins_address = local_salt_client.pillar_get(
+        tgt='I@jenkins:client and not I@salt:master',
+        param='_param:haproxy_jenkins_bind_host',
+        expr_form='compound')
     jenkins_url = 'http://{0}:{1}'.format(jenkins_address,jenkins_port)
     server = jenkins.Jenkins(jenkins_url, username=jenkins_user, password=jenkins_password)
     return server
 
 
 def get_password(local_salt_client,service):
-    password = local_salt_client.cmd(
-        service,
-        'pillar.get',
-        ['_param:openldap_admin_password'],
-        expr_form='pillar').values()[0]
+    password = local_salt_client.pillar_get(
+        tgt=service,
+        param='_param:openldap_admin_password')
     return password
 
 
@@ -59,16 +53,14 @@
     gerrit_error = ''
     current_date = time.strftime("%Y%m%d-%H.%M.%S", time.localtime())
     test_proj_name = "test-dt-{0}".format(current_date)
-    gerrit_port = local_salt_client.cmd(
-        'I@gerrit:client and not I@salt:master',
-        'pillar.get',
-        ['_param:haproxy_gerrit_bind_port'],
-        expr_form='compound').values()[0]
-    gerrit_address = local_salt_client.cmd(
-        'I@gerrit:client and not I@salt:master',
-        'pillar.get',
-        ['_param:haproxy_gerrit_bind_host'],
-        expr_form='compound').values()[0]
+    gerrit_port = local_salt_client.pillar_get(
+        tgt='I@gerrit:client and not I@salt:master',
+        param='_param:haproxy_gerrit_bind_port',
+        expr_form='compound')
+    gerrit_address = local_salt_client.pillar_get(
+        tgt='I@gerrit:client and not I@salt:master',
+        param='_param:haproxy_gerrit_bind_host',
+        expr_form='compound')
     try:
         #Connecting to gerrit and check connection
         server = join_to_gerrit(local_salt_client,'admin',gerrit_password)
@@ -134,26 +126,20 @@
     if not ldap_password:
         pytest.skip("Openldap service or openldap:client pillar \
         are not found on this environment.")
-    ldap_port = local_salt_client.cmd(
-        'I@openldap:client and not I@salt:master',
-        'pillar.get',
-        ['_param:haproxy_openldap_bind_port'],
-        expr_form='compound').values()[0]
-    ldap_address = local_salt_client.cmd(
-        'I@openldap:client and not I@salt:master',
-        'pillar.get',
-        ['_param:haproxy_openldap_bind_host'],
-        expr_form='compound').values()[0]
-    ldap_dc = local_salt_client.cmd(
-        'openldap:client',
-        'pillar.get',
-        ['_param:openldap_dn'],
-        expr_form='pillar').values()[0]
-    ldap_con_admin = local_salt_client.cmd(
-        'openldap:client',
-        'pillar.get',
-        ['openldap:client:server:auth:user'],
-        expr_form='pillar').values()[0]
+    ldap_port = local_salt_client.pillar_get(
+        tgt='I@openldap:client and not I@salt:master',
+        param='_param:haproxy_openldap_bind_port',
+        expr_form='compound')
+    ldap_address = local_salt_client.pillar_get(
+        tgt='I@openldap:client and not I@salt:master',
+        param='_param:haproxy_openldap_bind_host',
+        expr_form='compound')
+    ldap_dc = local_salt_client.pillar_get(
+        tgt='openldap:client',
+        param='_param:openldap_dn')
+    ldap_con_admin = local_salt_client.pillar_get(
+        tgt='openldap:client',
+        param='openldap:client:server:auth:user')
     ldap_url = 'ldap://{0}:{1}'.format(ldap_address,ldap_port)
     ldap_error = ''
     ldap_result = ''
@@ -231,9 +217,8 @@
     wrong_items = []
     for _ in range(4):
         docker_services_by_nodes = local_salt_client.cmd(
-            'I@gerrit:client',
-            'cmd.run',
-            ['docker service ls'],
+            tgt='I@gerrit:client',
+            param='docker service ls',
             expr_form='compound')
         wrong_items = []
         for line in docker_services_by_nodes[docker_services_by_nodes.keys()[0]].split('\n'):
@@ -260,34 +245,22 @@
     config = utils.get_configuration()
     if not config['drivetrain_version']:
         expected_version = \
-            local_salt_client.cmd(
-                'I@salt:master',
-                'pillar.get',
-                ['_param:mcp_version'],
-                expr_form='compound').values()[0] or \
-            local_salt_client.cmd(
-                'I@salt:master',
-                'pillar.get',
-                ['_param:apt_mk_version'],
-                expr_form='compound').values()[0]
+            local_salt_client.pillar_get(param='_param:mcp_version') or \
+            local_salt_client.pillar_get(param='_param:apt_mk_version')
         if not expected_version:
             pytest.skip("drivetrain_version is not defined. Skipping")
     else:
         expected_version = config['drivetrain_version']
-    table_with_docker_services = local_salt_client.cmd('I@gerrit:client',
-                                                       'cmd.run',
-                                                       ['docker service ls --format "{{.Image}}"'],
+    table_with_docker_services = local_salt_client.cmd(tgt='I@gerrit:client',
+                                                       param='docker service ls --format "{{.Image}}"',
                                                        expr_form='compound')
-    table_from_pillar = local_salt_client.cmd('I@gerrit:client',
-                                              'pillar.get',
-                                              ['docker:client:images'],
-                                              expr_form='compound')
-
+    expected_images = local_salt_client.pillar_get(tgt='gerrit:client',
+                                                   param='docker:client:images')
     mismatch = {}
     actual_images = {}
     for image in set(table_with_docker_services[table_with_docker_services.keys()[0]].split('\n')):
         actual_images[image.split(":")[0]] = image.split(":")[-1]
-    for image in set(table_from_pillar[table_from_pillar.keys()[0]]):
+    for image in set(expected_images):
         im_name = image.split(":")[0]
         if im_name not in actual_images:
             mismatch[im_name] = 'not found on env'
@@ -303,7 +276,9 @@
     """ This test compares Jenkins jobs versions
         collected from the cloud vs collected from pillars.
     """
-    excludes = ['upgrade-mcp-release', 'deploy-update-salt']
+    excludes = ['upgrade-mcp-release', 'deploy-update-salt',
+                'git-mirror-downstream-mk-pipelines',
+                'git-mirror-downstream-pipeline-library']
 
     config = utils.get_configuration()
     drivetrain_version = config.get('drivetrain_version', '')
@@ -327,11 +302,9 @@
         if drivetrain_version in ['testing', 'nightly', 'stable']:
             expected_version = 'master'
         else:
-            expected_version = local_salt_client.cmd(
-                'I@gerrit:client',
-                'pillar.get',
-                ['jenkins:client:job:{}:scm:branch'.format(job_name)],
-                expr_form='compound').values()[0]
+            expected_version = local_salt_client.pillar_get(
+                tgt='gerrit:client',
+                param='jenkins:client:job:{}:scm:branch'.format(job_name))
 
         if not BranchSpec:
             print("No BranchSpec has found for {} job".format(job_name))
diff --git a/test_set/cvp-sanity/tests/test_duplicate_ips.py b/test_set/cvp-sanity/tests/test_duplicate_ips.py
index afe1afe..3b55a26 100644
--- a/test_set/cvp-sanity/tests/test_duplicate_ips.py
+++ b/test_set/cvp-sanity/tests/test_duplicate_ips.py
@@ -1,4 +1,3 @@
-import pytest
 from collections import Counter
 from pprint import pformat
 import os
@@ -16,22 +15,24 @@
 
 
 def test_duplicate_ips(local_salt_client):
-    active_nodes = utils.get_active_nodes()
-
     testname = os.path.basename(__file__).split('.')[0]
     config = utils.get_configuration()
     skipped_ifaces = config.get(testname)["skipped_ifaces"]
 
-    local_salt_client.cmd('L@'+','.join(active_nodes),
-                          'saltutil.refresh_grains',
+    local_salt_client.cmd(tgt='*',
+                          fun='saltutil.refresh_grains',
                           expr_form='compound')
-    nodes = local_salt_client.cmd('L@'+','.join(active_nodes),
-                                  'grains.item',
-                                  ['ip4_interfaces'],
+    nodes = local_salt_client.cmd(tgt='*',
+                                  fun='grains.item',
+                                  param='ip4_interfaces',
                                   expr_form='compound')
 
     ipv4_list = []
     for node in nodes:
+        if isinstance(nodes[node], bool):
+            # TODO: do not skip node
+            print("{} node is skipped".format(node))
+            continue
         for iface in nodes[node]['ip4_interfaces']:
             # Omit 'ip-less' ifaces
             if not nodes[node]['ip4_interfaces'][iface]:
diff --git a/test_set/cvp-sanity/tests/test_etc_hosts.py b/test_set/cvp-sanity/tests/test_etc_hosts.py
index cd9fc35..8850ab7 100644
--- a/test_set/cvp-sanity/tests/test_etc_hosts.py
+++ b/test_set/cvp-sanity/tests/test_etc_hosts.py
@@ -1,23 +1,22 @@
-import pytest
 import json
-import os
-import utils
 
 
 def test_etc_hosts(local_salt_client):
-    active_nodes = utils.get_active_nodes()
     nodes_info = local_salt_client.cmd(
-        utils.list_to_target_string(active_nodes, 'or'), 'cmd.run',
-        ['cat /etc/hosts'],
+        tgt='*',
+        param='cat /etc/hosts',
         expr_form='compound')
     result = {}
     for node in nodes_info.keys():
+        if isinstance(nodes_info[node], bool):
+            result[node] = 'Cannot access this node'
+            continue
         for nd in nodes_info.keys():
-           if node not in nodes_info[nd]:
-              if node in result:
-                  result[node]+=','+nd
-              else:
-                  result[node]=nd
+            if nd not in nodes_info[node]:
+                if node in result:
+                    result[node] += ',' + nd
+                else:
+                    result[node] = nd
     assert len(result) <= 1, \
         "Some hosts are not presented in /etc/hosts: {0}".format(
-         json.dumps(result, indent=4))     
+        json.dumps(result, indent=4))
\ No newline at end of file
diff --git a/test_set/cvp-sanity/tests/test_galera_cluster.py b/test_set/cvp-sanity/tests/test_galera_cluster.py
index 676f09b..73f4932 100644
--- a/test_set/cvp-sanity/tests/test_galera_cluster.py
+++ b/test_set/cvp-sanity/tests/test_galera_cluster.py
@@ -3,9 +3,8 @@
 
 def test_galera_cluster_status(local_salt_client):
     gs = local_salt_client.cmd(
-        'galera:*',
-        'cmd.run',
-        ['salt-call mysql.status | grep -A1 wsrep_cluster_size | tail -n1'],
+        tgt='galera:*',
+        param='salt-call mysql.status | grep -A1 wsrep_cluster_size | tail -n1',
         expr_form='pillar')
 
     if not gs:
diff --git a/test_set/cvp-sanity/tests/test_k8s.py b/test_set/cvp-sanity/tests/test_k8s.py
index 5b905c9..f04066e 100644
--- a/test_set/cvp-sanity/tests/test_k8s.py
+++ b/test_set/cvp-sanity/tests/test_k8s.py
@@ -5,8 +5,8 @@
 
 def test_k8s_get_cs_status(local_salt_client):
     result = local_salt_client.cmd(
-        'etcd:server', 'cmd.run',
-        ['kubectl get cs'],
+        tgt='etcd:server',
+        param='kubectl get cs',
         expr_form='pillar'
     )
     errors = []
@@ -28,8 +28,8 @@
 
 def test_k8s_get_nodes_status(local_salt_client):
     result = local_salt_client.cmd(
-        'etcd:server', 'cmd.run',
-        ['kubectl get nodes'],
+        tgt='etcd:server',
+        param='kubectl get nodes',
         expr_form='pillar'
     )
     errors = []
@@ -51,8 +51,8 @@
 
 def test_k8s_get_calico_status(local_salt_client):
     result = local_salt_client.cmd(
-        'kubernetes:pool', 'cmd.run',
-        ['calicoctl node status'],
+        tgt='kubernetes:pool',
+        param='calicoctl node status',
         expr_form='pillar'
     )
     errors = []
@@ -73,8 +73,8 @@
 
 def test_k8s_cluster_status(local_salt_client):
     result = local_salt_client.cmd(
-        'kubernetes:master', 'cmd.run',
-        ['kubectl cluster-info'],
+        tgt='kubernetes:master',
+        param='kubectl cluster-info',
         expr_form='pillar'
     )
     errors = []
@@ -95,8 +95,9 @@
 
 def test_k8s_kubelet_status(local_salt_client):
     result = local_salt_client.cmd(
-        'kubernetes:pool', 'service.status',
-        ['kubelet'],
+        tgt='kubernetes:pool',
+        fun='service.status',
+        param='kubelet',
         expr_form='pillar'
     )
     errors = []
@@ -111,8 +112,8 @@
 
 def test_k8s_check_system_pods_status(local_salt_client):
     result = local_salt_client.cmd(
-        'etcd:server', 'cmd.run',
-        ['kubectl --namespace="kube-system" get pods'],
+        tgt='etcd:server',
+        param='kubectl --namespace="kube-system" get pods',
         expr_form='pillar'
     )
     errors = []
@@ -151,34 +152,30 @@
         # Check that public_ip:8443 is accessible with curl
     """
     result = local_salt_client.cmd(
-        'etcd:server', 'cmd.run',
-        ['kubectl get svc -n kube-system'],
+        tgt='etcd:server',
+        param='kubectl get svc -n kube-system',
         expr_form='pillar'
     )
     if not result:
         pytest.skip("k8s is not found on this environment")
 
     # service name 'kubernetes-dashboard' is hardcoded in kubernetes formula
-    dashboard_enabled = local_salt_client.cmd(
-        'etcd:server', 'pillar.get',
-        ['kubernetes:common:addons:dashboard:enabled'],
-        expr_form='pillar'
-    ).values()[0]
+    dashboard_enabled = local_salt_client.pillar_get(
+        tgt='etcd:server',
+        param='kubernetes:common:addons:dashboard:enabled')
     if not dashboard_enabled:
         pytest.skip("Kubernetes dashboard is not enabled in the cluster.")
 
-    external_ip = local_salt_client.cmd(
-        'etcd:server', 'pillar.get',
-        ['kubernetes:common:addons:dashboard:public_ip'],
-        expr_form='pillar'
-    ).values()[0]
+    external_ip = local_salt_client.pillar_get(
+        tgt='etcd:server',
+        param='kubernetes:common:addons:dashboard:public_ip')
 
     assert external_ip.__len__() > 0, "Kubernetes dashboard is enabled but not defined in pillars"
     # dashboard port 8443 is hardcoded in kubernetes formula
     url = "https://{}:8443".format(external_ip)
     check = local_salt_client.cmd(
-        'etcd:server', 'cmd.run',
-        ['curl {} 2>&1 | grep kubernetesDashboard'.format(url)],
+        tgt='etcd:server',
+        param='curl {} 2>&1 | grep kubernetesDashboard'.format(url),
         expr_form='pillar'
     )
     assert len(check.values()[0]) != 0, \
diff --git a/test_set/cvp-sanity/tests/test_mounts.py b/test_set/cvp-sanity/tests/test_mounts.py
index 3b5b697..6fcca56 100644
--- a/test_set/cvp-sanity/tests/test_mounts.py
+++ b/test_set/cvp-sanity/tests/test_mounts.py
@@ -7,21 +7,20 @@
         # Get all mount points from each node in the group  with the next command: `df -h | awk '{print $1}'`
         # Check that all mount points are similar for each node in the group
     """
-    mounts_by_nodes = local_salt_client.cmd("L@"+','.join(nodes_in_group),
-                                            'cmd.run',
-                                            ["df -h | awk '{print $1}'"],
+    mounts_by_nodes = local_salt_client.cmd(tgt="L@"+','.join(nodes_in_group),
+                                            param="df -h | awk '{print $1}'",
                                             expr_form='compound')
 
     # Let's exclude cmp, kvm, ceph OSD nodes, mon, cid, k8s-ctl, k8s-cmp nodes
     # These nodes will have different mounts and this is expected
-    exclude_nodes = local_salt_client.cmd(
-        ("I@nova:compute or "
-         "I@ceph:osd or "
-         "I@salt:control or "
-         "I@prometheus:server and not I@influxdb:server or "
-         "I@kubernetes:* and not I@etcd:* or "
-         "I@docker:host and not I@prometheus:server and not I@kubernetes:*"),
-        'test.ping', expr_form='compound').keys()
+    exclude_nodes = local_salt_client.test_ping(
+         tgt="I@nova:compute or "
+             "I@ceph:osd or "
+             "I@salt:control or "
+             "I@prometheus:server and not I@influxdb:server or "
+             "I@kubernetes:* and not I@etcd:* or "
+             "I@docker:host and not I@prometheus:server and not I@kubernetes:*",
+         expr_form='compound').keys()
 
     if len(mounts_by_nodes.keys()) < 2:
         pytest.skip("Nothing to compare - only 1 node")
diff --git a/test_set/cvp-sanity/tests/test_mtu.py b/test_set/cvp-sanity/tests/test_mtu.py
index 417bc1a..0a3d2d0 100644
--- a/test_set/cvp-sanity/tests/test_mtu.py
+++ b/test_set/cvp-sanity/tests/test_mtu.py
@@ -11,22 +11,26 @@
         ["bonding_masters", "lo", "veth", "tap", "cali", "qv", "qb", "br-int", "vxlan"]
     total = {}
     network_info = local_salt_client.cmd(
-        "L@"+','.join(nodes_in_group), 'cmd.run', ['ls /sys/class/net/'], expr_form='compound')
+        tgt="L@"+','.join(nodes_in_group),
+        param='ls /sys/class/net/',
+        expr_form='compound')
 
-    kvm_nodes = local_salt_client.cmd(
-        'salt:control', 'test.ping', expr_form='pillar').keys()
+    kvm_nodes = local_salt_client.test_ping(tgt='salt:control').keys()
 
     if len(network_info.keys()) < 2:
         pytest.skip("Nothing to compare - only 1 node")
 
     for node, ifaces_info in network_info.iteritems():
+        if isinstance(ifaces_info, bool):
+            print("{} node is skipped".format(node))
+            continue
         if node in kvm_nodes:
-            kvm_info = local_salt_client.cmd(node, 'cmd.run',
-                                             ["virsh list | "
-                                              "awk '{print $2}' | "
-                                              "xargs -n1 virsh domiflist | "
-                                              "grep -v br-pxe | grep br- | "
-                                              "awk '{print $1}'"])
+            kvm_info = local_salt_client.cmd(tgt=node,
+                                             param="virsh list | "
+                                                   "awk '{print $2}' | "
+                                                   "xargs -n1 virsh domiflist | "
+                                                   "grep -v br-pxe | grep br- | "
+                                                   "awk '{print $1}'")
             ifaces_info = kvm_info.get(node)
         node_ifaces = ifaces_info.split('\n')
         ifaces = {}
@@ -35,9 +39,9 @@
                 if skipped_iface in iface:
                     break
             else:
-                iface_mtu = local_salt_client.cmd(node, 'cmd.run',
-                                                  ['cat /sys/class/'
-                                                   'net/{}/mtu'.format(iface)])
+                iface_mtu = local_salt_client.cmd(tgt=node,
+                                                  param='cat /sys/class/'
+                                                        'net/{}/mtu'.format(iface))
                 ifaces[iface] = iface_mtu.get(node)
         total[node] = ifaces
 
diff --git a/test_set/cvp-sanity/tests/test_nodes.py b/test_set/cvp-sanity/tests/test_nodes.py
index 5f8b2dc..687f3ae 100644
--- a/test_set/cvp-sanity/tests/test_nodes.py
+++ b/test_set/cvp-sanity/tests/test_nodes.py
@@ -4,10 +4,9 @@
 
 def test_minions_status(local_salt_client):
     result = local_salt_client.cmd(
-        'salt:master',
-        'cmd.run',
-        ['salt-run manage.status timeout=10 --out=json'],
-        expr_form='pillar')
+        tgt='salt:master',
+        param='salt-run manage.status timeout=10 --out=json',
+        expr_form='pillar', check_status=True)
     statuses = {}
     try:
         statuses = json.loads(result.values()[0])
diff --git a/test_set/cvp-sanity/tests/test_nodes_in_maas.py b/test_set/cvp-sanity/tests/test_nodes_in_maas.py
index c20d241..fafd150 100644
--- a/test_set/cvp-sanity/tests/test_nodes_in_maas.py
+++ b/test_set/cvp-sanity/tests/test_nodes_in_maas.py
@@ -1,45 +1,35 @@
 import json
 import pytest
-
 import utils
 
 
 def get_maas_logged_in_profiles(local_salt_client):
-    get_apis = local_salt_client.cmd(
-        'maas:cluster',
-        'cmd.run',
-        ['maas list'],
-        expr_form='pillar')
-    return get_apis.values()[0]
+    get_apis = local_salt_client.cmd_any(
+        tgt='maas:cluster',
+        param='maas list')
+    return get_apis
 
 
 def login_to_maas(local_salt_client, user):
-    login = local_salt_client.cmd(
-        'maas:cluster',
-        'cmd.run',
-        ["source /var/lib/maas/.maas_login.sh  ; echo {}=${{PROFILE}}"
-         "".format(user)],
-        expr_form='pillar')
-    return login.values()[0]
+    login = local_salt_client.cmd_any(
+        tgt='maas:cluster',
+        param="source /var/lib/maas/.maas_login.sh  ; echo {}=${{PROFILE}}"
+              "".format(user))
+    return login
 
 
 def test_nodes_deployed_in_maas(local_salt_client):
     config = utils.get_configuration()
 
     # 1. Check MAAS is present on some node
-    check_maas = local_salt_client.cmd(
-        'maas:cluster',
-        'test.ping',
-        expr_form='pillar')
+    check_maas = local_salt_client.test_ping(tgt='maas:cluster')
     if not check_maas:
         pytest.skip("Could not find MAAS on the environment")
 
     # 2. Get MAAS admin user from model
-    maas_admin_user = local_salt_client.cmd(
-        'maas:cluster', 'pillar.get',
-        ['_param:maas_admin_username'],
-        expr_form='pillar'
-    ).values()[0]
+    maas_admin_user = local_salt_client.pillar_get(
+        tgt='maas:cluster',
+        param='_param:maas_admin_username')
     if not maas_admin_user:
         pytest.skip("Could not find MAAS admin user in the model by parameter "
                     "'maas_admin_username'")
@@ -57,9 +47,8 @@
 
     # 4. Get nodes in MAAS
     get_nodes = local_salt_client.cmd(
-        'maas:cluster',
-        'cmd.run',
-        ['maas {} nodes read'.format(maas_admin_user)],
+        tgt='maas:cluster',
+        param='maas {} nodes read'.format(maas_admin_user),
         expr_form='pillar')
     result = ""
     try:
diff --git a/test_set/cvp-sanity/tests/test_nova_services.py b/test_set/cvp-sanity/tests/test_nova_services.py
index 471e5dd..6505d30 100644
--- a/test_set/cvp-sanity/tests/test_nova_services.py
+++ b/test_set/cvp-sanity/tests/test_nova_services.py
@@ -3,43 +3,33 @@
 
 @pytest.mark.usefixtures('check_openstack')
 def test_nova_services_status(local_salt_client):
-    result = local_salt_client.cmd(
-        'keystone:server',
-        'cmd.run',
-        ['. /root/keystonercv3;'
-         'nova service-list | grep "down\|disabled" | grep -v "Forced down"'],
-        expr_form='pillar')
+    result = local_salt_client.cmd_any(
+        tgt='keystone:server',
+        param='. /root/keystonercv3;'
+              'nova service-list | grep "down\|disabled" | grep -v "Forced down"')
 
-    assert result[result.keys()[0]] == '', \
+    assert result == '', \
         '''Some nova services are in wrong state'''
 
 
 @pytest.mark.usefixtures('check_openstack')
 def test_nova_hosts_consistent(local_salt_client):
-    all_cmp_services = local_salt_client.cmd(
-        'keystone:server',
-        'cmd.run',
-        ['. /root/keystonercv3;'
-         'nova service-list | grep "nova-compute" | wc -l'],
-        expr_form='pillar').values()[0]
-    enabled_cmp_services = local_salt_client.cmd(
-        'keystone:server',
-        'cmd.run',
-        ['. /root/keystonercv3;'
-         'nova service-list | grep "nova-compute" | grep "enabled" | wc -l'],
-        expr_form='pillar').values()[0]
-    hosts = local_salt_client.cmd(
-        'keystone:server',
-        'cmd.run',
-        ['. /root/keystonercv3;'
-         'openstack host list | grep "compute" | wc -l'],
-        expr_form='pillar').values()[0]
-    hypervisors = local_salt_client.cmd(
-        'keystone:server',
-        'cmd.run',
-        ['. /root/keystonercv3;'
-         'openstack hypervisor list | egrep -v "\-----|ID" | wc -l'],
-        expr_form='pillar').values()[0]
+    all_cmp_services = local_salt_client.cmd_any(
+        tgt='keystone:server',
+        param='. /root/keystonercv3;'
+              'nova service-list | grep "nova-compute" | wc -l')
+    enabled_cmp_services = local_salt_client.cmd_any(
+        tgt='keystone:server',
+        param='. /root/keystonercv3;'
+              'nova service-list | grep "nova-compute" | grep "enabled" | wc -l')
+    hosts = local_salt_client.cmd_any(
+        tgt='keystone:server',
+        param='. /root/keystonercv3;'
+              'openstack host list | grep "compute" | wc -l')
+    hypervisors = local_salt_client.cmd_any(
+        tgt='keystone:server',
+        param='. /root/keystonercv3;'
+              'openstack hypervisor list | egrep -v "\-----|ID" | wc -l')
 
     assert all_cmp_services == hypervisors, \
         "Number of nova-compute services ({}) does not match number of " \
diff --git a/test_set/cvp-sanity/tests/test_ntp_sync.py b/test_set/cvp-sanity/tests/test_ntp_sync.py
index 3636f14..dd199d1 100644
--- a/test_set/cvp-sanity/tests/test_ntp_sync.py
+++ b/test_set/cvp-sanity/tests/test_ntp_sync.py
@@ -5,15 +5,15 @@
 def test_ntp_sync(local_salt_client):
     """Test checks that system time is the same across all nodes"""
 
-    active_nodes = utils.get_active_nodes()
     config = utils.get_configuration()
     nodes_time = local_salt_client.cmd(
-        utils.list_to_target_string(active_nodes, 'or'),
-        'cmd.run',
-        ['date +%s'],
+        tgt='*',
+        param='date +%s',
         expr_form='compound')
     result = {}
     for node, time in nodes_time.iteritems():
+        if isinstance(nodes_time[node], bool):
+            time = 'Cannot access node(-s)'
         if node in config.get("ntp_skipped_nodes"):
             continue
         if time in result:
@@ -27,16 +27,17 @@
 
 def test_ntp_peers_state(local_salt_client):
     """Test gets ntpq peers state and checks the system peer is declared"""
-
-    active_nodes = utils.get_active_nodes()
     state = local_salt_client.cmd(
-        utils.list_to_target_string(active_nodes, 'or'),
-        'cmd.run',
-        ['ntpq -pn'],
+        tgt='*',
+        param='ntpq -pn',
         expr_form='compound')
     final_result = {}
     for node in state:
         sys_peer_declared = False
+        if not state[node]:
+            # TODO: do not skip
+            print("Node {} is skipped".format(node))
+            continue
         ntpq_output = state[node].split('\n')
         # if output has no 'remote' in the head of ntpq output
         # the 'ntqp -np' command failed and cannot check peers
diff --git a/test_set/cvp-sanity/tests/test_oss.py b/test_set/cvp-sanity/tests/test_oss.py
index 58a4151..a9623db 100644
--- a/test_set/cvp-sanity/tests/test_oss.py
+++ b/test_set/cvp-sanity/tests/test_oss.py
@@ -4,25 +4,19 @@
 
 
 def test_oss_status(local_salt_client):
-    result = local_salt_client.cmd(
-        'docker:swarm:role:master',
-        'pillar.fetch',
-        ['haproxy:proxy:listen:stats:binds:address'],
-        expr_form='pillar')
-    HAPROXY_STATS_IP = [node for node in result if result[node]]
+    HAPROXY_STATS_IP = local_salt_client.pillar_get(
+        tgt='docker:swarm:role:master',
+        param='haproxy:proxy:listen:stats:binds:address')
     proxies = {"http": None, "https": None}
     csv_result = requests.get('http://{}:9600/haproxy?stats;csv"'.format(
-                              result[HAPROXY_STATS_IP[0]]),
+                              HAPROXY_STATS_IP),
                               proxies=proxies).content
     data = csv_result.lstrip('# ')
     wrong_data = []
     list_of_services = ['aptly', 'openldap', 'gerrit', 'jenkins', 'postgresql',
                         'pushkin', 'rundeck', 'elasticsearch']
     for service in list_of_services:
-        check = local_salt_client.cmd(
-            '{}:client'.format(service),
-            'test.ping',
-            expr_form='pillar')
+        check = local_salt_client.test_ping(tgt='{}:client'.format(service))
         if check:
             lines = [row for row in csv.DictReader(data.splitlines())
                      if service in row['pxname']]
diff --git a/test_set/cvp-sanity/tests/test_packet_checker.py b/test_set/cvp-sanity/tests/test_packet_checker.py
index 2db8c7c..6c1ccc9 100644
--- a/test_set/cvp-sanity/tests/test_packet_checker.py
+++ b/test_set/cvp-sanity/tests/test_packet_checker.py
@@ -5,13 +5,12 @@
 
 def test_check_package_versions(local_salt_client, nodes_in_group):
     exclude_packages = utils.get_configuration().get("skipped_packages", [])
-    packages_versions = local_salt_client.cmd("L@"+','.join(nodes_in_group),
-                                               'lowpkg.list_pkgs',
-                                               expr_form='compound')
+    packages_versions = local_salt_client.cmd(tgt="L@"+','.join(nodes_in_group),
+                                              fun='lowpkg.list_pkgs',
+                                              expr_form='compound')
     # Let's exclude cid01 and dbs01 nodes from this check
-    exclude_nodes = local_salt_client.cmd("I@galera:master or I@gerrit:client",
-                                          'test.ping',
-                                          expr_form='compound').keys()
+    exclude_nodes = local_salt_client.test_ping(tgt="I@galera:master or I@gerrit:client",
+                                                expr_form='compound').keys()
     total_nodes = [i for i in packages_versions.keys() if i not in exclude_nodes]
     if len(total_nodes) < 2:
         pytest.skip("Nothing to compare - only 1 node")
@@ -21,6 +20,10 @@
     packages_names = set()
 
     for node in total_nodes:
+        if not packages_versions[node]:
+            # TODO: do not skip node
+            print("Node {} is skipped".format(node))
+            continue
         nodes.append(node)
         packages_names.update(packages_versions[node].keys())
 
@@ -30,6 +33,8 @@
         diff = []
         row = []
         for node in nodes:
+            if not packages_versions[node]:
+                continue
             if deb in packages_versions[node].keys():
                 diff.append(packages_versions[node][deb])
                 row.append("{}: {}".format(node, packages_versions[node][deb]))
@@ -51,8 +56,8 @@
         pytest.skip("Test for the latest packages is disabled")
     skipped_pkg = config.get("test_packages")["skipped_packages"]
     info_salt = local_salt_client.cmd(
-        'L@' + ','.join(nodes_in_group),
-        'cmd.run', ['apt list --upgradable 2>/dev/null | grep -v Listing'],
+        tgt='L@' + ','.join(nodes_in_group),
+        param='apt list --upgradable 2>/dev/null | grep -v Listing',
         expr_form='compound')
     for node in nodes_in_group:
         result = []
@@ -68,22 +73,20 @@
 def test_check_module_versions(local_salt_client, nodes_in_group):
     exclude_modules = utils.get_configuration().get("skipped_modules", [])
     pre_check = local_salt_client.cmd(
-        "L@"+','.join(nodes_in_group),
-        'cmd.run',
-        ['dpkg -l | grep "python-pip "'],
+        tgt="L@"+','.join(nodes_in_group),
+        param='dpkg -l | grep "python-pip "',
         expr_form='compound')
     if pre_check.values().count('') > 0:
         pytest.skip("pip is not installed on one or more nodes")
 
-    exclude_nodes = local_salt_client.cmd("I@galera:master or I@gerrit:client",
-                                          'test.ping',
-                                          expr_form='compound').keys()
+    exclude_nodes = local_salt_client.test_ping(tgt="I@galera:master or I@gerrit:client",
+                                                expr_form='compound').keys()
     total_nodes = [i for i in pre_check.keys() if i not in exclude_nodes]
 
     if len(total_nodes) < 2:
         pytest.skip("Nothing to compare - only 1 node")
-    list_of_pip_packages = local_salt_client.cmd("L@"+','.join(nodes_in_group),
-                                   'pip.freeze', expr_form='compound')
+    list_of_pip_packages = local_salt_client.cmd(tgt="L@"+','.join(nodes_in_group),
+                                                 fun='pip.freeze', expr_form='compound')
 
     nodes = []
 
diff --git a/test_set/cvp-sanity/tests/test_rabbit_cluster.py b/test_set/cvp-sanity/tests/test_rabbit_cluster.py
index f3f03e1..73efb57 100644
--- a/test_set/cvp-sanity/tests/test_rabbit_cluster.py
+++ b/test_set/cvp-sanity/tests/test_rabbit_cluster.py
@@ -6,23 +6,26 @@
     # it may be reintroduced in future
     config = utils.get_configuration()
     # request pillar data from rmq nodes
     # TODO: use the pillar_get helper
     rabbitmq_pillar_data = local_salt_client.cmd(
-        'rabbitmq:server', 'pillar.data',
-        ['rabbitmq:cluster'], expr_form='pillar')
+        tgt='rabbitmq:server',
+        fun='pillar.get',
+        param='rabbitmq:cluster',
+        expr_form='pillar')
     # creating dictionary {node:cluster_size_for_the_node}
     # with required cluster size for each node
     control_dict = {}
     required_cluster_size_dict = {}
     # request actual data from rmq nodes
     rabbit_actual_data = local_salt_client.cmd(
-        'rabbitmq:server', 'cmd.run',
-        ['rabbitmqctl cluster_status'], expr_form='pillar')
+        tgt='rabbitmq:server',
+        param='rabbitmqctl cluster_status', expr_form='pillar')
     for node in rabbitmq_pillar_data:
         if node in config.get('skipped_nodes'):
             del rabbit_actual_data[node]
             continue
         cluster_size_from_the_node = len(
-            rabbitmq_pillar_data[node]['rabbitmq:cluster']['members'])
+            rabbitmq_pillar_data[node]['members'])
         required_cluster_size_dict.update({node: cluster_size_from_the_node})
 
     # find actual cluster size for each node
diff --git a/test_set/cvp-sanity/tests/test_repo_list.py b/test_set/cvp-sanity/tests/test_repo_list.py
index da99ee8..5e70eeb 100644
--- a/test_set/cvp-sanity/tests/test_repo_list.py
+++ b/test_set/cvp-sanity/tests/test_repo_list.py
@@ -1,16 +1,18 @@
-import pytest
-import utils
-
-
 def test_list_of_repo_on_nodes(local_salt_client, nodes_in_group):
-    info_salt = local_salt_client.cmd('L@' + ','.join(
-                                      nodes_in_group),
-                                      'pillar.data', ['linux:system:repo'],
+    # TODO: use the pillar_get helper
+    info_salt = local_salt_client.cmd(tgt='L@' + ','.join(
+                                              nodes_in_group),
+                                      fun='pillar.get',
+                                      param='linux:system:repo',
                                       expr_form='compound')
 
     # check if some repos are disabled
     for node in info_salt.keys():
-        repos = info_salt[node]["linux:system:repo"]
+        repos = info_salt[node]
+        if not info_salt[node]:
+            # TODO: do not skip node
+            print("Node {} is skipped".format(node))
+            continue
         for repo in repos.keys():
             repository = repos[repo]
             if "enabled" in repository:
@@ -18,21 +20,19 @@
                     repos.pop(repo)
 
     raw_actual_info = local_salt_client.cmd(
-        'L@' + ','.join(
-        nodes_in_group),
-        'cmd.run',
-        ['cat /etc/apt/sources.list.d/*;'
-         'cat /etc/apt/sources.list|grep deb|grep -v "#"'],
-        expr_form='compound')
+        tgt='L@' + ','.join(
+            nodes_in_group),
+        param='cat /etc/apt/sources.list.d/*;'
+              'cat /etc/apt/sources.list|grep deb|grep -v "#"',
+        expr_form='compound', check_status=True)
     actual_repo_list = [item.replace('/ ', ' ').replace('[arch=amd64] ', '')
                         for item in raw_actual_info.values()[0].split('\n')]
-    if info_salt.values()[0]['linux:system:repo'] == '':
+    if info_salt.values()[0] == '':
         expected_salt_data = ''
     else:
         expected_salt_data = [repo['source'].replace('/ ', ' ')
                                             .replace('[arch=amd64] ', '')
-                              for repo in info_salt.values()[0]
-                              ['linux:system:repo'].values()
+                              for repo in info_salt.values()[0].values()
                               if 'source' in repo.keys()]
 
     diff = {}
diff --git a/test_set/cvp-sanity/tests/test_salt_master.py b/test_set/cvp-sanity/tests/test_salt_master.py
index 7649767..7ae5754 100644
--- a/test_set/cvp-sanity/tests/test_salt_master.py
+++ b/test_set/cvp-sanity/tests/test_salt_master.py
@@ -1,8 +1,7 @@
 def test_uncommited_changes(local_salt_client):
     git_status = local_salt_client.cmd(
-        'salt:master',
-        'cmd.run',
-        ['cd /srv/salt/reclass/classes/cluster/; git status'],
+        tgt='salt:master',
+        param='cd /srv/salt/reclass/classes/cluster/; git status',
         expr_form='pillar')
     assert 'nothing to commit' in git_status.values()[0], 'Git status showed' \
            ' some unmerged changes {}'''.format(git_status.values()[0])
@@ -10,9 +9,8 @@
 
 def test_reclass_smoke(local_salt_client):
     reclass = local_salt_client.cmd(
-        'salt:master',
-        'cmd.run',
-        ['reclass-salt --top; echo $?'],
+        tgt='salt:master',
+        param='reclass-salt --top; echo $?',
         expr_form='pillar')
     result = reclass[reclass.keys()[0]][-1]
 
diff --git a/test_set/cvp-sanity/tests/test_services.py b/test_set/cvp-sanity/tests/test_services.py
index 4f7f613..e25015d 100644
--- a/test_set/cvp-sanity/tests/test_services.py
+++ b/test_set/cvp-sanity/tests/test_services.py
@@ -16,7 +16,9 @@
     Inconsistent services will be checked with another test case
     """
     exclude_services = utils.get_configuration().get("exclude_services", [])
-    services_by_nodes = local_salt_client.cmd("L@"+','.join(nodes_in_group), 'service.get_all', expr_form='compound')
+    services_by_nodes = local_salt_client.cmd(tgt="L@"+','.join(nodes_in_group),
+                                              fun='service.get_all',
+                                              expr_form='compound')
 
     if len(services_by_nodes.keys()) < 2:
         pytest.skip("Nothing to compare - only 1 node")
@@ -26,6 +28,10 @@
     all_services = set()
 
     for node in services_by_nodes:
+        if not services_by_nodes[node]:
+            # TODO: do not skip node
+            print("Node {} is skipped".format(node))
+            continue
         nodes.append(node)
         all_services.update(services_by_nodes[node])
 
diff --git a/test_set/cvp-sanity/tests/test_single_vip.py b/test_set/cvp-sanity/tests/test_single_vip.py
index 29bdb88..6fa0a41 100644
--- a/test_set/cvp-sanity/tests/test_single_vip.py
+++ b/test_set/cvp-sanity/tests/test_single_vip.py
@@ -1,17 +1,23 @@
-import pytest
-import utils
-import os
 from collections import Counter
 
 
 def test_single_vip(local_salt_client, nodes_in_group):
-    local_salt_client.cmd("L@"+','.join(nodes_in_group), 'saltutil.sync_all', expr_form='compound')
+    local_salt_client.cmd(tgt="L@"+','.join(nodes_in_group),
+                          fun='saltutil.sync_all',
+                          expr_form='compound')
     nodes_list = local_salt_client.cmd(
-        "L@"+','.join(nodes_in_group), 'grains.item', ['ipv4'], expr_form='compound')
+        tgt="L@"+','.join(nodes_in_group),
+        fun='grains.item',
+        param='ipv4',
+        expr_form='compound')
 
     ipv4_list = []
 
     for node in nodes_list:
+        if not nodes_list.get(node):
+            # TODO: do not skip node
+            print("Node {} is skipped".format(node))
+            continue
         ipv4_list.extend(nodes_list.get(node).get('ipv4'))
 
     cnt = Counter(ipv4_list)
diff --git a/test_set/cvp-sanity/tests/test_stacklight.py b/test_set/cvp-sanity/tests/test_stacklight.py
index c91a5f6..703deea 100644
--- a/test_set/cvp-sanity/tests/test_stacklight.py
+++ b/test_set/cvp-sanity/tests/test_stacklight.py
@@ -2,46 +2,42 @@
 import requests
 import datetime
 import pytest
-import utils
 
 
 @pytest.mark.usefixtures('check_kibana')
 def test_elasticsearch_cluster(local_salt_client):
-    salt_output = local_salt_client.cmd(
-        'kibana:server',
-        'pillar.get',
-        ['_param:haproxy_elasticsearch_bind_host'],
-        expr_form='pillar')
+    salt_output = local_salt_client.pillar_get(
+        tgt='kibana:server',
+        param='_param:haproxy_elasticsearch_bind_host')
 
     proxies = {"http": None, "https": None}
-    for node in salt_output.keys():
-        IP = salt_output[node]
-        assert requests.get('http://{}:9200/'.format(IP),
-                            proxies=proxies).status_code == 200, \
-            'Cannot check elasticsearch url on {}.'.format(IP)
-        resp = requests.get('http://{}:9200/_cat/health'.format(IP),
-                            proxies=proxies).content
-        assert resp.split()[3] == 'green', \
-            'elasticsearch status is not good {}'.format(
-            json.dumps(resp, indent=4))
-        assert resp.split()[4] == '3', \
-            'elasticsearch status is not good {}'.format(
-            json.dumps(resp, indent=4))
-        assert resp.split()[5] == '3', \
-            'elasticsearch status is not good {}'.format(
-            json.dumps(resp, indent=4))
-        assert resp.split()[10] == '0', \
-            'elasticsearch status is not good {}'.format(
-            json.dumps(resp, indent=4))
-        assert resp.split()[13] == '100.0%', \
-            'elasticsearch status is not good {}'.format(
-            json.dumps(resp, indent=4))
+    IP = salt_output
+    assert requests.get('http://{}:9200/'.format(IP),
+                        proxies=proxies).status_code == 200, \
+        'Cannot check elasticsearch url on {}.'.format(IP)
+    resp = requests.get('http://{}:9200/_cat/health'.format(IP),
+                        proxies=proxies).content
+    assert resp.split()[3] == 'green', \
+        'elasticsearch status is not good {}'.format(
+        json.dumps(resp, indent=4))
+    assert resp.split()[4] == '3', \
+        'elasticsearch status is not good {}'.format(
+        json.dumps(resp, indent=4))
+    assert resp.split()[5] == '3', \
+        'elasticsearch status is not good {}'.format(
+        json.dumps(resp, indent=4))
+    assert resp.split()[10] == '0', \
+        'elasticsearch status is not good {}'.format(
+        json.dumps(resp, indent=4))
+    assert resp.split()[13] == '100.0%', \
+        'elasticsearch status is not good {}'.format(
+        json.dumps(resp, indent=4))
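+
+    # For reference, the indices above follow the default _cat/health column
+    # order (Elasticsearch 5.x; later versions may differ):
+    #   epoch timestamp cluster status node.total node.data shards pri relo
+    #   init unassign pending_tasks max_task_wait_time active_shards_percent
+    # e.g. '1549036400 14:33:20 elasticsearch green 3 3 620 310 0 0 0 0 - 100.0%'
+    #   => index 3 = status, 4 = node.total, 5 = node.data,
+    #      10 = unassign, 13 = active_shards_percent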
 
 
 @pytest.mark.usefixtures('check_kibana')
 def test_kibana_status(local_salt_client):
     proxies = {"http": None, "https": None}
-    IP = utils.get_monitoring_ip('stacklight_log_address')
+    IP = local_salt_client.pillar_get(param='_param:stacklight_log_address')
     resp = requests.get('http://{}:5601/api/status'.format(IP),
                         proxies=proxies).content
     body = json.loads(resp)
@@ -57,14 +53,11 @@
 def test_elasticsearch_node_count(local_salt_client):
     now = datetime.datetime.now()
     today = now.strftime("%Y.%m.%d")
-    active_nodes = utils.get_active_nodes()
-    salt_output = local_salt_client.cmd(
-        'kibana:server',
-        'pillar.get',
-        ['_param:haproxy_elasticsearch_bind_host'],
-        expr_form='pillar')
+    salt_output = local_salt_client.pillar_get(
+        tgt='kibana:server',
+        param='_param:haproxy_elasticsearch_bind_host')
 
-    IP = salt_output.values()[0]
+    IP = salt_output
     headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
     proxies = {"http": None, "https": None}
     data = ('{"size": 0, "aggs": '
@@ -79,31 +72,28 @@
     assert 200 == response.status_code, 'Unexpected code {}'.format(
         response.text)
     resp = json.loads(response.text)
-    cluster_domain = local_salt_client.cmd('salt:control',
-                                           'pillar.get',
-                                           ['_param:cluster_domain'],
-                                           expr_form='pillar').values()[0]
+    cluster_domain = local_salt_client.pillar_get(param='_param:cluster_domain')
     monitored_nodes = []
     for item_ in resp['aggregations']['uniq_hostname']['buckets']:
         node_name = item_['key']
         monitored_nodes.append(node_name + '.' + cluster_domain)
     missing_nodes = []
-    for node in active_nodes.keys():
+    all_nodes = local_salt_client.test_ping(tgt='*', expr_form=None).keys()
+    for node in all_nodes:
         if node not in monitored_nodes:
             missing_nodes.append(node)
     assert len(missing_nodes) == 0, \
         'Not all nodes are in Elasticsearch. Found {0} keys, ' \
         'expected {1}. Missing nodes: \n{2}'. \
-            format(len(monitored_nodes), len(active_nodes), missing_nodes)
+            format(len(monitored_nodes), len(all_nodes), missing_nodes)
 
 
 def test_stacklight_services_replicas(local_salt_client):
     # TODO
     # change to docker:swarm:role:master ?
     salt_output = local_salt_client.cmd(
-        'I@docker:client:stack:monitoring and I@prometheus:server',
-        'cmd.run',
-        ['docker service ls'],
+        tgt='I@docker:client:stack:monitoring and I@prometheus:server',
+        param='docker service ls',
         expr_form='compound')
 
     if not salt_output:
@@ -122,15 +112,14 @@
 
 @pytest.mark.usefixtures('check_prometheus')
 def test_prometheus_alert_count(local_salt_client, ctl_nodes_pillar):
-    IP = utils.get_monitoring_ip('cluster_public_host')
+    IP = local_salt_client.pillar_get(param='_param:cluster_public_host')
     # keystone:server can return 3 nodes instead of 1
     # this will be fixed later
     # TODO
     nodes_info = local_salt_client.cmd(
-        ctl_nodes_pillar,
-        'cmd.run',
-        ['curl -s http://{}:15010/alerts | grep icon-chevron-down | '
-         'grep -v "0 active"'.format(IP)],
+        tgt=ctl_nodes_pillar,
+        param='curl -s http://{}:15010/alerts | grep icon-chevron-down | '
+              'grep -v "0 active"'.format(IP),
         expr_form='pillar')
 
     result = nodes_info[nodes_info.keys()[0]].replace('</td>', '').replace(
@@ -141,9 +130,8 @@
 
 def test_stacklight_containers_status(local_salt_client):
     salt_output = local_salt_client.cmd(
-        'I@docker:swarm:role:master and I@prometheus:server',
-        'cmd.run',
-        ['docker service ps $(docker stack services -q monitoring)'],
+        tgt='I@docker:swarm:role:master and I@prometheus:server',
+        param='docker service ps $(docker stack services -q monitoring)',
         expr_form='compound')
 
     if not salt_output:
@@ -172,10 +160,10 @@
 
 
 def test_running_telegraf_services(local_salt_client):
-    salt_output = local_salt_client.cmd('telegraf:agent',
-                                        'service.status',
-                                        'telegraf',
-                                        expr_form='pillar')
+    salt_output = local_salt_client.cmd(tgt='telegraf:agent',
+                                        fun='service.status',
+                                        param='telegraf',
+                                        expr_form='pillar')
 
     if not salt_output:
         pytest.skip("Telegraf or telegraf:agent \
@@ -189,9 +177,9 @@
 
 
 def test_running_fluentd_services(local_salt_client):
-    salt_output = local_salt_client.cmd('fluentd:agent',
-                                        'service.status',
-                                        'td-agent',
+    salt_output = local_salt_client.cmd(tgt='fluentd:agent',
+                                        fun='service.status',
+                                        param='td-agent',
                                         expr_form='pillar')
     result = [{node: status} for node, status
               in salt_output.items()
diff --git a/test_set/cvp-sanity/tests/test_ui_addresses.py b/test_set/cvp-sanity/tests/test_ui_addresses.py
index 53bb03b..e5ef112 100644
--- a/test_set/cvp-sanity/tests/test_ui_addresses.py
+++ b/test_set/cvp-sanity/tests/test_ui_addresses.py
@@ -1,40 +1,33 @@
-import utils
 import pytest
 
 
 @pytest.mark.usefixtures('check_openstack')
 def test_ui_horizon(local_salt_client, ctl_nodes_pillar):
-    salt_output = local_salt_client.cmd(
-        'horizon:server',
-        'pillar.get',
-        ['_param:cluster_public_host'],
-        expr_form='pillar')
-    if not salt_output:
+    IP = local_salt_client.pillar_get(
+        tgt='horizon:server',
+        param='_param:cluster_public_host')
+    if not IP:
         pytest.skip("Horizon is not enabled on this environment")
-    IP = [salt_output[node] for node in salt_output
-          if salt_output[node]]
-    result = local_salt_client.cmd(
-        ctl_nodes_pillar,
-        'cmd.run',
-        ['curl --insecure https://{}/auth/login/ 2>&1 | \
-         grep Login'.format(IP[0])],
+    result = local_salt_client.cmd_any(
+        tgt=ctl_nodes_pillar,
+        param='curl --insecure https://{}/auth/login/ 2>&1 | \
+               grep Login'.format(IP),
         expr_form='pillar')
-    assert len(result[result.keys()[0]]) != 0, \
-        'Horizon login page is not reachable on {} from ctl nodes'.format(
-        IP[0])
+    assert len(result) != 0, \
+        'Horizon login page is not reachable on {} from ctl nodes'.format(IP)
 
 
 @pytest.mark.usefixtures('check_openstack')
 def test_public_openstack(local_salt_client, ctl_nodes_pillar):
-    IP = utils.get_monitoring_ip('cluster_public_host')
+    IP = local_salt_client.pillar_get(param='_param:cluster_public_host')
     protocol = 'https'
     port = '5000'
     url = "{}://{}:{}/v3".format(protocol, IP, port)
     result = local_salt_client.cmd(
-        ctl_nodes_pillar,
-        'cmd.run',
-        ['curl -k {}/ 2>&1 | \
-         grep stable'.format(url)],
+        tgt=ctl_nodes_pillar,
+        param='curl -k {}/ 2>&1 | \
+               grep stable'.format(url),
         expr_form='pillar')
     assert len(result[result.keys()[0]]) != 0, \
         'Public Openstack url is not reachable on {} from ctl nodes'.format(url)
@@ -42,15 +35,14 @@
 
 @pytest.mark.usefixtures('check_kibana')
 def test_internal_ui_kibana(local_salt_client, ctl_nodes_pillar):
-    IP = utils.get_monitoring_ip('stacklight_log_address')
+    IP = local_salt_client.pillar_get(param='_param:stacklight_log_address')
     protocol = 'http'
     port = '5601'
     url = "{}://{}:{}".format(protocol, IP, port)
     result = local_salt_client.cmd(
-        ctl_nodes_pillar,
-        'cmd.run',
-        ['curl {}/app/kibana 2>&1 | \
-         grep loading'.format(url)],
+        tgt=ctl_nodes_pillar,
+        param='curl {}/app/kibana 2>&1 | \
+               grep loading'.format(url),
         expr_form='pillar')
     assert len(result[result.keys()[0]]) != 0, \
         'Internal Kibana login page is not reachable on {} ' \
@@ -59,15 +51,14 @@
 
 @pytest.mark.usefixtures('check_kibana')
 def test_public_ui_kibana(local_salt_client, ctl_nodes_pillar):
-    IP = utils.get_monitoring_ip('cluster_public_host')
+    IP = local_salt_client.pillar_get(param='_param:cluster_public_host')
     protocol = 'https'
     port = '5601'
     url = "{}://{}:{}".format(protocol, IP, port)
     result = local_salt_client.cmd(
-        ctl_nodes_pillar,
-        'cmd.run',
-        ['curl {}/app/kibana 2>&1 | \
-         grep loading'.format(url)],
+        tgt=ctl_nodes_pillar,
+        param='curl {}/app/kibana 2>&1 | \
+               grep loading'.format(url),
         expr_form='pillar')
     assert len(result[result.keys()[0]]) != 0, \
         'Public Kibana login page is not reachable on {} ' \
@@ -76,15 +67,14 @@
 
 @pytest.mark.usefixtures('check_prometheus')
 def test_internal_ui_prometheus(local_salt_client, ctl_nodes_pillar):
-    IP = utils.get_monitoring_ip('stacklight_monitor_address')
+    IP = local_salt_client.pillar_get(param='_param:stacklight_monitor_address')
     protocol = 'http'
     port = '15010'
     url = "{}://{}:{}".format(protocol, IP, port)
     result = local_salt_client.cmd(
-        ctl_nodes_pillar,
-        'cmd.run',
-        ['curl {}/graph 2>&1 | \
-         grep Prometheus'.format(url)],
+        tgt=ctl_nodes_pillar,
+        param='curl {}/graph 2>&1 | \
+               grep Prometheus'.format(url),
         expr_form='pillar')
     assert len(result[result.keys()[0]]) != 0, \
         'Internal Prometheus page is not reachable on {} ' \
@@ -93,15 +83,14 @@
 
 @pytest.mark.usefixtures('check_prometheus')
 def test_public_ui_prometheus(local_salt_client, ctl_nodes_pillar):
-    IP = utils.get_monitoring_ip('cluster_public_host')
+    IP = local_salt_client.pillar_get(param='_param:cluster_public_host')
     protocol = 'https'
     port = '15010'
     url = "{}://{}:{}".format(protocol, IP, port)
     result = local_salt_client.cmd(
-        ctl_nodes_pillar,
-        'cmd.run',
-        ['curl {}/graph 2>&1 | \
-         grep Prometheus'.format(url)],
+        tgt=ctl_nodes_pillar,
+        param='curl {}/graph 2>&1 | \
+               grep Prometheus'.format(url),
         expr_form='pillar')
     assert len(result[result.keys()[0]]) != 0, \
         'Public Prometheus page is not reachable on {} ' \
@@ -110,14 +99,13 @@
 
 @pytest.mark.usefixtures('check_prometheus')
 def test_internal_ui_alert_manager(local_salt_client, ctl_nodes_pillar):
-    IP = utils.get_monitoring_ip('stacklight_monitor_address')
+    IP = local_salt_client.pillar_get(param='_param:stacklight_monitor_address')
     protocol = 'http'
     port = '15011'
     url = "{}://{}:{}".format(protocol, IP, port)
     result = local_salt_client.cmd(
-        ctl_nodes_pillar,
-        'cmd.run',
-        ['curl -s {}/ | grep Alertmanager'.format(url)],
+        tgt=ctl_nodes_pillar,
+        param='curl -s {}/ | grep Alertmanager'.format(url),
         expr_form='pillar')
     assert len(result[result.keys()[0]]) != 0, \
         'Internal AlertManager page is not reachable on {} ' \
@@ -126,14 +114,13 @@
 
 @pytest.mark.usefixtures('check_prometheus')
 def test_public_ui_alert_manager(local_salt_client, ctl_nodes_pillar):
-    IP = utils.get_monitoring_ip('cluster_public_host')
+    IP = local_salt_client.pillar_get(param='_param:cluster_public_host')
     protocol = 'https'
     port = '15011'
     url = "{}://{}:{}".format(protocol, IP, port)
     result = local_salt_client.cmd(
-        ctl_nodes_pillar,
-        'cmd.run',
-        ['curl -s {}/ | grep Alertmanager'.format(url)],
+        tgt=ctl_nodes_pillar,
+        param='curl -s {}/ | grep Alertmanager'.format(url),
         expr_form='pillar')
     assert len(result[result.keys()[0]]) != 0, \
         'Public AlertManager page is not reachable on {} ' \
@@ -142,14 +129,13 @@
 
 @pytest.mark.usefixtures('check_grafana')
 def test_internal_ui_grafana(local_salt_client, ctl_nodes_pillar):
-    IP = utils.get_monitoring_ip('stacklight_monitor_address')
+    IP = local_salt_client.pillar_get(param='_param:stacklight_monitor_address')
     protocol = 'http'
     port = '15013'
     url = "{}://{}:{}".format(protocol, IP, port)
     result = local_salt_client.cmd(
-        ctl_nodes_pillar,
-        'cmd.run',
-        ['curl {}/login 2>&1 | grep Grafana'.format(url)],
+        tgt=ctl_nodes_pillar,
+        param='curl {}/login 2>&1 | grep Grafana'.format(url),
         expr_form='pillar')
     assert len(result[result.keys()[0]]) != 0, \
         'Internal Grafana page is not reachable on {} ' \
@@ -158,14 +144,13 @@
 
 @pytest.mark.usefixtures('check_grafana')
 def test_public_ui_grafana(local_salt_client, ctl_nodes_pillar):
-    IP = utils.get_monitoring_ip('cluster_public_host')
+    IP = local_salt_client.pillar_get(param='_param:cluster_public_host')
     protocol = 'https'
     port = '8084'
     url = "{}://{}:{}".format(protocol, IP, port)
     result = local_salt_client.cmd(
-        ctl_nodes_pillar,
-        'cmd.run',
-        ['curl {}/login 2>&1 | grep Grafana'.format(url)],
+        tgt=ctl_nodes_pillar,
+        param='curl {}/login 2>&1 | grep Grafana'.format(url),
         expr_form='pillar')
     assert len(result[result.keys()[0]]) != 0, \
         'Public Grafana page is not reachable on {} from ctl nodes'.format(url)
@@ -173,15 +158,14 @@
 
 @pytest.mark.usefixtures('check_alerta')
 def test_internal_ui_alerta(local_salt_client, ctl_nodes_pillar):
-    IP = utils.get_monitoring_ip('stacklight_monitor_address')
+    IP = local_salt_client.pillar_get(param='_param:stacklight_monitor_address')
     protocol = 'http'
     port = '15017'
     url = "{}://{}:{}".format(protocol, IP, port)
     result = local_salt_client.cmd(
-        ctl_nodes_pillar,
-        'cmd.run',
-        ['curl {}/ 2>&1 | \
-         grep Alerta'.format(url)],
+        tgt=ctl_nodes_pillar,
+        param='curl {}/ 2>&1 | \
+               grep Alerta'.format(url),
         expr_form='pillar')
     assert len(result[result.keys()[0]]) != 0, \
         'Internal Alerta page is not reachable on {} from ctl nodes'.format(url)
@@ -189,47 +173,44 @@
 
 @pytest.mark.usefixtures('check_alerta')
 def test_public_ui_alerta(local_salt_client, ctl_nodes_pillar):
-    IP = utils.get_monitoring_ip('cluster_public_host')
+    IP = local_salt_client.pillar_get(param='_param:cluster_public_host')
     protocol = 'https'
     port = '15017'
     url = "{}://{}:{}".format(protocol, IP, port)
     result = local_salt_client.cmd(
-        ctl_nodes_pillar,
-        'cmd.run',
-        ['curl {}/ 2>&1 | \
-         grep Alerta'.format(url)],
+        tgt=ctl_nodes_pillar,
+        param='curl {}/ 2>&1 | \
+               grep Alerta'.format(url),
         expr_form='pillar')
     assert len(result[result.keys()[0]]) != 0, \
         'Public Alerta page is not reachable on {} from ctl nodes'.format(url)
 
 
 @pytest.mark.usefixtures('check_drivetrain')
-def test_public_ui_jenkins(local_salt_client, ctl_nodes_pillar, check_cicd):
-    IP = utils.get_monitoring_ip('cluster_public_host')
+def test_public_ui_jenkins(local_salt_client, ctl_nodes_pillar):
+    IP = local_salt_client.pillar_get(param='_param:cluster_public_host')
     protocol = 'https'
     port = '8081'
     url = "{}://{}:{}".format(protocol, IP, port)
     result = local_salt_client.cmd(
-        ctl_nodes_pillar,
-        'cmd.run',
-        ['curl -k {}/ 2>&1 | \
-         grep Authentication'.format(url)],
+        tgt=ctl_nodes_pillar,
+        param='curl -k {}/ 2>&1 | \
+               grep Authentication'.format(url),
         expr_form='pillar')
     assert len(result[result.keys()[0]]) != 0, \
         'Public Jenkins page is not reachable on {} from ctl nodes'.format(url)
 
 
 @pytest.mark.usefixtures('check_drivetrain')
-def test_public_ui_gerrit(local_salt_client, ctl_nodes_pillar, check_cicd):
-    IP = utils.get_monitoring_ip('cluster_public_host')
+def test_public_ui_gerrit(local_salt_client, ctl_nodes_pillar):
+    IP = local_salt_client.pillar_get(param='_param:cluster_public_host')
     protocol = 'https'
     port = '8070'
     url = "{}://{}:{}".format(protocol, IP, port)
     result = local_salt_client.cmd(
-        ctl_nodes_pillar,
-        'cmd.run',
-        ['curl -k {}/ 2>&1 | \
-         grep "Gerrit Code Review"'.format(url)],
+        tgt=ctl_nodes_pillar,
+        param='curl -k {}/ 2>&1 | \
+               grep "Gerrit Code Review"'.format(url),
         expr_form='pillar')
     assert len(result[result.keys()[0]]) != 0, \
         'Public Gerrit page is not reachable on {} from ctl nodes'.format(url)
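
The UI tests above all repeat one pattern: resolve an address from pillar,
curl it from the ctl nodes, and grep for a marker string. A hypothetical
helper (not part of this change) that the pattern could be folded into:

    def check_ui_page(local_salt_client, tgt, url, marker):
        # Hypothetical helper, shown for review only; mirrors the tests above.
        result = local_salt_client.cmd(
            tgt=tgt,
            param='curl -k {} 2>&1 | grep "{}"'.format(url, marker),
            expr_form='pillar')
        assert len(result[result.keys()[0]]) != 0, \
            '{} page is not reachable on {} from ctl nodes'.format(marker, url)
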
diff --git a/test_set/cvp-sanity/utils/__init__.py b/test_set/cvp-sanity/utils/__init__.py
index aeb4cd8..62ccae7 100644
--- a/test_set/cvp-sanity/utils/__init__.py
+++ b/test_set/cvp-sanity/utils/__init__.py
@@ -3,6 +3,7 @@
 import requests
 import re
 import sys, traceback
+import time
+import json
 
 
 class AuthenticationError(Exception):
@@ -10,46 +11,100 @@
 
 
 class salt_remote:
-    def cmd(self, tgt, fun, param=None, expr_form=None, tgt_type=None):
-        config = get_configuration()
-        url = config['SALT_URL'].strip()
-        if not re.match("^(http|https)://", url):
+    def __init__(self):
+        self.config = get_configuration()
+        self.skipped_nodes = self.config.get('skipped_nodes') or []
+        self.url = self.config['SALT_URL'].strip()
+        if not re.match("^(http|https)://", self.url):
             raise AuthenticationError("Salt URL should start \
             with http or https, given - {}".format(url))
-        proxies = {"http": None, "https": None}
-        headers = {'Accept': 'application/json'}
-        login_payload = {'username': config['SALT_USERNAME'],
-                         'password': config['SALT_PASSWORD'], 'eauth': 'pam'}
-        accept_key_payload = {'fun': fun, 'tgt': tgt, 'client': 'local',
-                              'expr_form': expr_form, 'tgt_type': tgt_type,
-                              'timeout': config['salt_timeout']}
-        if param:
-            accept_key_payload['arg'] = param
+        self.login_payload = {'username': self.config['SALT_USERNAME'],
+                              'password': self.config['SALT_PASSWORD'], 'eauth': 'pam'}
+        # TODO: proxies
+        self.proxies = {"http": None, "https": None}
+        self.expire = 0
+        self.cookies = []
+        self.headers = {'Accept': 'application/json'}
+        self._login()
 
+    def _login(self):
         try:
-            login_request = requests.post(os.path.join(url, 'login'),
-                                          headers=headers, data=login_payload,
-                                          proxies=proxies)
+            login_request = requests.post(os.path.join(self.url, 'login'),
+                                          headers={'Accept': 'application/json'},
+                                          data=self.login_payload,
+                                          proxies=self.proxies)
             if not login_request.ok:
                 raise AuthenticationError("Authentication to SaltMaster failed")
-
-            request = requests.post(url, headers=headers,
-                                    data=accept_key_payload,
-                                    cookies=login_request.cookies,
-                                    proxies=proxies)
-
-            response = request.json()['return'][0]
-            return response
-
         except Exception as e:
             print ("\033[91m\nConnection to SaltMaster "
                   "was not established.\n"
                   "Please make sure that you "
                   "provided correct credentials.\n"
-                  "Error message: {}\033[0m\n".format(e.message or e)
-            )
+                  "Error message: {}\033[0m\n".format(e.message or e))
             traceback.print_exc(file=sys.stdout)
             sys.exit()
+        self.expire = login_request.json()['return'][0]['expire']
+        self.cookies = login_request.cookies
+        self.headers['X-Auth-Token'] = login_request.json()['return'][0]['token']
+
+    def cmd(self, tgt, fun='cmd.run', param=None, expr_form=None, tgt_type=None, check_status=False, retries=3):
+        if self.expire < time.time() + 300:
+            self._login()
+        accept_key_payload = {'fun': fun, 'tgt': tgt, 'client': 'local',
+                              'expr_form': expr_form, 'tgt_type': tgt_type,
+                              'timeout': self.config['salt_timeout']}
+        if param:
+            accept_key_payload['arg'] = param
+
+        for i in range(retries):
+            request = requests.post(self.url, headers=self.headers,
+                                    data=accept_key_payload,
+                                    cookies=self.cookies,
+                                    proxies=self.proxies)
+            if not request.ok or not isinstance(request.json()['return'][0], dict):
+                print("Salt master is not responding or response is incorrect. Output: {}".format(request))
+                continue
+            response = request.json()['return'][0]
+            result = {key: response[key] for key in response if key not in self.skipped_nodes}
+            if check_status:
+                if False in result.values():
+                    print("One or several nodes are not responding. "
+                          "Output: {}".format(json.dumps(result, indent=4)))
+                    continue
+            break
+        else:
+            raise Exception("Error with Salt Master response")
+        return result
+
+    def test_ping(self, tgt, expr_form='pillar'):
+        return self.cmd(tgt=tgt, fun='test.ping', param=None, expr_form=expr_form)
+
+    def cmd_any(self, tgt, param=None, expr_form='pillar'):
+        """
+        Returns the result from the first node that answered; an empty
+        string still counts as an answer. Raises an exception if all
+        minions are down.
+        """
+        response = self.cmd(tgt=tgt, param=param, expr_form=expr_form)
+        for node in response.keys():
+            if response[node] or response[node] == '':
+                return response[node]
+        else:
+            raise Exception("All minions are down")
+
+    def pillar_get(self, tgt='salt:master', param=None, expr_form='pillar', fail_if_empty=False):
+        """
+        Fetches pillar values only. Returns the pillar value, or False
+        if the pillar is missing or empty; when fail_if_empty=True an
+        exception is raised instead.
+        """
+        response = self.cmd(tgt=tgt, fun='pillar.get', param=param, expr_form=expr_form)
+        for node in response.keys():
+            if response[node] != '':
+                return response[node]
+        else:
+            if fail_if_empty:
+                raise Exception("No pillar found or it is empty.")
+            else:
+                return False
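+
+    # Taken together, the helpers give tests a compact API; an illustrative
+    # sketch (the targets and pillar path below are examples, not fixtures):
+    #
+    #   client = salt_remote()
+    #   client.test_ping(tgt='keystone:server')    # pillar targeting by default
+    #   client.cmd_any(tgt='keystone:server', param='uptime')
+    #   client.pillar_get(param='_param:cluster_domain')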
 
 
 def init_salt_client():
@@ -63,43 +118,14 @@
     return separator.join(node_list)
 
 
-def get_monitoring_ip(param_name):
-    local_salt_client = init_salt_client()
-    salt_output = local_salt_client.cmd(
-        'salt:master',
-        'pillar.get',
-        ['_param:{}'.format(param_name)],
-        expr_form='pillar')
-    return salt_output[salt_output.keys()[0]]
-
-
-def get_active_nodes(test=None):
-    config = get_configuration()
-    local_salt_client = init_salt_client()
-
-    skipped_nodes = config.get('skipped_nodes') or []
-    if test:
-        testname = test.split('.')[0]
-        if 'skipped_nodes' in config.get(testname).keys():
-            skipped_nodes += config.get(testname)['skipped_nodes'] or []
-    if skipped_nodes != ['']:
-        print "\nNotice: {0} nodes will be skipped".format(skipped_nodes)
-        nodes = local_salt_client.cmd(
-            '* and not ' + list_to_target_string(skipped_nodes, 'and not'),
-            'test.ping',
-            expr_form='compound')
-    else:
-        nodes = local_salt_client.cmd('*', 'test.ping')
-    return nodes
-
-
 def calculate_groups():
     config = get_configuration()
     local_salt_client = init_salt_client()
     node_groups = {}
     nodes_names = set ()
     expr_form = ''
-    all_nodes = set(local_salt_client.cmd('*', 'test.ping'))
+    all_nodes = set(local_salt_client.test_ping(tgt='*', expr_form=None))
+    print all_nodes
     if 'groups' in config.keys() and 'PB_GROUPS' in os.environ.keys() and \
        os.environ['PB_GROUPS'].lower() != 'false':
         nodes_names.update(config['groups'].keys())
@@ -113,25 +139,23 @@
                 nodes_names.add(node)
         expr_form = 'pcre'
 
-    gluster_nodes = local_salt_client.cmd('I@salt:control and '
-                                          'I@glusterfs:server',
-                                          'test.ping', expr_form='compound')
-    kvm_nodes = local_salt_client.cmd('I@salt:control and not '
-                                      'I@glusterfs:server',
-                                      'test.ping', expr_form='compound')
+    gluster_nodes = local_salt_client.test_ping(
+        tgt='I@salt:control and I@glusterfs:server',
+        expr_form='compound')
+    kvm_nodes = local_salt_client.test_ping(
+        tgt='I@salt:control and not I@glusterfs:server',
+        expr_form='compound')
 
     for node_name in nodes_names:
         skipped_groups = config.get('skipped_groups') or []
         if node_name in skipped_groups:
             continue
         if expr_form == 'pcre':
-            nodes = local_salt_client.cmd('{}[0-9]{{1,3}}'.format(node_name),
-                                          'test.ping',
-                                          expr_form=expr_form)
+            nodes = local_salt_client.test_ping(
+                tgt='{}[0-9]{{1,3}}'.format(node_name),
+                expr_form=expr_form)
         else:
-            nodes = local_salt_client.cmd(config['groups'][node_name],
-                                          'test.ping',
-                                          expr_form=expr_form)
+            nodes = local_salt_client.test_ping(
+                tgt=config['groups'][node_name],
+                expr_form=expr_form)
             if nodes == {}:
                 continue