[CVP] Refactor salt client class
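
Move the StackLight tests from raw LocalClient-style positional calls
to the refactored client's keyword interface: cmd(tgt=..., fun=...,
param=..., expr_form=...) plus the pillar_get() and test_ping()
helpers. This removes the per-test pillar.get boilerplate and the
utils.get_monitoring_ip()/utils.get_active_nodes() helpers.

A minimal sketch (Python 2, matching the tests) of the interface this
change assumes; method names and defaults are inferred from the call
sites below, while the wrapped LocalClient and the pillar_get()
default target are assumptions, not the actual implementation:

    class SaltClient(object):
        def __init__(self, local):
            # assumed: 'local' is a salt.client.LocalClient instance
            self.local = local

        def cmd(self, tgt, fun='cmd.run', param=None, expr_form='glob'):
            # 'param' replaces the old positional argument list;
            # 'fun' defaults to cmd.run, so shell one-liners below
            # pass only tgt/param/expr_form
            args = [param] if param is not None else []
            return self.local.cmd(tgt, fun, args, expr_form=expr_form)

        def pillar_get(self, tgt='salt:master', param='',
                       expr_form='pillar'):
            # returns the bare pillar value of the first matched
            # minion instead of a {minion: value} dict
            result = self.cmd(tgt=tgt, fun='pillar.get', param=param,
                              expr_form=expr_form)
            return result.values()[0] if result else ''

        def test_ping(self, tgt, expr_form='compound'):
            # {minion: True} for every minion matching tgt
            return self.cmd(tgt=tgt, fun='test.ping',
                            expr_form=expr_form)

With that interface a pillar lookup collapses to a one-liner, e.g.
local_salt_client.pillar_get(param='_param:cluster_domain').
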
Change-Id: I91cfffe1c8d5df0224657ce9e36be9063b56f0b3
Related-PROD: PROD-28981
Related-PROD: PROD-28729
Related-PROD: PROD-28624
Related-PROD: PROD-29286
diff --git a/test_set/cvp-sanity/tests/test_stacklight.py b/test_set/cvp-sanity/tests/test_stacklight.py
index c91a5f6..703deea 100644
--- a/test_set/cvp-sanity/tests/test_stacklight.py
+++ b/test_set/cvp-sanity/tests/test_stacklight.py
@@ -2,46 +2,42 @@
 import requests
 import datetime
 import pytest
-import utils
 
 
 @pytest.mark.usefixtures('check_kibana')
 def test_elasticsearch_cluster(local_salt_client):
- salt_output = local_salt_client.cmd(
- 'kibana:server',
- 'pillar.get',
- ['_param:haproxy_elasticsearch_bind_host'],
- expr_form='pillar')
+ salt_output = local_salt_client.pillar_get(
+ tgt='kibana:server',
+ param='_param:haproxy_elasticsearch_bind_host')
     proxies = {"http": None, "https": None}
- for node in salt_output.keys():
- IP = salt_output[node]
- assert requests.get('http://{}:9200/'.format(IP),
- proxies=proxies).status_code == 200, \
- 'Cannot check elasticsearch url on {}.'.format(IP)
- resp = requests.get('http://{}:9200/_cat/health'.format(IP),
- proxies=proxies).content
- assert resp.split()[3] == 'green', \
- 'elasticsearch status is not good {}'.format(
- json.dumps(resp, indent=4))
- assert resp.split()[4] == '3', \
- 'elasticsearch status is not good {}'.format(
- json.dumps(resp, indent=4))
- assert resp.split()[5] == '3', \
- 'elasticsearch status is not good {}'.format(
- json.dumps(resp, indent=4))
- assert resp.split()[10] == '0', \
- 'elasticsearch status is not good {}'.format(
- json.dumps(resp, indent=4))
- assert resp.split()[13] == '100.0%', \
- 'elasticsearch status is not good {}'.format(
- json.dumps(resp, indent=4))
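+    # pillar_get returns the bare value, not a {minion: value} dict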
+ IP = salt_output
+ assert requests.get('http://{}:9200/'.format(IP),
+ proxies=proxies).status_code == 200, \
+ 'Cannot check elasticsearch url on {}.'.format(IP)
+ resp = requests.get('http://{}:9200/_cat/health'.format(IP),
+ proxies=proxies).content
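+    # _cat/health default columns: 3=status, 4=node.total, 5=node.data,
+    # 10=unassign, 13=active_shards_percent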
+ assert resp.split()[3] == 'green', \
+ 'elasticsearch status is not good {}'.format(
+ json.dumps(resp, indent=4))
+ assert resp.split()[4] == '3', \
+ 'elasticsearch status is not good {}'.format(
+ json.dumps(resp, indent=4))
+ assert resp.split()[5] == '3', \
+ 'elasticsearch status is not good {}'.format(
+ json.dumps(resp, indent=4))
+ assert resp.split()[10] == '0', \
+ 'elasticsearch status is not good {}'.format(
+ json.dumps(resp, indent=4))
+ assert resp.split()[13] == '100.0%', \
+ 'elasticsearch status is not good {}'.format(
+ json.dumps(resp, indent=4))
 
 
 @pytest.mark.usefixtures('check_kibana')
 def test_kibana_status(local_salt_client):
     proxies = {"http": None, "https": None}
- IP = utils.get_monitoring_ip('stacklight_log_address')
+ IP = local_salt_client.pillar_get(param='_param:stacklight_log_address')
     resp = requests.get('http://{}:5601/api/status'.format(IP),
                         proxies=proxies).content
     body = json.loads(resp)
@@ -57,14 +53,11 @@
 def test_elasticsearch_node_count(local_salt_client):
     now = datetime.datetime.now()
     today = now.strftime("%Y.%m.%d")
- active_nodes = utils.get_active_nodes()
- salt_output = local_salt_client.cmd(
- 'kibana:server',
- 'pillar.get',
- ['_param:haproxy_elasticsearch_bind_host'],
- expr_form='pillar')
-    IP = salt_output.values()[0]
+    salt_output = local_salt_client.pillar_get(
+        tgt='kibana:server',
+        param='_param:haproxy_elasticsearch_bind_host')
+    IP = salt_output
     headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
     proxies = {"http": None, "https": None}
     data = ('{"size": 0, "aggs": '
@@ -79,31 +72,28 @@
     assert 200 == response.status_code, 'Unexpected code {}'.format(
         response.text)
     resp = json.loads(response.text)
- cluster_domain = local_salt_client.cmd('salt:control',
- 'pillar.get',
- ['_param:cluster_domain'],
- expr_form='pillar').values()[0]
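+    # pillar_get without tgt presumably queries a default (master) target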
+ cluster_domain = local_salt_client.pillar_get(param='_param:cluster_domain')
     monitored_nodes = []
     for item_ in resp['aggregations']['uniq_hostname']['buckets']:
         node_name = item_['key']
         monitored_nodes.append(node_name + '.' + cluster_domain)
     missing_nodes = []
- for node in active_nodes.keys():
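+    # ping every minion to build the full node list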
+ all_nodes = local_salt_client.test_ping(tgt='*').keys()
+ for node in all_nodes:
         if node not in monitored_nodes:
             missing_nodes.append(node)
     assert len(missing_nodes) == 0, \
         'Not all nodes are in Elasticsearch. Found {0} keys, ' \
         'expected {1}. Missing nodes: \n{2}'. \
- format(len(monitored_nodes), len(active_nodes), missing_nodes)
+ format(len(monitored_nodes), len(all_nodes), missing_nodes)
 
 
 def test_stacklight_services_replicas(local_salt_client):
     # TODO
     # change to docker:swarm:role:master ?
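+    # no 'fun' here: the refactored cmd() defaults to cmd.run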
     salt_output = local_salt_client.cmd(
- 'I@docker:client:stack:monitoring and I@prometheus:server',
- 'cmd.run',
- ['docker service ls'],
+ tgt='I@docker:client:stack:monitoring and I@prometheus:server',
+ param='docker service ls',
         expr_form='compound')
 
     if not salt_output:
@@ -122,15 +112,14 @@
 
 
 @pytest.mark.usefixtures('check_prometheus')
 def test_prometheus_alert_count(local_salt_client, ctl_nodes_pillar):
- IP = utils.get_monitoring_ip('cluster_public_host')
+ IP = local_salt_client.pillar_get(param='_param:cluster_public_host')
     # keystone:server can return 3 nodes instead of 1
     # this will be fixed later
     # TODO
     nodes_info = local_salt_client.cmd(
- ctl_nodes_pillar,
- 'cmd.run',
- ['curl -s http://{}:15010/alerts | grep icon-chevron-down | '
- 'grep -v "0 active"'.format(IP)],
+ tgt=ctl_nodes_pillar,
+ param='curl -s http://{}:15010/alerts | grep icon-chevron-down | '
+ 'grep -v "0 active"'.format(IP),
         expr_form='pillar')
     result = nodes_info[nodes_info.keys()[0]].replace('</td>', '').replace(
@@ -141,9 +130,8 @@
 
 
 def test_stacklight_containers_status(local_salt_client):
     salt_output = local_salt_client.cmd(
- 'I@docker:swarm:role:master and I@prometheus:server',
- 'cmd.run',
- ['docker service ps $(docker stack services -q monitoring)'],
+ tgt='I@docker:swarm:role:master and I@prometheus:server',
+ param='docker service ps $(docker stack services -q monitoring)',
         expr_form='compound')
     if not salt_output:
@@ -172,10 +160,10 @@
 
 
 def test_running_telegraf_services(local_salt_client):
- salt_output = local_salt_client.cmd('telegraf:agent',
- 'service.status',
- 'telegraf',
- expr_form='pillar')
+ salt_output = local_salt_client.cmd(tgt='telegraf:agent',
+ fun='service.status',
+ param='telegraf',
+                                        expr_form='pillar')
 
     if not salt_output:
         pytest.skip("Telegraf or telegraf:agent \
@@ -189,9 +177,9 @@
 
 
 def test_running_fluentd_services(local_salt_client):
- salt_output = local_salt_client.cmd('fluentd:agent',
- 'service.status',
- 'td-agent',
+ salt_output = local_salt_client.cmd(tgt='fluentd:agent',
+ fun='service.status',
+ param='td-agent',
                                         expr_form='pillar')
     result = [{node: status} for node, status
               in salt_output.items()