Fixing ceph tests, part 1
diff --git a/cvp_checks/tests/ceph/config.yaml b/cvp_checks/tests/ceph/config.yaml
deleted file mode 100644
index bfc8e70..0000000
--- a/cvp_checks/tests/ceph/config.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-ceph_monitors: ['ceph-003*', 'ceph-004*', 'ceph-005*']
-ceph_osd_probe_node: ['ceph-001*']
diff --git a/cvp_checks/tests/ceph/test_ceph.py b/cvp_checks/tests/ceph/test_ceph.py
index 05a61c2..c14778d 100644
--- a/cvp_checks/tests/ceph/test_ceph.py
+++ b/cvp_checks/tests/ceph/test_ceph.py
@@ -1,11 +1,14 @@
-from cvp_checks import utils
+import pytest
def test_check_ceph_osd(local_salt_client):
- config = utils.get_configuration(__file__)
- osd_fail = \
- local_salt_client.cmd(config["ceph_osd_probe_node"][0], 'cmd.run',
- ['ceph osd tree | grep down'])
+ osd_fail = local_salt_client.cmd(
+ 'ceph:osd',
+ 'cmd.run',
+ ['ceph osd tree | grep up'],
+ expr_form='pillar')
+ if not osd_fail:
+ pytest.skip("Ceph is not found on this environment")
assert not osd_fail.values()[0], \
"Some osds are in down state or ceph is not found".format(
osd_fail.values()[0])
diff --git a/cvp_checks/tests/ceph/test_ceph_haproxy.py b/cvp_checks/tests/ceph/test_ceph_haproxy.py
index 2f2e1e0..7bff15b 100644
--- a/cvp_checks/tests/ceph/test_ceph_haproxy.py
+++ b/cvp_checks/tests/ceph/test_ceph_haproxy.py
@@ -1,21 +1,23 @@
-from cvp_checks import utils
+import pytest
def test_ceph_haproxy(local_salt_client):
- config = utils.get_configuration(__file__)
-
+ pytest.skip("This test doesn't work. Skipped")
fail = {}
- for monitor in config["ceph_monitors"]:
- monitor_info = local_salt_client.cmd(monitor, 'cmd.run',
- ["echo 'show stat' | nc -U "
- "/var/run/haproxy/admin.sock | "
- "grep ceph_mon_radosgw_cluster"])
+ monitor_info = local_salt_client.cmd(
+ 'ceph:mon',
+ 'cmd.run',
+ ["echo 'show stat' | nc -U "
+ "/var/run/haproxy/admin.sock | "
+ "grep ceph_mon_radosgw_cluster"],
+ expr_form='pillar')
+ if not monitor_info:
+ pytest.skip("Ceph is not found on this environment")
- for name, info in monitor_info.iteritems():
- if "OPEN" and "UP" in info:
- continue
- else:
- fail[name] = info
-
+ for name, info in monitor_info.iteritems():
+ if "OPEN" and "UP" in info:
+ continue
+ else:
+ fail[name] = info
assert not fail, "Failed monitors: {}".format(fail)
diff --git a/cvp_checks/utils/__init__.py b/cvp_checks/utils/__init__.py
index 9762e8e..e58ed40 100644
--- a/cvp_checks/utils/__init__.py
+++ b/cvp_checks/utils/__init__.py
@@ -5,14 +5,14 @@
class salt_remote:
- def cmd(self, tgt, fun, param=None,expr_form=None):
+ def cmd(self, tgt, fun, param=None,expr_form=None,tgt_type=None):
config = get_configuration(__file__)
for salt_cred in ['SALT_USERNAME', 'SALT_PASSWORD', 'SALT_URL']:
if os.environ.get(salt_cred):
config[salt_cred] = os.environ[salt_cred]
headers = {'Accept':'application/json'}
login_payload = {'username':config['SALT_USERNAME'],'password':config['SALT_PASSWORD'],'eauth':'pam'}
- accept_key_payload = {'fun': fun,'tgt':tgt,'client':'local','expr_form':expr_form}
+ accept_key_payload = {'fun': fun,'tgt':tgt,'client':'local','expr_form':expr_form,'tgt_type':tgt_type}
if param:
accept_key_payload['arg']=param
@@ -74,29 +74,6 @@
groups = test_groups if test_groups else groups
- # For splitting Ceph nodes
- local_salt_client = init_salt_client()
-
- if "ceph*" in groups:
- groups.remove("ceph*")
-
- ceph_status = local_salt_client.cmd(
- 'ceph*', "cmd.run", ["ps aux | grep ceph-mon | grep -v grep"])
-
- mon = []
- ceph = []
- for node in ceph_status:
- if ceph_status[node] != '':
- mon.append(node.split('.')[0])
- else:
- ceph.append(node.split('.')[0])
-
- mon_regex = "({0}.*)".format(".*|".join(mon))
- groups.append(mon_regex)
-
- ceph_regex = "({0}.*)".format(".*|".join(ceph))
- groups.append(ceph_regex)
-
return groups