Port all changes from GitHub

Change-Id: Ie78388290ad2780074327c26508cdce73805f0da
diff --git a/cvp_checks/tests/ceph/test_ceph.py b/cvp_checks/tests/ceph/test_ceph_osd.py
similarity index 100%
rename from cvp_checks/tests/ceph/test_ceph.py
rename to cvp_checks/tests/ceph/test_ceph_osd.py
diff --git a/cvp_checks/tests/ceph/test_ceph_pg_count.py b/cvp_checks/tests/ceph/test_ceph_pg_count.py
new file mode 100644
index 0000000..46e50c4
--- /dev/null
+++ b/cvp_checks/tests/ceph/test_ceph_pg_count.py
@@ -0,0 +1,101 @@
+import pytest
+
+def __next_power_of2(total_pg):
+    count = 0
+    if total_pg and not (total_pg & (total_pg - 1)):
+        return total_pg
+    while total_pg != 0:
+        total_pg >>= 1
+        count += 1
+
+    return 1 << count
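+# Illustrative examples: __next_power_of2(100) returns 128, while
+# __next_power_of2(512) returns 512, since it is already a power of 2.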
+
+
+def test_ceph_pg_count(local_salt_client):
+    """
+    Test aimed to calculate placement groups for Ceph cluster
+    according formula below.
+    Formula to calculate PG num:
+    Total PGs = 
+    (Total_number_of_OSD * 100) / max_replication_count / pool count
+    pg_num and pgp_num should be the same and 
+    set according formula to higher value of powered 2
+    """
+
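+    # Worked example with illustrative numbers: 9 OSDs, a maximum
+    # replication factor of 3 and 3 pools give (9 * 100) / 3 / 3 = 100,
+    # which is rounded up to the next power of 2, i.e. 128.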
+    ceph_monitors = local_salt_client.cmd(
+        'ceph:mon',
+        'test.ping',
+        expr_form='pillar')
+
+    if not ceph_monitors:
+        pytest.skip("Ceph is not found on this environment")
+
+    monitor = ceph_monitors.keys()[0]
+    pools = local_salt_client.cmd(
+        monitor, 'cmd.run',
+        ["rados lspools"],
+        expr_form='glob').get(monitor).split('\n')
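+    # 'pools' is expected to be a list of pool names,
+    # e.g. ['rbd', 'images', 'volumes'] (illustrative names).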
+
+    total_osds = int(local_salt_client.cmd(
+        monitor,
+        'cmd.run',
+        ['ceph osd tree | grep osd | grep "up\|down" | wc -l'],
+        expr_form='glob').get(monitor))
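+    # The pipeline counts every OSD listed in the CRUSH tree with an
+    # "up" or "down" status, i.e. all deployed OSDs.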
+
+    raw_pool_replications = local_salt_client.cmd(
+        monitor,
+        'cmd.run',
+        ["ceph osd dump | grep size | awk '{print $3, $6}'"],
+        expr_form='glob').get(monitor).split('\n')
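+    # Each resulting line should look like "'images' 3" (pool name and
+    # its 'size' value); the pool name here is illustrative.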
+
+    pool_replications = {}
+    for replication in raw_pool_replications:
+        pool_replications[replication.split()[0]] = int(replication.split()[1])
+
+    max_replication_value = max(pool_replications.values())
+
+    total_pg = (total_osds * 100) / max_replication_value / len(pools)
+    correct_pg_num = __next_power_of2(total_pg)
+
+    pools_pg_num = {}
+    pools_pgp_num = {}
+    for pool in pools:
+        pg_num = int(local_salt_client.cmd(
+            monitor,
+            'cmd.run',
+            ["ceph osd pool get {} pg_num".format(pool)],
+            expr_form='glob').get(monitor).split()[1])
+        pools_pg_num[pool] = pg_num
+        pgp_num = int(local_salt_client.cmd(
+            monitor,
+            'cmd.run',
+            ["ceph osd pool get {} pgp_num".format(pool)],
+            expr_form='glob').get(monitor).split()[1])
+        pools_pgp_num[pool] = pgp_num
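+    # "ceph osd pool get <pool> pg_num" prints e.g. "pg_num: 128",
+    # so the second whitespace-separated field is the value.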
+
+    wrong_pg_num_pools = []
+    pg_pgp_not_equal_pools = []
+    for pool in pools:
+        if pools_pg_num[pool] != pools_pgp_num[pool]:
+            pg_pgp_not_equal_pools.append(pool)
+        if pools_pg_num[pool] < correct_pg_num:
+            wrong_pg_num_pools.append(pool)
+
+    assert not pg_pgp_not_equal_pools, \
+        "For pools {} PG and PGP numbers are not equal, " \
+        "but they should be".format(pg_pgp_not_equal_pools)
+    assert not wrong_pg_num_pools, \
+        "For pools {} the PG number is lower than the correct " \
+        "PG number; it should be equal or higher".format(wrong_pg_num_pools)
diff --git a/cvp_checks/tests/ceph/test_ceph_replicas.py b/cvp_checks/tests/ceph/test_ceph_replicas.py
new file mode 100644
index 0000000..62af49d
--- /dev/null
+++ b/cvp_checks/tests/ceph/test_ceph_replicas.py
@@ -0,0 +1,52 @@
+import pytest
+
+
+def test_ceph_replicas(local_salt_client):
+    """
+    Test aimed to check number of replicas
+    for most of deployments if there is no
+    special requirement for that.
+    """
+
+    ceph_monitors = local_salt_client.cmd(
+        'ceph:mon',
+        'test.ping',
+        expr_form='pillar')
+
+    if not ceph_monitors:
+        pytest.skip("Ceph is not found on this environment")
+
+    monitor = ceph_monitors.keys()[0]
+
+    raw_pool_replicas = local_salt_client.cmd(
+        monitor,
+        'cmd.run',
+        ["ceph osd dump | grep size | "
+         "awk '{print $3, $5, $6, $7, $8}'"],
+        expr_form='glob').get(monitor).split('\n')
+
+    pools_replicas = {}
+    for pool in raw_pool_replicas:
+        pool_name = pool.split(" ", 1)[0]
+        # raw_replicas is e.g. ['size', '3', 'min_size', '2']
+        raw_replicas = pool.split(" ", 1)[1].split()
+        pool_replicas = {raw_replicas[0]: int(raw_replicas[1]),
+                         raw_replicas[2]: int(raw_replicas[3])}
+        pools_replicas[pool_name] = pool_replicas
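+    # Resulting structure (illustrative):
+    # {"'images'": {'size': 3, 'min_size': 2}}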
+
+    error = []
+    for pool, replicas in pools_replicas.items():
+        for replica, value in replicas.items():
+            if replica == 'min_size' and value < 2:
+                error.append("{} {} is {}, must be at least 2".format(
+                    pool, replica, value))
+            if replica == 'size' and value < 3:
+                error.append("{} {} is {}, must be at least 3".format(
+                    pool, replica, value))
+
+    assert not error, "Wrong pool replicas found\n{}".format(error)
diff --git a/cvp_checks/tests/ceph/test_ceph_tell_bench.py b/cvp_checks/tests/ceph/test_ceph_tell_bench.py
new file mode 100644
index 0000000..db45435
--- /dev/null
+++ b/cvp_checks/tests/ceph/test_ceph_tell_bench.py
@@ -0,0 +1,60 @@
+import pytest
+import json
+import math
+
+
+def test_ceph_tell_bench(local_salt_client):
+    """
+    Test checks that each OSD MB per second speed 
+    is not lower than 10 MB comparing with AVG. 
+    Bench command by default writes 1Gb on each OSD 
+    with the default values of 4M 
+    and gives the "bytes_per_sec" speed for each OSD.
+
+    """
+    ceph_monitors = local_salt_client.cmd(
+        'ceph:mon',
+        'test.ping',
+        expr_form='pillar')
+
+    if not ceph_monitors:
+        pytest.skip("Ceph is not found on this environment")
+
+    monitor = ceph_monitors.keys()[0]
+    cmd_result = local_salt_client.cmd(
+        monitor,
+        'cmd.run', ["ceph tell osd.* bench -f json"],
+        expr_form='glob').get(monitor).split('\n')
+
+    cmd_result = filter(None, cmd_result)
+
+    osd_pool = {}
+    for osd in cmd_result:
+        # Split only on the first space so that a JSON payload that
+        # contains spaces is kept intact.
+        osd_name, osd_json = osd.split(" ", 1)
+        osd_pool[osd_name] = osd_json
+
+    mbps_sum = 0
+    osd_count = 0
+    for osd in osd_pool:
+        osd_count += 1
+        mbps_sum += json.loads(
+            osd_pool[osd])['bytes_per_sec'] / 1000000.0
+
+    mbps_avg = mbps_sum / osd_count
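+    # Illustrative example: with an average of 300 MB/s, an OSD
+    # reporting 295 MB/s passes, while one at 280 MB/s is flagged.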
+    result = {}
+    for osd in osd_pool:
+        mbps = json.loads(
+            osd_pool[osd])['bytes_per_sec'] / 1000000.0
+        if math.fabs(mbps_avg - mbps) > 10:
+            result[osd] = osd_pool[osd]
+
+    assert len(result) == 0, \
+        "Performance of the following OSD(s) deviates from the average " \
+        "({1} MB/s) by more than 10 MB/s; please check Ceph for " \
+        "possible problems:\n{0}".format(
+            json.dumps(result, indent=4), mbps_avg)