New test for Ceph Operations

PROD-35900

Change-Id: I34cdad3f6a9b6c89cfde6792b74e393fad7d49b6
diff --git a/tcp_tests/tests/system/test_ceph_operations.py b/tcp_tests/tests/system/test_ceph_operations.py
new file mode 100644
index 0000000..cfff2b5
--- /dev/null
+++ b/tcp_tests/tests/system/test_ceph_operations.py
@@ -0,0 +1,165 @@
+import pytest
+
+from tcp_tests import logger
+
+LOG = logger.logger
+
+
+@pytest.fixture(scope='module')
+def add_xtra_node_to_salt(salt_actions, underlay_actions, config):
+    """
+
+    :return:
+    """
+    LOG.info("Executing pytest SETUP from add_xtra_node_to_salt fixture")
+    xtra_node = [node['node_name'] for node in config.underlay.ssh
+                 if 'xtra' in node.get('node_name')][0]
+
+    cfg_node = [node['node_name'] for node in config.underlay.ssh
+                if 'salt_master' in node.get('roles')][0]
+
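+    # Accept the new node's salt-minion key on the Salt Master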
+    underlay_actions.check_call(
+            "salt-key -a {node} --include-all -y".format(node=xtra_node),
+            node_name=cfg_node,
+            raise_on_err=False)
+    # Restart salt-minion after its key is accepted on the Salt Master
+    underlay_actions.check_call(
+        "systemctl restart salt-minion",
+        node_name=xtra_node,
+        raise_on_err=False)
+    yield
+
+    # LOG.info("Executing pytest TEARDOWN from add_xtra_node_to_salt fixture")
+    # underlay_actions.check_call(
+    #         "salt-key -r {node} --include-all -y".format(node=xtra_node),
+    #         node_name=cfg_node,
+    #         raise_on_err=False)
+
+
+@pytest.mark.usefixtures("add_xtra_node_to_salt")
+class TestCephOsd(object):
+
+    @pytest.fixture
+    def describe_node_in_reclass(self, reclass_actions):
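+        """Describe the new 'xtra' OSD node in the reclass model.
+
+        Add the host parameters to cluster/*/ceph/init.yml and the node
+        definition to cluster/*/infra/config/init.yml so that Salt can
+        generate pillar data for the node.
+        """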
+        LOG.info("Executing pytest SETUP "
+                 "from describe_node_in_reclass fixture")
+        reclass = reclass_actions
+        # ---- cluster/*/ceph/init.yml ---------------
+        path = "cluster/*/ceph/init.yml"
+        reclass.add_key("parameters._param.ceph_osd_node04_hostname",
+                        "xtra",
+                        path)
+        reclass.add_key("parameters._param.ceph_osd_node04_address",
+                        "10.6.0.205",
+                        path)
+        reclass.add_key("parameters._param.ceph_osd_system_codename",
+                        "xenial",
+                        path)
+        reclass.add_key("parameters.linux.network.host.xtra.address",
+                        "${_param:ceph_osd_node04_address}",
+                        path)
+        reclass.add_key(
+            key="parameters.linux.network.host.xtra.names",
+            value="['${_param:ceph_osd_node04_hostname}', "
+            "'${_param:ceph_osd_node04_hostname}.${_param:cluster_domain}']",
+            short_path=path)
+
+        # ------- cluster/*/infra/config/init.yml -----------
+        path = "cluster/*/infra/config/init.yml"
+        parameter = "parameters.reclass.storage.node.ceph_osd_node04"
+        reclass.add_key(parameter + ".name",
+                        "${_param:ceph_osd_node04_hostname}",
+                        path)
+        reclass.add_key(parameter + ".domain",
+                        "${_param:cluster_domain}",
+                        path)
+        reclass.add_key(parameter + ".classes",
+                        "['cluster.${_param:cluster_name}.ceph.osd']",
+                        path)
+        reclass.add_key(parameter + ".params.salt_master_host",
+                        "${_param:reclass_config_master}",
+                        path)
+        reclass.add_key(parameter + ".params.linux_system_codename",
+                        "${_param:ceph_osd_system_codename}",
+                        path)
+        reclass.add_key(parameter + ".params.single_address",
+                        "${_param:ceph_osd_node04_address}",
+                        path)
+        reclass.add_key(parameter + ".params.ceph_crush_parent",
+                        "rack02",
+                        path)
+
+    def test_add_node_process(self, describe_node_in_reclass,
+                              drivetrain_actions):
+        """
+        https://docs.mirantis.com/mcp/q4-18/mcp-operations-guide/openstack-operations/ceph-operations/manage-nodes/add-osd-nodes.html
+        :param describe_node_in_reclass:
+        :param drivetrain_actions:
+        :return:
+        test took about 20 min
+        """
+        dt = drivetrain_actions
+
+        job_name = "ceph-add-osd-upmap"
+        job_parameters = {
+            'HOST': 'xtra*',
+            'HOST_TYPE': 'osd'
+            }
+        add_node_pipeline = dt.start_job_on_jenkins(
+            job_name=job_name,
+            job_parameters=job_parameters,
+            verbose=True)
+        assert add_node_pipeline == 'SUCCESS'
+
+    def test_added_node(self, underlay_actions, config):
+        # root@osd001:~# ceph osd tree in
+        # ID CLASS WEIGHT  TYPE NAME       STATUS REWEIGHT PRI-AFF
+        # -1       0.18585 root default
+        # -3       0.04646     host osd001
+        #  0   hdd 0.01549         osd.0       up  1.00000 1.00000
+        #  1   hdd 0.01549         osd.1       up  1.00000 1.00000
+        #  2   hdd 0.01549         osd.2       up  1.00000 1.00000
+        # -5       0.04646     host osd002
+        #  3   hdd 0.01549         osd.3       up  1.00000 1.00000
+        #  5   hdd 0.01549         osd.5       up  1.00000 1.00000
+        #  6   hdd 0.01549         osd.6       up  1.00000 1.00000
+        # -7       0.04646     host osd003
+        #  4   hdd 0.01549         osd.4       up  1.00000 1.00000
+        #  7   hdd 0.01549         osd.7       up  1.00000 1.00000
+        #  8   hdd 0.01549         osd.8       up  1.00000 1.00000
+        # -9       0.04646     host xtra
+        #  9   hdd 0.01549         osd.9       up  1.00000 1.00000
+        # 10   hdd 0.01549         osd.10      up  1.00000 1.00000
+        # 11   hdd 0.01549         osd.11      up  1.00000 1.00000
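+
+        # A minimal verification sketch. Assumptions (not verified here):
+        # mon nodes match the I@ceph:mon pillar target, and check_call()
+        # returns a dict whose 'stdout' is a list of output lines, as in
+        # the add_xtra_node_to_salt fixture above.
+        cfg_node = [node['node_name'] for node in config.underlay.ssh
+                    if 'salt_master' in node.get('roles')][0]
+        tree = underlay_actions.check_call(
+            "salt -C 'I@ceph:mon' --out=txt cmd.run 'ceph osd tree'",
+            node_name=cfg_node,
+            raise_on_err=False)
+        # The new host must appear in the CRUSH tree (see expected output)
+        assert 'xtra' in '\n'.join(tree['stdout'])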
+
+    def test_delete_node_process(self, drivetrain_actions):
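+        """Remove the previously added OSD node from the Ceph cluster
+        via the ``ceph-remove-node`` Drivetrain pipeline.
+        """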
+        dt = drivetrain_actions
+
+        job_name = "ceph-remove-node"
+        job_parameters = {
+            'HOST': 'xtra*',
+            'HOST_TYPE': 'osd'
+            }
+        remove_node_pipeline = dt.start_job_on_jenkins(
+            job_name=job_name,
+            job_parameters=job_parameters,
+            verbose=True)
+        assert remove_node_pipeline == 'SUCCESS'
+
+
+class TestCephMon(object):
+    def test_add_node(self, drivetrain_actions):
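+        # A minimal sketch, assuming a Drivetrain 'ceph-add-node' job is
+        # available as the mon counterpart of the OSD pipelines above and
+        # that the mon node is already described in reclass; both are
+        # assumptions, not verified by this test.
+        dt = drivetrain_actions
+        job_parameters = {
+            'HOST': 'xtra*',
+            'HOST_TYPE': 'mon'
+            }
+        add_node_pipeline = dt.start_job_on_jenkins(
+            job_name="ceph-add-node",
+            job_parameters=job_parameters,
+            verbose=True)
+        assert add_node_pipeline == 'SUCCESS'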
+
+    def test_delete_node(self):
+        pass
+
+
+class TestCephMgr(object):
+    def test_add_node(self):
+        pass
+
+    def test_delete_node(self):
+        pass