New test for Ceph Operations
PROD-35900
Change-Id: I34cdad3f6a9b6c89cfde6792b74e393fad7d49b6
diff --git a/tcp_tests/managers/reclass_manager.py b/tcp_tests/managers/reclass_manager.py
index 137dd33..0813b5b 100644
--- a/tcp_tests/managers/reclass_manager.py
+++ b/tcp_tests/managers/reclass_manager.py
@@ -37,6 +37,11 @@
super(ReclassManager, self).__init__(config=config, underlay=underlay)
def check_existence(self, key):
+        """
+        Return True if the reclass model contains the given key.
+
+        :param key: string
+        :return: boolean
+        """
if key in self.ssh.check_call(
"{reclass_tools} get-key {key} /srv/salt/reclass/classes".
format(reclass_tools=self.reclass_tools_cmd,
@@ -56,15 +61,20 @@
May look like cluster/*/cicd/control/leader.yml
:return: None
"""
- self.check_existence(key)
- self.ssh.check_call(
- "{reclass_tools} add-key {key} {value} \
+
+        # Swap double quotes for single ones so the value itself can be
+        # wrapped in double quotes on the command line
+        value = str(value).replace('"', "'")
+        # Escape the $ symbol so the shell does not expand it
+        value = value.replace("$", r"\$")
+
+        # The value may contain spaces, so it must be quoted
+ cmd = "{reclass_tools} add-key {key} \"{value}\" \
/srv/salt/reclass/classes/{path}".format(
reclass_tools=self.reclass_tools_cmd,
key=key,
value=value,
- path=short_path
- ))
+ path=short_path)
+        LOG.info("Adding key to reclass:\n{cmd}".format(cmd=cmd))
+ self.ssh.check_call(cmd)
def get_key(self, key, file_name):
"""Find a key in a YAML
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
index fa61884..c37d319 100644
--- a/tcp_tests/settings_oslo.py
+++ b/tcp_tests/settings_oslo.py
@@ -119,6 +119,7 @@
help="Node roles managed by underlay in the environment",
default=[ext.UNDERLAY_NODE_ROLES.salt_master,
ext.UNDERLAY_NODE_ROLES.salt_minion,
+ ext.UNDERLAY_NODE_ROLES.none,
ext.UNDERLAY_NODE_ROLES.k8s_controller]),
ct.Cfg('bootstrap_timeout', ct.Integer(),
help="Timeout of waiting SSH for nodes with specified roles",
@@ -174,6 +175,7 @@
help="Node roles to install salt-minions and manage by salt",
default=[ext.UNDERLAY_NODE_ROLES.salt_master,
ext.UNDERLAY_NODE_ROLES.salt_minion,
+ ext.UNDERLAY_NODE_ROLES.none,
ext.UNDERLAY_NODE_ROLES.k8s_controller]),
]
salt_opts = [
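With the 'none' role in both defaults, a spare node is bootstrapped over SSH
and receives a salt-minion without carrying any product role. An illustrative
underlay entry for such a node (the name is an assumption based on the test
below):

    xtra_node = {
        'node_name': 'xtra.mcp-domain.local',
        'roles': ['none'],
    }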
diff --git a/tcp_tests/templates/heat-cicd-queens-dvr-sl/salt.yaml b/tcp_tests/templates/heat-cicd-queens-dvr-sl/salt.yaml
index 3b77cfd..53bd91c 100644
--- a/tcp_tests/templates/heat-cicd-queens-dvr-sl/salt.yaml
+++ b/tcp_tests/templates/heat-cicd-queens-dvr-sl/salt.yaml
@@ -16,6 +16,7 @@
{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
{{SHARED.MACRO_IPFLUSH_TENANTS_IFACES()}}
+{{SHARED.DISABLE_EMPTY_NODE()}}
{{SHARED_TEST_TOOLS.MACRO_INSTALL_RECLASS_TOOLS()}}
{{ SHARED_WORKAROUNDS.MACRO_CEPH_SET_PGNUM() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/shared-salt.yaml b/tcp_tests/templates/shared-salt.yaml
index 259cbf9..247e7fd 100644
--- a/tcp_tests/templates/shared-salt.yaml
+++ b/tcp_tests/templates/shared-salt.yaml
@@ -828,9 +828,36 @@
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: true
+
{%- endmacro %}
+
+{%- macro DISABLE_EMPTY_NODE() %}
+{#######################################}
+{%- for ssh in config.underlay.ssh %}
+  {%- if "none" in ssh['roles'] %}
+- description: Move the non-product node {{ ssh['node_name'] }} out of the Salt cluster
+ cmd: |
+ set -ex;
+    # To return the node later, run: salt-key -a {{ ssh['node_name'] }} --include-all -y
+ salt-key -r {{ ssh['node_name'] }} --include-all -y
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: { count: 1, delay: 5 }
+ skip_fail: true
+ {%- endif %}
+{%- endfor %}
+{%- endmacro %}
+
{%- macro MACRO_INSTALL_FORMULAS(FORMULA_SERVICES='') %}
{#######################################################}
- description: Install salt formulas
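The macro's selection logic, as a minimal Python sketch (node names are
illustrative):

    underlay_ssh = [
        {'node_name': 'cfg01.mcp-domain.local', 'roles': ['salt_master']},
        {'node_name': 'xtra.mcp-domain.local', 'roles': ['none']},
    ]
    for ssh in underlay_ssh:
        if 'none' in ssh['roles']:
            # The generated step rejects the node's key on the master;
            # salt-key -a <node> --include-all -y accepts it back later.
            print('salt-key -r {} --include-all -y'.format(ssh['node_name']))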
diff --git a/tcp_tests/tests/system/test_ceph_operations.py b/tcp_tests/tests/system/test_ceph_operations.py
new file mode 100644
index 0000000..cfff2b5
--- /dev/null
+++ b/tcp_tests/tests/system/test_ceph_operations.py
@@ -0,0 +1,165 @@
+import pytest
+
+from tcp_tests import logger
+
+LOG = logger.logger
+
+
+@pytest.fixture(scope='module')
+def add_xtra_node_to_salt(salt_actions, underlay_actions, config):
+    """Accept the 'xtra' node on the Salt Master so it can be managed.
+
+    :return: None
+    """
+ LOG.info("Executing pytest SETUP from add_xtra_node_to_salt fixture")
+ xtra_node = [node['node_name'] for node in config.underlay.ssh
+ if 'xtra' in node.get('node_name')][0]
+
+ cfg_node = [node['node_name'] for node in config.underlay.ssh
+ if 'salt_master' in node.get('roles')][0]
+
+ underlay_actions.check_call(
+ "salt-key -a {node} --include-all -y".format(node=xtra_node),
+ node_name=cfg_node,
+ raise_on_err=False)
+    # Restart the salt-minion service after its key is accepted on the Salt Master
+ underlay_actions.check_call(
+ "systemctl restart salt-minion",
+ node_name=xtra_node,
+ raise_on_err=False)
+ yield
+
+    # LOG.info("Executing pytest TEARDOWN from add_xtra_node_to_salt fixture")
+    # underlay_actions.check_call(
+    #     "salt-key -r {node} --include-all -y".format(node=xtra_node),
+    #     node_name=cfg_node,
+    #     raise_on_err=False)
+
+
+@pytest.mark.usefixtures("add_xtra_node_to_salt")
+class TestCephOsd(object):
+
+ @pytest.fixture
+ def describe_node_in_reclass(self,
+ reclass_actions, salt_actions):
+ LOG.info("Executing pytest SETUP "
+ "from describe_node_in_reclass fixture")
+ reclass = reclass_actions
+ # ---- cluster/*/ceph/init.yml ---------------
+ path = "cluster/*/ceph/init.yml"
+ reclass.add_key("parameters._param.ceph_osd_node04_hostname",
+ "xtra",
+ path)
+ reclass.add_key("parameters._param.ceph_osd_node04_address",
+ "10.6.0.205",
+ path)
+ reclass.add_key("parameters._param.ceph_osd_system_codename",
+ "xenial",
+ path)
+ reclass.add_key("parameters.linux.network.host.xtra.address",
+ "${_param:ceph_osd_node04_address}",
+ path)
+ reclass.add_key(
+ key="parameters.linux.network.host.xtra.names",
+ value="['${_param:ceph_osd_node04_hostname}', "
+ "'${_param:ceph_osd_node04_hostname}.${_param:cluster_domain}']",
+ short_path=path)
+
+        # ------- cluster/*/infra/config/init.yml -----------
+ path = "cluster/*/infra/config/init.yml"
+ parameter = "parameters.reclass.storage.node.ceph_osd_node04"
+ reclass.add_key(parameter + ".name",
+ "${_param:ceph_osd_node04_hostname}",
+ path)
+ reclass.add_key(parameter + ".domain",
+ "${_param:cluster_domain}",
+ path)
+ reclass.add_key(parameter + ".classes",
+ "['cluster.${_param:cluster_name}.ceph.osd']",
+ path)
+ reclass.add_key(parameter + ".params.salt_master_host",
+ "${_param:reclass_config_master}",
+ path)
+ reclass.add_key(parameter + ".params.linux_system_codename",
+ "${_param:ceph_osd_system_codename}",
+ path)
+ reclass.add_key(parameter + ".params.single_address",
+ "${_param:ceph_osd_node04_address}",
+ path)
+ reclass.add_key(parameter + ".params.ceph_crush_parent",
+ "rack02",
+ path)
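+        # For reference, the keys above render roughly to this reclass
+        # YAML fragment (illustrative):
+        #   parameters:
+        #     reclass:
+        #       storage:
+        #         node:
+        #           ceph_osd_node04:
+        #             name: ${_param:ceph_osd_node04_hostname}
+        #             classes:
+        #             - cluster.${_param:cluster_name}.ceph.osd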
+
+ def test_add_node_process(self, describe_node_in_reclass,
+ drivetrain_actions):
+        """Add a new OSD node to the Ceph cluster through the
+        ceph-add-osd-upmap Jenkins pipeline (takes about 20 minutes).
+
+        https://docs.mirantis.com/mcp/q4-18/mcp-operations-guide/openstack-operations/ceph-operations/manage-nodes/add-osd-nodes.html
+
+        :param describe_node_in_reclass: fixture describing the node in reclass
+        :param drivetrain_actions: fixture wrapping Jenkins jobs
+        :return: None
+        """
+ dt = drivetrain_actions
+
+ job_name = "ceph-add-osd-upmap"
+ job_parameters = {
+ 'HOST': 'xtra*',
+ 'HOST_TYPE': 'osd'
+ }
+ add_node_pipeline = dt.start_job_on_jenkins(
+ job_name=job_name,
+ job_parameters=job_parameters,
+ verbose=True)
+ assert add_node_pipeline == 'SUCCESS'
+
+    def test_added_node(self):
+        """Verify the new node appears in the CRUSH tree.
+
+        Expected 'ceph osd tree in' output after the node is added:
+        """
+ # root@osd001:~# ceph osd tree in
+ # ID CLASS WEIGHT TYPE NAME STATUS REWEIGHT PRI-AFF
+ # -1 0.18585 root default
+ # -3 0.04646 host osd001
+ # 0 hdd 0.01549 osd.0 up 1.00000 1.00000
+ # 1 hdd 0.01549 osd.1 up 1.00000 1.00000
+ # 2 hdd 0.01549 osd.2 up 1.00000 1.00000
+ # -5 0.04646 host osd002
+ # 3 hdd 0.01549 osd.3 up 1.00000 1.00000
+ # 5 hdd 0.01549 osd.5 up 1.00000 1.00000
+ # 6 hdd 0.01549 osd.6 up 1.00000 1.00000
+ # -7 0.04646 host osd003
+ # 4 hdd 0.01549 osd.4 up 1.00000 1.00000
+ # 7 hdd 0.01549 osd.7 up 1.00000 1.00000
+ # 8 hdd 0.01549 osd.8 up 1.00000 1.00000
+ # -9 0.04646 host xtra
+ # 9 hdd 0.01549 osd.9 up 1.00000 1.00000
+ # 10 hdd 0.01549 osd.10 up 1.00000 1.00000
+ # 11 hdd 0.01549 osd.11 up 1.00000 1.00000
+ pass
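+        # A possible implementation sketch (underlay_actions and the mon
+        # node target are assumptions, not part of this change):
+        #   out = underlay_actions.check_call("ceph osd tree",
+        #                                     node_name=cmn_node)
+        #   assert "xtra" in out['stdout_str']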
+
+    def test_delete_node_process(self, drivetrain_actions):
+        """Remove the OSD node through the ceph-remove-node Jenkins pipeline."""
+ dt = drivetrain_actions
+
+ job_name = "ceph-remove-node"
+ job_parameters = {
+ 'HOST': 'xtra*',
+ 'HOST_TYPE': 'osd'
+ }
+ remove_node_pipeline = dt.start_job_on_jenkins(
+ job_name=job_name,
+ job_parameters=job_parameters,
+ verbose=True)
+ assert remove_node_pipeline == 'SUCCESS'
+
+
+class TestCephMon(object):
+ def test_add_node(self):
+ pass
+
+ def test_delete_node(self):
+ pass
+
+
+class TestCephMgr(object):
+ def test_add_node(self):
+ pass
+
+ def test_delete_node(self):
+ pass