Add test for Ceph Mon node
PROD-35900
Change-Id: I7522feb0ffe49b43996b315fac6cfe9d593dbda4
diff --git a/tcp_tests/tests/system/test_ceph_operations.py b/tcp_tests/tests/system/test_ceph_operations.py
index bc5dc0c..02a6888 100644
--- a/tcp_tests/tests/system/test_ceph_operations.py
+++ b/tcp_tests/tests/system/test_ceph_operations.py
@@ -4,12 +4,39 @@
LOG = logger.logger
+xtra_network_interface = """
+parameters:
+ _param:
+ linux_network_interfaces:
+ br_ctl:
+ address: ${_param:single_address}
+ enabled: True
+ name_servers:
+ - ${_param:dns_server01}
+ - ${_param:dns_server02}
+ netmask: ${_param:control_network_netmask}
+ proto: static
+ require_interfaces: ['ens4']
+ type: bridge
+ use_interfaces: ['ens4']
+ ens3:
+ enabled: True
+ name: ens3
+ proto: dhcp
+ type: eth
+ ens4:
+ enabled: True
+ ipflush_onchange: True
+ name: ens4
+ proto: manual
+ type: eth
+"""
+
add_osd_ceph_init_yml = """
parameters:
_param:
ceph_osd_node04_hostname: xtra
ceph_osd_node04_address: 10.6.0.205
- ceph_mon_node04_ceph_public_address: #10.166.49.205
ceph_osd_system_codename: xenial
linux:
network:
@@ -40,7 +67,8 @@
@pytest.fixture(scope='module')
-def add_xtra_node_to_salt(salt_actions, underlay_actions, config):
+def add_xtra_node_to_salt(salt_actions, underlay_actions,
+ config, reclass_actions):
"""
:return:
@@ -61,6 +89,11 @@
"systemctl restart salt-minion",
node_name=xtra_node,
raise_on_err=False)
+ salt_actions.enforce_state("I@salt:master", "reclass")
+
+ reclass_actions.merge_context(yaml_context=xtra_network_interface,
+ short_path="../nodes/_generated/xtra.*.yml")
+
yield
# LOG.info("Executing pytest TEARDOWN from add_xtra_node_to_salt fixture")
@@ -87,6 +120,21 @@
# ------- cluster/infra/config/init.yml -----------
reclass.merge_context(yaml_context=add_osd_config_init_yml,
short_path="cluster/*/infra/config/init.yml")
+ salt_actions.run_state("*", "saltutil.refresh_pillar")
+
+ @pytest.fixture
+ def remove_node_from_reclass(self,
+ reclass_actions):
+ reclass = reclass_actions
+
+ reclass.delete_key(
+ key="parameters.reclass.storage.node.ceph_osd_node04",
+ short_path="cluster/*/infra/config/init.yml"
+ )
+ reclass.delete_key(
+ key="parameters.linux.network.host.xtra",
+ short_path="cluster/*/ceph/init.yml"
+ )
def test_add_node_process(self, describe_node_in_reclass,
drivetrain_actions):
@@ -132,7 +180,9 @@
# 11 hdd 0.01549 osd.11 up 1.00000 1.00000
pass
- def test_delete_node_process(self, drivetrain_actions):
+ def test_delete_node_process(self,
+ remove_node_from_reclass,
+ drivetrain_actions):
dt = drivetrain_actions
job_name = "ceph-remove-node"
@@ -147,12 +197,147 @@
assert job_result == 'SUCCESS', job_description
-class TestCephMon(object):
- def test_add_node(self):
- pass
+add_mon_ceph_init_yml = """
+parameters:
+ _param:
+ ceph_mon_node04_hostname: xtra
+ ceph_mon_node04_address: 10.6.0.205
+ ceph_mon_node04_ceph_public_address: 10.166.49.209
+ ceph_mon_node04_ceph_backup_hour: 4
+ ceph_mon_node04_ceph_backup_minute: 0
+ linux:
+ network:
+ host:
+ xtra:
+ address: ${_param:ceph_mon_node04_address}
+ names:
+ - ${_param:ceph_mon_node04_hostname}
+ - ${_param:ceph_mon_node04_hostname}.${_param:cluster_domain}
+"""
- def test_delete_node(self):
- pass
+add_mon_ceph_common_yml = """
+parameters:
+ ceph:
+ common:
+ members:
+ - name: ${_param:ceph_mon_node04_hostname}
+ host: ${_param:ceph_mon_node04_address}
+"""
+
+add_mon_config_node_yml = """
+parameters:
+ reclass:
+ storage:
+ node:
+ ceph_mon_node04:
+ name: ${_param:ceph_mon_node04_hostname}
+ domain: ${_param:cluster_domain}
+ classes:
+ - cluster.${_param:cluster_name}.ceph.mon
+ params:
+ ceph_public_address: ${_param:ceph_mon_node04_ceph_public_address}
+ ceph_backup_time_hour: ${_param:ceph_mon_node04_ceph_backup_hour}
+ ceph_backup_time_minute: ${_param:ceph_mon_node04_ceph_backup_minute}
+ salt_master_host: ${_param:reclass_config_master}
+ linux_system_codename: ${_param:ceph_mon_system_codename}
+ single_address: ${_param:ceph_mon_node04_address}
+ keepalived_vip_priority: 104
+""" # noqa: E501
+
+add_mon_infra_kvm_yml = """
+parameters:
+ salt:
+ control:
+ size:
+ ceph.mon:
+ cpu: 8
+ ram: 16384
+ disk_profile: small
+ net_profile: default
+ cluster:
+ internal:
+ node:
+ cmn04:
+ name: ${_param:ceph_mon_node04_hostname}
+ provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+ image: ${_param:salt_control_xenial_image}
+ size: ceph.mon
+""" # noqa: E501
+
+
+@pytest.mark.usefixtures("add_xtra_node_to_salt")
+class TestCephMon(object):
+ @pytest.fixture
+ def describe_node_in_reclass(self,
+ reclass_actions, salt_actions):
+ LOG.info("Executing pytest SETUP "
+ "from describe_node_in_reclass fixture")
+ reclass = reclass_actions
+ # ---- cluster/*/ceph/init.yml --------------
+ reclass.merge_context(yaml_context=add_mon_ceph_init_yml,
+ short_path="cluster/*/ceph/init.yml")
+
+ # ------- cluster/*/ceph/common.yml and cluster/*/infra/config/nodes.yml -----------
+ reclass.merge_context(yaml_context=add_mon_ceph_common_yml,
+ short_path="cluster/*/ceph/common.yml")
+ reclass.merge_context(yaml_context=add_mon_config_node_yml,
+ short_path="cluster/*/infra/config/nodes.yml")
+
+ # ------- define settings for new mon node in KVM cluster -----------
+ reclass.merge_context(yaml_context=add_mon_infra_kvm_yml,
+ short_path="cluster/*/infra/kvm.yml")
+
+ salt_actions.run_state("*", "saltutil.refresh_pillar")
+
+ @pytest.fixture
+ def remove_node_from_reclass(self,
+ reclass_actions, salt_actions):
+ LOG.info("Executing pytest SETUP "
+ "from remove_node_from_reclass fixture")
+ reclass = reclass_actions
+ reclass.delete_key(
+ key="parameters.reclass.storage.node.ceph_mon_node04",
+ short_path="cluster/*/infra/config/init.yml")
+ reclass.delete_key(
+ key="parameters.salt.control.cluster.internal.node.cmn04",
+ short_path="cluster/*/infra/kvm.yml"
+ )
+ reclass.delete_key(
+ key="parameters.linux.network.host.xtra",
+ short_path="cluster/*/ceph/init.yml"
+ )
+
+ def test_add_node_process(self,
+ drivetrain_actions,
+ describe_node_in_reclass):
+ dt = drivetrain_actions
+
+ job_name = "ceph-add-node"
+ job_parameters = {
+ 'HOST': 'xtra*',
+ 'USE_UPMAP': True
+ }
+ job_result, job_description = dt.start_job_on_jenkins(
+ job_name=job_name,
+ job_parameters=job_parameters,
+ verbose=True)
+ assert job_result == 'SUCCESS', job_description
+
+ def test_delete_node_process(self,
+ remove_node_from_reclass,
+ drivetrain_actions):
+ dt = drivetrain_actions
+
+ job_name = "ceph-remove-node"
+ job_parameters = {
+ 'HOST': 'xtra*',
+ 'USE_UPMAP': True
+ }
+ job_result, job_description = dt.start_job_on_jenkins(
+ job_name=job_name,
+ job_parameters=job_parameters,
+ verbose=True)
+ assert job_result == 'SUCCESS', job_description
class TestCephMgr(object):