import pytest

from tcp_tests import logger

LOG = logger.logger

add_osd_ceph_init_yml = """
parameters:
    _param:
        ceph_osd_node04_hostname: xtra
        ceph_osd_node04_address: 10.6.0.205
        # ceph_mon_node04_ceph_public_address: 10.166.49.205  # unused here
        ceph_osd_system_codename: xenial
    linux:
        network:
            host:
                xtra:
                    address: ${_param:ceph_osd_node04_address}
                    names:
                    - ${_param:ceph_osd_node04_hostname}
                    - ${_param:ceph_osd_node04_hostname}.${_param:cluster_domain}
"""

add_osd_config_init_yml = """
parameters:
    reclass:
        storage:
            node:
                ceph_osd_node04:
                    name: ${_param:ceph_osd_node04_hostname}
                    domain: ${_param:cluster_domain}
                    classes:
                    - cluster.${_param:cluster_name}.ceph.osd
                    params:
                        salt_master_host: ${_param:reclass_config_master}
                        linux_system_codename: ${_param:ceph_osd_system_codename}
                        single_address: ${_param:ceph_osd_node04_address}
                        ceph_crush_parent: rack02
"""


@pytest.fixture(scope='module')
def add_xtra_node_to_salt(salt_actions, underlay_actions, config):
    """Register the 'xtra' node in SaltStack.

    Accept the node's key on the Salt Master, then restart the node's
    salt-minion service so it reconnects with the accepted key.

    :return: None
    """
    LOG.info("Executing pytest SETUP from add_xtra_node_to_salt fixture")
    xtra_node = [node['node_name'] for node in config.underlay.ssh
                 if 'xtra' in node.get('node_name')][0]

    cfg_node = [node['node_name'] for node in config.underlay.ssh
                if 'salt_master' in node.get('roles')][0]

    underlay_actions.check_call(
        "salt-key -a {node} --include-all -y".format(node=xtra_node),
        node_name=cfg_node,
        raise_on_err=False)
    # The salt-minion service must be restarted after its key is accepted
    # on the Salt Master so that the minion re-authenticates.
    underlay_actions.check_call(
        "systemctl restart salt-minion",
        node_name=xtra_node,
        raise_on_err=False)
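
    # A follow-up liveness check could confirm that the minion answers
    # after the restart; a sketch reusing the check_call helper above
    # (this call is an addition, not part of the original flow):
    #
    #     underlay_actions.check_call(
    #         "salt 'xtra*' test.ping",
    #         node_name=cfg_node,
    #         raise_on_err=False)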
    yield

    # TEARDOWN (currently disabled): remove the node's key from the
    # Salt Master once the module's tests finish.
    # LOG.info("Executing pytest TEARDOWN from add_xtra_node_to_salt fixture")
    # underlay_actions.check_call(
    #     "salt-key -d {node} --include-all -y".format(node=xtra_node),
    #     node_name=cfg_node,
    #     raise_on_err=False)


@pytest.mark.usefixtures("add_xtra_node_to_salt")
class TestCephOsd(object):

    @pytest.fixture
    def describe_node_in_reclass(self,
                                 reclass_actions,
                                 salt_actions):
        """Merge the new node's definition into the reclass cluster model."""
        LOG.info("Executing pytest SETUP "
                 "from describe_node_in_reclass fixture")
        reclass = reclass_actions
        # ---- cluster/*/ceph/init.yml ---------------
        reclass.merge_context(yaml_context=add_osd_ceph_init_yml,
                              short_path="cluster/*/ceph/init.yml")

        # ---- cluster/*/infra/config/init.yml -------
        reclass.merge_context(yaml_context=add_osd_config_init_yml,
                              short_path="cluster/*/infra/config/init.yml")
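
        # After the model changes, minions typically need fresh pillar data
        # before the Jenkins job runs; a sketch, assuming salt_actions
        # offers a run_state-style helper (the helper name is an assumption,
        # not a confirmed tcp_tests API):
        #
        #     salt_actions.run_state("*", "saltutil.refresh_pillar")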

    def test_add_node_process(self, describe_node_in_reclass,
                              drivetrain_actions):
        """Add an OSD node via the ceph-add-node Jenkins job.

        https://docs.mirantis.com/mcp/q4-18/mcp-operations-guide/openstack-operations/ceph-operations/manage-nodes/add-osd-nodes.html

        :param describe_node_in_reclass: fixture that merges the new node
            into the reclass model
        :param drivetrain_actions: fixture that controls Jenkins jobs
        :return: None

        The test takes about 20 minutes.
        """
        dt = drivetrain_actions
        # Workaround for PROD-36132
        job_name = "ceph-add-node"
        job_parameters = {
            'HOST': 'xtra*',
            'OSD_ONLY': False
        }
        job_result, job_description = dt.start_job_on_jenkins(
            job_name=job_name,
            job_parameters=job_parameters,
            verbose=True)
        assert job_result == 'SUCCESS', job_description

    def test_added_node(self):
        """Placeholder: the 'xtra' host should appear in the CRUSH tree.

        Expected `ceph osd tree` output after the node is added:
        """
        # root@osd001:~# ceph osd tree in
        # ID CLASS WEIGHT  TYPE NAME       STATUS REWEIGHT PRI-AFF
        # -1       0.18585 root default
        # -3       0.04646     host osd001
        #  0   hdd 0.01549         osd.0       up  1.00000 1.00000
        #  1   hdd 0.01549         osd.1       up  1.00000 1.00000
        #  2   hdd 0.01549         osd.2       up  1.00000 1.00000
        # -5       0.04646     host osd002
        #  3   hdd 0.01549         osd.3       up  1.00000 1.00000
        #  5   hdd 0.01549         osd.5       up  1.00000 1.00000
        #  6   hdd 0.01549         osd.6       up  1.00000 1.00000
        # -7       0.04646     host osd003
        #  4   hdd 0.01549         osd.4       up  1.00000 1.00000
        #  7   hdd 0.01549         osd.7       up  1.00000 1.00000
        #  8   hdd 0.01549         osd.8       up  1.00000 1.00000
        # -9       0.04646     host xtra
        #  9   hdd 0.01549         osd.9       up  1.00000 1.00000
        # 10   hdd 0.01549         osd.10      up  1.00000 1.00000
        # 11   hdd 0.01549         osd.11      up  1.00000 1.00000
        pass
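
        # A minimal sketch of the check, reusing the underlay check_call
        # helper; cmn_node (a mon node name) and the 'stdout' result field
        # are assumptions, not confirmed tcp_tests details:
        #
        #     tree = underlay_actions.check_call(
        #         "ceph osd tree", node_name=cmn_node)['stdout']
        #     assert any('xtra' in line for line in tree), \
        #         "host xtra is missing from the CRUSH tree"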

    def test_delete_node_process(self, drivetrain_actions):
        """Remove the OSD node via the ceph-remove-node Jenkins job."""
        dt = drivetrain_actions

        job_name = "ceph-remove-node"
        job_parameters = {
            'HOST': 'xtra*',
            'OSD': '*'
        }
        job_result, job_description = dt.start_job_on_jenkins(
            job_name=job_name,
            job_parameters=job_parameters,
            verbose=True)
        assert job_result == 'SUCCESS', job_description
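
        # Mirroring test_added_node in reverse, a follow-up check could
        # assert that 'xtra' is gone from the CRUSH tree after the job
        # succeeds (cmn_node and the 'stdout' field are assumptions):
        #
        #     tree = underlay_actions.check_call(
        #         "ceph osd tree", node_name=cmn_node)['stdout']
        #     assert all('xtra' not in line for line in tree)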


class TestCephMon(object):
    """Placeholder for Ceph MON node add/remove scenarios."""

    def test_add_node(self):
        pass

    def test_delete_node(self):
        pass


class TestCephMgr(object):
    """Placeholder for Ceph MGR node add/remove scenarios."""

    def test_add_node(self):
        pass

    def test_delete_node(self):
        pass