import pytest

from tcp_tests import logger

LOG = logger.logger

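# Extra network configuration merged into the generated Reclass definition
# of the new 'xtra' node: br_ctl bridge on top of ens4, DHCP on ens3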
xtra_network_interface = """
parameters:
  _param:
    linux_network_interfaces:
      br_ctl:
        address: ${_param:single_address}
        enabled: True
        name_servers:
        - ${_param:dns_server01}
        - ${_param:dns_server02}
        netmask: ${_param:control_network_netmask}
        proto: static
        require_interfaces: ['ens4']
        type: bridge
        use_interfaces: ['ens4']
      ens3:
        enabled: True
        name: ens3
        proto: dhcp
        type: eth
      ens4:
        enabled: True
        ipflush_onchange: True
        name: ens4
        proto: manual
        type: eth
"""

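# Reclass contexts that describe the new OSD node: a host entry for
# cluster/*/ceph/init.yml and a storage node definition for the Salt Master
# node classifier in cluster/*/infra/config/init.yml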
add_osd_ceph_init_yml = """
parameters:
  _param:
    ceph_osd_node04_hostname: xtra
    ceph_osd_node04_address: 10.6.0.205
    ceph_osd_system_codename: xenial
  linux:
    network:
      host:
        xtra:
          address: ${_param:ceph_osd_node04_address}
          names:
          - ${_param:ceph_osd_node04_hostname}
          - ${_param:ceph_osd_node04_hostname}.${_param:cluster_domain}
"""

add_osd_config_init_yml = """
parameters:
  reclass:
    storage:
      node:
        ceph_osd_node04:
          name: ${_param:ceph_osd_node04_hostname}
          domain: ${_param:cluster_domain}
          classes:
          - cluster.${_param:cluster_name}.ceph.osd
          params:
            salt_master_host: ${_param:reclass_config_master}
            linux_system_codename: ${_param:ceph_osd_system_codename}
            single_address: ${_param:ceph_osd_node04_address}
            ceph_crush_parent: rack02
"""


@pytest.fixture(scope='session')
def add_xtra_node_to_salt(salt_actions, underlay_actions,
                          config, reclass_actions):
| """ |
| |
| :return: |
| """ |
    LOG.info("Executing pytest SETUP from add_xtra_node_to_salt fixture")
    xtra_node = [node['node_name'] for node in config.underlay.ssh
                 if 'xtra' in node.get('node_name')][0]

    cfg_node = [node['node_name'] for node in config.underlay.ssh
                if 'salt_master' in node.get('roles')][0]

    salt_actions.enforce_state("*", "reclass")
    reclass_actions.add_class(
        "environment.heat-cicd-queens-dvr-sl.linux_network_interface",
        short_path="../nodes/_generated/xtra.*.yml")
    reclass_actions.add_class("environment.heat-cicd-queens-dvr-sl.overrides",
                              short_path="../nodes/_generated/xtra.*.yml")
    reclass_actions.merge_context(yaml_context=xtra_network_interface,
                                  short_path="../nodes/_generated/xtra.*.yml")

    underlay_actions.check_call(
        "salt-key -a {node} --include-all -y".format(node=xtra_node),
        node_name=cfg_node,
        raise_on_err=False)
    # Restart salt-minion after its key has been accepted on the Salt Master
    underlay_actions.check_call(
        "systemctl restart salt-minion",
        node_name=xtra_node,
        raise_on_err=False)
    salt_actions.enforce_state("I@salt:master", "reclass")
    salt_actions.enforce_state("xtra*", "linux")
    salt_actions.enforce_state("xtra*", "openssh")

    yield

    # LOG.info("Executing pytest TEARDOWN from add_xtra_node_to_salt fixture")
    # underlay_actions.check_call(
    #     "salt-key -r {node} --include-all -y".format(node=xtra_node),
    #     node_name=cfg_node,
    #     raise_on_err=False)


@pytest.fixture(scope='session')
def wa_prod36167(reclass_actions):
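    """Workaround for PROD-36167: remove the system.salt.control.virt
    class from cluster/*/infra/kvm.yml."""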
    reclass_actions.delete_class("system.salt.control.virt",
                                 "cluster/*/infra/kvm.yml")


@pytest.mark.usefixtures("add_xtra_node_to_salt",
                         "wa_prod36167")
class TestCephOsd(object):

    @pytest.fixture
    def describe_node_in_reclass(self,
                                 reclass_actions,
                                 salt_actions):
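        """Describe the new OSD node in the cluster Reclass model."""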
        LOG.info("Executing pytest SETUP "
                 "from describe_node_in_reclass fixture")
        reclass = reclass_actions
        # ---- cluster/*/ceph/init.yml ----
        reclass.merge_context(yaml_context=add_osd_ceph_init_yml,
                              short_path="cluster/*/ceph/init.yml")

        # ---- cluster/*/infra/config/init.yml ----
        reclass.merge_context(yaml_context=add_osd_config_init_yml,
                              short_path="cluster/*/infra/config/init.yml")
        salt_actions.run_state("*", "saltutil.refresh_pillar")

    @pytest.fixture
    def remove_node_from_reclass(self,
                                 reclass_actions):
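        """Remove the OSD node definition from the cluster Reclass model."""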
        reclass = reclass_actions

        reclass.delete_key(
            key="parameters.reclass.storage.node.ceph_osd_node04",
            short_path="cluster/*/infra/config/init.yml"
        )
        reclass.delete_key(
            key="parameters.linux.network.host.xtra",
            short_path="cluster/*/ceph/init.yml"
        )

    def test_add_node_process(self, describe_node_in_reclass,
                              drivetrain_actions):
| """ |
| https://docs.mirantis.com/mcp/q4-18/mcp-operations-guide/openstack-operations/ceph-operations/manage-nodes/add-osd-nodes.html |
| :param describe_node_in_reclass: |
| :param drivetrain_actions: |
| :return: |
| test took about 20 min |
| """ |
        dt = drivetrain_actions
        # Workaround for PROD-36132
        job_name = "ceph-add-node"
        job_parameters = {
            'HOST': 'xtra*',
            'OSD_ONLY': False
        }
        job_result, job_description = dt.start_job_on_jenkins(
            job_name=job_name,
            job_parameters=job_parameters,
            verbose=True)
        assert job_result == 'SUCCESS', job_description

    def test_added_node(self):
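        """Placeholder: keeps an example 'ceph osd tree' output for the
        cluster after the 'xtra' OSD node has been added."""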
        # root@osd001:~# ceph osd tree in
        # ID CLASS WEIGHT  TYPE NAME       STATUS REWEIGHT PRI-AFF
        # -1       0.18585 root default
        # -3       0.04646     host osd001
        #  0   hdd 0.01549         osd.0       up  1.00000 1.00000
        #  1   hdd 0.01549         osd.1       up  1.00000 1.00000
        #  2   hdd 0.01549         osd.2       up  1.00000 1.00000
        # -5       0.04646     host osd002
        #  3   hdd 0.01549         osd.3       up  1.00000 1.00000
        #  5   hdd 0.01549         osd.5       up  1.00000 1.00000
        #  6   hdd 0.01549         osd.6       up  1.00000 1.00000
        # -7       0.04646     host osd003
        #  4   hdd 0.01549         osd.4       up  1.00000 1.00000
        #  7   hdd 0.01549         osd.7       up  1.00000 1.00000
        #  8   hdd 0.01549         osd.8       up  1.00000 1.00000
        # -9       0.04646     host xtra
        #  9   hdd 0.01549         osd.9       up  1.00000 1.00000
        # 10   hdd 0.01549         osd.10      up  1.00000 1.00000
        # 11   hdd 0.01549         osd.11      up  1.00000 1.00000
        pass

    def test_delete_node_process(self,
                                 remove_node_from_reclass,
                                 drivetrain_actions):
        dt = drivetrain_actions

        job_name = "ceph-remove-node"
        job_parameters = {
            'HOST': 'xtra*',
            'OSD': '*'
        }
        job_result, job_description = dt.start_job_on_jenkins(
            job_name=job_name,
            job_parameters=job_parameters,
            verbose=True)
        assert job_result == 'SUCCESS', job_description


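# Reclass contexts that describe the new MON node: host entry, ceph.common
# members list, storage node definition and a VM definition for the KVM
# cluster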
add_mon_ceph_init_yml = """
parameters:
  _param:
    ceph_mon_node04_hostname: xtra
    ceph_mon_node04_address: 10.6.0.205
    ceph_mon_node04_ceph_public_address: 10.166.49.209
    ceph_mon_node04_ceph_backup_hour: 4
    ceph_mon_node04_ceph_backup_minute: 0
  linux:
    network:
      host:
        xtra:
          address: ${_param:ceph_mon_node04_address}
          names:
          - ${_param:ceph_mon_node04_hostname}
          - ${_param:ceph_mon_node04_hostname}.${_param:cluster_domain}
"""

add_mon_ceph_common_yml = """
parameters:
  ceph:
    common:
      members:
      - name: ${_param:ceph_mon_node04_hostname}
        host: ${_param:ceph_mon_node04_address}
"""

add_mon_config_node_yml = """
parameters:
  reclass:
    storage:
      node:
        ceph_mon_node04:
          name: ${_param:ceph_mon_node04_hostname}
          domain: ${_param:cluster_domain}
          classes:
          - cluster.${_param:cluster_name}.ceph.mon
          params:
            ceph_public_address: ${_param:ceph_mon_node04_ceph_public_address}
            ceph_backup_time_hour: ${_param:ceph_mon_node04_ceph_backup_hour}
            ceph_backup_time_minute: ${_param:ceph_mon_node04_ceph_backup_minute}
            salt_master_host: ${_param:reclass_config_master}
            linux_system_codename: ${_param:ceph_mon_system_codename}
            single_address: ${_param:ceph_mon_node04_address}
            keepalived_vip_priority: 104
"""  # noqa: E501

add_mon_infra_kvm_yml = """
parameters:
  salt:
    control:
      size:
        ceph.mon:
          cpu: 8
          ram: 16384
          disk_profile: small
          net_profile: default
      cluster:
        internal:
          node:
            cmn04:
              name: ${_param:ceph_mon_node04_hostname}
              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
              image: ${_param:salt_control_xenial_image}
              size: ceph.mon
"""  # noqa: E501


@pytest.mark.usefixtures("add_xtra_node_to_salt",
                         "wa_prod36167")
class TestCephMon(object):
    @pytest.fixture
    def describe_node_in_reclass(self,
                                 reclass_actions, salt_actions):
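        """Describe the new MON node in the cluster Reclass model."""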
        LOG.info("Executing pytest SETUP "
                 "from describe_node_in_reclass fixture")
        reclass = reclass_actions
        # ---- cluster/*/ceph/init.yml ----
        reclass.merge_context(yaml_context=add_mon_ceph_init_yml,
                              short_path="cluster/*/ceph/init.yml")

        # ---- cluster/*/ceph/common.yml ----
        reclass.merge_context(yaml_context=add_mon_ceph_common_yml,
                              short_path="cluster/*/ceph/common.yml")
        # ---- cluster/*/infra/config/nodes.yml ----
        reclass.merge_context(yaml_context=add_mon_config_node_yml,
                              short_path="cluster/*/infra/config/nodes.yml")

        # ---- define settings for the new MON node in the KVM cluster ----
        reclass.merge_context(yaml_context=add_mon_infra_kvm_yml,
                              short_path="cluster/*/infra/kvm.yml")

        salt_actions.run_state("*", "saltutil.refresh_pillar")

    @pytest.fixture
    def remove_node_from_reclass(self,
                                 reclass_actions, salt_actions):
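        """Remove the MON node definition from the cluster Reclass model."""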
        LOG.info("Executing pytest SETUP "
                 "from remove_node_from_reclass fixture")
        reclass = reclass_actions
        # The node definition was merged into nodes.yml by
        # describe_node_in_reclass, so delete it from the same file
        reclass.delete_key(
            key="parameters.reclass.storage.node.ceph_mon_node04",
            short_path="cluster/*/infra/config/nodes.yml")
        reclass.delete_key(
            key="parameters.salt.control.cluster.internal.node.cmn04",
            short_path="cluster/*/infra/kvm.yml"
        )
        reclass.delete_key(
            key="parameters.linux.network.host.xtra",
            short_path="cluster/*/ceph/init.yml"
        )

    def test_add_node_process(self,
                              drivetrain_actions,
                              describe_node_in_reclass):
        """Add a Ceph MON node to the cluster.

        https://docs.mirantis.com/mcp/q4-18/mcp-operations-guide/openstack-operations/ceph-operations/manage-nodes/add-mon-nodes.html
        """
        dt = drivetrain_actions

        job_name = "ceph-add-node"
        job_parameters = {
            'HOST': 'xtra*',
            'USE_UPMAP': True
        }
        job_result, job_description = dt.start_job_on_jenkins(
            job_name=job_name,
            job_parameters=job_parameters,
            verbose=True)
        assert job_result == 'SUCCESS', job_description

    def test_delete_node_process(self,
                                 remove_node_from_reclass,
                                 drivetrain_actions):
        dt = drivetrain_actions

        job_name = "ceph-remove-node"
        job_parameters = {
            'HOST': 'xtra*',
            'USE_UPMAP': True
        }
        job_result, job_description = dt.start_job_on_jenkins(
            job_name=job_name,
            job_parameters=job_parameters,
            verbose=True)
        assert job_result == 'SUCCESS', job_description


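# Reclass contexts that describe the new RGW node: host entry, keyring and
# haproxy backend for the radosgw service, and a storage node definition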
add_rgw_ceph_init_yml = """
parameters:
  _param:
    ceph_rgw_node04_hostname: xtra
    ceph_rgw_node04_address: 10.6.0.205
    ceph_rgw_node04_ceph_public_address: 10.166.49.209
  linux:
    network:
      host:
        rgw04:
          address: ${_param:ceph_rgw_node04_address}
          names:
          - ${_param:ceph_rgw_node04_hostname}
          - ${_param:ceph_rgw_node04_hostname}.${_param:cluster_domain}
"""  # noqa: E501

add_rgw_ceph_rgw_yml = """
parameters:
  _param:
    cluster_node04_hostname: ${_param:ceph_rgw_node04_hostname}
    cluster_node04_address: ${_param:ceph_rgw_node04_address}
  ceph:
    common:
      keyring:
        rgw.xtra:
          caps:
            mon: "allow rw"
            osd: "allow rwx"
  haproxy:
    proxy:
      listen:
        radosgw:
          servers:
          - name: ${_param:cluster_node04_hostname}
            host: ${_param:cluster_node04_address}
            port: ${_param:haproxy_radosgw_source_port}
            params: check
"""

add_rgw_config_init_yml = """
parameters:
  reclass:
    storage:
      node:
        ceph_rgw_node04:
          name: ${_param:ceph_rgw_node04_hostname}
          domain: ${_param:cluster_domain}
          classes:
          - cluster.${_param:cluster_name}.ceph.rgw
          params:
            salt_master_host: ${_param:reclass_config_master}
            linux_system_codename: ${_param:ceph_rgw_system_codename}
            single_address: ${_param:ceph_rgw_node04_address}
            deploy_address: ${_param:ceph_rgw_node04_deploy_address}
            ceph_public_address: ${_param:ceph_rgw_node04_public_address}
            keepalived_vip_priority: 104
"""


@pytest.mark.usefixtures("add_xtra_node_to_salt",
                         "wa_prod36167")
class TestCephRgw(object):
    @pytest.fixture
    def describe_node_in_reclass(self,
                                 reclass_actions, salt_actions):
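        """Describe the new RGW node in the cluster Reclass model."""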
        LOG.info("Executing pytest SETUP "
                 "from describe_node_in_reclass fixture")
        reclass = reclass_actions
        # ---- cluster/*/ceph/init.yml ----
        reclass.merge_context(yaml_context=add_rgw_ceph_init_yml,
                              short_path="cluster/*/ceph/init.yml")

        reclass.merge_context(yaml_context=add_rgw_ceph_rgw_yml,
                              short_path="cluster/*/ceph/rgw.yml")

        reclass.merge_context(yaml_context=add_rgw_config_init_yml,
                              short_path="cluster/*/infra/config/init.yml")

        salt_actions.run_state("*", "saltutil.refresh_pillar")

    @pytest.fixture
    def remove_node_from_reclass(self,
                                 reclass_actions, salt_actions):
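        """Placeholder: Reclass cleanup for the RGW node is currently
        disabled; the delete_key calls below are kept for reference."""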
        LOG.info("Executing pytest SETUP "
                 "from remove_node_from_reclass fixture")
        # reclass = reclass_actions
        # reclass.delete_key(
        #     key="parameters.reclass.storage.node.ceph_rgw_node04",
        #     short_path="cluster/*/infra/config/init.yml")
        # reclass.delete_key(
        #     key="parameters.linux.network.host.xtra",
        #     short_path="cluster/*/ceph/init.yml"
        # )

    def test_add_node_process(self,
                              drivetrain_actions,
                              describe_node_in_reclass):
        """Add a Ceph RGW node to the cluster.

        https://docs.mirantis.com/mcp/q4-18/mcp-operations-guide/openstack-operations/ceph-operations/manage-nodes/add-rgw-nodes.html
        """
        dt = drivetrain_actions

        job_name = "ceph-add-node"
        job_parameters = {
            'HOST': 'xtra*',
            'USE_UPMAP': True
        }
        job_result, job_description = dt.start_job_on_jenkins(
            job_name=job_name,
            job_parameters=job_parameters,
            verbose=True)
        assert job_result == 'SUCCESS', job_description

    def test_delete_node_process(self,
                                 remove_node_from_reclass,
                                 drivetrain_actions):
        dt = drivetrain_actions

        job_name = "ceph-remove-node"
        job_parameters = {
            'HOST': 'xtra*',
            'USE_UPMAP': True
        }
        job_result, job_description = dt.start_job_on_jenkins(
            job_name=job_name,
            job_parameters=job_parameters,
            verbose=True)
        assert job_result == 'SUCCESS', job_description


@pytest.mark.usefixtures("add_xtra_node_to_salt",
                         "wa_prod36167")
class TestCephMgr(object):
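    # Placeholder scenarios: the MGR add/remove flows are not implemented yet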
    def test_add_node(self):
        pass

    def test_delete_node(self):
        pass