Ceph operations: network interfaces and overrides are now included in
the node definition in the reclass.storage.node section of Reclass.
Added a build_node_config() method that builds the node config, since the
extended context is now merged into infra/config/nodes.yml.
Suppressed some noisy logs in ReclassManager (demoted from INFO to DEBUG).
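
A minimal sketch of how the new helper is consumed (as in the fixture
changes below):

    reclass.merge_context(yaml_context=build_node_config('osd'),
                          short_path="cluster/*/infra/config/nodes.yml")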
Change-Id: I5bd5bb83c2029c34c1da19545683f9c40654610a
diff --git a/tcp_tests/managers/reclass_manager.py b/tcp_tests/managers/reclass_manager.py
index 7e6243a..3952b5f 100644
--- a/tcp_tests/managers/reclass_manager.py
+++ b/tcp_tests/managers/reclass_manager.py
@@ -84,7 +84,7 @@
:param file_name: name of YAML file to find a key
:return: str, key if found
"""
- LOG.info("Try to get '{key}' key from '{file}' file".format(
+ LOG.debug("Try to get '{key}' key from '{file}' file".format(
file=file_name,
key=key
))
@@ -95,11 +95,11 @@
key=key,
file_name=file_name))['stdout']
- LOG.info("Raw output from reclass.get_key {}".format(request_key))
+ LOG.debug("Raw output from reclass.get_key {}".format(request_key))
encoded_request_key = ''.join(request_key).encode(encoding='UTF-8')
- key = yaml.load(encoded_request_key)
- LOG.info("From reclass.get_key {}".format(key))
- return key
+    value = yaml.load(encoded_request_key, Loader=yaml.SafeLoader)
+ LOG.info("From reclass.get_key {}: {}".format(key, value))
+ return value
def add_bool_key(self, key, value, short_path):
"""
diff --git a/tcp_tests/tests/system/test_ceph_operations.py b/tcp_tests/tests/system/test_ceph_operations.py
index 44ae014..ef23b67 100644
--- a/tcp_tests/tests/system/test_ceph_operations.py
+++ b/tcp_tests/tests/system/test_ceph_operations.py
@@ -1,70 +1,10 @@
import pytest
from tcp_tests import logger
+from string import Template
LOG = logger.logger
-xtra_network_interface = """
-parameters:
- _param:
- linux_network_interfaces:
- br_ctl:
- address: ${_param:single_address}
- enabled: True
- name_servers:
- - ${_param:dns_server01}
- - ${_param:dns_server02}
- netmask: ${_param:control_network_netmask}
- proto: static
- require_interfaces: ['ens4']
- type: bridge
- use_interfaces: ['ens4']
- ens3:
- enabled: True
- name: ens3
- proto: dhcp
- type: eth
- ens4:
- enabled: True
- ipflush_onchange: True
- name: ens4
- proto: manual
- type: eth
-"""
-
-add_osd_ceph_init_yml = """
-parameters:
- _param:
- ceph_osd_node04_hostname: xtra
- ceph_osd_node04_address: 10.6.0.205
- ceph_osd_system_codename: xenial
- linux:
- network:
- host:
- xtra:
- address: ${_param:ceph_osd_node04_address}
- names:
- - ${_param:ceph_osd_node04_hostname}
- - ${_param:ceph_osd_node04_hostname}.${_param:cluster_domain}
- """
-
-add_osd_config_init_yml = """
-parameters:
- reclass:
- storage:
- node:
- ceph_osd_node04:
- name: ${_param:ceph_osd_node04_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.ceph.osd
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: ${_param:ceph_osd_system_codename}
- single_address: ${_param:ceph_osd_node04_address}
- ceph_crush_parent: rack02
-"""
-
@pytest.fixture(scope='session')
def add_xtra_node_to_salt(salt_actions, underlay_actions,
@@ -80,27 +20,19 @@
cfg_node = [node['node_name'] for node in config.underlay.ssh
if 'salt_master' in node.get('roles')][0]
- salt_actions.enforce_state("*", "reclass")
- reclass_actions.add_class(
- "environment.heat-cicd-queens-dvr-sl.linux_network_interface",
- short_path="../nodes/_generated/xtra.*.yml")
- reclass_actions.add_class("environment.heat-cicd-queens-dvr-sl.overrides",
- short_path="../nodes/_generated/xtra.*.yml")
- reclass_actions.merge_context(yaml_context=xtra_network_interface,
- short_path="../nodes/_generated/xtra.*.yml")
+ # salt_actions.enforce_state("I@salt:master", "reclass")
underlay_actions.check_call(
"salt-key -a {node} --include-all -y".format(node=xtra_node),
node_name=cfg_node,
raise_on_err=False)
# Need to restart salt-minion service after accepting it in Salt Master
- underlay_actions.check_call(
- "systemctl restart salt-minion",
- node_name=xtra_node,
- raise_on_err=False)
- salt_actions.enforce_state("I@salt:master", "reclass")
- salt_actions.enforce_state("xtra*", "linux")
- salt_actions.enforce_state("xtra*", "openssh")
+ # underlay_actions.check_call(
+ # "systemctl restart salt-minion",
+ # node_name=xtra_node,
+ # raise_on_err=False)
+ # salt_actions.enforce_state("xtra*", "linux")
+ # salt_actions.enforce_state("xtra*", "openssh")
yield
@@ -121,6 +53,64 @@
"wa_prod36167")
class TestCephOsd(object):
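+    # YAML contexts describing the new OSD node in the cluster model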
+ add_osd_ceph_init_yml = """
+ parameters:
+ _param:
+ ceph_osd_node04_hostname: xtra
+ ceph_osd_node04_address: 10.6.0.205
+ ceph_osd_system_codename: xenial
+ linux:
+ network:
+ host:
+ xtra:
+ address: ${_param:ceph_osd_node04_address}
+ names:
+ - ${_param:ceph_osd_node04_hostname}
+ - ${_param:ceph_osd_node04_hostname}.${_param:cluster_domain}
+ """
+
+ add_osd_config_init_yml = """
+ parameters:
+ reclass:
+ storage:
+ node:
+ ceph_osd_node04:
+ name: ${_param:ceph_osd_node04_hostname}
+ domain: ${_param:cluster_domain}
+ classes:
+ - cluster.${_param:cluster_name}.ceph.osd
+ - environment.heat-cicd-queens-dvr-sl.linux_network_interface
+ - environment.heat-cicd-queens-dvr-sl.overrides
+ params:
+ salt_master_host: ${_param:reclass_config_master}
+ linux_system_codename: ${_param:ceph_osd_system_codename}
+ single_address: ${_param:ceph_osd_node04_address}
+ ceph_crush_parent: rack02
+ linux_network_interfaces:
+ br_ctl:
+ address: ${_param:ceph_osd_node04_address}
+ enabled: True
+ name_servers:
+ - ${_param:dns_server01}
+ - ${_param:dns_server02}
+ netmask: ${_param:control_network_netmask}
+ proto: static
+ require_interfaces: ['ens4']
+ type: bridge
+ use_interfaces: ['ens4']
+ ens3:
+ enabled: True
+ name: ens3
+ proto: dhcp
+ type: eth
+ ens4:
+ enabled: True
+ ipflush_onchange: True
+ name: ens4
+ proto: manual
+ type: eth
+ """
+
@pytest.fixture
def describe_node_in_reclass(self,
reclass_actions,
@@ -128,14 +118,17 @@
LOG.info("Executing pytest SETUP "
"from describe_node_in_reclass fixture")
reclass = reclass_actions
+
# ---- cluster/*/ceph/init.yml ---------------
- reclass.merge_context(yaml_context=add_osd_ceph_init_yml,
+ reclass.merge_context(yaml_context=self.add_osd_ceph_init_yml,
short_path="cluster/*/ceph/init.yml")
# ------- cluster/infra/config/init.yml -----------
- reclass.merge_context(yaml_context=add_osd_config_init_yml,
- short_path="cluster/*/infra/config/init.yml")
- salt_actions.run_state("*", "saltutil.refresh_pillar")
+ reclass.merge_context(yaml_context=build_node_config('osd'),
+ short_path="cluster/*/infra/config/nodes.yml")
+
+ # salt_actions.run_state("*", "saltutil.refresh_pillar")
+ # salt_actions.enforce_state("I@salt:master", "reclass")
@pytest.fixture
def remove_node_from_reclass(self,
@@ -144,7 +137,7 @@
reclass.delete_key(
key="parameters.reclass.storage.node.ceph_osd_node04",
- short_path="cluster/*/infra/config/init.yml"
+ short_path="cluster/*/infra/config/nodes.yml"
)
reclass.delete_key(
key="parameters.linux.network.host.xtra",
@@ -212,77 +205,36 @@
assert job_result == 'SUCCESS', job_description
-add_mon_ceph_init_yml = """
-parameters:
- _param:
- ceph_mon_node04_hostname: xtra
- ceph_mon_node04_address: 10.6.0.205
- ceph_mon_node04_ceph_public_address: 10.166.49.209
- ceph_mon_node04_ceph_backup_hour: 4
- ceph_mon_node04_ceph_backup_minute: 0
- linux:
- network:
- host:
- xtra:
- address: ${_param:ceph_mon_node04_address}
- names:
- - ${_param:ceph_mon_node04_hostname}
- - ${_param:ceph_mon_node04_hostname}.${_param:cluster_domain}
-"""
-
-add_mon_ceph_common_yml = """
-parameters:
- ceph:
- common:
- members:
- - name: ${_param:ceph_mon_node04_hostname}
- host: ${_param:ceph_mon_node04_address}
-"""
-
-add_mon_config_node_yml = """
-parameters:
- reclass:
- storage:
- node:
- ceph_mon_node04:
- name: ${_param:ceph_mon_node04_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.ceph.mon
- params:
- ceph_public_address: ${_param:ceph_mon_node04_ceph_public_address}
- ceph_backup_time_hour: ${_param:ceph_mon_node04_ceph_backup_hour}
- ceph_backup_time_minute: ${_param:ceph_mon_node04_ceph_backup_minute}
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: ${_param:ceph_mon_system_codename}
- single_address: ${_param:ceph_mon_node04_address}
- keepalived_vip_priority: 104
-""" # noqa: E501
-
-add_mon_infra_kvm_yml = """
-parameters:
- salt:
- control:
- size:
- ceph.mon:
- cpu: 8
- ram: 16384
- disk_profile: small
- net_profile: default
- cluster:
- internal:
- node:
- cmn04:
- name: ${_param:ceph_mon_node04_hostname}
- provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_xenial_image}
- size: ceph.mon
-""" # noqa: E501
-
-
@pytest.mark.usefixtures("add_xtra_node_to_salt",
"wa_prod36167")
class TestCephMon(object):
+ add_mon_ceph_init_yml = """
+ parameters:
+ _param:
+ ceph_mon_node04_hostname: xtra
+ ceph_mon_node04_address: 10.6.0.205
+ ceph_mon_node04_ceph_public_address: 10.166.49.209
+ ceph_mon_node04_ceph_backup_hour: 4
+ ceph_mon_node04_ceph_backup_minute: 0
+ linux:
+ network:
+ host:
+ xtra:
+ address: ${_param:ceph_mon_node04_address}
+ names:
+ - ${_param:ceph_mon_node04_hostname}
+ - ${_param:ceph_mon_node04_hostname}.${_param:cluster_domain}
+ """
+
+ add_mon_ceph_common_yml = """
+ parameters:
+ ceph:
+ common:
+ members:
+ - name: ${_param:ceph_mon_node04_hostname}
+ host: ${_param:ceph_mon_node04_address}
+ """
+
@pytest.fixture
def describe_node_in_reclass(self,
reclass_actions, salt_actions):
@@ -290,18 +242,20 @@
"from describe_node_in_reclass fixture")
reclass = reclass_actions
# ---- cluster/*/ceph/init.yml --------------
- reclass.merge_context(yaml_context=add_mon_ceph_init_yml,
+ reclass.merge_context(yaml_context=self.add_mon_ceph_init_yml,
short_path="cluster/*/ceph/init.yml")
# ------- cluster/infra/config/init.yml -----------
- reclass.merge_context(yaml_context=add_mon_ceph_common_yml,
+ reclass.merge_context(yaml_context=self.add_mon_ceph_common_yml,
short_path="cluster/*/ceph/common.yml")
- reclass.merge_context(yaml_context=add_mon_config_node_yml,
+ reclass.merge_context(yaml_context=build_node_config('mon'),
short_path="cluster/*/infra/config/nodes.yml")
# ------- define settings for new mon node in KVM cluster -----------
- reclass.merge_context(yaml_context=add_mon_infra_kvm_yml,
- short_path="cluster/*/infra/kvm.yml")
+    # Commented out because we add an already deployed node,
+    # not a new VM
+ # reclass.merge_context(yaml_context=add_mon_infra_kvm_yml,
+ # short_path="cluster/*/infra/kvm.yml")
salt_actions.run_state("*", "saltutil.refresh_pillar")
@@ -313,7 +267,7 @@
reclass = reclass_actions
reclass.delete_key(
key="parameters.reclass.storage.node.ceph_mon_node04",
- short_path="cluster/*/infra/config/init.yml")
+ short_path="cluster/*/infra/config/nodes.yml")
reclass.delete_key(
key="parameters.salt.control.cluster.internal.node.cmn04",
short_path="cluster/*/infra/kvm.yml"
@@ -362,68 +316,48 @@
assert job_result == 'SUCCESS', job_description
-add_rgw_ceph_init_yml = """
-parameters:
- _param:
- ceph_rgw_node04_hostname: xtra
- ceph_rgw_node04_address: 10.6.0.205
- ceph_rgw_node04_ceph_public_address: 10.166.49.209
- linux:
- network:
- host:
- rgw04:
- address: ${_param:ceph_rgw_node04_address}
- names:
- - ${_param:ceph_rgw_node04_hostname}
- - ${_param:ceph_rgw_node04_hostname}.${_param:cluster_domain}
-""" # noqa: E501
-
-add_rgw_ceph_rgw_yml = """
-parameters:
- _param:
- cluster_node04_hostname: ${_param:ceph_rgw_node04_hostname}
- cluster_node04_address: ${_param:ceph_rgw_node04_address}
- ceph:
- common:
- keyring:
- rgw.xtra:
- caps:
- mon: "allow rw"
- osd: "allow rwx"
- haproxy:
- proxy:
- listen:
- radosgw:
- servers:
- - name: ${_param:cluster_node04_hostname}
- host: ${_param:cluster_node04_address}
- port: ${_param:haproxy_radosgw_source_port}
- params: check
-"""
-
-add_rgw_config_init_yml = """
-parameters:
- reclass:
- storage:
- node:
- ceph_rgw_node04:
- name: ${_param:ceph_rgw_node04_hostname}
- domain: ${_param:cluster_domain}
- classes:
- - cluster.${_param:cluster_name}.ceph.rgw
- params:
- salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: ${_param:ceph_rgw_system_codename}
- single_address: ${_param:ceph_rgw_node04_address}
- deploy_address: ${_param:ceph_rgw_node04_deploy_address}
- ceph_public_address: ${_param:ceph_rgw_node04_public_address}
- keepalived_vip_priority: 104
-"""
-
-
@pytest.mark.usefixtures("add_xtra_node_to_salt",
"wa_prod36167")
class TestCephRgw(object):
+ add_rgw_ceph_init_yml = """
+ parameters:
+ _param:
+ ceph_rgw_node04_hostname: xtra
+ ceph_rgw_node04_address: 10.6.0.205
+ ceph_rgw_node04_ceph_public_address: 10.166.49.209
+ linux:
+ network:
+ host:
+ rgw04:
+ address: ${_param:ceph_rgw_node04_address}
+ names:
+ - ${_param:ceph_rgw_node04_hostname}
+ - ${_param:ceph_rgw_node04_hostname}.${_param:cluster_domain}
+ """ # noqa: E501
+
+ add_rgw_ceph_rgw_yml = """
+ parameters:
+ _param:
+ cluster_node04_hostname: ${_param:ceph_rgw_node04_hostname}
+ cluster_node04_address: ${_param:ceph_rgw_node04_address}
+ ceph:
+ common:
+ keyring:
+ rgw.xtra:
+ caps:
+ mon: "allow rw"
+ osd: "allow rwx"
+ haproxy:
+ proxy:
+ listen:
+ radosgw:
+ servers:
+ - name: ${_param:cluster_node04_hostname}
+ host: ${_param:cluster_node04_address}
+ port: ${_param:haproxy_radosgw_source_port}
+ params: check
+ """
+
@pytest.fixture
def describe_node_in_reclass(self,
reclass_actions, salt_actions):
@@ -431,14 +365,14 @@
"from describe_node_in_reclass fixture")
reclass = reclass_actions
# ---- cluster/*/ceph/init.yml --------------
- reclass.merge_context(yaml_context=add_rgw_ceph_init_yml,
+ reclass.merge_context(yaml_context=self.add_rgw_ceph_init_yml,
short_path="cluster/*/ceph/init.yml")
- reclass.merge_context(yaml_context=add_rgw_ceph_rgw_yml,
+ reclass.merge_context(yaml_context=self.add_rgw_ceph_rgw_yml,
short_path="cluster/*/ceph/rgw.yml")
- reclass.merge_context(yaml_context=add_rgw_config_init_yml,
- short_path="cluster/*/infra/config/init.yml")
+ reclass.merge_context(yaml_context=build_node_config('rgw'),
+ short_path="cluster/*/infra/config/nodes.yml")
salt_actions.run_state("*", "saltutil.refresh_pillar")
@@ -503,3 +437,90 @@
def test_delete_node(self):
pass
+
+
+def build_node_config(node=''):
+    """Build a YAML node definition for the reclass.storage.node section.
+
+    :param node: one of 'osd', 'mon', 'rgw', 'mgr'
+    :return: node configuration as a YAML string
+    """
+
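+    # The node config itself uses reclass ${...} interpolation, so the
+    # Template delimiter is switched to '#' (with upper-case-only
+    # placeholder names) to keep ${_param:...} references intact.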
+ class _Template(Template):
+ delimiter = "#"
+ idpattern = '[A-Z]*'
+
+ template = _Template("""
+ parameters:
+ reclass:
+ storage:
+ node:
+ ceph_#NODE_node04:
+ name: ${_param:ceph_#NODE_node04_hostname}
+ domain: ${_param:cluster_domain}
+ classes:
+ - cluster.${_param:cluster_name}.ceph.#NODE
+ - environment.${_param:cluster_name}.linux_network_interface
+ - environment.${_param:cluster_name}.overrides
+ params:
+ salt_master_host: ${_param:reclass_config_master}
+ linux_system_codename: ${_param:ceph_#NODE_system_codename}
+ single_address: ${_param:ceph_#NODE_node04_address}
+ #OSDSETTINGS
+ #MONSETTINGS
+ #RGWSETTINGS
+ linux_network_interfaces:
+ br_ctl:
+ address: ${_param:ceph_#NODE_node04_address}
+ enabled: True
+ name_servers:
+ - ${_param:dns_server01}
+ - ${_param:dns_server02}
+ netmask: ${_param:control_network_netmask}
+ proto: static
+ require_interfaces: ['ens4']
+ type: bridge
+ use_interfaces: ['ens4']
+ ens3:
+ enabled: True
+ name: ens3
+ proto: dhcp
+ type: eth
+ ens4:
+ enabled: True
+ ipflush_onchange: True
+ name: ens4
+ proto: manual
+ type: eth
+ """)
+
+ data = {
+ 'NODE': node,
+ 'OSDSETTINGS': '',
+ 'MONSETTINGS': '',
+ 'RGWSETTINGS': '',
+
+ }
+    # ------------------ OSD specific settings ----------
+ if node == 'osd':
+ data['OSDSETTINGS'] = """
+ ceph_crush_parent: rack02
+ """
+    # ------------------ MON specific settings ----------
+ if node == 'mon':
+ data['MONSETTINGS'] = """
+ ceph_backup_time_hour: ${_param:ceph_mon_node04_ceph_backup_hour}
+ ceph_backup_time_minute: ${_param:ceph_mon_node04_ceph_backup_minute}
+        ceph_public_address: ${_param:ceph_mon_node04_ceph_public_address}
+ keepalived_vip_priority: 104
+ """ # noqa: E501
+    # ------------------ RGW specific settings -----------
+ if node == 'rgw':
+ data['RGWSETTINGS'] = """
+        ceph_public_address: ${_param:ceph_rgw_node04_ceph_public_address}
+ keepalived_vip_priority: 104
+ """ # noqa: E501
+
+ yaml_config = template.substitute(data)
+
+ return yaml_config