Add 'reclass-render-dir' command
diff --git a/README.md b/README.md
index a01aab7..d120c27 100644
--- a/README.md
+++ b/README.md
@@ -12,3 +12,35 @@
Usage:
------
+This tool can generate a new 'environment' class from the current reclass inventory.
+
+Requirements
+============
+
+- Installed and configured 'reclass' package
+- Prepared 'cluster', 'system' and 'service' classes
+- [Optional] Nodes generated with 'salt-call state.sls reclass.storage'
+
+Create 'environment' class
+==========================
+
+ # 1. Create a context file from the current reclass inventory:
+
+ reclass-create-inventory-context -d mcp11-ovs-dpdk.local > /tmp/context-mcp11-ovs-dpdk.local.yaml
+
+ # 2. Remove existing hardware-related objects from 'cluster', 'system' and 'service' classes:
+
+ reclass-remove-key -r parameters.linux.network.interface /srv/salt/reclass/classes/cluster/physical_mcp11_ovs_dpdk
+ reclass-remove-key -r parameters.linux.network.interface /srv/salt/reclass/classes/system/
+ reclass-remove-key -r parameters.linux.network.interface /usr/share/salt-formulas/reclass/
+
+ # 3. Render the 'environment' class using the cookiecutter-based example template:
+
+ git clone https://github.com/dis-xcom/reclass_tools ~/reclass_tools
+ reclass-render-dir -t ~/reclass_tools/examples/environment -o /tmp/environment -c /tmp/context-mcp11-ovs-dpdk.local.yaml # You can add multiple YAMLs here
+
+ # 4. Symlink the rendered 'environment' directory into /srv/salt/reclass/classes:
+
+ ln -s /tmp/environment /srv/salt/reclass/classes
+
+ # 5. Add the new class 'environment.mcp11-ovs-dpdk.local' to classes/cluster/<cluster_model>/infra/config.yml, for example:
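+
+ The classes list in config.yml might then look like this ('...' stands
+ for whatever classes are already listed there):
+
+ classes:
+ - ...
+ - environment.mcp11-ovs-dpdk.local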
diff --git a/examples/environment/context-mcp11-ovs-dpdk.local.yaml b/examples/environment/context-mcp11-ovs-dpdk.local.yaml
new file mode 100644
index 0000000..ed3bf3a
--- /dev/null
+++ b/examples/environment/context-mcp11-ovs-dpdk.local.yaml
@@ -0,0 +1,182 @@
+cookiecutter:
+ cluster_name: mcp11-ovs-dpdk.local
+ nodes:
+ cmp001.mcp11-ovs-dpdk.local:
+ name: cmp001
+ parameters: {}
+ reclass_storage_name: openstack_compute_node01
+ roles: []
+ cmp002.mcp11-ovs-dpdk.local:
+ name: cmp002
+ parameters: {}
+ reclass_storage_name: openstack_compute_node02
+ roles: []
+ ctl01.mcp11-ovs-dpdk.local:
+ name: ctl01
+ parameters: {}
+ reclass_storage_name: openstack_control_node01
+ roles:
+ - vcp
+ ctl02.mcp11-ovs-dpdk.local:
+ name: ctl02
+ parameters: {}
+ reclass_storage_name: openstack_control_node02
+ roles:
+ - vcp
+ ctl03.mcp11-ovs-dpdk.local:
+ name: ctl03
+ parameters: {}
+ reclass_storage_name: openstack_control_node03
+ roles:
+ - vcp
+ dbs01.mcp11-ovs-dpdk.local:
+ name: dbs01
+ parameters: {}
+ reclass_storage_name: openstack_database_node01
+ roles:
+ - vcp
+ dbs02.mcp11-ovs-dpdk.local:
+ name: dbs02
+ parameters: {}
+ reclass_storage_name: openstack_database_node02
+ roles:
+ - vcp
+ dbs03.mcp11-ovs-dpdk.local:
+ name: dbs03
+ parameters: {}
+ reclass_storage_name: openstack_database_node03
+ roles:
+ - vcp
+ gtw01.mcp11-ovs-dpdk.local:
+ name: gtw01
+ parameters: {}
+ reclass_storage_name: openstack_gateway_node01
+ roles: []
+ gtw02.mcp11-ovs-dpdk.local:
+ name: gtw02
+ parameters: {}
+ reclass_storage_name: openstack_gateway_node02
+ roles: []
+ gtw03.mcp11-ovs-dpdk.local:
+ name: gtw03
+ parameters: {}
+ reclass_storage_name: openstack_gateway_node03
+ roles: []
+ kvm01.mcp11-ovs-dpdk.local:
+ name: kvm01
+ parameters: {}
+ reclass_storage_name: infra_kvm_node01
+ roles: []
+ kvm02.mcp11-ovs-dpdk.local:
+ name: kvm02
+ parameters: {}
+ reclass_storage_name: infra_kvm_node02
+ roles: []
+ kvm03.mcp11-ovs-dpdk.local:
+ name: kvm03
+ parameters: {}
+ reclass_storage_name: infra_kvm_node03
+ roles: []
+ log01.mcp11-ovs-dpdk.local:
+ name: log01
+ parameters: {}
+ reclass_storage_name: stacklight_log_node01
+ roles:
+ - vcp
+ log02.mcp11-ovs-dpdk.local:
+ name: log02
+ parameters: {}
+ reclass_storage_name: stacklight_log_node02
+ roles:
+ - vcp
+ log03.mcp11-ovs-dpdk.local:
+ name: log03
+ parameters: {}
+ reclass_storage_name: stacklight_log_node03
+ roles:
+ - vcp
+ mdb01.mcp11-ovs-dpdk.local:
+ name: mdb01
+ parameters: {}
+ reclass_storage_name: openstack_telemetry_node01
+ roles:
+ - vcp
+ mdb02.mcp11-ovs-dpdk.local:
+ name: mdb02
+ parameters: {}
+ reclass_storage_name: openstack_telemetry_node02
+ roles:
+ - vcp
+ mdb03.mcp11-ovs-dpdk.local:
+ name: mdb03
+ parameters: {}
+ reclass_storage_name: openstack_telemetry_node03
+ roles:
+ - vcp
+ mon01.mcp11-ovs-dpdk.local:
+ name: mon01
+ parameters: {}
+ reclass_storage_name: stacklight_monitor_node01
+ roles:
+ - vcp
+ mon02.mcp11-ovs-dpdk.local:
+ name: mon02
+ parameters: {}
+ reclass_storage_name: stacklight_monitor_node02
+ roles:
+ - vcp
+ mon03.mcp11-ovs-dpdk.local:
+ name: mon03
+ parameters: {}
+ reclass_storage_name: stacklight_monitor_node03
+ roles:
+ - vcp
+ msg01.mcp11-ovs-dpdk.local:
+ name: msg01
+ parameters: {}
+ reclass_storage_name: openstack_message_queue_node01
+ roles:
+ - vcp
+ msg02.mcp11-ovs-dpdk.local:
+ name: msg02
+ parameters: {}
+ reclass_storage_name: openstack_message_queue_node02
+ roles:
+ - vcp
+ msg03.mcp11-ovs-dpdk.local:
+ name: msg03
+ parameters: {}
+ reclass_storage_name: openstack_message_queue_node03
+ roles:
+ - vcp
+ mtr01.mcp11-ovs-dpdk.local:
+ name: mtr01
+ parameters: {}
+ reclass_storage_name: stacklight_telemetry_node01
+ roles:
+ - vcp
+ mtr02.mcp11-ovs-dpdk.local:
+ name: mtr02
+ parameters: {}
+ reclass_storage_name: stacklight_telemetry_node02
+ roles:
+ - vcp
+ mtr03.mcp11-ovs-dpdk.local:
+ name: mtr03
+ parameters: {}
+ reclass_storage_name: stacklight_telemetry_node03
+ roles:
+ - vcp
+ prx01.mcp11-ovs-dpdk.local:
+ name: prx01
+ parameters: {}
+ reclass_storage_name: openstack_proxy_node01
+ roles:
+ - vcp
+ prx02.mcp11-ovs-dpdk.local:
+ name: prx02
+ parameters: {}
+ reclass_storage_name: openstack_proxy_node02
+ roles:
+ - vcp
diff --git "a/examples/environment/\173\173 cookiecutter.cluster_name \175\175/init.yml" "b/examples/environment/\173\173 cookiecutter.cluster_name \175\175/init.yml"
new file mode 100644
index 0000000..633ed40
--- /dev/null
+++ "b/examples/environment/\173\173 cookiecutter.cluster_name \175\175/init.yml"
@@ -0,0 +1,16 @@
+parameters:
+ reclass:
+ storage:
+ node:
+{%- for inventory_node_name, node in cookiecutter.nodes.items() %}
+ {{ node['reclass_storage_name'] }}:
+ params:
+{%- if 'vcp' in node['roles'] %}
+ linux_network_interfaces:
+ ens3: ${_param:linux_single_interface}
+{%- else %}
+            linux_network_interfaces: ~  # TODO: fill in the interfaces configuration from your inventory
+{%- endif %}
+ classes:
+ - environment.{{ cookiecutter.cluster_name }}.networking
+{%- endfor %}
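+{#- A sketch of the fragment this loop renders for one 'vcp' node, using the
+    example context file (storage and cluster names come from the supplied
+    context):
+
+        openstack_control_node01:
+          params:
+            linux_network_interfaces:
+              ens3: ${_param:linux_single_interface}
+          classes:
+          - environment.mcp11-ovs-dpdk.local.networking
+-#}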
diff --git "a/examples/environment/\173\173 cookiecutter.cluster_name \175\175/networking.yml" "b/examples/environment/\173\173 cookiecutter.cluster_name \175\175/networking.yml"
new file mode 100644
index 0000000..1ef4d17
--- /dev/null
+++ "b/examples/environment/\173\173 cookiecutter.cluster_name \175\175/networking.yml"
@@ -0,0 +1,4 @@
+parameters:
+ linux:
+ network:
+ interface: ${_param:linux_network_interfaces}
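+# 'linux_network_interfaces' is expected to be set per node in init.yml
+# (via reclass.storage node params), so every node can carry its own
+# interfaces configuration while sharing this class.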
diff --git a/reclass_tools/cli.py b/reclass_tools/cli.py
index 1bd80b7..becff4e 100644
--- a/reclass_tools/cli.py
+++ b/reclass_tools/cli.py
@@ -152,3 +153,26 @@
print(yaml.dump(current_underlay_context, default_flow_style=False))
+
+def render_dir(args=None):
+ try:
+ from reclass_tools import create_inventory
+ except ImportError:
+ print("Please run this tool on the salt-master node with installed 'reclass'")
+ return
+
+    parser = argparse.ArgumentParser(
+        formatter_class=argparse.RawTextHelpFormatter,
+        description="Render a cookiecutter-based template directory "
+                    "using one or more context files")
+    parser.add_argument('--template-dir', '-t', dest='template_dir',
+                        help=('Cookiecutter-based template directory'))
+ parser.add_argument('--output-dir', '-o', dest='output_dir',
+ help=('Path to the directory where the rendered template will be placed'))
+    parser.add_argument('--context', '-c', dest='contexts',
+                        help=('One or more YAML/JSON context files; keys from '
+                              'later files override earlier ones'),
+                        nargs='+')
+
+ params = parser.parse_args(args)
+
+    create_inventory.render_dir(template_dir=params.template_dir,
+                                output_dir=params.output_dir,
+                                contexts=params.contexts)
+
+
diff --git a/reclass_tools/create_inventory.py b/reclass_tools/create_inventory.py
index 26ad89e..ce0d7a6 100644
--- a/reclass_tools/create_inventory.py
+++ b/reclass_tools/create_inventory.py
@@ -40,12 +40,17 @@
vcp_list = reclass_models.vcp_list(domain=domain, inventory=inventory)
reclass_storage = reclass_models.reclass_storage(domain=domain, inventory=inventory)
- current_underlay_context = {
- 'current_clusters': {
- }
- }
+ if domain is None:
+ raise Exception("Please specify a domain name from: \n{}".format('\n'.join(reclass_storage.keys())))
- for domain, storage_nodes in reclass_storage.items():
+
+ for storage_domain, storage_nodes in reclass_storage.items():
+ if storage_domain != domain:
+ continue
current_cluster_nodes = {}
for storage_node_name, storage_node in storage_nodes.items():
@@ -73,8 +78,14 @@
if reclass_key:
helpers.create_nested_key(current_cluster_nodes[inventory_node_name], path=key_path, value=reclass_key)
- current_underlay_context['current_clusters'][domain] = {
- 'nodes': current_cluster_nodes
+ current_underlay_context = {
+ 'cookiecutter': {
+            'cluster_name': domain,  # equals storage_domain for the selected cluster
+ 'nodes': current_cluster_nodes,
+ }
}
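+    # The resulting context has the shape expected by the cookiecutter-based
+    # templates under examples/environment, e.g.:
+    #   {'cookiecutter': {'cluster_name': 'mcp11-ovs-dpdk.local',
+    #                     'nodes': {'ctl01.mcp11-ovs-dpdk.local': {...}}}}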
return current_underlay_context
@@ -106,7 +117,7 @@
# ..
-def render_environment_class():
+def render_dir(template_dir, output_dir, contexts):
"""Coockiecutter echancement to use several source JSON files
:param template_dir: directory with templates to render
@@ -126,15 +137,36 @@
#ipdb> output_dir
#'/root/my_new_deployment/'
- repo_dir = '/root/cookiecutter-templates/cluster_product/openstack'
overwrite_if_exists = True
- output_dir = '/root/my_new_deployment/'
- context = {'cookiecutter': {'openstack_telemetry_node02_hostname': 'mdb02' }}
+
+ merged_context = {}
+ for fcon in contexts:
+        if fcon.endswith(('.yaml', '.yml')):
+ context = helpers.yaml_read(fcon)
+ elif fcon.endswith('.json'):
+ context = helpers.json_read(fcon)
+ else:
+ print("Error: Please use YAML or JSON files for contexts")
+ return # should be exit 1
+
+ merged_context = helpers.merge_nested_objects(merged_context, context)
+
try:
generate.generate_files(
- repo_dir=repo_dir,
- context=context,
+ repo_dir=template_dir,
+ context=merged_context,
overwrite_if_exists=overwrite_if_exists,
output_dir=output_dir
)
@@ -146,7 +178,6 @@
context_str = yaml.dump(
undefined_err.context,
- indent=4,
default_flow_style=False
)
print('='*15 + ' Context: '+ '='*15 + '\n{}'.format(context_str) + '='*40)
diff --git a/reclass_tools/helpers.py b/reclass_tools/helpers.py
index 75e3185..322ac71 100644
--- a/reclass_tools/helpers.py
+++ b/reclass_tools/helpers.py
@@ -1,3 +1,7 @@
+import os
+import json
+import yaml
+
def get_nested_key(data, path=None):
if type(path) is not list:
@@ -42,3 +46,134 @@
path = path[:-1]
+def yaml_read(yaml_file):
+    if os.path.isfile(yaml_file):
+        with open(yaml_file, 'r') as f:
+            return yaml.safe_load(f)
+    else:
+        print("'{}' is not a file!".format(yaml_file))
+
+
+def json_read(json_file):
+    if os.path.isfile(json_file):
+        with open(json_file, 'r') as f:
+            return json.load(f)
+    else:
+        print("'{}' is not a file!".format(json_file))
+
+
+def merge_nested_objects(obj_1, obj_2):
+ """Merge two objects with optional key overwrites
+
+    Original: https://stackoverflow.com/a/17860173
+    - Merges dicts and lists
+    - If a dict key has the suffix '__overwrite__' and a boolean value,
+      then the key is treated as a special merge keyword:
+      <key>__overwrite__: True   # Overwrite the existing <key> content with <key> from obj_2
+      <key>__overwrite__: False  # Keep the existing <key> content from obj_1
+
+
+ Case #1: Merge dicts and lists, overwrite other types with latest value
+
+ dict_a = {
+ 'host': '1.1.1.1',
+ 'ssh': {
+ 'login': 'user'
+ }
+ }
+
+ dict_b = {
+ 'host': '2.2.2.2',
+ 'ssh': {
+ 'password': 'pass'
+ }
+ }
+
+ print(merge_nested_objects(dict_a, dict_b))
+ {
+ 'host': '2.2.2.2',
+ 'ssh': {
+ 'login': 'user',
+ 'password': 'pass',
+ }
+ }
+
+ Case #2: Use <key>__overwrite__: True to remove previous key content
+
+    dict_a = {
+        'host': '1.1.1.1',
+        'ssh': {
+            'login': 'user'
+        }
+    }
+
+    dict_b = {
+        'ssh__overwrite__': True,
+        'ssh': {
+            'password': 'pass'
+        }
+    }
+
+ print(merge_nested_objects(dict_a, dict_b))
+ {
+ 'host': '1.1.1.1',
+ 'ssh': {
+ 'password': 'pass',
+ }
+ }
+
+ Case #3: Use <key>__overwrite__: False to skip merging key if already exists
+
+    dict_a = {
+        'host': '1.1.1.1',
+        'ssh': {
+            'login': 'user'
+        }
+    }
+
+    dict_b = {
+        'host__overwrite__': False,
+        'host': '2.2.2.2',
+        'ssh': {
+            'login__overwrite__': False,
+            'login': 'new_user',
+            'password': 'pass'
+        }
+    }
+
+ print(merge_nested_objects(dict_a, dict_b))
+ {
+ 'host': '1.1.1.1',
+ 'ssh': {
+ 'login': 'user',
+ 'password': 'pass'
+ }
+ }
+
+
+ """
+    # Merge two dicts
+    if isinstance(obj_1, dict) and isinstance(obj_2, dict):
+        result = {}
+        for key, value in obj_1.items():
+            if key not in obj_2:
+                result[key] = value
+            else:
+                overwrite_key = key + '__overwrite__'
+                if obj_2.get(overwrite_key) is True:
+                    result[key] = obj_2[key]
+                elif obj_2.get(overwrite_key) is False:
+                    result[key] = value
+                else:
+                    result[key] = merge_nested_objects(value, obj_2[key])
+        for key, value in obj_2.items():
+            # Do not copy the '<key>__overwrite__' markers into the result
+            if key.endswith('__overwrite__'):
+                continue
+            if key not in obj_1:
+                result[key] = value
+        return result
+
+ # Add two lists
+ if isinstance(obj_1, list) and isinstance(obj_2, list):
+ return obj_1 + obj_2
+
+ # Overwrite a value with new one
+ return obj_2
diff --git a/reclass_tools/walk_models.py b/reclass_tools/walk_models.py
index d57aef5..7017722 100644
--- a/reclass_tools/walk_models.py
+++ b/reclass_tools/walk_models.py
@@ -32,12 +32,12 @@
yield (log)
-def yaml_read(yaml_file):
- if os.path.isfile(yaml_file):
- with open(yaml_file, 'r') as f:
- return yaml.load(f)
- else:
- print("\'{}\' is not a file!".format(yaml_file))
class OpenFile(object):
@@ -110,7 +110,7 @@
for path in paths:
for log in walkfiles(path, verbose):
if log.fname.endswith('.yml'):
- model = yaml_read(log.fname)
+ model = helpers.yaml_read(log.fname)
if model is not None:
# Collect all params from the models
_param = helpers.get_nested_key(model, ['parameters', '_param'])
@@ -140,7 +140,7 @@
for path in paths:
for fyml in walkfiles(path, verbose=verbose):
if fyml.fname.endswith('.yml'):
- model = yaml_read(fyml.fname)
+ model = helpers.yaml_read(fyml.fname)
if model is not None:
# Clear linux.network.interfaces
diff --git a/setup.cfg b/setup.cfg
index 985cf66..bad4709 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -32,3 +32,4 @@
reclass-inventory-list = reclass_tools.cli:inventory_list
reclass-vcp-list = reclass_tools.cli:vcp_list
reclass-create-inventory-context = reclass_tools.cli:create_inventory_context
+ reclass-render-dir = reclass_tools.cli:render_dir