{% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import DOMAIN_NAME with context %}
{% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_KVM01 with context %}
{% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_KVM02 with context %}
{% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_KVM03 with context %}

{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
# See shared-salt.yaml for the other salt model repository parameters.

# Name of the context file (without the fixed .yaml extension) used to render the Environment model
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','cookied-bm-mcp-dvr-vxlan') %}
# Names of the context files used to render the Cluster and Environment models
{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml' %}
{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-vcp-environment.yaml', 'salt-context-lab03-environment.yaml'] %}

{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2416') %}
{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2417') %}
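# Both VLAN IDs are read from the shell environment via os_env(), falling back
# to the defaults above. A hedged example of overriding them before rendering
# the template (the values below are hypothetical):
#
#   export CONTROL_VLAN=2500
#   export TENANT_VLAN=2501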

{% import 'shared-salt.yaml' as SHARED with context %}

{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}

{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}

- description: Temporary workaround for the cinder backend defined by default in reclass.system
  cmd: |
    sed -i 's/backend\:\ {}//g' /srv/salt/reclass/classes/system/cinder/control/cluster.yml;
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false
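# The sed above strips the empty 'backend: {}' placeholder inherited from the
# system-level cinder class so it cannot mask backend definitions coming from
# the cluster model. A hedged sketch of the affected snippet, assuming the
# stock content of system/cinder/control/cluster.yml:
#
#   cinder:
#     controller:
#       backend: {}    # <- removed by this workaround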


{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}

| - description: "Workaround for rack01 compute generator" |
| cmd: | |
| set -e; |
| # Remove rack01 key |
| . /root/venv-reclass-tools/bin/activate; |
| reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml; |
| |
| # Add openstack_compute_node definition from system |
| reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml --merge; |
| |
| # Set ipaddresses for our nodes |
| reclass-tools add-key parameters._param.openstack_compute_node01_control_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.3 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml; |
| reclass-tools add-key parameters._param.openstack_compute_node02_control_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.31 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml; |
| reclass-tools add-key parameters._param.openstack_compute_node01_tenant_address {{ SHARED.IPV4_NET_TENANT_PREFIX }}.3 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml; |
| reclass-tools add-key parameters._param.openstack_compute_node02_tenant_address {{ SHARED.IPV4_NET_TENANT_PREFIX }}.31 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml; |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 1, delay: 10} |
| skip_fail: false |
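# A hedged way to eyeball the result of the workaround above (plain grep, no
# extra tooling assumed): the rack01 generator key should be gone and the
# per-node addresses should be present in infra/config.yml, e.g.:
#
#   grep -c openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml    # expected to print 0
#   grep 'openstack_compute_node0[12]_control_address' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml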

{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja" "jenkins" "fluentd"') }}

{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}

{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}

{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/environment/' + ENVIRONMENT_MODEL_INVENTORY_NAME + '/overrides.yml') }}

{{ SHARED.MACRO_GENERATE_INVENTORY() }}

{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}

{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}

| - description: "Lab03 workaround: Control network access from cfg01 node using sshuttle via kvm01" |
| cmd: | |
| set -e; |
| set -x; |
| KVM01_DEPLOY_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:infra_kvm_node01_deploy_address); |
| apt-get install -y sshuttle; |
| sshuttle -r ${KVM01_DEPLOY_ADDRESS} {{ SHARED.IPV4_NET_CONTROL }} -D >/dev/null; |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 1, delay: 5} |
| skip_fail: false |
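# The sshuttle daemon above forwards the control network through kvm01, so
# control-plane addresses become reachable from cfg01. A hedged verification
# sketch (not part of the deployment steps; the .10 target address is
# hypothetical):
#
#   pgrep -af sshuttle
#   ping -c 1 {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.10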

- description: Temporary workaround to remove cinder-volume from the CTL nodes
  cmd: |
    sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
    sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: true
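# The two sed expressions above blank out the following class includes in
# openstack/control.yml, which keeps cinder-volume off the ctl nodes:
#
#   - system.cinder.volume.single
#   - system.cinder.volume.notification.messagingv2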

########################################
# Spin up Control Plane VMs on KVM nodes
########################################

- description: Execute 'libvirt' states to create necessary libvirt networks
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls libvirt
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 2, delay: 10}
  skip_fail: false

- description: Create VMs for control plane
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls salt.control
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 3, delay: 10}
  skip_fail: false

- description: '*Workaround*: wait for the control-plane VMs to appear in salt-key (instead of a fixed sleep)'
  cmd: |
    salt-key -l acc | sort > /tmp/current_keys.txt &&
    salt 'kvm*' cmd.run 'virsh list --name' | grep -v 'kvm' | sort | xargs -I {} fgrep {} /tmp/current_keys.txt
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 20, delay: 30}
  skip_fail: false
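# The check above compares the accepted salt keys against the VM names that
# 'virsh list --name' reports on the kvm nodes (output lines containing 'kvm'
# are filtered out); it is retried up to 20 times, 30 seconds apart, until
# every control-plane VM has registered its minion key.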

#########################################
# Configure all running salt minion nodes
#########################################

- description: Refresh pillars on all minions
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false

- description: Sync all salt resources
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false

- description: Show reclass-salt --top for generated nodes
  cmd: reclass-salt --top -u /srv/salt/reclass/nodes/_generated/
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false

{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}