{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import DOMAIN_NAME with context %}
{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import CUSTOM_VCP_TRUSTY_IMAGE_URL with context %}
{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import CUSTOM_VCP_XENIAL_IMAGE_URL with context %}
{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
# See shared-salt.yaml for the other salt model repository parameters
# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-cookied-bm-mcp-ocata-contrail') %}
# Path to the context files used to render Cluster and Environment models
{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-contrail.yaml' %}
{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml'] %}
{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
{% import 'shared-salt.yaml' as SHARED with context %}
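# Shared macros: install the Salt master, render the Cluster model from the
# cookiecutter context and the Environment model from the inventory files
# above, configure reclass, then install the minions and apply the underlay
# states on the master.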
{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
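# The following steps patch the generated reclass model before the node
# inventory is generated.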
- description: "Workaround for rack01 compute generator"
cmd: |
set -e;
# Remove rack01 key
. /root/venv-reclass-tools/bin/activate;
reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml;
# Add openstack_compute_node definition from system
reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml --merge;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
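# After the step above, infra/config/init.yml is expected to contain (sketch;
# the neighbouring classes depend on the rendered model):
#   classes:
#   - ...
#   - system.reclass.storage.system.openstack_compute_multi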
- description: "Change path to internal storage for salt.control images"
cmd: |
set -e;
. /root/venv-reclass-tools/bin/activate;
reclass-tools add-key parameters._param.salt_control_xenial_image 'http://images.mcp.mirantis.net/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
reclass-tools add-key parameters._param.salt_control_trusty_image 'http://images.mcp.mirantis.net/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
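# Optionally override the VCP images with custom URLs passed through the
# CUSTOM_VCP_TRUSTY_IMAGE_URL / CUSTOM_VCP_XENIAL_IMAGE_URL variables.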
{%- if CUSTOM_VCP_TRUSTY_IMAGE_URL != '' %}
- description: "Change trusty image to custom"
cmd: |
echo "CUSTOM_TRUSTY_IMAGE is {{ CUSTOM_VCP_TRUSTY_IMAGE_URL }}";
. /root/venv-reclass-tools/bin/activate;
reclass-tools add-key parameters._param.salt_control_trusty_image "{{ CUSTOM_VCP_TRUSTY_IMAGE_URL }}" /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
{%- endif %}
{%- if CUSTOM_VCP_XENIAL_IMAGE_URL != '' %}
- description: "Change xenial image to custom"
cmd: |
echo "CUSTOM_XENIAL_IMAGE is {{ CUSTOM_VCP_XENIAL_IMAGE_URL }}";
. /root/venv-reclass-tools/bin/activate;
reclass-tools add-key parameters._param.salt_control_xenial_image "{{ CUSTOM_VCP_XENIAL_IMAGE_URL }}" /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
{%- endif %}
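# The sed calls below strip the cinder-volume classes from the CTL role,
# i.e. they remove lines like the following from openstack/control.yml:
#   - system.cinder.volume.single
#   - system.cinder.volume.notification.messagingv2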
- description: Temporary workaround to remove cinder-volume from CTL nodes
cmd: |
sed -i 's/- system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
sed -i 's/- system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: true
- description: Temporary workaround for correct bridge names according to environment templates
cmd: |
sed -i 's/br-ctl/br_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
sed -i 's/br-mgm/br_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
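# Generate the per-node reclass inventory and apply the shared networking workarounds.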
{{ SHARED.MACRO_GENERATE_INVENTORY() }}
{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
- description: Update minion information
cmd: |
salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_grains &&
salt --hard-crash --state-output=mixed --state-verbose=False '*' mine.update &&
salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar && sleep 10
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Rerun openssh after env model is generated
cmd: |
salt-call state.sls openssh
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Execute linux.network.host one more time after salt.minion to apply dynamically registered hosts on the cluster nodes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux.network.host
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 10}
skip_fail: false
{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
########################################
# Spin up Control Plane VMs on KVM nodes
########################################
- description: Execute 'libvirt' states to create necessary libvirt networks
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls libvirt
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 10}
skip_fail: false
- description: Create VMs for control plane
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls salt.control
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 3, delay: 10}
skip_fail: false
- description: '*Workaround*: wait until all control-plane VM names appear in salt-key (instead of a fixed sleep)'
cmd: |
salt-key -l acc | sort > /tmp/current_keys.txt &&
salt 'kvm*' cmd.run 'virsh list --name' | grep -v 'kvm' | sort | xargs -I {} fgrep {} /tmp/current_keys.txt
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 20, delay: 30}
skip_fail: false
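# The xargs/fgrep pipeline exits non-zero until every VM name reported by
# 'virsh list --name' is found among the accepted salt keys, so the step is
# retried (up to 20 times) until all control-plane minions have registered.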
#########################################
# Configure all running salt minion nodes
#########################################
- description: Hack resolv.conf on VCP nodes to allow access to internal services
cmd: |
salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
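# Assumption: 172.18.208.44 is the lab-internal DNS server reachable from the
# control network; the VCP nodes need it to resolve the internal mirrors.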
- description: Refresh pillars on all minions
cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
- description: Sync all salt resources
cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
- description: Show reclass-salt --top for generated nodes
cmd: reclass-salt --top -u /srv/salt/reclass/nodes/_generated/
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
{{ SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG() }}
{{ SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES() }}
- description: "Lab04 workaround: Give each node root acces with key from cfg01"
cmd: |
set -e;
set -x;
key=$(ssh-keygen -y -f /root/.ssh/id_rsa);
salt '*' cmd.run "echo $key >> /root/.ssh/authorized_keys";
salt '*' cmd.run "service sshd restart"
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: true
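# Note: the public key derived from cfg01's /root/.ssh/id_rsa is appended on
# every node, so later steps can ssh from cfg01 as root without a password.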
- description: "Lab04 workaround: Control network access from cfg01 node using sshuttle via kvm01"
cmd: |
set -e;
set -x;
KVM01_DEPLOY_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:infra_kvm_node01_deploy_address);
apt-get install -y sshuttle;
sshuttle -r ${KVM01_DEPLOY_ADDRESS} 10.167.8.0/24 -D >/dev/null;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: true
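# sshuttle tunnels the 10.167.8.0/24 control network through kvm01 (-D runs
# it as a daemon), so cfg01 can reach the VCP nodes on their control addresses.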