{% from 'cookied-bm-contrail40/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% from 'cookied-bm-contrail40/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'cookied-bm-contrail40/underlay.yaml' import DOMAIN_NAME with context %}

{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
# See shared-salt.yaml for the other salt model repository parameters

# Name of the context file (without the fixed .yaml extension) used to render the Environment model
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-cookied-bm-contrail40') %}
# Paths to the context files used to render the Cluster and Environment models
{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-contrail.yaml' %}
{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml'] %}
{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}


{% import 'shared-salt.yaml' as SHARED with context %}

{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}

{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}

{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}

{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "auditd" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja" "jenkins" "fluentd"') }}

{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}

{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}

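# The generated infra/config.yml defines the compute nodes through the dynamic
# "rack01" node generator; replace it with the openstack_compute_multi
# definition from the system reclass model so the computes are classified correctly.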
- description: "Workaround for rack01 compute generator"
  cmd: |
    set -e;
    # Remove rack01 key
    . /root/venv-reclass-tools/bin/activate;
    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
    # Add openstack_compute_node definition from system
    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml --merge;
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: false

- description: "Workaround for changing image to proposed"
  cmd: |
    set -e;
    # Point salt_control_xenial_image at the VCP image built for the selected repository suite
    . /root/venv-reclass-tools/bin/activate;
    reclass-tools add-key parameters._param.salt_control_xenial_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: false

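# cinder-volume is not expected on the CTL nodes in this lab: strip the
# system.cinder.volume.* class includes from openstack/control.yml.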
- description: Temporary workaround for removing cinder-volume from CTL nodes
  cmd: |
    sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
    sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: true

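# The environment templates reference the KVM bridges as br_ctl/br_mgm, while
# the generated infra/kvm.yml uses br-ctl/br-mgm; rename them to match.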
- description: Temporary workaround for correct bridge names according to environment templates
  cmd: |
    sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
    sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: false

{{ SHARED.MACRO_GENERATE_INVENTORY() }}

{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}

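# saltutil.sync_grains pushes custom grain modules to the minions, mine.update
# republishes the mine data consumed by other nodes' pillars, and
# saltutil.refresh_pillar re-renders the pillar data on every minion.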
- description: Update minion information
  cmd: |
    salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_grains &&
    salt --hard-crash --state-output=mixed --state-verbose=False '*' mine.update &&
    salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar && sleep 10
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: false

- description: Rerun openssh after env model is generated
  cmd: |
    salt-call state.sls openssh
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: false

- description: Execute linux.network.host one more time after salt.minion to apply dynamically registered hosts on the cluster nodes
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux.network.host
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 2, delay: 10}
  skip_fail: false

{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}

########################################
# Spin up Control Plane VMs on KVM nodes
########################################

- description: Execute 'libvirt' states to create necessary libvirt networks
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls libvirt
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 2, delay: 10}
  skip_fail: false

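# salt.control reads the VCP layout from the cluster model and defines and
# boots the control plane VMs on each KVM node.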
- description: Create VMs for control plane
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls salt.control
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 3, delay: 10}
  skip_fail: false

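# Every VM name reported by virsh on the kvm nodes must appear in the list of
# accepted salt keys; fgrep exits non-zero for any missing name, so the step
# keeps retrying (up to 20 times, 30s apart) until every VM has registered.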
- description: '*Workaround*: wait until the control-plane VMs appear in salt-key (instead of a sleep)'
  cmd: |
    salt-key -l acc | sort > /tmp/current_keys.txt &&
    salt 'kvm*' cmd.run 'virsh list --name' | grep -v 'kvm' | sort | xargs -I {} fgrep {} /tmp/current_keys.txt
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 20, delay: 30}
  skip_fail: false

#########################################
# Configure all running salt minion nodes
#########################################

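# Point every node except cfg01 at the lab DNS server (172.18.208.44) so the
# internal services resolve.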
- description: Hack resolv.conf on VCP nodes for internal services access
  cmd: |
    salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false

- description: Refresh pillars on all minions
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false

- description: Sync all salt resources
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false

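# reclass-salt --top renders the top-level classification of every generated
# node definition, as a sanity check of the reclass inventory.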
- description: Show reclass-salt --top for generated nodes
  cmd: reclass-salt --top -u /srv/salt/reclass/nodes/_generated/
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false

{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}

{{ SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG() }}

{{ SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES() }}

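# ssh-keygen -y derives the public key from cfg01's private key; appending it
# to authorized_keys on every node lets cfg01 log in as root everywhere.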
- description: "Lab04 workaround: Give each node root access with the key from cfg01"
  cmd: |
    set -e;
    set -x;
    key=$(ssh-keygen -y -f /root/.ssh/id_rsa);
    salt '*' cmd.run "echo $key >> /root/.ssh/authorized_keys";
    salt '*' cmd.run "service sshd restart"
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: true

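# sshuttle builds a transparent proxy over SSH through kvm01, giving cfg01
# access to the 10.167.8.0/24 control network; -D daemonizes the tunnel.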
- description: "Lab04 workaround: Control network access from cfg01 node using sshuttle via kvm01"
  cmd: |
    set -e;
    set -x;
    KVM01_DEPLOY_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:infra_kvm_node01_deploy_address);
    apt-get install -y sshuttle;
    sshuttle -r ${KVM01_DEPLOY_ADDRESS} 10.167.8.0/24 -D >/dev/null;
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: true