{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'cookied-bm-contrail40-nfv/underlay.yaml' import DOMAIN_NAME with context %}

{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml

# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-cookied-bm-contrail40-nfv') %}
# Path to the context files used to render Cluster and Environment models
{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-contrail.yaml' %}
{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml'] %}
# VLAN ids passed to the cookiecutter model below; overridable from the environment
{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}


{% import 'shared-salt.yaml' as SHARED with context %}
 | 18 |  | 
# Install the salt master, then render the Cluster (cookiecutter) and
# Environment reclass models from the context files declared above.
{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}

{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}

{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}

# Salt formula packages to install on the master for the reclass model
{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "auditd" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja" "jenkins" "fluentd" "logrotate"') }}

{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}

{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
 | 30 |  | 
# Replace the auto-generated rack01 compute definition in infra/config.yml
# with the explicit multi-compute storage class from the system model.
- description: "Workaround for rack01 compute generator"
  cmd: |
    set -e;
    # Remove rack01 key
    . /root/venv-reclass-tools/bin/activate;
    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
    # Add openstack_compute_node definition from system
    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml --merge;
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: false
 | 42 |  | 
 | 43 | - description: "WR for changing image to proposed" | 
 | 44 |   cmd: | | 
 | 45 |     set -e; | 
 | 46 |     # Add message_queu host for opencontrail | 
 | 47 |     . /root/venv-reclass-tools/bin/activate; | 
 | 48 |     reclass-tools add-key parameters._param.salt_control_xenial_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml; | 
 | 49 |   node_name: {{ HOSTNAME_CFG01 }} | 
 | 50 |   retry: {count: 1, delay: 10} | 
 | 51 |   skip_fail: false | 
 | 52 |  | 
# Strip the cinder-volume service classes from the controller definition;
# best-effort (skip_fail), since the classes may be absent in some models.
- description: Temporary workaround for removing cinder-volume from CTL nodes
  cmd: |
    sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
    sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: true
 | 60 |  | 
 | 61 | - description: Temporary WR for correct bridge name according to envoronment templates | 
 | 62 |   cmd: | | 
 | 63 |     sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml; | 
 | 64 |     sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml; | 
 | 65 |   node_name: {{ HOSTNAME_CFG01 }} | 
 | 66 |   retry: {count: 1, delay: 10} | 
 | 67 |   skip_fail: false | 
 | 68 |  | 
# Generate the node inventory and apply shared networking workarounds.
{{ SHARED.MACRO_GENERATE_INVENTORY() }}

{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
 | 72 |  | 
# Refresh grains, mine data and pillars on all minions so they pick up
# the freshly generated models.
- description: Update minion information
  cmd: |
    salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_grains &&
    salt --hard-crash --state-output=mixed --state-verbose=False '*' mine.update &&
    salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar && sleep 10
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: false
 | 81 |  | 
# Re-apply the openssh state on the master now that the environment model exists.
- description: Rerun openssh after env model is generated
  cmd: |
    salt-call state.sls openssh
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: false
 | 88 |  | 
# Re-set the vrouter DPDK PCI address on both compute node files so the
# value ends up quoted correctly in the generated reclass YAML.
- description: "WR for dpdk pci to be in correct quotes"
  cmd: |
    set -e;
    . /root/venv-reclass-tools/bin/activate;
    reclass-tools add-key parameters._param.compute_vrouter_dpdk_pci '0000:05:00.0' /srv/salt/reclass/nodes/_generated/cmp001.{{ DOMAIN_NAME }}.yml;
    reclass-tools add-key parameters._param.compute_vrouter_dpdk_pci '0000:05:00.0' /srv/salt/reclass/nodes/_generated/cmp002.{{ DOMAIN_NAME }}.yml;
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: false
 | 98 |  | 
- description: Execute linux.network.host one more time after salt.minion to apply dynamically registered hosts on the cluster nodes
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux.network.host
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 2, delay: 10}
  skip_fail: false
 | 104 |  | 
{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}

########################################
# Spin up Control Plane VMs on KVM nodes
########################################

- description: Execute 'libvirt' states to create necessary libvirt networks
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls libvirt
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 2, delay: 10}
  skip_fail: false
 | 116 |  | 
# salt.control defines and boots the VCP virtual machines on the KVM hosts.
- description: Create VMs for control plane
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls salt.control
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 3, delay: 10}
  skip_fail: false
 | 122 |  | 
# Poll (up to 20 x 30s) until every VM reported by 'virsh list' on the KVM
# hosts has an accepted salt key; fgrep fails (non-zero) while any is missing,
# which triggers the retry.
- description: '*Workaround* for waiting the control-plane VMs in the salt-key (instead of sleep)'
  cmd: |
    salt-key -l acc| sort > /tmp/current_keys.txt &&
    salt 'kvm*' cmd.run 'virsh list --name' | grep -v 'kvm'|sort|xargs -I {} fgrep {} /tmp/current_keys.txt
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 20, delay: 30}
  skip_fail: false
 | 130 |  | 
 | 131 | ######################################### | 
 | 132 | # Configure all running salt minion nodes | 
 | 133 | ######################################### | 
 | 134 |  | 
 | 135 | - description: Hack resolv.conf on VCP nodes for internal services access | 
 | 136 |   cmd: | | 
 | 137 |     salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;" | 
 | 138 |   node_name: {{ HOSTNAME_CFG01 }} | 
 | 139 |   retry: {count: 1, delay: 5} | 
 | 140 |   skip_fail: false | 
 | 141 |  | 
- description: Refresh pillars on all minions
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false

- description: Sync all salt resources
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false
 | 153 |  | 
 | 154 | - description: Show  reclass-salt --top for generated nodes | 
 | 155 |   cmd: reclass-salt --top -u /srv/salt/reclass/nodes/_generated/ | 
 | 156 |   node_name: {{ HOSTNAME_CFG01 }} | 
 | 157 |   retry: {count: 1, delay: 5} | 
 | 158 |   skip_fail: false | 
 | 159 |  | 
# Bootstrap the newly created VCP minions, then verify salt versions.
{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}

{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}

{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
 | 165 |  | 
 | 166 | - description: "Lab04 workaround: Give each node root acces with key from cfg01" | 
 | 167 |   cmd: | | 
 | 168 |     set -e; | 
 | 169 |     set -x; | 
 | 170 |     key=$(ssh-keygen -y -f /root/.ssh/id_rsa); | 
 | 171 |     salt '*' cmd.run "echo $key >> /root/.ssh/authorized_keys"; | 
 | 172 |     salt '*' cmd.run "service sshd restart" | 
 | 173 |   node_name: {{ HOSTNAME_CFG01 }} | 
 | 174 |   retry: {count: 1, delay: 5} | 
 | 175 |   skip_fail: true | 
 | 176 |  | 
# Tunnel the 10.167.8.0/24 control network from cfg01 through kvm01 via
# sshuttle (daemonized with -D); best-effort, as with the key workaround above.
- description: "Lab04 workaround: Control network access from cfg01 node using sshuttle via kvm01"
  cmd: |
    set -e;
    set -x;
    KVM01_DEPLOY_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:infra_kvm_node01_deploy_address);
    apt-get install -y sshuttle;
    sshuttle -r ${KVM01_DEPLOY_ADDRESS} 10.167.8.0/24 -D >/dev/null;
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: true