{# Collection of common macros shared across different deployments #}
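
{# Sets up a local SMTP receiver on the Salt Master node: installs exim4,
   configures Maildir delivery for local users, creates the _alertsreceiver
   user, and re-points the StackLight alerting pillars at this server #}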
{%- macro MACRO_INSTALL_SMTP_ALERTS_RECEIVER() %}
- description: Install exim4 on {{ HOSTNAME_CFG01 }}
  cmd: apt -y install exim4-daemon-light
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 1}
  skip_fail: false

- description: Configure exim4 to deliver incoming mail to ~/Maildir for local users
  cmd: |
    sed -i "s/dc_other_hostnames=.*/dc_other_hostnames='*'/g" /etc/exim4/update-exim4.conf.conf
    sed -i "s/dc_local_interfaces=.*/dc_local_interfaces=''/g" /etc/exim4/update-exim4.conf.conf
    sed -i "s/dc_localdelivery=.*/dc_localdelivery='maildir_home'/g" /etc/exim4/update-exim4.conf.conf
    update-exim4.conf
    service exim4 restart
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 1}
  skip_fail: false

- description: Create a dedicated user to receive alerts
  cmd: useradd -m _alertsreceiver
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 1}
  skip_fail: false

- description: Modify SL pillars to send alerts to the SMTP server on {{ HOSTNAME_CFG01 }}
  cmd: |
    # Remove existing definitions for email alerting
    export REPLACE_DIR="/srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}"
    find ${REPLACE_DIR} -type f -exec sed -i '/pushkin_smtp_host:.*/d' {} +
    find ${REPLACE_DIR} -type f -exec sed -i '/pushkin_smtp_port:.*/d' {} +
    find ${REPLACE_DIR} -type f -exec sed -i '/pushkin_email_sender_password:.*/d' {} +
    find ${REPLACE_DIR} -type f -exec sed -i '/webhook_from:.*/d' {} +
    find ${REPLACE_DIR} -type f -exec sed -i '/webhook_recipients:.*/d' {} +
    salt-call reclass.cluster_meta_set name='pushkin_smtp_host' value='${_param:salt_master_host}' file_name=${REPLACE_DIR}/stacklight/init.yml
    salt-call reclass.cluster_meta_set name='pushkin_smtp_port' value='25' file_name=${REPLACE_DIR}/stacklight/init.yml
    salt-call reclass.cluster_meta_set name='pushkin_email_sender_password' value='""' file_name=${REPLACE_DIR}/stacklight/init.yml
    salt-call reclass.cluster_meta_set name='webhook_from' value='monitoring@{{ LAB_CONFIG_NAME }}.local' file_name=${REPLACE_DIR}/stacklight/init.yml
    salt-call reclass.cluster_meta_set name='webhook_recipients' value='alerts@integration.ci.local' file_name=${REPLACE_DIR}/stacklight/init.yml
    salt '*' saltutil.refresh_pillar
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 1}
  skip_fail: false
{%- endmacro %}
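
{# Installs keepalived: first on the *01* node so the VIP comes up, then on
   the rest of the keepalived cluster; both steps tolerate failures
   (skip_fail: true) #}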
{%- macro MACRO_INSTALL_KEEPALIVED() %}
- description: Install keepalived on ctl01
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@keepalived:cluster and *01*' state.sls keepalived
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: true

- description: Install keepalived
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@keepalived:cluster' state.sls keepalived
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: true
{%- endmacro %}
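
{# Installs and verifies the glusterfs server side (primary node first,
   then the rest), refreshes pillar data, and sets up the glusterfs clients #}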
{%- macro MACRO_INSTALL_GLUSTERFS() %}
- description: Install glusterfs
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@glusterfs:server' state.sls glusterfs.server.service && sleep 20
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false

- description: Set up glusterfs on the primary controller
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@glusterfs:server:role:primary' state.sls glusterfs.server.setup -b 1
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 5, delay: 5}
  skip_fail: false

- description: Set up glusterfs on the other nodes
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@glusterfs:server' state.sls glusterfs
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 5, delay: 5}
  skip_fail: false

- description: Check the gluster status
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@glusterfs:server' cmd.run 'gluster peer status && gluster volume status' -b 1
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false

- description: Refresh pillar data before setting up the glusterfs client
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@glusterfs:client' saltutil.refresh_pillar
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false

- description: Set up the glusterfs client
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@glusterfs:client' state.sls glusterfs.client
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 15}
  skip_fail: false
{%- endmacro %}
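
{# Installs RabbitMQ (the *01* node first, then all server nodes) and
   verifies the result with 'rabbitmqctl cluster_status' #}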
{%- macro MACRO_INSTALL_RABBITMQ() %}
- description: Install RabbitMQ on ctl01
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false

- description: Install RabbitMQ
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@rabbitmq:server' state.sls rabbitmq
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false

- description: Check the rabbitmq status
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false
{%- endmacro %}
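
{# Bootstraps Galera on the master node, joins the slaves one at a time
   (-b 1), and prints the wsrep cluster size and membership as a sanity
   check #}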
{%- macro MACRO_INSTALL_GALERA() %}
- description: Install Galera on the first server
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@galera:master' state.sls galera
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false

- description: Install Galera on the other servers
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@galera:slave' state.sls galera -b 1
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false

- description: Check the MySQL status
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: true
{%- endmacro %}
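
{# Installs haproxy on all proxy nodes, checks the service state, and
   restarts rsyslog so that haproxy's syslog output is picked up #}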
{%- macro MACRO_INSTALL_HAPROXY() %}
- description: Install haproxy
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@haproxy:proxy' state.sls haproxy
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false

- description: Check haproxy status
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@haproxy:proxy' service.status haproxy
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false

- description: Restart rsyslog
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@haproxy:proxy' service.restart rsyslog
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false
{%- endmacro %}
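
{# Refreshes minion certificates via the salt.minion.cert state on the
   nginx nodes, then applies the nginx state #}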
{%- macro MACRO_INSTALL_NGINX() %}
- description: Update certificate files on nginx nodes
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@nginx:server' state.sls salt.minion.cert
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 3, delay: 15}
  skip_fail: false

- description: Install nginx server
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@nginx:server' state.sls nginx
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false
{%- endmacro %}
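
{# Installs memcached on every node that carries the memcached:server
   pillar #}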
{%- macro MACRO_INSTALL_MEMCACHED() %}
- description: Install memcached on all controllers
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@memcached:server' state.sls memcached
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false
{%- endmacro %}
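
{# Resolves the OpenStack control VIP from pillar data and verifies that it
   is actually assigned on one of the keepalived cluster nodes #}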
{%- macro MACRO_CHECK_VIP() %}
- description: Check the VIP
  cmd: |
    OPENSTACK_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:openstack_control_address);
    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 3, delay: 10}
  skip_fail: false
{%- endmacro %}
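
{# A minimal usage sketch, assuming this file is saved as 'shared-core.yaml'
   and that the importing deployment template provides the same context
   variables (HOSTNAME_CFG01, LAB_CONFIG_NAME); the file name here is
   hypothetical:

   {% from 'shared-core.yaml' import MACRO_INSTALL_KEEPALIVED with context %}
   {% from 'shared-core.yaml' import MACRO_CHECK_VIP with context %}

   {{ MACRO_INSTALL_KEEPALIVED() }}
   {{ MACRO_CHECK_VIP() }}
#}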