{# Collection of common macros shared across different deployments #}
{% set SALT_MODELS_BRANCH = os_env('SALT_MODELS_BRANCH','master') %}
{% set JENKINS_PIPELINE_BRANCH = os_env('JENKINS_PIPELINE_BRANCH','') %}
{% set SALT_MODELS_COMMIT = os_env('SALT_MODELS_COMMIT','master') %}
{# Reference to a patch that should be applied to the model if required, for example: export SALT_MODELS_REF_CHANGE=refs/changes/19/7219/12 #}
{% set SALT_MODELS_REF_CHANGE = os_env('SALT_MODELS_REF_CHANGE', '') %}
{# Pin to a specified commit in salt-models/reclass-system #}
{% set SALT_MODELS_SYSTEM_REPOSITORY = os_env('SALT_MODELS_SYSTEM_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/reclass-system') %}
{% set SALT_MODELS_SYSTEM_COMMIT = os_env('SALT_MODELS_SYSTEM_COMMIT','') %}
{% set SALT_MODELS_SYSTEM_REF_CHANGE = os_env('SALT_MODELS_SYSTEM_REF_CHANGE','') %}
{% set SALT_MODELS_SYSTEM_TAG = os_env('SALT_MODELS_SYSTEM_TAG','') %}
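{# Example (hypothetical values): export SALT_MODELS_SYSTEM_COMMIT=0123abcd or export SALT_MODELS_SYSTEM_TAG=2019.2.0 #}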
{% set COOKIECUTTER_TEMPLATES_REPOSITORY_USER = os_env('COOKIECUTTER_TEMPLATES_REPOSITORY_USER','mcp-gerrit') %}
{% set COOKIECUTTER_TEMPLATES_REPOSITORY_KEY_PATH = os_env('COOKIECUTTER_TEMPLATES_REPOSITORY_KEY_PATH','') %}
{% set COOKIECUTTER_TEMPLATES_REPOSITORY = os_env('COOKIECUTTER_TEMPLATES_REPOSITORY','ssh://' + COOKIECUTTER_TEMPLATES_REPOSITORY_USER +'@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates') %}
{% set COOKIECUTTER_REF_CHANGE = os_env('COOKIECUTTER_REF_CHANGE','') %}
{% set COOKIECUTTER_TAG = os_env('COOKIECUTTER_TAG','') %}
{% set COOKIECUTTER_TEMPLATE_COMMIT = os_env('COOKIECUTTER_TEMPLATE_COMMIT','') %}
{% set ENVIRONMENT_TEMPLATE_COMMIT = os_env('ENVIRONMENT_TEMPLATE_COMMIT','') %}
{% set ENVIRONMENT_TEMPLATE_REF_CHANGE = os_env('ENVIRONMENT_TEMPLATE_REF_CHANGE','') %}
# Currently we support two Salt versions, which can be selected via the variable below
{% set SALT_VERSION = os_env('SALT_VERSION','2017.7') %}
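{# e.g. export SALT_VERSION=2016.3 to use the other supported version instead of the default 2017.7 #}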
{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
{% set UPDATE_REPO_CUSTOM_TAG = os_env('UPDATE_REPO_CUSTOM_TAG', '') %}
{% set UPDATE_VERSION = os_env('UPDATE_VERSION', 'proposed') %}
{# set FORMULA_REPOSITORY = os_env('FORMULA_REPOSITORY', 'deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME} ' + REPOSITORY_SUITE + ' salt extra') #}
{% set FORMULA_REPOSITORY = os_env('FORMULA_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/salt-formulas/${DISTRIB_CODENAME} ${DISTRIB_CODENAME} main") %}
{% set UPDATE_FORMULA_REPOSITORY = os_env('UPDATE_FORMULA_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/update/" + UPDATE_VERSION + "/salt-formulas/${DISTRIB_CODENAME} ${DISTRIB_CODENAME} main") %}
{# set FORMULA_GPG = os_env('FORMULA_GPG', 'http://apt.mirantis.com/public.gpg') #}
{% set FORMULA_GPG = os_env('FORMULA_GPG', "http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/salt-formulas/xenial/archive-salt-formulas.key") %}
{# set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME}/salt/2016.3 " + REPOSITORY_SUITE + " main") #}
# Note: the repository has changed; the new one is defined below
{% set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/saltstack-" + SALT_VERSION + "/${DISTRIB_CODENAME} ${DISTRIB_CODENAME} main") %}
{# set SALT_GPG = os_env('SALT_GPG', 'http://apt.mirantis.com/public.gpg') #}
{% set SALT_GPG = os_env('SALT_GPG', "http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/salt-formulas/xenial/archive-salt-formulas.key") %}
{% set UBUNTU_REPOSITORY = os_env('UBUNTU_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/ubuntu/ ${DISTRIB_CODENAME} main restricted universe") %}
{% set UBUNTU_UPDATES_REPOSITORY = os_env('UBUNTU_UPDATES_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/ubuntu/ ${DISTRIB_CODENAME}-updates main restricted universe") %}
{% set UBUNTU_SECURITY_REPOSITORY = os_env('UBUNTU_SECURITY_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/ubuntu/ ${DISTRIB_CODENAME}-security main restricted universe") %}
{% set UBUNTU_KEY_SERVER = os_env('UBUNTU_KEY_SERVER', 'keyserver.ubuntu.com') %}
{% set UBUNTU_KEY_ID = os_env('UBUNTU_KEY_ID', '0E08A149DE57BFBE') %}
{# Address pools for reclass cluster model are taken in the following order:
# 1. environment variables,
# 2. config.underlay.address_pools based on fuel-devops address pools
# (see generated '.ini' file after underlay is created),
# 3. defaults #}
{% set address_pools = config.underlay.address_pools %}
{% set IPV4_NET_ADMIN = os_env('IPV4_NET_ADMIN', address_pools.get('admin-pool01', '192.168.10.0/24')) %}
{% set IPV4_NET_CONTROL = os_env('IPV4_NET_CONTROL', address_pools.get('private-pool01', '172.16.10.0/24')) %}
{% set IPV4_NET_TENANT = os_env('IPV4_NET_TENANT', address_pools.get('tenant-pool01', '10.1.0.0/24')) %}
{% set IPV4_NET_EXTERNAL = os_env('IPV4_NET_EXTERNAL', address_pools.get('external-pool01', '10.16.0.0/24')) %}
{% set IPV4_NET_ADMIN_PREFIX = '.'.join(IPV4_NET_ADMIN.split('.')[0:3]) %}
{% set IPV4_NET_CONTROL_PREFIX = '.'.join(IPV4_NET_CONTROL.split('.')[0:3]) %}
{% set IPV4_NET_TENANT_PREFIX = '.'.join(IPV4_NET_TENANT.split('.')[0:3]) %}
{% set IPV4_NET_EXTERNAL_PREFIX = '.'.join(IPV4_NET_EXTERNAL.split('.')[0:3]) %}
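{# For example, the default IPV4_NET_ADMIN='192.168.10.0/24' yields IPV4_NET_ADMIN_PREFIX='192.168.10' #}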
{# Format for formula replacement:
# space separated string:
# export SALT_FORMULAS_REFS='apache:refs/changes/xxxx kubernetes:refs/changes/xxxx' #}
{% set SALT_FORMULAS_REFS = os_env('SALT_FORMULAS_REFS', '') %}
{% set TEMPEST_PATTERN = os_env('TEMPEST_PATTERN', 'tempest') %}
{% set EXCLUDE_TEST_ARGS = os_env('EXCLUDE_TEST_ARGS', '') %}
{% set SALT_FORMULAS_REPO = os_env('SALT_FORMULAS_REPO', 'https://gerrit.mcp.mirantis.com/salt-formulas') %}
# Needed for using different models in different templates
{% set CLUSTER_NAME = os_env('CLUSTER_NAME', LAB_CONFIG_NAME) %}
# Ubuntu post-EOL updates provided by CloudLinux
{% set UPDATES_MIRANTIS_VERSION = os_env('UPDATES_MIRANTIS_VERSION', '') %}
{%- macro MACRO_INSTALL_PACKAGES_ON_NODES(NODE_NAME) %}
{#####################################################}
- description: 'Configure key on nodes and install packages'
cmd: |
rm -rf trusted* ;
rm -rf /etc/apt/sources.list ;
. /etc/lsb-release; # Get DISTRIB_CODENAME variable
echo "{{ FORMULA_REPOSITORY }}" > /etc/apt/sources.list.d/mcp_salt.list;
wget -O - "{{ FORMULA_GPG }}" | apt-key add -;
echo "{{ SALT_REPOSITORY }}" > /etc/apt/sources.list.d/mcp_saltstack.list;
apt-key adv --keyserver "{{ UBUNTU_KEY_SERVER }}" --recv-keys "{{ UBUNTU_KEY_ID }}"
echo "{{ UBUNTU_REPOSITORY }}" > /etc/apt/sources.list.d/ubuntu.list
echo "{{ UBUNTU_UPDATES_REPOSITORY }}" > /etc/apt/sources.list.d/ubuntu_updates.list
echo "{{ UBUNTU_SECURITY_REPOSITORY }}" > /etc/apt/sources.list.d/ubuntu_security.list
eatmydata apt-get clean;
apt-get update;
sync;
node_name: {{ NODE_NAME }}
retry: {count: 1, delay: 5}
skip_fail: false
{%- endmacro %}
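{# Usage sketch (hypothetical node name): {{ MACRO_INSTALL_PACKAGES_ON_NODES('ctl01.' + DOMAIN_NAME) }} #}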
{%- macro MACRO_INSTALL_FORMULAS_FROM_UPDATE() %}
{#####################################################}
- description: 'Configure the update formulas repository on cfg01'
cmd: |
rm -rf trusted* ;
rm -rf /etc/apt/sources.list ;
. /etc/lsb-release; # Get DISTRIB_CODENAME variable
echo "{{ UPDATE_FORMULA_REPOSITORY }}" > /etc/apt/sources.list.d/mcp_update_salt.list;
wget -O - "{{ FORMULA_GPG }}" | apt-key add -;
eatmydata apt-get clean;
apt-get update;
sync;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
{%- endmacro %}
{%- macro MACRO_INSTALL_SALT_MASTER() %}
{######################################}
- description: Installing salt master on cfg01
cmd: |
which wget >/dev/null || (apt-get update; apt-get install -y wget);
# Configure ubuntu and salt repositories
. /etc/lsb-release; # Get DISTRIB_CODENAME variable
echo "{{ UBUNTU_REPOSITORY }}" > /etc/apt/sources.list
echo "{{ UBUNTU_UPDATES_REPOSITORY }}" >> /etc/apt/sources.list
echo "{{ UBUNTU_SECURITY_REPOSITORY }}" >> /etc/apt/sources.list
echo "{{ FORMULA_REPOSITORY }}" > /etc/apt/sources.list.d/mcp_salt.list;
wget -O - {{ FORMULA_GPG }} | apt-key add -;
echo "{{ SALT_REPOSITORY }}" > /etc/apt/sources.list.d/mcp_saltstack.list;
apt-key adv --keyserver "{{ UBUNTU_KEY_SERVER }}" --recv-keys "{{ UBUNTU_KEY_ID }}"
apt-get clean
apt-get update
# Install salt-master and reclass
eatmydata apt-get install -y --allow-unauthenticated reclass salt-master
# Install common packages
eatmydata apt-get install -y python-pip git curl at tmux byobu iputils-ping traceroute htop tree mc
sync;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 1}
skip_fail: false
- description: Remove any existing minion keys
cmd: salt-key -y -D || true
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 1}
skip_fail: false
- description: Configure salt-master on cfg01
cmd: |
cat << 'EOF' > /etc/salt/master.d/master.conf
file_roots:
base:
- /usr/share/salt-formulas/env
pillar_opts: False
open_mode: True
reclass: &reclass
storage_type: yaml_fs
inventory_base_uri: /srv/salt/reclass
ext_pillar:
- reclass: *reclass
master_tops:
reclass: *reclass
EOF
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 1}
skip_fail: false
- description: Configure GIT settings and certificates
cmd: |
set -e;
#touch /root/.git_trusted_certs.pem;
#for server in github.com; do \
# openssl s_client -showcerts -connect $server:443 </dev/null \
# | openssl x509 -outform PEM \
# >> /root/.git_trusted_certs.pem;
#done;
#HOME=/root git config --global http.sslCAInfo /root/.git_trusted_certs.pem;
HOME=/root git config --global user.email "mcp-integration-qa@example.com";
HOME=/root git config --global user.name "MCP Integration QA";
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 1}
skip_fail: false
{%- endmacro %}
{%- macro MACRO_CONFIG_DAY01_SALT_MASTER() %}
{######################################}
- description: Remove /etc/update-motd.d/52-info
cmd: rm -vf /etc/update-motd.d/52-info
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Set up static interface config
cmd: |
kill $(pidof /sbin/dhclient) || /bin/true
cat << 'EOF' > /etc/network/interfaces
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
# The primary network interface
auto ens3
iface ens3 inet static
address {{ IPV4_NET_ADMIN_PREFIX }}.90
netmask 255.255.255.0
gateway {{ IPV4_NET_ADMIN_PREFIX }}.1
EOF
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install common packages on cfg01
cmd: eatmydata apt-get update && apt-get install -y python-pip git curl at tmux byobu iputils-ping traceroute htop tree wget jq ntpdate
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 1}
skip_fail: false
- description: Remove any existing minion keys
cmd: salt-key -y -D || true
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 1}
skip_fail: false
- description: Configure GIT settings and certificates
cmd: |
set -e;
#touch /root/.git_trusted_certs.pem;
#for server in github.com; do \
# openssl s_client -showcerts -connect $server:443 </dev/null \
# | openssl x509 -outform PEM \
# >> /root/.git_trusted_certs.pem;
#done;
#HOME=/root git config --global http.sslCAInfo /root/.git_trusted_certs.pem;
HOME=/root git config --global user.email "mcp-integration-qa@example.com";
HOME=/root git config --global user.name "MCP Integration QA";
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 1}
skip_fail: false
{%- endmacro %}
{%- macro MACRO_CLONE_RECLASS_MODELS(IS_CONTRAIL_LAB=false, WITH_MAAS=false) %}
{############################################################}
{# Creates a 'cluster' model from cookiecutter-templates and 'environment' model from uploaded template #}
- description: Clone reclass models with submodules
cmd: |
set -e;
set -x;
#ssh-keyscan -H github.com >> ~/.ssh/known_hosts;
# In the day01 image the /srv/salt/reclass directory already exists, so clone the model into the reclass.tmp directory
export GIT_SSL_NO_VERIFY=true; git clone -b {{ SALT_MODELS_BRANCH }} {{ SALT_MODELS_REPOSITORY }} /srv/salt/reclass.tmp;
rsync -a /srv/salt/reclass.tmp/ /srv/salt/reclass;
pushd /srv/salt/reclass;
git config submodule."classes/system".url "{{ SALT_MODELS_SYSTEM_REPOSITORY }}";
git submodule update --init --recursive;
{%- if SALT_MODELS_REF_CHANGE != '' %}
{%- for item in SALT_MODELS_REF_CHANGE.split(" ") %}
git fetch {{ SALT_MODELS_REPOSITORY }} {{ item }} && git cherry-pick FETCH_HEAD;
{%- endfor %}
{%- elif SALT_MODELS_COMMIT != 'master' %}
git checkout {{ SALT_MODELS_COMMIT }};
{%- endif %}
{%- if SALT_MODELS_SYSTEM_REF_CHANGE != '' %}
pushd classes/system/ && \
{%- for item in SALT_MODELS_SYSTEM_REF_CHANGE.split(" ") %}
git fetch {{ SALT_MODELS_SYSTEM_REPOSITORY }} {{ item }} && git cherry-pick FETCH_HEAD;
{%- endfor %}
popd;
{%- elif SALT_MODELS_SYSTEM_COMMIT != '' %}
pushd classes/system/;
git checkout {{ SALT_MODELS_SYSTEM_COMMIT }};
popd;
{%- elif SALT_MODELS_SYSTEM_TAG != '' %}
pushd classes/system/;
git fetch --all --tags --prune
git checkout tags/{{ SALT_MODELS_SYSTEM_TAG }};
popd;
{%- endif %}
popd;
mkdir -p /srv/salt/reclass/classes/service;
mkdir -p /srv/salt/reclass/nodes/_generated/;
# First replace the networks with intermediate keywords, to avoid collisions
# between already-replaced and not-yet-replaced networks.
# For example, if the generated IPV4_NET_ADMIN_PREFIX=10.16.0, there is a risk of replacing twice:
# 192.168.10 -> 10.16.0 (generated network for admin)
# 10.16.0 -> <external network>
# So replace the constant networks with keywords first, and then the keywords with the desired networks.
export REPLACE_DIRS="/srv/salt/reclass/classes/ /srv/salt/reclass/nodes/"
find ${REPLACE_DIRS} -type f -exec sed -i 's/192\.168\.10/==IPV4_NET_ADMIN_PREFIX==/g' {} +
find ${REPLACE_DIRS} -type f -exec sed -i 's/172\.16\.10/==IPV4_NET_CONTROL_PREFIX==/g' {} +
find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.1\.0/==IPV4_NET_TENANT_PREFIX==/g' {} +
find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.16\.0/==IPV4_NET_EXTERNAL_PREFIX==/g' {} +
find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.60\.0/==IPV4_NET_CONTROL_PREFIX==/g' {} +
find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.70\.0/==IPV4_NET_ADMIN_PREFIX==/g' {} +
find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_ADMIN_PREFIX==/{{ IPV4_NET_ADMIN_PREFIX }}/g' {} +
find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_CONTROL_PREFIX==/{{ IPV4_NET_CONTROL_PREFIX }}/g' {} +
find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_TENANT_PREFIX==/{{ IPV4_NET_TENANT_PREFIX }}/g' {} +
find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_EXTERNAL_PREFIX==/{{ IPV4_NET_EXTERNAL_PREFIX }}/g' {} +
find ${REPLACE_DIRS} -type f -exec sed -i 's/apt_mk_version:.*/apt_mk_version: {{ REPOSITORY_SUITE }}/g' {} +
find ${REPLACE_DIRS} -type f -exec sed -i 's/mcp_version:.*/mcp_version: {{ REPOSITORY_SUITE }}/g' {} +
{%- if IS_CONTRAIL_LAB %}
# vSRX images used in tcp-qa have the IP 172.16.10.90 hardcoded
find ${REPLACE_DIRS} -type f -exec sed -i 's/opencontrail_router01_address:.*/opencontrail_router01_address: 172.16.10.90/g' {} +
find ${REPLACE_DIRS} -type f -exec sed -i 's/infra_config_deploy_address: 1.*/infra_config_deploy_address: {{ IPV4_NET_ADMIN_PREFIX }}.15/g' {} +
{%- endif %}
# Override some context parameters
{%- set CLUSTER_PATH = '/srv/salt/reclass/classes/cluster/' + CLUSTER_NAME %}
find {{ CLUSTER_PATH }} -type f -exec sed -i 's/cluster_name: .*/cluster_name: {{ CLUSTER_NAME }}/g' {} +
find {{ CLUSTER_PATH }} -type f -exec sed -i 's/cluster_domain: .*/cluster_domain: {{ DOMAIN_NAME }}/g' {} +
# Create the cfg01 inventory file or use existing
export CFG01_INVENTORY_FILE="/srv/salt/reclass/nodes/_generated/cfg01.{{ DOMAIN_NAME }}.yml"
[ -f ${CFG01_INVENTORY_FILE} ] || cat << 'EOF' > ${CFG01_INVENTORY_FILE}
classes:
- cluster.{{ CLUSTER_NAME }}.infra.config
{%- if WITH_MAAS %}
- cluster.{{ CLUSTER_NAME }}.infra.maas
- cluster.{{ CLUSTER_NAME }}.infra.maas-machines
{%- endif %}
parameters:
_param:
linux_system_codename: xenial
reclass_data_revision: master
linux:
system:
name: cfg01
domain: {{ DOMAIN_NAME }}
reclass:
storage:
data_source:
engine: local
EOF
# Show the changes to the console
cd /srv/salt/reclass/; git diff
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 1}
skip_fail: false
{%- endmacro %}
{%- macro MACRO_UPLOAD_AND_IMPORT_GPG_ENCRYPTION_KEY() %}
{%- set KEY_FILE_NAME = 'encryption-key.asc' %}
- description: "Upload gpg encruption key"
upload:
local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
local_filename: {{ KEY_FILE_NAME }}
remote_path: /root/
node_name: {{ HOSTNAME_CFG01 }}
- description: Import encryption key
cmd: |
set -ex;
mkdir -p /etc/salt/gpgkeys
chmod 0700 /etc/salt/gpgkeys
export GNUPGHOME=/etc/salt/gpgkeys
gpg --import {{ KEY_FILE_NAME }}
gpg --list-keys
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 1}
skip_fail: false
- description: Copy the encryption key to the reclass folder
cmd: |
set -e;
set -x;
mkdir -p /srv/salt/reclass
cp /root/{{ KEY_FILE_NAME }} /srv/salt/reclass/
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 1}
skip_fail: false
{%- endmacro %}
{%- macro MACRO_GENERATE_COOKIECUTTER_MODEL(IS_CONTRAIL_LAB=false, CONTROL_VLAN=None, TENANT_VLAN=None, CLUSTER_PRODUCT_MODELS='') %}
{###################################################################}
{%- set CLUSTER_CONTEXT_PATH = '/root/' + CLUSTER_CONTEXT_NAME %}
- description: "[EXPERIMENTAL] Upload cookiecutter-templates context to cfg01.{{ DOMAIN_NAME }}"
upload:
local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
local_filename: {{ CLUSTER_CONTEXT_NAME }}
remote_path: /root/
node_name: {{ HOSTNAME_CFG01 }}
- description: "Show options enabled in the context file for model generation"
cmd: |
echo "===== Options enabled in the context for generation the model {{ LAB_CONFIG_NAME }} ====="
fgrep "True" {{ CLUSTER_CONTEXT_PATH }}
echo "===== Sources for model generation ====="
echo "# mcp_version: {{ REPOSITORY_SUITE }}"
echo "COOKIECUTTER_TEMPLATES_REPOSITORY={{ COOKIECUTTER_TEMPLATES_REPOSITORY }}"
{%- if CLUSTER_PRODUCT_MODELS != '' %}
echo "CLUSTER_PRODUCT_MODELS={{ CLUSTER_PRODUCT_MODELS }}"
{%- endif %}
{%- if COOKIECUTTER_REF_CHANGE != '' %}
echo "COOKIECUTTER_REF_CHANGE={{ COOKIECUTTER_REF_CHANGE }}"
{%- elif COOKIECUTTER_TEMPLATE_COMMIT != '' %}
echo "COOKIECUTTER_TEMPLATE_COMMIT={{ COOKIECUTTER_TEMPLATE_COMMIT }}"
{%- elif COOKIECUTTER_TAG != '' %}
echo "COOKIECUTTER_TAG={{ COOKIECUTTER_TAG }}"
{%- endif %}
echo "SALT_MODELS_SYSTEM_REPOSITORY={{ SALT_MODELS_SYSTEM_REPOSITORY }}"
{%- if SALT_MODELS_SYSTEM_REF_CHANGE != '' %}
echo "SALT_MODELS_SYSTEM_REF_CHANGE={{ SALT_MODELS_SYSTEM_REF_CHANGE }}"
{%- elif SALT_MODELS_SYSTEM_COMMIT != '' %}
echo "SALT_MODELS_SYSTEM_COMMIT={{ SALT_MODELS_SYSTEM_COMMIT }}"
{%- elif SALT_MODELS_SYSTEM_TAG != '' %}
echo "SALT_MODELS_SYSTEM_TAG={{ SALT_MODELS_SYSTEM_TAG }}"
{%- endif %}
{%- if JENKINS_PIPELINE_BRANCH != '' %}
echo "JENKINS_PIPELINE_BRANCH={{ JENKINS_PIPELINE_BRANCH }}"
{%- endif %}
{%- if UPDATES_MIRANTIS_VERSION != '' %}
echo "UPDATES_MIRANTIS_VERSION={{ UPDATES_MIRANTIS_VERSION }}"
{%- endif %}
echo "======================================="
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 1}
skip_fail: false
- description: "Upload {{ COOKIECUTTER_TEMPLATES_REPOSITORY_USER }} key"
upload:
local_path: {{ COOKIECUTTER_TEMPLATES_REPOSITORY_KEY_PATH | dirname }}/
local_filename: {{ COOKIECUTTER_TEMPLATES_REPOSITORY_KEY_PATH | basename }}
remote_path: /tmp/
node_name: {{ HOSTNAME_CFG01 }}
- description: Create cluster model from cookiecutter templates
cmd: |
set -e;
set -x;
sudo apt-get install virtualenv -y
sudo apt-get install python-setuptools -y
[[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
. /root/venv-reclass-tools/bin/activate;
pip install -U pip
pip install cookiecutter
chmod 0600 /tmp/{{ COOKIECUTTER_TEMPLATES_REPOSITORY_KEY_PATH | basename }}
eval $(ssh-agent)
ssh-add /tmp/{{ COOKIECUTTER_TEMPLATES_REPOSITORY_KEY_PATH | basename }}
git config --global user.name {{ HOSTNAME_CFG01 }}
git config --global user.email {{ HOSTNAME_CFG01 }}@example.com
export GIT_SSL_NO_VERIFY=true; git clone {{ COOKIECUTTER_TEMPLATES_REPOSITORY }} /root/cookiecutter-templates
{%- if COOKIECUTTER_REF_CHANGE != '' %}
pushd /root/cookiecutter-templates
git fetch {{ COOKIECUTTER_TEMPLATES_REPOSITORY }} {{ COOKIECUTTER_REF_CHANGE }} && git checkout FETCH_HEAD
popd
{%- elif COOKIECUTTER_TEMPLATE_COMMIT != '' %}
pushd /root/cookiecutter-templates
git checkout {{ COOKIECUTTER_TEMPLATE_COMMIT }}
popd
{%- elif COOKIECUTTER_TAG != '' %}
pushd /root/cookiecutter-templates
git fetch --all --tags --prune
git checkout tags/{{ COOKIECUTTER_TAG }}
popd
{%- endif %}
if [ -f /root/cookiecutter-templates/requirements.txt ]; then
pip install -r /root/cookiecutter-templates/requirements.txt
fi
mkdir -p /srv/salt/reclass/classes/cluster/
mkdir -p /srv/salt/reclass/classes/system/
mkdir -p /srv/salt/reclass/classes/service/
mkdir -p /srv/salt/reclass/nodes/_generated
# Override some context parameters
sed -i 's/cluster_name: .*/cluster_name: {{ CLUSTER_NAME }}/g' {{ CLUSTER_CONTEXT_PATH }}
sed -i 's/cluster_domain: .*/cluster_domain: {{ DOMAIN_NAME }}/g' {{ CLUSTER_CONTEXT_PATH }}
sed -i 's/mcp_version:.*/mcp_version: {{ REPOSITORY_SUITE }}/g' {{ CLUSTER_CONTEXT_PATH }}
{%- if JENKINS_PIPELINE_BRANCH != '' %}
# Escape / with \/
escaped_branch=$(echo {{ JENKINS_PIPELINE_BRANCH }} | sed s'/\//\\\//g')
sed -i "s/jenkins_pipelines_branch:.*/jenkins_pipelines_branch: '$escaped_branch'/g" {{ CLUSTER_CONTEXT_PATH }}
{%- endif %}
{%- if CONTROL_VLAN %}
sed -i 's/control_vlan: .*/control_vlan: {{ CONTROL_VLAN }}/g' {{ CLUSTER_CONTEXT_PATH }}
{%- endif %}
{%- if TENANT_VLAN %}
sed -i 's/tenant_vlan: .*/tenant_vlan: {{ TENANT_VLAN }}/g' {{ CLUSTER_CONTEXT_PATH }}
{%- endif %}
# First replace the networks with intermediate keywords, to avoid collisions
# between already-replaced and not-yet-replaced networks.
# For example, if the generated IPV4_NET_ADMIN_PREFIX=10.16.0, there is a risk of replacing twice:
# 192.168.10 -> 10.16.0 (generated network for admin)
# 10.16.0 -> <external network>
# So replace the constant networks with keywords first, and then the keywords with the desired networks.
export REPLACE_DIRS="{{ CLUSTER_CONTEXT_PATH }} /root/cookiecutter-templates"
find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.167\.5/==IPV4_NET_ADMIN_PREFIX==/g' {} +
find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.167\.4/==IPV4_NET_CONTROL_PREFIX==/g' {} +
find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.167\.6/==IPV4_NET_TENANT_PREFIX==/g' {} +
find ${REPLACE_DIRS} -type f -exec sed -i 's/172\.17\.16/==IPV4_NET_EXTERNAL_PREFIX==/g' {} +
find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_ADMIN_PREFIX==/{{ IPV4_NET_ADMIN_PREFIX }}/g' {} +
find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_CONTROL_PREFIX==/{{ IPV4_NET_CONTROL_PREFIX }}/g' {} +
find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_TENANT_PREFIX==/{{ IPV4_NET_TENANT_PREFIX }}/g' {} +
find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_EXTERNAL_PREFIX==/{{ IPV4_NET_EXTERNAL_PREFIX }}/g' {} +
{%- if UPDATES_MIRANTIS_VERSION != '' %}
sed -i "s/updates_mirantis_version:.*/updates_mirantis_version: '{{ UPDATES_MIRANTIS_VERSION }}'/g" {{ CLUSTER_CONTEXT_PATH }}
{%- endif %}
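{# If CLUSTER_PRODUCT_MODELS is empty, fall back to a shell command substitution that lists all product templates at runtime #}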
{% set items = CLUSTER_PRODUCT_MODELS or '$(ls /root/cookiecutter-templates/cluster_product/)' %}
mkdir -p /tmp/output /srv/salt/reclass/classes/cluster/{{ CLUSTER_NAME }}/
# Make the GPG encryption keys imported earlier available
export GNUPGHOME=/etc/salt/gpgkeys
for item in {{ items }}; do
[ "$item" = "stacklight2" ] && continue;
python /root/cookiecutter-templates/generate.py \
--template /root/cookiecutter-templates/cluster_product/$item \
--config-file {{ CLUSTER_CONTEXT_PATH }} \
--output-dir /tmp/output/$item/;
mv -v /tmp/output/$item/{{ CLUSTER_NAME }}/$item /srv/salt/reclass/classes/cluster/{{ CLUSTER_NAME }}/;
done
export GIT_SSL_NO_VERIFY=true; git clone {{ SALT_MODELS_SYSTEM_REPOSITORY }} /srv/salt/reclass/classes/system/
# Create the cfg01 inventory file or use existing
export CFG01_INVENTORY_FILE="/srv/salt/reclass/nodes/_generated/cfg01.{{ DOMAIN_NAME }}.yml"
[ -f ${CFG01_INVENTORY_FILE} ] || cat << 'EOF' > ${CFG01_INVENTORY_FILE}
classes:
- system.openssh.server.team.all
- cluster.{{ CLUSTER_NAME }}.infra.config
EOF
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 1}
skip_fail: false
- description: Modify generated model and reclass-system if necessary
cmd: |
set -e;
set -x;
{%- if SALT_MODELS_SYSTEM_COMMIT != '' %}
pushd /srv/salt/reclass/classes/system/
git checkout {{ SALT_MODELS_SYSTEM_COMMIT }};
popd;
{%- elif SALT_MODELS_SYSTEM_TAG != '' %}
pushd /srv/salt/reclass/classes/system/
git fetch --all --tags --prune
git checkout tags/{{ SALT_MODELS_SYSTEM_TAG }};
popd;
{%- endif %}
{%- if SALT_MODELS_SYSTEM_REF_CHANGE != '' %}
pushd /srv/salt/reclass/classes/system/ && \
{%- for item in SALT_MODELS_SYSTEM_REF_CHANGE.split(" ") %}
git fetch {{ SALT_MODELS_SYSTEM_REPOSITORY }} {{ item }} && git cherry-pick FETCH_HEAD;
{%- endfor %}
popd;
{%- endif %}
{%- if IS_CONTRAIL_LAB %}
export REPLACE_DIRS="/srv/salt/reclass/classes/ /srv/salt/reclass/nodes/"
# vSRX images used in tcp-qa have the IP 172.16.10.90 hardcoded
find ${REPLACE_DIRS} -type f -exec sed -i 's/opencontrail_router01_address:.*/opencontrail_router01_address: 172.16.10.90/g' {} +
find ${REPLACE_DIRS} -type f -exec sed -i 's/infra_config_deploy_address: 1.*/infra_config_deploy_address: {{ IPV4_NET_ADMIN_PREFIX }}.15/g' {} +
{%- endif %}
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 1}
skip_fail: false
- description: Restart salt-api
cmd: |
set -e;
systemctl restart salt-api
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 1}
skip_fail: false
{%- endmacro %}
{%- macro MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL(KNOWN_HOST_AUTOPOPULATION=true) %}
{########################################################}
- description: "[EXPERIMENTAL] Clone 'environment-template' repository to cfg01.{{ DOMAIN_NAME }}"
cmd: |
set -e;
mkdir -p /root/environment/;
export GIT_SSL_NO_VERIFY=true; git clone https://gerrit.mcp.mirantis.com/mcp/tcp-qa-environment-template /root/environment/environment_template
{%- if ENVIRONMENT_TEMPLATE_REF_CHANGE != '' %}
pushd /root/environment/environment_template &&
git fetch https://gerrit.mcp.mirantis.com/mcp/tcp-qa-environment-template {{ ENVIRONMENT_TEMPLATE_REF_CHANGE }} &&
git checkout FETCH_HEAD &&
popd
{%- elif ENVIRONMENT_TEMPLATE_COMMIT != '' %}
pushd /root/environment/environment_template
git checkout {{ ENVIRONMENT_TEMPLATE_COMMIT }}
popd
{%- endif %}
node_name: {{ HOSTNAME_CFG01 }}
skip_fail: false
{%- for ENVIRONMENT_CONTEXT_NAME in ENVIRONMENT_CONTEXT_NAMES %}
- description: "[EXPERIMENTAL] Upload environment inventory to cfg01.{{ DOMAIN_NAME }}"
upload:
local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
local_filename: {{ ENVIRONMENT_CONTEXT_NAME }}
remote_path: /root/environment/
node_name: {{ HOSTNAME_CFG01 }}
{%- endfor %}
- description: "Replace template addresses to actual environment addresses"
cmd: |
set -ex;
# First replace the networks with intermediate keywords, to avoid collisions
# between already-replaced and not-yet-replaced networks.
# For example, if the generated IPV4_NET_ADMIN_PREFIX=10.16.0, there is a risk of replacing twice:
# 192.168.10 -> 10.16.0 (generated network for admin)
# 10.16.0 -> <external network>
# So replace the constant networks with keywords first, and then the keywords with the desired networks.
export REPLACE_DIRS="/root/environment/"
find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.167\.5/==IPV4_NET_ADMIN_PREFIX==/g' {} +
find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.167\.4/==IPV4_NET_CONTROL_PREFIX==/g' {} +
find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.167\.6/==IPV4_NET_TENANT_PREFIX==/g' {} +
find ${REPLACE_DIRS} -type f -exec sed -i 's/172\.17\.16/==IPV4_NET_EXTERNAL_PREFIX==/g' {} +
find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_ADMIN_PREFIX==/{{ IPV4_NET_ADMIN_PREFIX }}/g' {} +
find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_CONTROL_PREFIX==/{{ IPV4_NET_CONTROL_PREFIX }}/g' {} +
find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_TENANT_PREFIX==/{{ IPV4_NET_TENANT_PREFIX }}/g' {} +
find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_EXTERNAL_PREFIX==/{{ IPV4_NET_EXTERNAL_PREFIX }}/g' {} +
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
- description: "[EXPERIMENTAL] Remove linux.network.interface object from the cluster/system models and use fixed 'environment' model instead"
cmd: |
set -e;
set -x;
apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
[[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
. /root/venv-reclass-tools/bin/activate;
pip install git+https://gerrit.mcp.mirantis.com/mcp/tcp-qa-reclass-tools;
reclass-tools del-key parameters.linux.network.interface /srv/salt/reclass/classes/cluster/;
reclass-tools del-key parameters.linux.network.interface /srv/salt/reclass/classes/system/;
reclass-tools del-key parameters.linux.network.interface /usr/share/salt-formulas/reclass/;
if ! reclass-tools get-key 'classes' /srv/salt/reclass/nodes/_generated/cfg01.{{ DOMAIN_NAME }}.yml | grep -q "environment.{{ ENVIRONMENT_MODEL_INVENTORY_NAME }}$"; then
reclass-tools add-key 'classes' 'environment.{{ ENVIRONMENT_MODEL_INVENTORY_NAME }}.reclass_datasource_local' /srv/salt/reclass/nodes/_generated/cfg01.{{ DOMAIN_NAME }}.yml --merge ;
reclass-tools add-key 'classes' 'environment.{{ ENVIRONMENT_MODEL_INVENTORY_NAME }}' /srv/salt/reclass/nodes/_generated/cfg01.{{ DOMAIN_NAME }}.yml --merge ;
fi;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
- description: "Workaround for PROD-15923: all keepalive instances must be named 'VIP'"
cmd: |
set -e;
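# For each 'keepalived:' block, accumulate lines until 'instance:' is found,
# then rename the single instance key on the following line to 'VIP'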
find /srv/salt/reclass/classes/{system,cluster}/ -name '*.yml' -type f -exec sed -ri '/^ keepalived:/{:1;N;/\n instance:/!b1 {N; /\n [[:graph:]]+:/ { s/\n instance:\n [[:graph:]]+:/\n instance:\n VIP:/ } } }' {} +
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
- description: "[EXPERIMENTAL] Create environment model for virtual environment"
cmd: |
set -e;
set -x;
. /root/venv-reclass-tools/bin/activate;
reclass-tools render --template-dir /root/environment/environment_template/ \
--output-dir /srv/salt/reclass/classes/environment/ \
{% for ENVIRONMENT_CONTEXT_NAME in ENVIRONMENT_CONTEXT_NAMES %} --context /root/environment/{{ENVIRONMENT_CONTEXT_NAME}}{% endfor %} \
--env-name {{ ENVIRONMENT_MODEL_INVENTORY_NAME }}
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
- description: "[EXPERIMENTAL] Disable known_host_autopopulation if its not enabled"
cmd: |
{%- if not KNOWN_HOST_AUTOPOPULATION %}
set -e;
set -x;
. /root/venv-reclass-tools/bin/activate;
reclass-tools add-bool-key parameters.reclass.storage.node.infra_config_node01.parameters.openssh.client.known_hosts_autopopulation false /srv/salt/reclass/classes/environment/{{ ENVIRONMENT_MODEL_INVENTORY_NAME }}/init.yml
{%- endif %}
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
- description: Modify generated model and reclass-system
cmd: |
export REPLACE_DIRS="/srv/salt/reclass/classes/ /srv/salt/reclass/nodes/"
find ${REPLACE_DIRS} -type f -exec sed -i 's/apt_mk_version:.*/apt_mk_version: {{ REPOSITORY_SUITE }}/g' {} +
find ${REPLACE_DIRS} -type f -exec sed -i 's/mcp_version:.*/mcp_version: {{ REPOSITORY_SUITE }}/g' {} +
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 1}
skip_fail: false
{%- if UPDATE_REPO_CUSTOM_TAG != '' %}
- description: "[EXPERIMENTAL] Change update repos tag according to defined in job"
cmd: |
. /root/venv-reclass-tools/bin/activate;
reclass-tools add-key parameters._param.linux_system_repo_update_url 'http://mirror.mirantis.com/update/{{ UPDATE_REPO_CUSTOM_TAG }}/' /srv/salt/reclass/classes/cluster/{{ CLUSTER_NAME }}/openstack/init.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
{%- endif %}
{%- endmacro %}
{%- macro MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='') %}
{#######################################################}
- description: Configure reclass
cmd: |
set -e;
set -x;
FORMULA_PATH=${FORMULA_PATH:-/usr/share/salt-formulas};
which wget > /dev/null || (apt-get update; apt-get install -y wget);
. /etc/lsb-release; # Get DISTRIB_CODENAME variable
echo "{{ FORMULA_REPOSITORY }}" > /etc/apt/sources.list.d/mcp_salt.list;
wget -O - "{{ FORMULA_GPG }}" | apt-key add -;
apt-get clean; apt-get update;
[ ! -d /srv/salt/reclass/classes/service ] && mkdir -p /srv/salt/reclass/classes/service;
declare -a formula_services=({{ FORMULA_SERVICES }});
echo -e "\nInstalling all required salt formulas\n";
apt-get install -y "${formula_services[@]/#/salt-formula-}";
for formula_service in $(ls -1 ${FORMULA_PATH}/reclass/service); do
echo -e "\nLink service metadata for formula ${formula_service} ...\n";
[ ! -L "/srv/salt/reclass/classes/service/${formula_service}" ] && ln -s ${FORMULA_PATH}/reclass/service/${formula_service} /srv/salt/reclass/classes/service/${formula_service};
done;
[ ! -d /srv/salt/env ] && mkdir -p /srv/salt/env;
[ ! -L /srv/salt/env/prd ] && ln -s ${FORMULA_PATH}/env /srv/salt/env/prd;
[ ! -d /etc/reclass ] && mkdir /etc/reclass;
cat << 'EOF' > /etc/reclass/reclass-config.yml
storage_type: yaml_fs
pretty_print: True
output: yaml
inventory_base_uri: /srv/salt/reclass
EOF
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 1}
skip_fail: false
- description: "*Workaround* remove all cfg01 nodes except cfg01.{{ DOMAIN_NAME }} to not depend on other clusters in 'reclass --top'"
cmd: |
# Remove all other node files except cfg01.{{ DOMAIN_NAME }} so 'reclass --top' does not rely on them
find /srv/salt/reclass/nodes/ -type f -not -name cfg01.{{ DOMAIN_NAME }}.yml -delete
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
- description: Configure salt adapters on cfg01
cmd: |
ln -s /usr/lib/python2.7/dist-packages/reclass/adapters/salt.py /usr/local/sbin/reclass-salt;
chmod +x /usr/lib/python2.7/dist-packages/reclass/adapters/salt.py
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 1}
skip_fail: false
- description: Show reclass-salt --top for cfg01 node
cmd: reclass-salt --top
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
- description: Restart salt-master service
cmd: service salt-master restart;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
{%- endmacro %}
{%- macro MACRO_INSTALL_SALT_MINIONS() %}
{#######################################}
{%- for ssh in config.underlay.ssh %}
{%- set salt_roles = [] %}
{%- for role in ssh['roles'] %}
{%- if role in config.salt_deploy.salt_roles %}
{%- set _ = salt_roles.append(role) %}
{%- endif %}
{%- endfor %}
{%- if salt_roles %}
- description: Configure salt-minion on {{ ssh['node_name'] }}
parallel: True
cmd: |
set -ex;
[ ! -d /etc/salt/minion.d ] && mkdir -p /etc/salt/minion.d;
cat << EOF > /etc/salt/minion.d/minion.conf
id: $(hostname -s).{{ DOMAIN_NAME }}
master: {{ config.salt.salt_master_host }}
EOF
# Configure ubuntu and salt repositories
which wget >/dev/null || (apt-get update; apt-get install -y wget);
. /etc/lsb-release; # Get DISTRIB_CODENAME variable
echo "{{ UBUNTU_REPOSITORY }}" > /etc/apt/sources.list
echo "{{ UBUNTU_UPDATES_REPOSITORY }}" >> /etc/apt/sources.list
echo "{{ UBUNTU_SECURITY_REPOSITORY }}" >> /etc/apt/sources.list
echo "{{ SALT_REPOSITORY }}" > /etc/apt/sources.list.d/mcp_saltstack.list;
apt-key adv --keyserver "{{ UBUNTU_KEY_SERVER }}" --recv-keys "{{ UBUNTU_KEY_ID }}"
apt-get clean
apt-get update
# Install salt-minion
apt-get install eatmydata -y;
eatmydata apt-get install -y salt-minion;
# Install common packages
eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
sync
# Restart salt-minion if it was already installed
service salt-minion restart
node_name: {{ ssh['node_name'] }}
retry: {count: 3, delay: 10}
skip_fail: false
{%- else %}
- description: Check SSH connectivity to non-salt-minion node {{ ssh['node_name'] }}
cmd: echo "SSH to $(hostname -f) passed"
node_name: {{ ssh['node_name'] }}
retry: {count: 3, delay: 10}
skip_fail: false
{%- endif %}
{%- endfor %}
- description: Accept salt keys from all the nodes
cmd: salt-key -A -y
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: true
{%- endmacro %}
{%- macro DISABLE_EMPTY_NODE() %}
{#######################################}
{%- for ssh in config.underlay.ssh %}
{%- set none_roles = [] %}
{%- for role in ssh['roles'] %}
{%- if role == "none" %}
{%- set _ = none_roles.append(role) %}
{%- endif %}
{%- endfor %}
{%- if none_roles %}
- description: Move the non-product node {{ ssh['node_name'] }} out of the Salt cluster
cmd: |
set -ex;
# To return the node back, use: salt-key -a {{ ssh['node_name'] }} --include-all -y
salt-key -r {{ ssh['node_name'] }} --include-all -y
node_name: {{ HOSTNAME_CFG01 }}
retry: { count: 1, delay: 5 }
skip_fail: true
{%- endif %}
{%- endfor %}
{%- endmacro %}
{%- macro MACRO_INSTALL_FORMULAS(FORMULA_SERVICES='') %}
{#######################################################}
- description: Install salt formulas
cmd: |
set -e;
set -x;
FORMULA_PATH=${FORMULA_PATH:-/usr/share/salt-formulas};
which wget > /dev/null || (apt-get update; apt-get install -y wget);
. /etc/lsb-release; # Get DISTRIB_CODENAME variable
# echo "{{ FORMULA_REPOSITORY }}" > /etc/apt/sources.list.d/mcp_salt.list;
# wget -O - "{{ FORMULA_GPG }}" | apt-key add -;
apt-get clean; apt-get update;
[ ! -d /srv/salt/reclass/classes/service ] && mkdir -p /srv/salt/reclass/classes/service;
declare -a formula_services=({{ FORMULA_SERVICES }});
echo -e "\nInstalling all required salt formulas\n";
eatmydata apt-get install -y "${formula_services[@]/#/salt-formula-}";
for formula_service in $(ls -1 ${FORMULA_PATH}/reclass/service); do
echo -e "\nLink service metadata for formula ${formula_service} ...\n";
[ ! -L "/srv/salt/reclass/classes/service/${formula_service}" ] && ln -s ${FORMULA_PATH}/reclass/service/${formula_service} /srv/salt/reclass/classes/service/${formula_service};
done;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 1}
skip_fail: false
- description: Sync all salt resources on salt-master minion
cmd: salt-call saltutil.sync_all && sleep 5
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
{%- endmacro %}
{%- macro MACRO_CONFIG_DAY01_SALT_MINION() %}
{#######################################}
- description: Configure salt-minion on {{ HOSTNAME_CFG01 }}
cmd: |
set -ex;
export SALT_MASTER_MINION_ID={{ HOSTNAME_CFG01 }}
envsubst < /root/minion.conf > /etc/salt/minion.d/minion.conf
service salt-minion restart
while true; do
salt-key | grep "$SALT_MASTER_MINION_ID" && break
sleep 5
done
sleep 5
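# Delete every accepted key except the salt master's own minion key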
for i in `salt-key -l accepted | grep -v Accepted | grep -v "$SALT_MASTER_MINION_ID"`; do
salt-key -d $i -y
done
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 1}
skip_fail: false
{%- endmacro %}
{%- macro MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() %}
{##################################################}
{# Prepare salt services and nodes settings #}
- description: '*Workaround* for the hardcoded host from day01 grains'
cmd: salt-key -d cfg01.mcp-day01.local -y
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 1}
skip_fail: true
- description: Run 'linux' formula on cfg01
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' state.sls linux;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 3, delay: 5}
skip_fail: false
- description: Run 'openssh' formula on cfg01
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@salt:master' state.sls openssh &&
salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@salt:master' cmd.run "sed -i 's/PasswordAuthentication no/PasswordAuthentication
yes/' /etc/ssh/sshd_config && service ssh reload"
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 3, delay: 5}
skip_fail: false
- description: '*Workaround* for the bug https://mirantis.jira.com/browse/PROD-7962'
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
'*' cmd.run "echo ' StrictHostKeyChecking no' >> /root/.ssh/config"
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 1}
skip_fail: false
- description: Run 'salt.master' formula on cfg01
cmd: timeout 600 salt-call -l info --hard-crash --state-output=mixed --state-verbose=False state.sls salt.master;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 30}
skip_fail: false
{%- if SALT_FORMULAS_REFS != '' %}
- description: Replace the needed formulas with the desired versions
cmd: |
set -e;
set -x;
{%- for formula_set in SALT_FORMULAS_REFS.split(' ') %}
{% set formula = formula_set.split(':') %}
{% set formula_name = formula[0] %}
{% set formula_ref = formula[1] %}
{% set formula_dir = '/tmp/salt-formula-' + formula_name %}
git clone {{ SALT_FORMULAS_REPO }}/{{ formula_name }} {{ formula_dir }} &&
pushd {{ formula_dir }} &&
git fetch {{ SALT_FORMULAS_REPO }}/{{ formula_name }} {{ formula_ref }} &&
git checkout FETCH_HEAD &&
popd &&
if [ -d "{{ formula_dir }}" ]; then
echo "Going to replace packaged formula {{ formula_name }}" &&
rm -rfv /usr/share/salt-formulas/{env,reclass/service}/{{ formula_name }} &&
ln -v -s "{{ formula_dir }}/{{ formula_name }}" "/usr/share/salt-formulas/env/{{ formula_name }}" &&
ln -v -s "{{ formula_dir }}/metadata/service/" "/usr/share/salt-formulas/reclass/service/{{ formula_name }}";
else
echo "Stopped, directory /root/salt-formula-{{ formula_name }} does not exist!";
fi
{%- endfor %}
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
{%- endif %}
- description: Refresh pillars on salt-master minion
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' saltutil.refresh_pillar
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
- description: Show reclass-salt --top for salt-master node
cmd: reclass-salt --top
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
- description: Sync all salt resources on salt-master minion
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' saltutil.sync_all && sleep 5
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
- description: Configure linux on master
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' state.sls 'linux.system'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
- description: Configure salt.minion on master
cmd: salt --timeout=120 --hard-crash --state-output=mixed --state-verbose=False
-C 'I@salt:master' state.sls salt.minion && sleep 10
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 3, delay: 10}
skip_fail: false
- description: Run state 'salt' on master (for salt.api, etc)
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@salt:master' state.sls salt
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 3, delay: 10}
skip_fail: false
- description: Restart salt-api after states
cmd: systemctl restart salt-api
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 3, delay: 10}
skip_fail: false
{%- endmacro %}
{%- macro MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=false) %}
{#################################################################}
- description: Refresh pillars before generating nodes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
- description: Validate pillar on salt master node
cmd: |
set -e
set -x;
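# Run the validation functions only if the installed reclass module provides them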
if salt-call sys.doc reclass.validate_node_params | grep -q reclass.validate_node_params ; then salt-call reclass.validate_nodes_params ; fi
if salt-call sys.doc reclass.validate_pillar | grep -q reclass.validate_pillar ; then salt-call reclass.validate_pillar ; fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
- description: Generate inventory for all the nodes to the /srv/salt/reclass/nodes/_generated
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@salt:master' state.sls reclass
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
{%- if RERUN_SALTMASTER_STATE %}
- description: Regenerate salt.master states for the newly uploaded/created model
cmd: |
# Need to refresh installed formulas that are required in the model
salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar; sleep 5;
salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' state.sls salt.master;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
{%- endif %}
- description: Refresh pillars on all minions
cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar; sleep 5
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
- description: Show reclass-salt --top for all generated nodes
cmd: |
set -e
set -x;
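# Run the validation functions only if the installed reclass module provides them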
if salt-call sys.doc reclass.validate_node_params | grep -q reclass.validate_node_params ; then salt-call reclass.validate_nodes_params ; fi
if salt-call sys.doc reclass.validate_pillar | grep -q reclass.validate_pillar ; then salt-call reclass.validate_pillar ; fi
reclass-salt --top
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
- description: Sync all salt resources
cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
{%- endmacro %}
{%- macro MACRO_BOOTSTRAP_ALL_MINIONS() %}
{########################################}
# Bootstrap all nodes
- description: Get linux kernel version from all minions
cmd: |
salt --hard-crash --state-output=mixed --state-verbose=False --out=txt '*' cmd.run 'uname -r' | sort
salt --hard-crash --state-output=mixed --state-verbose=False --out=txt '*' cmd.run 'dpkg -l|grep "linux-image-.*-generic"' | sort
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 5, delay: 10}
skip_fail: false
- description: Configure linux on other nodes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 5, delay: 10}
skip_fail: false
- description: Configure openssh on all nodes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system and not cfg01*' state.sls openssh &&
salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@linux:system and not cfg01*' cmd.run "sed -i 's/PasswordAuthentication no/PasswordAuthentication
yes/' /etc/ssh/sshd_config && service ssh reload"
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
- description: Configure salt.minion on other nodes
cmd: salt --timeout=120 --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system and not cfg01*' state.sls salt.minion &&
sleep 30
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 3, delay: 15}
skip_fail: false
- description: Wait for salt-minions wake up after restart
cmd: salt --timeout=30 --hard-crash --state-output=mixed --state-verbose=False '*' test.ping
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 25, delay: 30}
skip_fail: false
- description: Update minion information
cmd: |
salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_grains &&
salt --hard-crash --state-output=mixed --state-verbose=False '*' mine.update && sleep 60
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Execute linux.network.host one more time after salt.minion to apply dynamically registered hosts on the cluster nodes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux.network.host
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Check salt minion versions on slaves
cmd: salt --timeout=60 '*' test.version
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 3, delay: 15}
skip_fail: false
- description: Check salt top states on nodes
cmd: salt --timeout=60 '*' state.show_top
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 3, delay: 15}
skip_fail: false
- description: Configure ntp and rsyslog on nodes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls ntp,rsyslog
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install and configure iptables if it is in pillar
cmd: |
if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@iptables:service' match.pillar 'iptables:service' ; then
salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@iptables:service' state.sls iptables
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install and configure logrotate if it is in pillar
cmd: |
if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@logrotate:server' match.pillar 'logrotate:server' ; then
salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@logrotate:server' state.sls logrotate
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install and configure auditd if it is enabled
cmd: |
if salt --hard-crash --state-output=mixed --state-verbose=False -C "I@auditd:service" match.pillar 'auditd:service'; then
salt --hard-crash --state-output=mixed --state-verbose=False -C "I@auditd:service" state.sls auditd
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
{%- endmacro %}
{%- macro MACRO_NETWORKING_WORKAROUNDS() %}
{#########################################}
- description: '*Workaround* load the bonding module before calling state.linux'
cmd: salt -C "I@linux:network:interface:*:type:bond" cmd.run 'modprobe bonding'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: true
- description: '*Workaround* install bridge-utils before running linux formula'
# The formula removes the default route and then fails when trying to install the package.
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not
cfg01*' cmd.run 'sudo apt-get install -y bridge-utils'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
{%- endmacro %}
{%- macro ADJUST_K8S_OPTS() %}
{############################}
- description: Set k8s deploy parameters
cmd: |
{% for k8s_opt, value in config.k8s_deploy.items() %}
{% if value|string() %}
salt-call reclass.cluster_meta_set name={{ k8s_opt }} value={{ value }};
{% endif %}
{% endfor %}
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 1}
skip_fail: false
{%- endmacro %}
{%- macro ADJUST_SL_OPTS(OVERRIDES_FILENAME='') %}
{#############################################}
- description: Set SL docker images deploy parameters
cmd: |
{#- For cookiecutter-generated model, use overrides.yml from environment model instead of cluster model #}
{%- for sl_opt, value in config.sl_deploy.items() %}
{%- if value|string() %}
{%- if OVERRIDES_FILENAME %}
salt-call reclass.cluster_meta_set name={{ sl_opt }} value={{ value }} file_name={{ OVERRIDES_FILENAME }};
{%- else %}
salt-call reclass.cluster_meta_set name={{ sl_opt }} value={{ value }};
{%- endif %}
{%- endif %}
{%- endfor %}
salt '*' saltutil.refresh_pillar;
sleep 10
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 1}
skip_fail: false
{%- endmacro %}
{%- macro REGISTER_COMPUTE_NODES() %}
{###################################}
{% for ssh in config.underlay.ssh %}
{% if ssh["node_name"].startswith("cmp") %}
{% set node_hostname = ssh["node_name"].split('.')[0] %}
- description: Register compute {{ ssh['node_name'] }}
cmd: |
exec > >(tee -i /tmp/cloud-init-bootstrap.log) 2>&1
export network01_prefix={{ IPV4_NET_ADMIN_PREFIX }}
export network02_prefix={{ IPV4_NET_CONTROL_PREFIX }}
export cluster_name={{ CLUSTER_NAME }}
export config_host={{ config.salt.salt_master_host }}
export node_domain={{ DOMAIN_NAME }}
export node_os="xenial"
export node_hostname={{ node_hostname }}
export saltversion={{ SALT_VERSION }}
set -xe
export BOOTSTRAP_SCRIPT_URL=$bootstrap_script_url
export BOOTSTRAP_SCRIPT_URL=${BOOTSTRAP_SCRIPT_URL:-https://raw.githubusercontent.com/salt-formulas/salt-formulas-scripts/master/bootstrap.sh}
export DISTRIB_REVISION={{ REPOSITORY_SUITE }}
export DISTRIB_REVISION=${DISTRIB_REVISION:-nightly}
# Add wrapper to apt-get to avoid race conditions
# with cron jobs running 'unattended-upgrades' script
aptget_wrapper() {
local apt_wrapper_timeout=300
local start_time=$(date '+%s')
local fin_time=$((start_time + apt_wrapper_timeout))
while true; do
if (( "$(date '+%s')" > fin_time )); then
msg="Timeout exceeded ${apt_wrapper_timeout} s. Lock files are still not released. Terminating..."
fi
if fuser /var/lib/apt/lists/lock >/dev/null 2>&1 || fuser /var/lib/dpkg/lock >/dev/null 2>&1; then
echo "Waiting while another apt/dpkg process releases locks ..."
sleep 30
continue
else
apt-get $@
break
fi
done
}
# Set default salt version
if [ -z "$saltversion" ]; then
saltversion="2017.7"
fi
echo "Using Salt version $saltversion"
echo "Preparing base OS ..."
case "$node_os" in
trusty)
# workaround for old cloud-init only configuring the first iface
iface_config_dir="/etc/network/interfaces"
ifaces=$(ip a | awk '/^[1-9]:/ {print $2}' | grep -v "lo:" | rev | cut -c2- | rev)
for iface in $ifaces; do
grep $iface $iface_config_dir &> /dev/null || (echo -e "\nauto $iface\niface $iface inet dhcp" >> $iface_config_dir && ifup $iface)
done
which wget > /dev/null || (aptget_wrapper update; aptget_wrapper install -y wget)
# SUGGESTED UPDATE:
#export MASTER_IP="$config_host" MINION_ID="$node_hostname.$node_domain" SALT_VERSION=$saltversion
#source <(curl -qL ${BOOTSTRAP_SCRIPT_URL})
## Update BOOTSTRAP_SALTSTACK_OPTS, as by default they contain "-dX" not to start service
#BOOTSTRAP_SALTSTACK_OPTS=" stable $SALT_VERSION "
#install_salt_minion_pkg
# DEPRECATED:
echo "deb [arch=amd64] http://apt-mk.mirantis.com/trusty ${DISTRIB_REVISION} salt extra" > /etc/apt/sources.list.d/mcp_salt.list
wget -O - http://apt-mk.mirantis.com/public.gpg | apt-key add -
echo "deb http://repo.saltstack.com/apt/ubuntu/14.04/amd64/$saltversion trusty main" > /etc/apt/sources.list.d/saltstack.list
wget -O - "https://repo.saltstack.com/apt/ubuntu/14.04/amd64/$saltversion/SALTSTACK-GPG-KEY.pub" | apt-key add -
aptget_wrapper clean
aptget_wrapper update
aptget_wrapper install -y salt-common
aptget_wrapper install -y salt-minion
;;
xenial)
# workaround for new cloud-init setting all interfaces statically
which resolvconf > /dev/null 2>&1 && systemctl restart resolvconf
which wget > /dev/null || (aptget_wrapper update; aptget_wrapper install -y wget)
# SUGGESTED UPDATE:
#export MASTER_IP="$config_host" MINION_ID="$node_hostname.$node_domain" SALT_VERSION=$saltversion
#source <(curl -qL ${BOOTSTRAP_SCRIPT_URL})
## Update BOOTSTRAP_SALTSTACK_OPTS, as by default they contain "-dX" not to start service
#BOOTSTRAP_SALTSTACK_OPTS=" stable $SALT_VERSION "
#install_salt_minion_pkg
# DEPRECATED:
echo "deb [arch=amd64] http://apt-mk.mirantis.com/xenial ${DISTRIB_REVISION} salt extra" > /etc/apt/sources.list.d/mcp_salt.list
wget -O - http://apt-mk.mirantis.com/public.gpg | apt-key add -
echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/$saltversion xenial main" > /etc/apt/sources.list.d/saltstack.list
wget -O - "https://repo.saltstack.com/apt/ubuntu/16.04/amd64/$saltversion/SALTSTACK-GPG-KEY.pub" | apt-key add -
aptget_wrapper clean
aptget_wrapper update
aptget_wrapper install -y salt-minion
;;
rhel|centos|centos7|rhel6|rhel7)
yum install -y git
export MASTER_IP="$config_host" MINION_ID="$node_hostname.$node_domain" SALT_VERSION=$saltversion
source <(curl -qL ${BOOTSTRAP_SCRIPT_URL})
# Update BOOTSTRAP_SALTSTACK_OPTS, as by default they contain "-dX" not to start service
BOOTSTRAP_SALTSTACK_OPTS=" stable $SALT_VERSION "
install_salt_minion_pkg
;;
*)
msg="OS '$node_os' is not supported."
esac
echo "Configuring Salt minion ..."
[ ! -d /etc/salt/minion.d ] && mkdir -p /etc/salt/minion.d
echo -e "id: $node_hostname.$node_domain\nmaster: $config_host" > /etc/salt/minion.d/minion.conf
service salt-minion restart || wait_condition_send "FAILURE" "Failed to restart salt-minion service."
sleep 1
echo "Classifying node ..."
os_codename=$(salt-call grains.item oscodename --out key | awk '/oscodename/ {print $2}')
node_network01_ip="$(ip a | awk -v prefix="^ inet $network01_prefix[.]" '$0 ~ prefix {split($2, a, "/"); print a[1]}')"
node_network02_ip="$(ip a | awk -v prefix="^ inet $network02_prefix[.]" '$0 ~ prefix {split($2, a, "/"); print a[1]}')"
node_network03_ip="$(ip a | awk -v prefix="^ inet $network03_prefix[.]" '$0 ~ prefix {split($2, a, "/"); print a[1]}')"
node_network04_ip="$(ip a | awk -v prefix="^ inet $network04_prefix[.]" '$0 ~ prefix {split($2, a, "/"); print a[1]}')"
node_network05_ip="$(ip a | awk -v prefix="^ inet $network05_prefix[.]" '$0 ~ prefix {split($2, a, "/"); print a[1]}')"
node_network01_iface="$(ip a | awk -v prefix="^ inet $network01_prefix[.]" '$0 ~ prefix {split($7, a, "/"); print a[1]}')"
node_network02_iface="$(ip a | awk -v prefix="^ inet $network02_prefix[.]" '$0 ~ prefix {split($7, a, "/"); print a[1]}')"
node_network03_iface="$(ip a | awk -v prefix="^ inet $network03_prefix[.]" '$0 ~ prefix {split($7, a, "/"); print a[1]}')"
node_network04_iface="$(ip a | awk -v prefix="^ inet $network04_prefix[.]" '$0 ~ prefix {split($7, a, "/"); print a[1]}')"
node_network05_iface="$(ip a | awk -v prefix="^ inet $network05_prefix[.]" '$0 ~ prefix {split($7, a, "/"); print a[1]}')"
if [ "$node_network05_iface" != "" ]; then
node_network05_hwaddress="$(cat /sys/class/net/$node_network05_iface/address)"
fi
# Find additional parameters (every environment variable starting with param_) and format them as JSON "key":"value" pairs
more_params=$(env | grep "^param_" | sed -e 's/=/":"/g' -e 's/^/"/g' -e 's/$/",/g' | tr "\n" " " | sed 's/, $//g')
if [ "$more_params" != "" ]; then
echo "Additional params: $more_params"
more_params=", $more_params"
fi
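# Send the discovered network and node facts to the salt master as a 'reclass/minion/classify' event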
salt-call event.send "reclass/minion/classify" "{\"node_master_ip\": \"$config_host\", \"node_os\": \"${os_codename}\", \"node_deploy_ip\": \"${node_network01_ip}\", \"node_deploy_iface\": \"${node_network01_iface}\", \"node_control_ip\": \"${node_network02_ip}\", \"node_control_iface\": \"${node_network02_iface}\", \"node_tenant_ip\": \"${node_network03_ip}\", \"node_tenant_iface\": \"${node_network03_iface}\", \"node_external_ip\": \"${node_network04_ip}\", \"node_external_iface\": \"${node_network04_iface}\", \"node_baremetal_ip\": \"${node_network05_ip}\", \"node_baremetal_iface\": \"${node_network05_iface}\", \"node_baremetal_hwaddress\": \"${node_network05_hwaddress}\", \"node_domain\": \"$node_domain\", \"node_cluster\": \"$cluster_name\", \"node_hostname\": \"$node_hostname\"${more_params}}"
sleep 5
salt-call saltutil.sync_all
salt-call mine.flush
salt-call mine.update
node_name: {{ ssh['node_name'] }}
retry: {count: 1, delay: 1}
skip_fail: false
{%- endif %}
{%- endfor %}
- description: Refresh pillars on all minions
cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
- description: Sync all salt resources
cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
{%- endmacro %}
{%- macro RUN_NEW_TEMPEST() %}
{###################################}
- description: Upload tempest template
upload:
local_path: {{ config.salt_deploy.templates_dir }}
local_filename: runtest.yml
remote_path: /srv/salt/reclass/classes/cluster/{{ CLUSTER_NAME }}/infra/
node_name: {{ HOSTNAME_CFG01 }}
skip_fail: False
- description: Include the class with the tempest template on the cfg node
cmd: |
sed -i 's/classes\:/classes\:\n- cluster.{{ CLUSTER_NAME }}.infra.runtest/g' /srv/salt/reclass/nodes/_generated/cfg01.{{ DOMAIN_NAME }}.yml;
salt 'cfg01*' saltutil.refresh_pillar;
salt 'cfg01*' saltutil.sync_all;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Execute salt.minion on config node
cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False state.sls salt.minion;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
- description: Test ping
cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False test.ping;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
- description: Create flavors for tests
cmd: |
salt 'cfg01*' state.sls nova.client;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
- description: Create networks for tests
cmd: |
salt 'cfg01*' state.sls neutron.client;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
- description: Upload cirros image
cmd: |
salt 'cfg01*' state.sls glance.client;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
- description: Generate tempest config
cmd: |
salt 'cfg01*' state.sls runtest;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
- description: Run tempest from new docker image
cmd: |
docker run -e ARGS="-r {{ TEMPEST_PATTERN }} -w 2 {{ EXCLUDE_TEST_ARGS }}" -v /root/test/tempest.conf:/etc/tempest/tempest.conf -v /tmp/:/tmp/ -v /root/test:/root/tempest -v /etc/ssl/certs/:/etc/ssl/certs/ --rm docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest:pike /bin/bash -c "run-tempest"
node_name: {{ HOSTNAME_GTW01 }}
retry: {count: 1, delay: 30}
skip_fail: true
- description: Download xml results
download:
remote_path: /root/test/
remote_filename: "report_*.xml"
local_path: {{ os_env('PWD') }}
node_name: {{ HOSTNAME_GTW01 }}
skip_fail: true
{%- endmacro %}
{%- macro INSTALL_DOCKER_ON_GTW() %}
{###################################}
- description: Install docker.io on gtw
cmd: salt-call cmd.run 'apt-get install docker.io -y'
node_name: {{ HOSTNAME_GTW01 }}
retry: {count: 1, delay: 30}
skip_fail: false
- description: Enable forward policy
cmd: iptables --policy FORWARD ACCEPT
node_name: {{ HOSTNAME_GTW01 }}
retry: {count: 1, delay: 30}
skip_fail: false
{%- endmacro %}
{%- macro MACRO_CHECK_SALT_VERSION_ON_NODES() %}
{#####################################################}
- description: 'Check salt version is as expected on nodes'
cmd: salt --timeout=15 '*' cmd.run "if salt-call test.version |grep -q {{ SALT_VERSION }} ; then echo True; fi"
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 5, delay: 5}
skip_fail: false
{%- endmacro %}
{%- macro MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG() %}
{#####################################################}
- description: 'Check salt-api, salt-master and salt-minion version'
cmd: |
salt-master --version | grep {{ SALT_VERSION }};
salt-minion --version | grep {{ SALT_VERSION }};
salt-api --version | grep {{ SALT_VERSION }};
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
{%- endmacro %}
{%- macro MACRO_IPFLUSH_TENANTS_IFACES() %}
{######################################}
- description: '*Workaround* flush addresses on tenant and external ifaces'
cmd: |
salt -C 'gtw* or cmp*' cmd.run 'ip address flush dev ens5';
salt -C 'gtw* or cmp*' cmd.run 'ip address flush dev ens6';
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 1}
skip_fail: false
{%- endmacro %}