| {# Collection of common macros shared across different deployments #} |
| |
| {% set SALT_MODELS_BRANCH = os_env('SALT_MODELS_BRANCH','master') %} |
| {% set SALT_MODELS_COMMIT = os_env('SALT_MODELS_COMMIT','master') %} |
| {# Reference to a patch that should be applied to the model if required, for example: export SALT_MODELS_REF_CHANGE=refs/changes/19/7219/12 #} |
| {% set SALT_MODELS_REF_CHANGE = os_env('SALT_MODELS_REF_CHANGE', '') %} |
| {# Pin to a specified commit in salt-models/reclass-system #} |
| {% set SALT_MODELS_SYSTEM_REPOSITORY = os_env('SALT_MODELS_SYSTEM_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/reclass-system') %} |
| {% set SALT_MODELS_SYSTEM_COMMIT = os_env('SALT_MODELS_SYSTEM_COMMIT','') %} |
| {% set SALT_MODELS_SYSTEM_REF_CHANGE = os_env('SALT_MODELS_SYSTEM_REF_CHANGE','') %} |
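| {# SALT_MODELS_REF_CHANGE and SALT_MODELS_SYSTEM_REF_CHANGE accept a space-separated list of |
| # Gerrit refs that are fetched and cherry-picked in order, for example (illustrative refs): |
| # export SALT_MODELS_SYSTEM_REF_CHANGE='refs/changes/10/10010/1 refs/changes/20/10020/2' #} |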
| {% set COOKIECUTTER_REF_CHANGE = os_env('COOKIECUTTER_REF_CHANGE','') %} |
| {% set ENVIRONMENT_TEMPLATE_REF_CHANGE = os_env('ENVIRONMENT_TEMPLATE_REF_CHANGE','') %} |
| |
| {% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %} |
| {% set FORMULA_REPOSITORY = os_env('FORMULA_REPOSITORY', 'deb [arch=amd64] http://apt.mirantis.com/xenial ' + REPOSITORY_SUITE + ' salt') %} |
| {% set FORMULA_GPG = os_env('FORMULA_GPG', 'http://apt.mirantis.com/public.gpg') %} |
| {% set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb http://repo.saltstack.local.test/apt/ubuntu/16.04/amd64/2016.3 xenial main") %} |
| {% set SALT_GPG = os_env('SALT_GPG', 'http://repo.saltstack.local.test/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub') %} |
| {% set UBUNTU_REPOSITORY = os_env('UBUNTU_REPOSITORY', "deb http://archive.ubuntu.com/ubuntu/ xenial main universe restricted") %} |
| {% set UBUNTU_UPDATES_REPOSITORY = os_env('UBUNTU_UPDATES_REPOSITORY', "deb http://archive.ubuntu.com/ubuntu/ xenial-updates main universe restricted") %} |
| {% set UBUNTU_SECURITY_REPOSITORY = os_env('UBUNTU_SECURITY_REPOSITORY', "deb http://archive.ubuntu.com/ubuntu/ xenial-security main universe restricted") %} |
| |
| {# Address pools for the reclass cluster model are taken in the following order: |
| # 1. environment variables, |
| # 2. config.underlay.address_pools based on fuel-devops address pools |
| # (see generated '.ini' file after underlay is created), |
| # 3. defaults #} |
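| {# For example (illustrative value), exporting IPV4_NET_ADMIN=10.70.0.0/24 overrides the |
| # 'admin-pool01' value from config.underlay.address_pools, and the derived |
| # IPV4_NET_ADMIN_PREFIX below becomes '10.70.0'. #} |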
| {% set address_pools = config.underlay.address_pools %} |
| {% set IPV4_NET_ADMIN = os_env('IPV4_NET_ADMIN', address_pools.get('admin-pool01', '192.168.10.0/24')) %} |
| {% set IPV4_NET_CONTROL = os_env('IPV4_NET_CONTROL', address_pools.get('private-pool01', '172.16.10.0/24')) %} |
| {% set IPV4_NET_TENANT = os_env('IPV4_NET_TENANT', address_pools.get('tenant-pool01', '10.1.0.0/24')) %} |
| {% set IPV4_NET_EXTERNAL = os_env('IPV4_NET_EXTERNAL', address_pools.get('external-pool01', '10.16.0.0/24')) %} |
| {% set IPV4_NET_ADMIN_PREFIX = '.'.join(IPV4_NET_ADMIN.split('.')[0:3]) %} |
| {% set IPV4_NET_CONTROL_PREFIX = '.'.join(IPV4_NET_CONTROL.split('.')[0:3]) %} |
| {% set IPV4_NET_TENANT_PREFIX = '.'.join(IPV4_NET_TENANT.split('.')[0:3]) %} |
| {% set IPV4_NET_EXTERNAL_PREFIX = '.'.join(IPV4_NET_EXTERNAL.split('.')[0:3]) %} |
| |
| {# Format for formula replacement: |
| # a space-separated string of '<formula>:<gerrit_ref>' pairs, for example: |
| # export SALT_FORMULAS_REFS='apache:refs/changes/xxxx kubernetes:refs/changes/xxxx' #} |
| |
| {% set SALT_FORMULAS_REFS = os_env('SALT_FORMULAS_REFS', '') %} |
| {% set SALT_FORMULAS_REPO = os_env('SALT_FORMULAS_REPO', 'https://gerrit.mcp.mirantis.net/salt-formulas') %} |
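| {# Each '<formula>:<ref>' pair makes MACRO_RUN_SALT_MASTER_UNDERLAY_STATES clone |
| # SALT_FORMULAS_REPO/<formula>, check out the given ref and symlink it over the packaged |
| # formula, e.g. (illustrative ref): export SALT_FORMULAS_REFS='linux:refs/changes/54/2354/16' #} |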
| |
| # Needed for using different models in different templates |
| {% set CLUSTER_NAME = os_env('CLUSTER_NAME', LAB_CONFIG_NAME) %} |
| |
| |
| {%- macro MACRO_INSTALL_PACKAGES_ON_NODES(NODE_NAME) %} |
| {#####################################################} |
| |
| - description: 'Configure key on nodes and install packages' |
| cmd: | |
| rm -rf trusted* ; |
| rm -rf /etc/apt/sources.list ; |
| echo "{{ FORMULA_REPOSITORY }}" > /etc/apt/sources.list.d/mcp_salt.list; |
| wget -O - "{{ FORMULA_GPG }}" | apt-key add -; |
| echo "{{ SALT_REPOSITORY }}" > /etc/apt/sources.list.d/mcp_saltstack.list; |
| wget -O - "{{ SALT_GPG }}" | apt-key add -; |
| echo "{{ UBUNTU_REPOSITORY }}" > /etc/apt/sources.list.d/ubuntu.list |
| echo "{{ UBUNTU_UPDATES_REPOSITORY }}" > /etc/apt/sources.list.d/ubuntu_updates.list |
| echo "{{ UBUNTU_SECURITY_REPOSITORY }}" > /etc/apt/sources.list.d/ubuntu_security.list |
| eatmydata apt-get clean && apt-get update; |
| node_name: {{ NODE_NAME }} |
| retry: {count: 1, delay: 5} |
| skip_fail: false |
| |
| {%- endmacro %} |
| |
| |
| {%- macro MACRO_INSTALL_SALT_MASTER() %} |
| {######################################} |
| - description: Installing salt master on cfg01 |
| cmd: eatmydata apt-get install -y --allow-unauthenticated reclass git salt-master |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 1, delay: 1} |
| skip_fail: false |
| |
| - description: Configure salt-master on cfg01 |
| cmd: | |
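| # Note: '&reclass' below is a YAML anchor; the same reclass settings are reused |
| # via the '*reclass' alias for both ext_pillar and master_tops. |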
| cat << 'EOF' >> /etc/salt/master.d/master.conf |
| file_roots: |
| base: |
| - /usr/share/salt-formulas/env |
| pillar_opts: False |
| open_mode: True |
| reclass: &reclass |
| storage_type: yaml_fs |
| inventory_base_uri: /srv/salt/reclass |
| ext_pillar: |
| - reclass: *reclass |
| master_tops: |
| reclass: *reclass |
| EOF |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 1, delay: 1} |
| skip_fail: false |
| |
| - description: Configure GIT settings and certificates |
| cmd: | |
| set -e; |
| #touch /root/.git_trusted_certs.pem; |
| #for server in github.com; do \ |
| # openssl s_client -showcerts -connect $server:443 </dev/null \ |
| # | openssl x509 -outform PEM \ |
| # >> /root/.git_trusted_certs.pem; |
| #done; |
| #HOME=/root git config --global http.sslCAInfo /root/.git_trusted_certs.pem; |
| HOME=/root git config --global user.email "mcp-integration-qa@example.com"; |
| HOME=/root git config --global user.name "MCP Integration QA"; |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 1, delay: 1} |
| skip_fail: false |
| {%- endmacro %} |
| |
| {%- macro MACRO_CLONE_RECLASS_MODELS(IS_CONTRAIL_LAB=false) %} |
| {############################################################} |
| {# Clones a ready-made cluster model (with the 'classes/system' submodule) from SALT_MODELS_REPOSITORY and applies the optional commit pins or Gerrit ref changes #} |
| |
| - description: Clone reclass models with submodules |
| cmd: | |
| set -e; |
| #ssh-keyscan -H github.com >> ~/.ssh/known_hosts; |
| export GIT_SSL_NO_VERIFY=true; git clone -b {{ SALT_MODELS_BRANCH }} {{ SALT_MODELS_REPOSITORY }} /srv/salt/reclass; |
| pushd /srv/salt/reclass; |
| git config submodule."classes/system".url "{{ SALT_MODELS_SYSTEM_REPOSITORY }}"; |
| git submodule update --init --recursive; |
| {%- if SALT_MODELS_REF_CHANGE != '' %} |
| {%- for item in SALT_MODELS_REF_CHANGE.split(" ") %} |
| git fetch {{ SALT_MODELS_REPOSITORY }} {{ item }} && git cherry-pick FETCH_HEAD; |
| {%- endfor %} |
| {%- elif SALT_MODELS_COMMIT != 'master' %} |
| git checkout {{ SALT_MODELS_COMMIT }}; |
| {%- endif %} |
| {%- if SALT_MODELS_SYSTEM_COMMIT != '' %} |
| pushd classes/system/; |
| git checkout {{ SALT_MODELS_SYSTEM_COMMIT }}; |
| popd; |
| {%- elif SALT_MODELS_SYSTEM_REF_CHANGE != '' %} |
| pushd classes/system/ && \ |
| {%- for item in SALT_MODELS_SYSTEM_REF_CHANGE.split(" ") %} |
| git fetch {{ SALT_MODELS_SYSTEM_REPOSITORY }} {{ item }} && git cherry-pick FETCH_HEAD; |
| {%- endfor %} |
| popd; |
| {%- else %} |
| git submodule update --init --recursive; |
| {%- endif %} |
| popd; |
| mkdir -p /srv/salt/reclass/classes/service; |
| rm -rf /srv/salt/reclass/nodes/ # For backward compatibility. New cfg node will be regenerated here |
| mkdir -p /srv/salt/reclass/nodes/_generated/; |
| |
| # Replace with intermediate placeholders first to avoid collisions between |
| # already-replaced and yet-to-be-replaced networks. |
| # For example, if the generated IPV4_NET_ADMIN_PREFIX=10.16.0, there is a risk of replacing twice: |
| # 192.168.10 -> 10.16.0 (generated admin network) |
| # 10.16.0 -> <external network> |
| # So replace the constant networks with keywords first, then the keywords with the desired networks. |
| export REPLACE_DIRS="/srv/salt/reclass/classes/ /srv/salt/reclass/nodes/" |
| find ${REPLACE_DIRS} -type f -exec sed -i 's/192\.168\.10\./==IPV4_NET_ADMIN_PREFIX==/g' {} + |
| find ${REPLACE_DIRS} -type f -exec sed -i 's/172\.16\.10\./==IPV4_NET_CONTROL_PREFIX==/g' {} + |
| find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.1\.0\./==IPV4_NET_TENANT_PREFIX==/g' {} + |
| find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.16\.0\./==IPV4_NET_EXTERNAL_PREFIX==/g' {} + |
| |
| find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_ADMIN_PREFIX==/{{ IPV4_NET_ADMIN_PREFIX }}./g' {} + |
| find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_CONTROL_PREFIX==/{{ IPV4_NET_CONTROL_PREFIX }}./g' {} + |
| find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_TENANT_PREFIX==/{{ IPV4_NET_TENANT_PREFIX }}./g' {} + |
| find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_EXTERNAL_PREFIX==/{{ IPV4_NET_EXTERNAL_PREFIX }}./g' {} + |
| |
| find ${REPLACE_DIRS} -type f -exec sed -i 's/apt_mk_version:.*/apt_mk_version: {{ REPOSITORY_SUITE }}/g' {} + |
| |
| {%- if IS_CONTRAIL_LAB %} |
| # The vSRX images used by tcp-qa have 172.16.10.90 hardcoded as the router address |
| find ${REPLACE_DIRS} -type f -exec sed -i 's/opencontrail_router01_address:.*/opencontrail_router01_address: 172.16.10.90/g' {} + |
| find ${REPLACE_DIRS} -type f -exec sed -i 's/infra_config_deploy_address: 1.*/infra_config_deploy_address: {{ IPV4_NET_ADMIN_PREFIX }}.15/g' {} + |
| {%- endif %} |
| |
| # Override some context parameters |
| {%- set CLUSTER_PATH = '/srv/salt/reclass/classes/cluster/' + CLUSTER_NAME %} |
| find {{ CLUSTER_PATH }} -type f -exec sed -i 's/cluster_name: .*/cluster_name: {{ CLUSTER_NAME }}/g' {} + |
| find {{ CLUSTER_PATH }} -type f -exec sed -i 's/cluster_domain: .*/cluster_domain: {{ DOMAIN_NAME }}/g' {} + |
| |
| # Disable checking out the model from the remote repository |
| cat << 'EOF' >> /srv/salt/reclass/nodes/_generated/{{ HOSTNAME_CFG01 }}.yml |
| classes: |
| - cluster.{{ CLUSTER_NAME }}.infra.config |
| parameters: |
| _param: |
| linux_system_codename: xenial |
| reclass_data_revision: master |
| linux: |
| system: |
| name: cfg01 |
| domain: {{ DOMAIN_NAME }} |
| reclass: |
| storage: |
| data_source: |
| engine: local |
| EOF |
| |
| # Show the changes to the console |
| cd /srv/salt/reclass/; git diff |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 1, delay: 1} |
| skip_fail: false |
| {%- endmacro %} |
| |
| |
| {%- macro MACRO_GENERATE_COOKIECUTTER_MODEL(IS_CONTRAIL_LAB=false, CONTROL_VLAN=None, TENANT_VLAN=None, CLUSTER_PRODUCT_MODELS='') %} |
| {###################################################################} |
| {%- set CLUSTER_CONTEXT_PATH = '/tmp/' + CLUSTER_CONTEXT_NAME %} |
| - description: "[EXPERIMENTAL] Upload cookiecutter-templates context to {{ HOSTNAME_CFG01 }}" |
| upload: |
| local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/ |
| local_filename: {{ CLUSTER_CONTEXT_NAME }} |
| remote_path: /tmp/ |
| node_name: {{ HOSTNAME_CFG01 }} |
| |
| - description: Create cluster model from cookiecutter templates |
| cmd: | |
| set -e; |
| pip install cookiecutter |
| export GIT_SSL_NO_VERIFY=true; git clone https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates /tmp/cookiecutter-templates |
| |
| {%- if COOKIECUTTER_REF_CHANGE != '' %} |
| pushd /tmp/cookiecutter-templates |
| git fetch https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates {{ COOKIECUTTER_REF_CHANGE }} && git checkout FETCH_HEAD |
| popd |
| {%- endif %} |
| |
| mkdir -p /srv/salt/reclass/classes/cluster/ |
| mkdir -p /srv/salt/reclass/classes/system/ |
| mkdir -p /srv/salt/reclass/classes/service/ |
| rm -rf /srv/salt/reclass/nodes/ # For backward compatibility. New cfg node will be regenerated here |
| mkdir -p /srv/salt/reclass/nodes/_generated |
| |
| # Override some context parameters |
| sed -i 's/cluster_name: .*/cluster_name: {{ CLUSTER_NAME }}/g' {{ CLUSTER_CONTEXT_PATH }} |
| sed -i 's/cluster_domain: .*/cluster_domain: {{ DOMAIN_NAME }}/g' {{ CLUSTER_CONTEXT_PATH }} |
| {%- if CONTROL_VLAN %} |
| sed -i 's/control_vlan: .*/control_vlan: {{ CONTROL_VLAN }}/g' {{ CLUSTER_CONTEXT_PATH }} |
| {%- endif %} |
| {%- if TENANT_VLAN %} |
| sed -i 's/tenant_vlan: .*/tenant_vlan: {{ TENANT_VLAN }}/g' {{ CLUSTER_CONTEXT_PATH }} |
| {%- endif %} |
| |
| # Replace with intermediate placeholders first to avoid collisions between |
| # already-replaced and yet-to-be-replaced networks. |
| # For example, if the generated IPV4_NET_ADMIN_PREFIX=10.16.0, there is a risk of replacing twice: |
| # 192.168.10 -> 10.16.0 (generated admin network) |
| # 10.16.0 -> <external network> |
| # So replace the constant networks with keywords first, then the keywords with the desired networks. |
| sed -i 's/10\.167\.5/==IPV4_NET_ADMIN_PREFIX==/g' {{ CLUSTER_CONTEXT_PATH }} |
| sed -i 's/10\.167\.4/==IPV4_NET_CONTROL_PREFIX==/g' {{ CLUSTER_CONTEXT_PATH }} |
| sed -i 's/10\.167\.6/==IPV4_NET_TENANT_PREFIX==/g' {{ CLUSTER_CONTEXT_PATH }} |
| sed -i 's/172\.17\.16\./==IPV4_NET_EXTERNAL_PREFIX==/g' {{ CLUSTER_CONTEXT_PATH }} |
| |
| sed -i 's/==IPV4_NET_ADMIN_PREFIX==/{{ IPV4_NET_ADMIN_PREFIX }}/g' {{ CLUSTER_CONTEXT_PATH }} |
| sed -i 's/==IPV4_NET_CONTROL_PREFIX==/{{ IPV4_NET_CONTROL_PREFIX }}/g' {{ CLUSTER_CONTEXT_PATH }} |
| sed -i 's/==IPV4_NET_TENANT_PREFIX==/{{ IPV4_NET_TENANT_PREFIX }}/g' {{ CLUSTER_CONTEXT_PATH }} |
| sed -i 's/==IPV4_NET_EXTERNAL_PREFIX==/{{ IPV4_NET_EXTERNAL_PREFIX }}./g' {{ CLUSTER_CONTEXT_PATH }} |
| |
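| # If CLUSTER_PRODUCT_MODELS is empty, every template under |
| # /tmp/cookiecutter-templates/cluster_product/ is rendered. |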
| {% set items = CLUSTER_PRODUCT_MODELS or '$(ls /tmp/cookiecutter-templates/cluster_product/)' %} |
| for item in {{ items }}; do |
| python /tmp/cookiecutter-templates/generate.py \ |
| --template /tmp/cookiecutter-templates/cluster_product/$item \ |
| --config-file {{ CLUSTER_CONTEXT_PATH }} \ |
| --output-dir /srv/salt/reclass/classes/cluster/; |
| done |
| |
| export GIT_SSL_NO_VERIFY=true; git clone https://gerrit.mcp.mirantis.net/salt-models/reclass-system /srv/salt/reclass/classes/system/ |
| |
| # Create the cfg01 node and disable checking out the model from the remote repository |
| cat << 'EOF' >> /srv/salt/reclass/nodes/_generated/{{ HOSTNAME_CFG01 }}.yml |
| classes: |
| - system.openssh.server.team.all |
| - cluster.{{ CLUSTER_NAME }}.infra.config |
| EOF |
| |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 1, delay: 1} |
| skip_fail: false |
| |
| - description: Modify generated model and reclass-system if necessary |
| cmd: | |
| set -e; |
| {%- if SALT_MODELS_SYSTEM_COMMIT != '' %} |
| pushd /srv/salt/reclass/classes/system/ |
| git checkout {{ SALT_MODELS_SYSTEM_COMMIT }} && \ |
| popd |
| {%- elif SALT_MODELS_SYSTEM_REF_CHANGE != '' %} |
| pushd /srv/salt/reclass/classes/system/ |
| {%- for item in SALT_MODELS_SYSTEM_REF_CHANGE.split(" ") %} |
| git fetch {{ SALT_MODELS_SYSTEM_REPOSITORY }} {{ item }} && git cherry-pick FETCH_HEAD |
| {%- endfor %} |
| popd |
| {%- endif %} |
| |
| export REPLACE_DIRS="/srv/salt/reclass/classes/ /srv/salt/reclass/nodes/" |
| find ${REPLACE_DIRS} -type f -exec sed -i 's/apt_mk_version:.*/apt_mk_version: {{ REPOSITORY_SUITE }}/g' {} + |
| |
| {%- if IS_CONTRAIL_LAB %} |
| # The vSRX images used by tcp-qa have 172.16.10.90 hardcoded as the router address |
| find ${REPLACE_DIRS} -type f -exec sed -i 's/opencontrail_router01_address:.*/opencontrail_router01_address: 172.16.10.90/g' {} + |
| find ${REPLACE_DIRS} -type f -exec sed -i 's/infra_config_deploy_address: 1.*/infra_config_deploy_address: {{ IPV4_NET_ADMIN_PREFIX }}.15/g' {} + |
| {%- endif %} |
| |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 1, delay: 1} |
| skip_fail: false |
| {%- endmacro %} |
| |
| |
| {%- macro MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() %} |
| {########################################################} |
| |
| - description: "[EXPERIMENTAL] Clone 'environment-template' repository to {{ HOSTNAME_CFG01 }}." |
| cmd: | |
| set -e; |
| mkdir -p /tmp/environment/; |
| export GIT_SSL_NO_VERIFY=true; git clone https://github.com/Mirantis/environment-template /tmp/environment/environment_template |
| node_name: {{ HOSTNAME_CFG01 }} |
| skip_fail: false |
| |
| {%- if ENVIRONMENT_TEMPLATE_REF_CHANGE != '' %} |
| - description: Fetch changes for environment templates |
| cmd: | |
| set -e; |
| pushd /tmp/environment/environment_template && |
| git fetch https://github.com/Mirantis/environment-template {{ ENVIRONMENT_TEMPLATE_REF_CHANGE }} && |
| git checkout FETCH_HEAD && |
| popd |
| node_name: {{ HOSTNAME_CFG01 }} |
| skip_fail: false |
| {%- endif %} |
| |
| {%- for ENVIRONMENT_CONTEXT_NAME in ENVIRONMENT_CONTEXT_NAMES %} |
| - description: "[EXPERIMENTAL] Upload environment inventory to {{ HOSTNAME_CFG01 }}" |
| upload: |
| local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/ |
| local_filename: {{ ENVIRONMENT_CONTEXT_NAME }} |
| remote_path: /tmp/environment/ |
| node_name: {{ HOSTNAME_CFG01 }} |
| {%- endfor %} |
| |
| - description: "[EXPERIMENTAL] Remove linux.network.interface object from the cluster/system models and use fixed 'environment' model instead" |
| cmd: | |
| set -e; |
| apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev; |
| pip install git+https://github.com/dis-xcom/reclass-tools; |
| reclass-tools del-key parameters.linux.network.interface /srv/salt/reclass/classes/cluster/; |
| reclass-tools del-key parameters.linux.network.interface /srv/salt/reclass/classes/system/; |
| reclass-tools del-key parameters.linux.network.interface /usr/share/salt-formulas/reclass/; |
| |
| if ! reclass-tools get-key 'classes' /srv/salt/reclass/nodes/_generated/{{ HOSTNAME_CFG01 }}.yml | grep -q "environment.{{ ENVIRONMENT_MODEL_INVENTORY_NAME }}$"; then |
| reclass-tools add-key 'classes' 'environment.{{ ENVIRONMENT_MODEL_INVENTORY_NAME }}.reclass_datasource_local' /srv/salt/reclass/nodes/_generated/{{ HOSTNAME_CFG01 }}.yml --merge ; |
| reclass-tools add-key 'classes' 'environment.{{ ENVIRONMENT_MODEL_INVENTORY_NAME }}' /srv/salt/reclass/nodes/_generated/{{ HOSTNAME_CFG01 }}.yml --merge ; |
| fi; |
| |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 1, delay: 5} |
| skip_fail: false |
| |
| - description: "Workaround for PROD-15923: all keepalive instances must be named 'VIP'" |
| cmd: | |
| set -e; |
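| # Rename the first key under the 'instance:' section of every 'keepalived:' block to 'VIP' |
| # (the sed accumulates the block line by line with N before doing the substitution). |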
| find /srv/salt/reclass/classes/{system,cluster}/ -name '*.yml' -type f -exec sed -ri '/^ keepalived:/{:1;N;/\n instance:/!b1 {N; /\n [[:graph:]]+:/ { s/\n instance:\n [[:graph:]]+:/\n instance:\n VIP:/ } } }' {} + |
| |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 1, delay: 5} |
| skip_fail: false |
| |
| - description: "[EXPERIMENTAL] Create environment model for virtual environment" |
| cmd: | |
| set -e; |
| reclass-tools render --template-dir /tmp/environment/environment_template/ \ |
| --output-dir /srv/salt/reclass/classes/environment/ \ |
| {% for ENVIRONMENT_CONTEXT_NAME in ENVIRONMENT_CONTEXT_NAMES %} --context /tmp/environment/{{ENVIRONMENT_CONTEXT_NAME}}{% endfor %} \ |
| --env-name {{ ENVIRONMENT_MODEL_INVENTORY_NAME }} |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 1, delay: 5} |
| skip_fail: false |
| |
| - description: Modify generated model and reclass-system |
| cmd: | |
| export REPLACE_DIRS="/srv/salt/reclass/classes/ /srv/salt/reclass/nodes/" |
| find ${REPLACE_DIRS} -type f -exec sed -i 's/apt_mk_version:.*/apt_mk_version: {{ REPOSITORY_SUITE }}/g' {} + |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 1, delay: 1} |
| skip_fail: false |
| |
| {%- endmacro %} |
| |
| |
| {%- macro MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='') %} |
| {#######################################################} |
| - description: Configure reclass |
| cmd: | |
| set -e; |
| FORMULA_PATH=${FORMULA_PATH:-/usr/share/salt-formulas}; |
| which wget > /dev/null || (apt-get update; apt-get install -y wget); |
| echo "{{ FORMULA_REPOSITORY }}" > /etc/apt/sources.list.d/mcp_salt.list; |
| wget -O - "{{ FORMULA_GPG }}" | apt-key add -; |
| apt-get clean; apt-get update; |
| [ ! -d /srv/salt/reclass/classes/service ] && mkdir -p /srv/salt/reclass/classes/service; |
| declare -a formula_services=({{ FORMULA_SERVICES }}); |
| echo -e "\nInstalling all required salt formulas\n"; |
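| # "${formula_services[@]/#/salt-formula-}" prepends 'salt-formula-' to every element, |
| # so e.g. 'linux' is installed as the 'salt-formula-linux' package. |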
| eatmydata apt-get install -y "${formula_services[@]/#/salt-formula-}"; |
| for formula_service in "${formula_services[@]}"; do |
| echo -e "\nLink service metadata for formula ${formula_service} ...\n"; |
| [ ! -L "/srv/salt/reclass/classes/service/${formula_service}" ] && ln -s ${FORMULA_PATH}/reclass/service/${formula_service} /srv/salt/reclass/classes/service/${formula_service}; |
| done; |
| [ ! -d /srv/salt/env ] && mkdir -p /srv/salt/env; |
| [ ! -L /srv/salt/env/prd ] && ln -s ${FORMULA_PATH}/env /srv/salt/env/prd; |
| [ ! -d /etc/reclass ] && mkdir /etc/reclass; |
| |
| cat << 'EOF' >> /etc/reclass/reclass-config.yml |
| storage_type: yaml_fs |
| pretty_print: True |
| output: yaml |
| inventory_base_uri: /srv/salt/reclass |
| EOF |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 1, delay: 1} |
| skip_fail: false |
| |
| - description: "*Workaround* remove all cfg01 nodes except {{ HOSTNAME_CFG01 }} to not depend on other clusters in 'reclass --top'" |
| cmd: | |
| # Remove all node definitions except {{ HOSTNAME_CFG01 }} so that 'reclass --top' does not rely on them |
| find /srv/salt/reclass/nodes/ -type f -not -name {{ HOSTNAME_CFG01 }}.yml -delete |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 1, delay: 5} |
| skip_fail: false |
| |
| - description: Configure salt adapters on cfg01 |
| cmd: | |
| ln -s /usr/lib/python2.7/dist-packages/reclass/adapters/salt.py /usr/local/sbin/reclass-salt; |
| chmod +x /usr/lib/python2.7/dist-packages/reclass/adapters/salt.py |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 1, delay: 1} |
| skip_fail: false |
| |
| - description: Show reclass-salt --top for cfg01 node |
| cmd: reclass-salt --top |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 1, delay: 5} |
| skip_fail: false |
| |
| - description: Restart salt-master service |
| cmd: systemctl restart salt-master; |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 1, delay: 5} |
| skip_fail: false |
| {%- endmacro %} |
| |
| |
| {%- macro MACRO_INSTALL_SALT_MINIONS() %} |
| {#######################################} |
| {% for ssh in config.underlay.ssh %} |
| - description: Configure salt-minion on {{ ssh['node_name'] }} |
| cmd: | |
| [ ! -d /etc/salt/minion.d ] && mkdir -p /etc/salt/minion.d; |
| cat << "EOF" >> /etc/salt/minion.d/minion.conf |
| id: {{ ssh['node_name'] }} |
| master: {{ config.salt.salt_master_host }} |
| EOF |
| eatmydata apt-get install -y salt-minion; |
| service salt-minion restart; # In case salt-minion was already installed |
| node_name: {{ ssh['node_name'] }} |
| retry: {count: 1, delay: 1} |
| skip_fail: false |
| {% endfor %} |
| |
| |
| - description: Accept salt keys from all the nodes |
| cmd: salt-key -A -y |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 1, delay: 5} |
| skip_fail: true |
| {%- endmacro %} |
| |
| |
| {%- macro MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() %} |
| {##################################################} |
| |
| {# Prepare salt services and nodes settings #} |
| |
| - description: Run 'linux' formula on cfg01 |
| cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' state.sls linux; |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 3, delay: 5} |
| skip_fail: false |
| |
| - description: Run 'openssh' formula on cfg01 |
| cmd: salt --hard-crash --state-output=mixed --state-verbose=False |
| -C 'I@salt:master' state.sls openssh && |
| salt --hard-crash --state-output=mixed --state-verbose=False |
| -C 'I@salt:master' cmd.run "sed -i 's/PasswordAuthentication no/PasswordAuthentication |
| yes/' /etc/ssh/sshd_config && service ssh reload" |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 3, delay: 5} |
| skip_fail: false |
| |
| - description: '*Workaround* of the bug https://mirantis.jira.com/browse/PROD-7962' |
| cmd: salt --hard-crash --state-output=mixed --state-verbose=False |
| '*' cmd.run "echo ' StrictHostKeyChecking no' >> /root/.ssh/config" |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 1, delay: 1} |
| skip_fail: false |
| |
| - description: Run 'salt.master' formula on cfg01 |
| cmd: timeout 600 salt-call -l info --hard-crash --state-output=mixed --state-verbose=False state.sls salt.master; |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 2, delay: 30} |
| skip_fail: false |
| |
| {%- if SALT_FORMULAS_REFS != '' %} |
| - description: Replace the required formulas with the desired versions |
| cmd: | |
| set -e; |
| {%- for formula_set in SALT_FORMULAS_REFS.split(' ') %} |
| {% set formula = formula_set.split(':') %} |
| {% set formula_name = formula[0] %} |
| {% set formula_ref = formula[1] %} |
| {% set formula_dir = '/tmp/salt-formula-' + formula_name %} |
| git clone {{ SALT_FORMULAS_REPO }}/{{ formula_name }} {{ formula_dir }} && |
| pushd {{ formula_dir }} && |
| git fetch {{ SALT_FORMULAS_REPO }}/{{ formula_name }} {{ formula_ref }} && |
| git checkout FETCH_HEAD && |
| popd && |
| if [ -d "{{ formula_dir }}" ]; then |
| echo "Going to replace packaged formula {{ formula_name }}" && |
| rm -rfv /usr/share/salt-formulas/{env,reclass/service}/{{ formula_name }} && |
| ln -v -s "{{ formula_dir }}/{{ formula_name }}" "/usr/share/salt-formulas/env/{{ formula_name }}" && |
| ln -v -s "{{ formula_dir }}/metadata/service/" "/usr/share/salt-formulas/reclass/service/{{ formula_name }}"; |
| else |
| echo "Stopped, directory /root/salt-formula-{{ formula_name }} does not exist!"; |
| fi |
| {%- endfor %} |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 1, delay: 10} |
| skip_fail: false |
| {%- endif %} |
| |
| - description: Refresh pillars on salt-master minion |
| cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' saltutil.refresh_pillar |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 1, delay: 5} |
| skip_fail: false |
| |
| - description: Show reclass-salt --top for salt-master node |
| cmd: reclass-salt --top |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 1, delay: 5} |
| skip_fail: false |
| |
| - description: Sync all salt resources on salt-master minion |
| cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' saltutil.sync_all && sleep 5 |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 1, delay: 5} |
| skip_fail: false |
| |
| - description: Configure linux on master |
| cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' state.sls 'linux.system' |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 1, delay: 5} |
| skip_fail: false |
| |
| - description: Configure salt.minion on master |
| cmd: salt --timeout=120 --hard-crash --state-output=mixed --state-verbose=False |
| -C 'I@salt:master' state.sls salt.minion && sleep 10 |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 3, delay: 10} |
| skip_fail: false |
| |
| - description: Run state 'salt' on master (for salt.api, etc) |
| cmd: salt --hard-crash --state-output=mixed --state-verbose=False |
| -C 'I@salt:master' state.sls salt |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 3, delay: 10} |
| skip_fail: false |
| {%- endmacro %} |
| |
| {%- macro MACRO_GENERATE_INVENTORY() %} |
| {#####################################} |
| - description: Generate the inventory for all nodes into /srv/salt/reclass/nodes/_generated |
| cmd: salt --hard-crash --state-output=mixed --state-verbose=False |
| -C 'I@salt:master' state.sls reclass |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 1, delay: 5} |
| skip_fail: false |
| |
| - description: Refresh pillars on all minions |
| cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 1, delay: 5} |
| skip_fail: false |
| |
| - description: Show reclass-salt --top for all generated nodes |
| cmd: | |
| set -e |
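| # Run the reclass validators only if the salt 'reclass' execution module provides them |
| # (the 'sys.doc | grep' calls act as a capability check). |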
| if salt-call sys.doc reclass.validate_node_params | grep -q reclass.validate_node_params ; then salt-call reclass.validate_nodes_params ; fi |
| if salt-call sys.doc reclass.validate_pillar | grep -q reclass.validate_pillar ; then salt-call reclass.validate_pillar ; fi |
| reclass-salt --top |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 1, delay: 5} |
| skip_fail: false |
| |
| - description: Sync all salt resources |
| cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5 |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 1, delay: 5} |
| skip_fail: false |
| {%- endmacro %} |
| |
| |
| {%- macro MACRO_BOOTSTRAP_ALL_MINIONS() %} |
| {########################################} |
| # Bootstrap all nodes |
| - description: Configure linux on other nodes |
| cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 5, delay: 10} |
| skip_fail: false |
| |
| - description: Configure openssh on all nodes |
| cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system and not cfg01*' state.sls openssh && |
| salt --hard-crash --state-output=mixed --state-verbose=False |
| -C 'I@linux:system and not cfg01*' cmd.run "sed -i 's/PasswordAuthentication no/PasswordAuthentication |
| yes/' /etc/ssh/sshd_config && service ssh reload" |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 1, delay: 5} |
| skip_fail: false |
| |
| - description: Configure salt.minion on other nodes |
| cmd: salt --timeout=120 --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system and not cfg01*' state.sls salt.minion && |
| sleep 10 |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 3, delay: 15} |
| skip_fail: false |
| |
| - description: Check salt minion versions on slaves |
| cmd: salt --timeout=60 '*' test.version |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 3, delay: 15} |
| skip_fail: false |
| |
| - description: Check salt top states on nodes |
| cmd: salt --timeout=60 '*' state.show_top |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 3, delay: 15} |
| skip_fail: false |
| |
| - description: Configure ntp and rsyslog on nodes |
| cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls ntp,rsyslog |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 1, delay: 10} |
| skip_fail: false |
| {%- endmacro %} |
| |
| |
| {%- macro MACRO_NETWORKING_WORKAROUNDS() %} |
| {#########################################} |
| |
| #- description: '*Workaround 1/2* of the bug PROD-9576 to get bond0-connectivity *without* rebooting nodes' |
| # cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False cmd.run |
| # "mkdir -p /tmp/PROD-9576; cd /tmp/PROD-9576; git clone https://gerrit.mcp.mirantis.net/salt-formulas/linux; cd linux; |
| # git fetch https://gerrit.mcp.mirantis.net/salt-formulas/linux refs/changes/54/2354/16 && git checkout FETCH_HEAD; |
| # cp -f linux/network/interface.sls /srv/salt/env/prd/linux/network/; |
| # cp -f linux/map.jinja /srv/salt/env/prd/linux/;" |
| # node_name: {{ HOSTNAME_CFG01 }} |
| # retry: {count: 1, delay: 5} |
| # skip_fail: false |
| |
| - description: '*Workaround* load the bonding module before calling state.linux' |
| cmd: salt -C "I@linux:network:interface:*:type:bond" cmd.run 'modprobe bonding' |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 1, delay: 5} |
| skip_fail: true |
| |
| - description: '*Workaround* install bridge-utils before running linux formula' |
| # The formula removes the default route and then fails when it tries to install the package. |
| cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not |
| cfg01*' cmd.run 'sudo apt-get install -y bridge-utils' |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 1, delay: 5} |
| skip_fail: false |
| |
| {%- endmacro %} |
| |
| {%- macro ADJUST_K8S_OPTS() %} |
| {#########################################} |
| |
| - description: Set k8s deploy parameters |
| cmd: | |
| {% for k8s_opt, value in config.k8s_deploy.items() %} |
| {% if value|string() %} |
| salt-call reclass.cluster_meta_set name={{ k8s_opt }} value={{ value }}; |
| {% endif %} |
| {% endfor %} |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 1, delay: 1} |
| skip_fail: false |
| |
| {%- endmacro %} |
| |
| {%- macro REGISTER_COMPUTE_NODES() %} |
| {#########################################} |
| |
| {% for ssh in config.underlay.ssh %} |
| {% if ssh["node_name"].startswith("cmp") %} |
| {% set node_hostname = ssh["node_name"].split('.')[0] %} |
| - description: Register compute {{ ssh['node_name'] }} |
| cmd: | |
| exec > >(tee -i /tmp/cloud-init-bootstrap.log) 2>&1 |
| export network01_prefix={{ IPV4_NET_ADMIN_PREFIX }} |
| export network02_prefix={{ IPV4_NET_CONTROL_PREFIX }} |
| export cluster_name={{ CLUSTER_NAME }} |
| export config_host={{ config.salt.salt_master_host }} |
| export node_domain={{ DOMAIN_NAME }} |
| export node_os="xenial" |
| export node_hostname={{ node_hostname }} |
| set -xe |
| export BOOTSTRAP_SCRIPT_URL=$bootstrap_script_url |
| export BOOTSTRAP_SCRIPT_URL=${BOOTSTRAP_SCRIPT_URL:-https://raw.githubusercontent.com/salt-formulas/salt-formulas-scripts/master/bootstrap.sh} |
| export DISTRIB_REVISION={{ REPOSITORY_SUITE }} |
| export DISTRIB_REVISION=${DISTRIB_REVISION:-nightly} |
| |
| # Add wrapper to apt-get to avoid race conditions |
| # with cron jobs running 'unattended-upgrades' script |
| aptget_wrapper() { |
| local apt_wrapper_timeout=300 |
| local start_time=$(date '+%s') |
| local fin_time=$((start_time + apt_wrapper_timeout)) |
| while true; do |
| if (( "$(date '+%s')" > fin_time )); then |
| msg="Timeout exceeded ${apt_wrapper_timeout} s. Lock files are still not released. Terminating..." |
| fi |
| if fuser /var/lib/apt/lists/lock >/dev/null 2>&1 || fuser /var/lib/dpkg/lock >/dev/null 2>&1; then |
| echo "Waiting while another apt/dpkg process releases locks ..." |
| sleep 30 |
| continue |
| else |
| apt-get $@ |
| break |
| fi |
| done |
| } |
| |
| # Set default salt version |
| if [ -z "$saltversion" ]; then |
| saltversion="2016.3" |
| fi |
| echo "Using Salt version $saltversion" |
| |
| echo "Preparing base OS ..." |
| |
| case "$node_os" in |
| trusty) |
| # workaround for old cloud-init only configuring the first iface |
| iface_config_dir="/etc/network/interfaces" |
| ifaces=$(ip a | awk '/^[1-9]:/ {print $2}' | grep -v "lo:" | rev | cut -c2- | rev) |
| |
| for iface in $ifaces; do |
| grep $iface $iface_config_dir &> /dev/null || (echo -e "\nauto $iface\niface $iface inet dhcp" >> $iface_config_dir && ifup $iface) |
| done |
| |
| which wget > /dev/null || (aptget_wrapper update; aptget_wrapper install -y wget) |
| |
| # SUGGESTED UPDATE: |
| #export MASTER_IP="$config_host" MINION_ID="$node_hostname.$node_domain" SALT_VERSION=$saltversion |
| #source <(curl -qL ${BOOTSTRAP_SCRIPT_URL}) |
| ## Update BOOTSTRAP_SALTSTACK_OPTS, as by default they contain "-dX" not to start service |
| #BOOTSTRAP_SALTSTACK_OPTS=" stable $SALT_VERSION " |
| #install_salt_minion_pkg |
| |
| # DEPRECATED: |
| echo "deb [arch=amd64] http://apt-mk.mirantis.com/trusty ${DISTRIB_REVISION} salt extra" > /etc/apt/sources.list.d/mcp_salt.list |
| wget -O - http://apt-mk.mirantis.com/public.gpg | apt-key add - |
| |
| echo "deb http://repo.saltstack.com/apt/ubuntu/14.04/amd64/$saltversion trusty main" > /etc/apt/sources.list.d/saltstack.list |
| wget -O - "https://repo.saltstack.com/apt/ubuntu/14.04/amd64/$saltversion/SALTSTACK-GPG-KEY.pub" | apt-key add - |
| |
| aptget_wrapper clean |
| aptget_wrapper update |
| aptget_wrapper install -y salt-common |
| aptget_wrapper install -y salt-minion |
| ;; |
| xenial) |
| |
| # workaround for new cloud-init setting all interfaces statically |
| which resolvconf > /dev/null 2>&1 && systemctl restart resolvconf |
| |
| which wget > /dev/null || (aptget_wrapper update; aptget_wrapper install -y wget) |
| |
| # SUGGESTED UPDATE: |
| #export MASTER_IP="$config_host" MINION_ID="$node_hostname.$node_domain" SALT_VERSION=$saltversion |
| #source <(curl -qL ${BOOTSTRAP_SCRIPT_URL}) |
| ## Update BOOTSTRAP_SALTSTACK_OPTS, as by default they contain "-dX" not to start service |
| #BOOTSTRAP_SALTSTACK_OPTS=" stable $SALT_VERSION " |
| #install_salt_minion_pkg |
| |
| # DEPRECATED: |
| echo "deb [arch=amd64] http://apt-mk.mirantis.com/xenial ${DISTRIB_REVISION} salt extra" > /etc/apt/sources.list.d/mcp_salt.list |
| wget -O - http://apt-mk.mirantis.com/public.gpg | apt-key add - |
| |
| echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/$saltversion xenial main" > /etc/apt/sources.list.d/saltstack.list |
| wget -O - "https://repo.saltstack.com/apt/ubuntu/16.04/amd64/$saltversion/SALTSTACK-GPG-KEY.pub" | apt-key add - |
| |
| aptget_wrapper clean |
| aptget_wrapper update |
| aptget_wrapper install -y salt-minion |
| ;; |
| rhel|centos|centos7|rhel6|rhel7) |
| yum install -y git |
| export MASTER_IP="$config_host" MINION_ID="$node_hostname.$node_domain" SALT_VERSION=$saltversion |
| source <(curl -qL ${BOOTSTRAP_SCRIPT_URL}) |
| # Update BOOTSTRAP_SALTSTACK_OPTS, as by default they contain "-dX" not to start service |
| BOOTSTRAP_SALTSTACK_OPTS=" stable $SALT_VERSION " |
| install_salt_minion_pkg |
| ;; |
| *) |
| echo "OS '$node_os' is not supported." >&2 |
| exit 1 |
| ;; |
| esac |
| |
| echo "Configuring Salt minion ..." |
| [ ! -d /etc/salt/minion.d ] && mkdir -p /etc/salt/minion.d |
| echo -e "id: $node_hostname.$node_domain\nmaster: $config_host" > /etc/salt/minion.d/minion.conf |
| |
| service salt-minion restart || wait_condition_send "FAILURE" "Failed to restart salt-minion service." |
| |
| sleep 1 |
| |
| echo "Classifying node ..." |
| os_codename=$(salt-call grains.item oscodename --out key | awk '/oscodename/ {print $2}') |
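| # The awk calls below pick, for each network prefix, the IP (field 2) and the interface |
| # name (field 7) from the matching 'inet <prefix>.' line of 'ip a'; unmatched networks stay empty. |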
| node_network01_ip="$(ip a | awk -v prefix="^ inet $network01_prefix[.]" '$0 ~ prefix {split($2, a, "/"); print a[1]}')" |
| node_network02_ip="$(ip a | awk -v prefix="^ inet $network02_prefix[.]" '$0 ~ prefix {split($2, a, "/"); print a[1]}')" |
| node_network03_ip="$(ip a | awk -v prefix="^ inet $network03_prefix[.]" '$0 ~ prefix {split($2, a, "/"); print a[1]}')" |
| node_network04_ip="$(ip a | awk -v prefix="^ inet $network04_prefix[.]" '$0 ~ prefix {split($2, a, "/"); print a[1]}')" |
| node_network05_ip="$(ip a | awk -v prefix="^ inet $network05_prefix[.]" '$0 ~ prefix {split($2, a, "/"); print a[1]}')" |
| |
| node_network01_iface="$(ip a | awk -v prefix="^ inet $network01_prefix[.]" '$0 ~ prefix {split($7, a, "/"); print a[1]}')" |
| node_network02_iface="$(ip a | awk -v prefix="^ inet $network02_prefix[.]" '$0 ~ prefix {split($7, a, "/"); print a[1]}')" |
| node_network03_iface="$(ip a | awk -v prefix="^ inet $network03_prefix[.]" '$0 ~ prefix {split($7, a, "/"); print a[1]}')" |
| node_network04_iface="$(ip a | awk -v prefix="^ inet $network04_prefix[.]" '$0 ~ prefix {split($7, a, "/"); print a[1]}')" |
| node_network05_iface="$(ip a | awk -v prefix="^ inet $network05_prefix[.]" '$0 ~ prefix {split($7, a, "/"); print a[1]}')" |
| |
| if [ "$node_network05_iface" != "" ]; then |
| node_network05_hwaddress="$(cat /sys/class/net/$node_network05_iface/address)" |
| fi |
| |
| # find additional parameters (every environment variable starting with param_) |
| more_params=$(env | grep "^param_" | sed -e 's/=/":"/g' -e 's/^/"/g' -e 's/$/",/g' | tr "\n" " " | sed 's/, $//g') |
| if [ "$more_params" != "" ]; then |
| echo "Additional params: $more_params" |
| more_params=", $more_params" |
| fi |
| |
| salt-call event.send "reclass/minion/classify" "{\"node_master_ip\": \"$config_host\", \"node_os\": \"${os_codename}\", \"node_deploy_ip\": \"${node_network01_ip}\", \"node_deploy_iface\": \"${node_network01_iface}\", \"node_control_ip\": \"${node_network02_ip}\", \"node_control_iface\": \"${node_network02_iface}\", \"node_tenant_ip\": \"${node_network03_ip}\", \"node_tenant_iface\": \"${node_network03_iface}\", \"node_external_ip\": \"${node_network04_ip}\", \"node_external_iface\": \"${node_network04_iface}\", \"node_baremetal_ip\": \"${node_network05_ip}\", \"node_baremetal_iface\": \"${node_network05_iface}\", \"node_baremetal_hwaddress\": \"${node_network05_hwaddress}\", \"node_domain\": \"$node_domain\", \"node_cluster\": \"$cluster_name\", \"node_hostname\": \"$node_hostname\"${more_params}}" |
| |
| sleep 5 |
| |
| salt-call saltutil.sync_all |
| salt-call mine.flush |
| salt-call mine.update |
| node_name: {{ ssh['node_name'] }} |
| retry: {count: 1, delay: 1} |
| skip_fail: false |
| {%- endif %} |
| {%- endfor %} |
| |
| - description: Refresh pillars on all minions |
| cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 1, delay: 5} |
| skip_fail: false |
| |
| - description: Sync all salt resources |
| cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5 |
| node_name: {{ HOSTNAME_CFG01 }} |
| retry: {count: 1, delay: 5} |
| skip_fail: false |
| |
| {%- endmacro %} |