Offline deploy test

Scenario:
* The apt node hosts dnsmasq and an nginx proxy,
  has connectivity to the internet and acts as a proxy to
  mcp.mirror
* All other nodes use the IP of the apt node as their nameserver
* All other nodes have no connectivity to the internet
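
For illustration, a minimal sketch of the kind of apt-node setup this
scenario assumes (the 10.170.0.1 address and the upstream hosts are
placeholder assumptions; only the *.local.test names come from the template):

    # dnsmasq on the apt node: answer the local mirror names with the apt node IP
    cat << 'EOF' > /etc/dnsmasq.d/offline.conf
    address=/.mirantis.local.test/10.170.0.1
    address=/.saltstack.local.test/10.170.0.1
    EOF
    systemctl restart dnsmasq

    # nginx on the apt node: forward requests for a local mirror name to the real mirror
    cat << 'EOF' > /etc/nginx/sites-enabled/mcp-mirror-proxy.conf
    server {
        listen 80;
        server_name apt.mirantis.local.test;
        location / { proxy_pass http://apt.mirantis.com/; }  # assumed upstream
    }
    EOF
    systemctl reload nginx

    # every other node: use the apt node as the only nameserver
    echo 'nameserver 10.170.0.1' > /etc/resolv.conf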

Change-Id: I94f5bba191f6846e5e10124f2850a6b4f8be1cca
diff --git a/tcp_tests/templates/shared-salt-offline.yaml b/tcp_tests/templates/shared-salt-offline.yaml
new file mode 100644
index 0000000..b4a9dab
--- /dev/null
+++ b/tcp_tests/templates/shared-salt-offline.yaml
@@ -0,0 +1,642 @@
+{# Collection of common macros shared across different deployments #}
+
+{% set SALT_MODELS_BRANCH = os_env('SALT_MODELS_BRANCH','master') %}
+{% set SALT_MODELS_COMMIT = os_env('SALT_MODELS_COMMIT','master') %}
+{# Reference to a patch that should be applied to the model if required, for example: export SALT_MODELS_REF_CHANGE=refs/changes/19/7219/12 #}
+{% set SALT_MODELS_REF_CHANGE = os_env('SALT_MODELS_REF_CHANGE', '') %}
+{# Pin to a specified commit in salt-models/reclass-system #}
+{% set SALT_MODELS_SYSTEM_REPOSITORY = os_env('SALT_MODELS_SYSTEM_REPOSITORY','https://gerrit.mcp.mirantis.local.test/salt-models/reclass-system') %}
+{% set SALT_MODELS_SYSTEM_COMMIT = os_env('SALT_MODELS_SYSTEM_COMMIT','') %}
+{% set SALT_MODELS_SYSTEM_REF_CHANGE = os_env('SALT_MODELS_SYSTEM_REF_CHANGE','') %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME','mcp-ocata-local-repo-dvr') %}
+
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{# Address pools for reclass cluster model are taken in the following order:
+ # 1. environment variables,
+ # 2. config.underlay.address_pools based on fuel-devops address pools
+ #    (see generated '.ini' file after underlay is created),
+ # 3. defaults #}
+{% set address_pools = config.underlay.address_pools %}
+{% set IPV4_NET_ADMIN = os_env('IPV4_NET_ADMIN', address_pools.get('admin-pool01', '192.168.10.0/24')) %}
+{% set IPV4_NET_CONTROL = os_env('IPV4_NET_CONTROL', address_pools.get('private-pool01', '172.16.10.0/24')) %}
+{% set IPV4_NET_TENANT = os_env('IPV4_NET_TENANT', address_pools.get('tenant-pool01', '10.1.0.0/24')) %}
+{% set IPV4_NET_EXTERNAL = os_env('IPV4_NET_EXTERNAL', address_pools.get('external-pool01', '10.16.0.0/24')) %}
+{% set IPV4_NET_ADMIN_PREFIX = '.'.join(IPV4_NET_ADMIN.split('.')[0:3]) %}
+{% set IPV4_NET_CONTROL_PREFIX = '.'.join(IPV4_NET_CONTROL.split('.')[0:3]) %}
+{% set IPV4_NET_TENANT_PREFIX = '.'.join(IPV4_NET_TENANT.split('.')[0:3]) %}
+{% set IPV4_NET_EXTERNAL_PREFIX = '.'.join(IPV4_NET_EXTERNAL.split('.')[0:3]) %}
+
+{# Format for formula replacement:
+ # space separated string:
+ # export SALT_FORMULAS_REFS='apache:refs/changes/xxxx kubernetes:refs/changes/xxxx' #}
+
+{% set SALT_FORMULAS_REFS = os_env('SALT_FORMULAS_REFS', '') %}
+{% set SALT_FORMULAS_REPO = os_env('SALT_FORMULAS_REPO', 'https://gerrit.mcp.mirantis.local.test/salt-formulas') %}
+
+{%- macro MACRO_INSTALL_PACKAGES_ON_NODES(NODE_NAME) %}
+{#########################################}
+
+- description: 'Configure apt repositories and keys on the node and install packages'
+  cmd: |
+    rm -rf trusted* ;
+    rm -rf /etc/apt/sources.list ;
+    echo "deb [arch=amd64] http://apt.mirantis.local.test/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+    wget -O - http://apt.mirantis.local.test/public.gpg | apt-key add -;
+    echo "deb http://repo.saltstack.local.test/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+    wget -O - http://repo.saltstack.local.test/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+    echo "deb http://mirror.mcp.mirantis.local.test/ubuntu xenial main" > /etc/apt/sources.list.d/ubuntu_main.list
+    echo "deb http://mirror.mcp.mirantis.local.test/ubuntu xenial universe" > /etc/apt/sources.list.d/ubuntu_universe.list
+    eatmydata apt-get clean && apt-get update;
+  node_name: {{ NODE_NAME }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_SALT_MASTER() %}
+{######################################}
+- description: Installing salt master on cfg01
+  cmd:  eatmydata apt-get install -y --allow-unauthenticated reclass git salt-master
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Configure salt-master on cfg01
+  cmd: |
+    cat << 'EOF' >> /etc/salt/master.d/master.conf
+    file_roots:
+      base:
+      - /usr/share/salt-formulas/env
+    pillar_opts: False
+    open_mode: True
+    reclass: &reclass
+      storage_type: yaml_fs
+      inventory_base_uri: /srv/salt/reclass
+    ext_pillar:
+      - reclass: *reclass
+    master_tops:
+      reclass: *reclass
+    EOF
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+{%- endmacro %}
+
+
+{%- macro MACRO_CLONE_RECLASS_MODELS(IS_CONTRAIL_LAB=false) %}
+{############################################################}
+{# Creates a 'cluster' model from cookiecutter-templates and 'environment' model from uploaded template #}
+
+- description: Clone reclass models with submodules
+  cmd: |
+    set -e;
+    export GIT_SSL_NO_VERIFY=true; git clone -b {{ SALT_MODELS_BRANCH }} {{ SALT_MODELS_REPOSITORY }} /srv/salt/reclass;
+    pushd /srv/salt/reclass && \
+    git config submodule."classes/system".url "{{ SALT_MODELS_SYSTEM_REPOSITORY }}" ;
+    git submodule update --init --recursive && \
+    {%- if SALT_MODELS_REF_CHANGE != '' %}
+    {%- for item in SALT_MODELS_REF_CHANGE.split(" ") %}
+    git fetch {{ SALT_MODELS_REPOSITORY }} {{ item }} && git cherry-pick FETCH_HEAD && \
+    {%- endfor %}
+    {%- elif SALT_MODELS_COMMIT != 'master' %}
+    git checkout {{ SALT_MODELS_COMMIT }} && \
+    {%- endif %}
+    {%- if SALT_MODELS_SYSTEM_COMMIT != '' %}
+    pushd classes/system/ && \
+    git checkout {{ SALT_MODELS_SYSTEM_COMMIT }} && \
+    popd && \
+    {%- elif SALT_MODELS_SYSTEM_REF_CHANGE != '' -%}
+    pushd classes/system/ && \
+    {%- for item in SALT_MODELS_SYSTEM_REF_CHANGE.split(" ") %}
+    git fetch {{ SALT_MODELS_SYSTEM_REPOSITORY }} {{ item }} && git cherry-pick FETCH_HEAD && \
+    {%- endfor %}
+    popd && \
+    {%- else %}
+    git submodule update --init --recursive && \
+    {%- endif %}
+    popd;
+    mkdir -p /srv/salt/reclass/classes/service;
+    rm -rf /srv/salt/reclass/nodes/  # For backward compatibility. New cfg node will be regenerated here
+    mkdir -p /srv/salt/reclass/nodes/_generated/;
+
+    # Replace the networks with intermediate keywords first to avoid collisions
+    # between already replaced networks and networks still to be replaced.
+    # For example, if the generated IPV4_NET_ADMIN_PREFIX=10.16.0, there is a risk of replacing twice:
+    # 192.168.10 -> 10.16.0 (generated admin network)
+    # 10.16.0 -> <external network>
+    # So replace the hardcoded networks with keywords, and then the keywords with the desired networks.
+    export REPLACE_DIRS="/srv/salt/reclass/classes/ /srv/salt/reclass/nodes/"
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/192\.168\.10\./==IPV4_NET_ADMIN_PREFIX==/g' {} +
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/172\.16\.10\./==IPV4_NET_CONTROL_PREFIX==/g' {} +
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.1\.0\./==IPV4_NET_TENANT_PREFIX==/g' {} +
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.16\.0\./==IPV4_NET_EXTERNAL_PREFIX==/g' {} +
+
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_ADMIN_PREFIX==/{{ IPV4_NET_ADMIN_PREFIX }}./g' {} +
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_CONTROL_PREFIX==/{{ IPV4_NET_CONTROL_PREFIX }}./g' {} +
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_TENANT_PREFIX==/{{ IPV4_NET_TENANT_PREFIX }}./g' {} +
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_EXTERNAL_PREFIX==/{{ IPV4_NET_EXTERNAL_PREFIX }}./g' {} +
+
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/apt_mk_version:.*/apt_mk_version: {{ REPOSITORY_SUITE }}/g' {} +
+
+    {%- if IS_CONTRAIL_LAB %}
+    # vSRX IPs for tcp-qa images have 172.16.10.90 hardcoded
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/opencontrail_router01_address:.*/opencontrail_router01_address: 172.16.10.90/g' {} +
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/infra_config_deploy_address: 1.*/infra_config_deploy_address: {{ IPV4_NET_ADMIN_PREFIX }}.15/g' {} +
+    {%- endif %}
+
+    # Disable checking out the model from the remote repository
+    cat << 'EOF' >> /srv/salt/reclass/nodes/_generated/{{ HOSTNAME_CFG01 }}.yml
+    classes:
+    - cluster.{{ LAB_CONFIG_NAME }}.infra.config
+    parameters:
+      _param:
+        linux_system_codename: xenial
+        reclass_data_revision: master
+      linux:
+        system:
+          name: cfg01
+          domain: {{ DOMAIN_NAME }}
+      reclass:
+        storage:
+          data_source:
+            engine: local
+    EOF
+
+    # Show the changes to the console
+    cd /srv/salt/reclass/; git diff
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+{%- endmacro %}
+
+
+{%- macro MACRO_GENERATE_COOKIECUTTER_MODEL(IS_CONTRAIL_LAB=false, CONTROL_VLAN=None, TENANT_VLAN=None) %}
+{###################################################################}
+{%- set CLUSTER_CONTEXT_PATH = '/tmp/' + CLUSTER_CONTEXT_NAME %}
+- description: "[EXPERIMENTAL] Upload cookiecutter-templates context to {{ HOSTNAME_CFG01 }}"
+  upload:
+    local_path:  {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
+    local_filename: {{ CLUSTER_CONTEXT_NAME }}
+    remote_path: /tmp/
+  node_name: {{ HOSTNAME_CFG01 }}
+
+- description: Create cluster model from cookiecutter templates
+  cmd: |
+    set -e;
+    pip install cookiecutter
+    export GIT_SSL_NO_VERIFY=true; git clone  https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates /tmp/cookiecutter-templates
+    mkdir -p /srv/salt/reclass/classes/cluster/
+    mkdir -p /srv/salt/reclass/classes/system/
+    mkdir -p /srv/salt/reclass/classes/service/
+    rm -rf /srv/salt/reclass/nodes/  # For backward compatibility. New cfg node will be regenerated here
+    mkdir -p /srv/salt/reclass/nodes/_generated
+
+    # Override some context parameters
+    sed -i 's/cluster_name:.*/cluster_name: {{ LAB_CONFIG_NAME }}/g' {{ CLUSTER_CONTEXT_PATH }}
+    sed -i 's/cluster_domain:.*/cluster_domain: {{ DOMAIN_NAME }}/g' {{ CLUSTER_CONTEXT_PATH }}
+    {%- if CONTROL_VLAN  %}
+    sed -i 's/control_vlan:.*/control_vlan: {{ CONTROL_VLAN }}/g' {{ CLUSTER_CONTEXT_PATH }}
+    {%- endif %}
+    {%- if TENANT_VLAN  %}
+    sed -i 's/tenant_vlan:.*/tenant_vlan: {{ TENANT_VLAN }}/g' {{ CLUSTER_CONTEXT_PATH }}
+    {%- endif %}
+
+    # Temporary workaround (with hardcoded address .90 -> .15) of bug https://mirantis.jira.com/browse/PROD-14377
+    # sed -i 's/salt_master_address:.*/salt_master_address: {{ IPV4_NET_CONTROL_PREFIX }}.15/g' {{ CLUSTER_CONTEXT_PATH }}
+    # sed -i 's/salt_master_management_address:.*/salt_master_management_address: {{ IPV4_NET_ADMIN_PREFIX }}.15/g' {{ CLUSTER_CONTEXT_PATH }}
+
+    # Replace the networks with intermediate keywords first to avoid collisions
+    # between already replaced networks and networks still to be replaced.
+    # For example, if the generated IPV4_NET_ADMIN_PREFIX=10.16.0, there is a risk of replacing twice:
+    # 192.168.10 -> 10.16.0 (generated admin network)
+    # 10.16.0 -> <external network>
+    # So replace the hardcoded networks with keywords, and then the keywords with the desired networks.
+    sed -i 's/10\.167\.5/==IPV4_NET_ADMIN_PREFIX==/g' {{ CLUSTER_CONTEXT_PATH }}
+    sed -i 's/10\.167\.4/==IPV4_NET_CONTROL_PREFIX==/g' {{ CLUSTER_CONTEXT_PATH }}
+    sed -i 's/10\.167\.6/==IPV4_NET_TENANT_PREFIX==/g' {{ CLUSTER_CONTEXT_PATH }}
+    sed -i 's/172\.17\.16\./==IPV4_NET_EXTERNAL_PREFIX==/g' {{ CLUSTER_CONTEXT_PATH }}
+
+    sed -i 's/==IPV4_NET_ADMIN_PREFIX==/{{ IPV4_NET_ADMIN_PREFIX }}/g' {{ CLUSTER_CONTEXT_PATH }}
+    sed -i 's/==IPV4_NET_CONTROL_PREFIX==/{{ IPV4_NET_CONTROL_PREFIX }}/g' {{ CLUSTER_CONTEXT_PATH }}
+    sed -i 's/==IPV4_NET_TENANT_PREFIX==/{{ IPV4_NET_TENANT_PREFIX }}/g' {{ CLUSTER_CONTEXT_PATH }}
+    sed -i 's/==IPV4_NET_EXTERNAL_PREFIX==/{{ IPV4_NET_EXTERNAL_PREFIX }}./g' {{ CLUSTER_CONTEXT_PATH }}
+
+    for i in $(ls /tmp/cookiecutter-templates/cluster_product/); do
+      python /tmp/cookiecutter-templates/generate.py \
+        --template /tmp/cookiecutter-templates/cluster_product/$i \
+        --config-file {{ CLUSTER_CONTEXT_PATH }} \
+        --output-dir /srv/salt/reclass/classes/cluster/;
+    done
+
+    export GIT_SSL_NO_VERIFY=true; git clone https://gerrit.mcp.mirantis.net/salt-models/reclass-system /srv/salt/reclass/classes/system/
+
+    # Create the cfg01 node and disable checking out the model from the remote repository
+    cat << 'EOF' >> /srv/salt/reclass/nodes/_generated/{{ HOSTNAME_CFG01 }}.yml
+    classes:
+    - system.openssh.server.team.all
+    - cluster.{{ LAB_CONFIG_NAME }}.infra.config
+    EOF
+
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Modify generated model and reclass-system if necessary
+  cmd: |
+    set -e;
+    {%- if SALT_MODELS_SYSTEM_COMMIT != '' %}
+    pushd /srv/salt/reclass/classes/system/
+    git checkout {{ SALT_MODELS_SYSTEM_COMMIT }} && \
+    popd
+    {%- elif SALT_MODELS_SYSTEM_REF_CHANGE != '' -%}
+    pushd /srv/salt/reclass/classes/system/
+    {%- for item in SALT_MODELS_SYSTEM_REF_CHANGE.split(" ") %}
+    git fetch {{ SALT_MODELS_SYSTEM_REPOSITORY }} {{ item }} && git cherry-pick FETCH_HEAD
+    {%- endfor %}
+    popd
+    {%- endif %}
+
+    export REPLACE_DIRS="/srv/salt/reclass/classes/ /srv/salt/reclass/nodes/"
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/apt_mk_version:.*/apt_mk_version: {{ REPOSITORY_SUITE }}/g' {} +
+
+    {%- if IS_CONTRAIL_LAB %}
+    # vSRX IPs for tcp-qa images have 172.16.10.90 hardcoded
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/opencontrail_router01_address:.*/opencontrail_router01_address: 172.16.10.90/g' {} +
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/infra_config_deploy_address: 1.*/infra_config_deploy_address: {{ IPV4_NET_ADMIN_PREFIX }}.15/g' {} +
+    {%- endif %}
+
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+{%- endmacro %}
+
+
+{%- macro MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() %}
+{########################################################}
+
+- description: "[EXPERIMENTAL] Clone 'environment-template' repository to {{ HOSTNAME_CFG01 }}"
+  cmd: |
+    set -e;
+    mkdir -p /tmp/environment/;
+    export GIT_SSL_NO_VERIFY=true; git clone https://github.com/Mirantis/environment-template /tmp/environment/environment_template
+  node_name: {{ HOSTNAME_CFG01 }}
+  skip_fail: false
+
+{%- for ENVIRONMENT_CONTEXT_NAME in ENVIRONMENT_CONTEXT_NAMES %}
+- description: "[EXPERIMENTAL] Upload environment inventory to {{ HOSTNAME_CFG01 }}"
+  upload:
+    local_path:  {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
+    local_filename: {{ ENVIRONMENT_CONTEXT_NAME }}
+    remote_path: /tmp/environment/
+  node_name: {{ HOSTNAME_CFG01 }}
+{%- endfor %}
+
+- description: "[EXPERIMENTAL] Remove linux.network.interface object from the cluster/system models and use fixed 'environment' model instead"
+  cmd: |
+    set -e;
+    apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
+    pip install git+https://github.com/dis-xcom/reclass-tools;
+    reclass-tools del-key parameters.linux.network.interface /srv/salt/reclass/classes/cluster/;
+    reclass-tools del-key parameters.linux.network.interface /srv/salt/reclass/classes/system/;
+    reclass-tools del-key parameters.linux.network.interface /usr/share/salt-formulas/reclass/;
+
+    if ! reclass-tools get-key 'classes' /srv/salt/reclass/nodes/{{ HOSTNAME_CFG01 }}.yml | grep -q "environment.{{ ENVIRONMENT_MODEL_INVENTORY_NAME }}$"; then
+      reclass-tools add-key 'classes' 'environment.{{ ENVIRONMENT_MODEL_INVENTORY_NAME }}.reclass_datasource_local' /srv/salt/reclass/nodes/_generated/{{ HOSTNAME_CFG01 }}.yml --merge ;
+      reclass-tools add-key 'classes' 'environment.{{ ENVIRONMENT_MODEL_INVENTORY_NAME }}' /srv/salt/reclass/nodes/_generated/{{ HOSTNAME_CFG01 }}.yml --merge ;
+    fi;
+
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: "Workaround for PROD-14756: all roles must use service.keepalived.cluster.single with the default 'VIP' instance"
+  cmd: |
+    set -e;
+    find /srv/salt/reclass/classes/cluster/ -type f -exec sed -i 's/system.keepalived.*/service.keepalived.cluster.single/g' {} +
+    find /srv/salt/reclass/classes/system/ -type f -exec sed -i 's/system.keepalived.*/service.keepalived.cluster.single/g' {} +
+
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: "[EXPERIMENTAL] Create environment model for virtual environment"
+  cmd: |
+    set -e;
+    reclass-tools render --template-dir /tmp/environment/environment_template/ \
+                         --output-dir /srv/salt/reclass/classes/environment/ \
+                         {% for ENVIRONMENT_CONTEXT_NAME in ENVIRONMENT_CONTEXT_NAMES %} --context /tmp/environment/{{ENVIRONMENT_CONTEXT_NAME}}{% endfor %} \
+                         --env-name {{ ENVIRONMENT_MODEL_INVENTORY_NAME }}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Modify generated model and reclass-system
+  cmd: |
+    export REPLACE_DIRS="/srv/salt/reclass/classes/ /srv/salt/reclass/nodes/"
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/apt_mk_version:.*/apt_mk_version: {{ REPOSITORY_SUITE }}/g' {} +
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+{%- endmacro %}
+
+
+{%- macro MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='') %}
+{#######################################################}
+- description: Configure reclass
+  cmd: |
+    set -e;
+    FORMULA_PATH=${FORMULA_PATH:-/usr/share/salt-formulas};
+    FORMULA_REPOSITORY=${FORMULA_REPOSITORY:-deb [arch=amd64] http://apt.mirantis.local.test/xenial {{ REPOSITORY_SUITE }} salt};
+    FORMULA_GPG=${FORMULA_GPG:-http://apt.mirantis.local.test/public.gpg};
+    which wget > /dev/null || (apt-get update; apt-get install -y wget);
+    echo "${FORMULA_REPOSITORY}" > /etc/apt/sources.list.d/mcp_salt.list;
+    wget -O - "${FORMULA_GPG}" | apt-key add -;
+    apt-get clean; apt-get update;
+    [ ! -d /srv/salt/reclass/classes/service ] && mkdir -p /srv/salt/reclass/classes/service;
+    declare -a formula_services=({{ FORMULA_SERVICES }});
+    echo -e "\nInstalling all required salt formulas\n";
+    eatmydata apt-get install -y "${formula_services[@]/#/salt-formula-}";
+    for formula_service in "${formula_services[@]}"; do
+      echo -e "\nLink service metadata for formula ${formula_service} ...\n";
+      [ ! -L "/srv/salt/reclass/classes/service/${formula_service}" ] && ln -s ${FORMULA_PATH}/reclass/service/${formula_service} /srv/salt/reclass/classes/service/${formula_service};
+    done;
+    [ ! -d /srv/salt/env ] && mkdir -p /srv/salt/env;
+    [ ! -L /srv/salt/env/prd ] && ln -s ${FORMULA_PATH}/env /srv/salt/env/prd;
+    [ ! -d /etc/reclass ] && mkdir /etc/reclass;
+
+    cat << 'EOF' >> /etc/reclass/reclass-config.yml
+    storage_type: yaml_fs
+    pretty_print: True
+    output: yaml
+    inventory_base_uri: /srv/salt/reclass
+    EOF
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: "*Workaround* remove all cfg01 nodes except {{ HOSTNAME_CFG01 }} to not depend on other clusters in 'reclass --top'"
+  cmd: |
+    # Remove all other nodes except {{ HOSTNAME_CFG01 }} to not rely on them for 'reclass --top'
+    find /srv/salt/reclass/nodes/ -type f -not -name {{ HOSTNAME_CFG01 }}.yml -delete
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Configure salt adapters on cfg01
+  cmd: |
+    ln -s /usr/lib/python2.7/dist-packages/reclass/adapters/salt.py /usr/local/sbin/reclass-salt;
+    chmod +x /usr/lib/python2.7/dist-packages/reclass/adapters/salt.py
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Show reclass-salt --top for cfg01 node
+  cmd: reclass-salt --top
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Restart salt-master service
+  cmd: systemctl restart salt-master;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+{%- endmacro %}
+
+
+{%- macro MACRO_INSTALL_SALT_MINIONS() %}
+{#######################################}
+{% for ssh in config.underlay.ssh %}
+- description: Configure salt-minion on {{ ssh['node_name'] }}
+  cmd: |
+    [ ! -d /etc/salt/minion.d ] && mkdir -p /etc/salt/minion.d;
+    cat << "EOF" >> /etc/salt/minion.d/minion.conf
+    id: {{ ssh['node_name'] }}
+    master: {{ config.salt.salt_master_host }}
+    EOF
+    eatmydata apt-get install -y salt-minion;
+    service salt-minion restart;  # In case salt-minion was already installed
+  node_name: {{ ssh['node_name'] }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+{% endfor %}
+
+
+- description: Accept salt keys from all the nodes
+  cmd: salt-key -A -y
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+{%- endmacro %}
+
+
+{%- macro MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() %}
+{##################################################}
+
+{# Prepare salt services and nodes settings #}
+
+- description: Run 'linux' formula on cfg01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' state.sls linux;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+- description: Run 'openssh' formula on cfg01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@salt:master' state.sls openssh &&
+    salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@salt:master' cmd.run "sed -i 's/PasswordAuthentication no/PasswordAuthentication
+    yes/' /etc/ssh/sshd_config && service ssh reload"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+- description: '*Workaround* of the bug https://mirantis.jira.com/browse/PROD-7962'
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    '*' cmd.run "echo '    StrictHostKeyChecking no' >> /root/.ssh/config"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Run 'salt.master' formula on cfg01
+  cmd: timeout 120 salt --timeout=120 --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' state.sls salt.master;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+{%- if SALT_FORMULAS_REFS != '' %}
+- description: Replace the required formulas with the desired versions
+  cmd: |
+    set -e;
+    {%- for formula_set in SALT_FORMULAS_REFS.split(' ') %}
+    {% set formula = formula_set.split(':') %}
+    {% set formula_name = formula[0] %}
+    {% set formula_ref = formula[1] %}
+    {% set formula_dir = '/tmp/salt-formula-' + formula_name %}
+    git clone {{ SALT_FORMULAS_REPO }}/{{ formula_name }} {{ formula_dir }} &&
+    pushd {{ formula_dir }} &&
+    git fetch {{ SALT_FORMULAS_REPO }}/{{ formula_name }} {{ formula_ref }} &&
+    git checkout FETCH_HEAD &&
+    popd &&
+    if [ -d "{{ formula_dir }}" ]; then
+    echo "Going to replace packaged formula {{ formula_name }}" &&
+    rm -rfv /usr/share/salt-formulas/{env,reclass/service}/{{ formula_name }} &&
+    ln -v -s "{{ formula_dir }}/{{ formula_name }}" "/usr/share/salt-formulas/env/{{ formula_name }}" &&
+    ln -v -s "{{ formula_dir }}/metadata/service/" "/usr/share/salt-formulas/reclass/service/{{ formula_name }}";
+    else
+    echo "Stopped, directory /root/salt-formula-{{ formula_name }} does not exist!";
+    fi
+    {%- endfor %}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+{%- endif %}
+
+- description: Refresh pillars on salt-master minion
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' saltutil.refresh_pillar
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Show reclass-salt --top for salt-master node
+  cmd: reclass-salt --top
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Sync all salt resources on salt-master minion
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' saltutil.sync_all && sleep 5
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Configure linux on master
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' state.sls 'linux.system'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Configure salt.minion on master
+  cmd: salt --timeout=120 --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@salt:master' state.sls salt.minion && sleep 10
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 10}
+  skip_fail: false
+
+- description: Run state 'salt' on master (for salt.api, etc)
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@salt:master' state.sls salt
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 10}
+  skip_fail: false
+{%- endmacro %}
+
+{%- macro MACRO_GENERATE_INVENTORY() %}
+{#####################################}
+- description: Generate inventory for all the nodes into /srv/salt/reclass/nodes/_generated
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@salt:master' state.sls reclass
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Refresh pillars on all minions
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Show reclass-salt --top for all generated nodes
+  cmd: |
+    set -e
+    if salt-call sys.doc reclass.validate_node_params | grep -q reclass.validate_node_params ; then salt-call reclass.validate_nodes_params ; fi
+    if salt-call sys.doc reclass.validate_pillar | grep -q reclass.validate_pillar ; then salt-call reclass.validate_pillar ; fi
+    reclass-salt --top
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Sync all salt resources
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+{%- endmacro %}
+
+
+{%- macro MACRO_BOOTSTRAP_ALL_MINIONS() %}
+{########################################}
+# Bootstrap all nodes
+- description: Configure linux on other nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 5, delay: 10}
+  skip_fail: false
+
+- description: Configure openssh on all nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system and not cfg01*' state.sls openssh &&
+    salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@linux:system and not cfg01*' cmd.run "sed -i 's/PasswordAuthentication no/PasswordAuthentication
+    yes/' /etc/ssh/sshd_config && service ssh reload"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Configure salt.minion on other nodes
+  cmd: salt --timeout=120 --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system and not cfg01*' state.sls salt.minion &&
+    sleep 10
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 15}
+  skip_fail: false
+
+- description: Check salt minion versions on slaves
+  cmd: salt '*' test.version
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 15}
+  skip_fail: false
+
+- description: Check salt top states on nodes
+  cmd: salt '*' state.show_top
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Configure ntp and rsyslog on nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls ntp,rsyslog
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+{%- endmacro %}
+
+
+{%- macro MACRO_NETWORKING_WORKAROUNDS() %}
+{#########################################}
+
+- description: '*Workaround 1/2* of the bug PROD-9576 to get bond0-connectivity *without* rebooting nodes'
+  cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False cmd.run
+    "mkdir -p /tmp/PROD-9576; cd /tmp/PROD-9576; git clone https://gerrit.mcp.mirantis.net/salt-formulas/linux; cd linux;
+    git fetch https://gerrit.mcp.mirantis.net/salt-formulas/linux refs/changes/54/2354/16 && git checkout FETCH_HEAD;
+    cp -f linux/network/interface.sls /srv/salt/env/prd/linux/network/;
+    cp -f linux/map.jinja /srv/salt/env/prd/linux/;"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: '*Workaround* Load bonding module before calling state.linux'
+  cmd: salt -C "I@linux:network:interface:*:type:bond" cmd.run 'modprobe bonding'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: '*Workaround* install bridge-utils before running linux formula'
+  # The formula removes the default route and then fails when trying to install the package.
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False  -C '* and not
+    cfg01*' cmd.run 'sudo apt-get install -y bridge-utils'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{%- endmacro %}