Move common parts of salt deployment into shared-salt.yaml

- most of the common steps for bootstrapping salt were moved to
  shared-salt.yaml (see the usage sketch below)
- the default value of REPOSITORY_SUITE was changed to 'testing'
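
A deployment-specific salt.yaml can then import these macros and call
them in order. A minimal sketch, assuming a lab named
'virtual-mcp11-ovs' and an abbreviated formula list (both illustrative,
not part of this change):

    {# assumed lab name; HOSTNAME_CFG01 comes from that lab's underlay template #}
    {% from 'virtual-mcp11-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
    {% import 'shared-salt.yaml' as SHARED with context %}

    {{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
    {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
    {{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp"') }}
    {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
    {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
    {{ SHARED.MACRO_GENERATE_INVENTORY() }}
    {{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}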

Change-Id: I7bd66b8e84d266ddd424b788d2a1b5ff0c6ecad4
Reviewed-on: https://review.gerrithub.io/373554
Reviewed-by: Dennis Dmitriev <dis.xcom@gmail.com>
Tested-by: Dennis Dmitriev <dis.xcom@gmail.com>
diff --git a/tcp_tests/templates/shared-salt.yaml b/tcp_tests/templates/shared-salt.yaml
index 5e9179c..998aa0c 100644
--- a/tcp_tests/templates/shared-salt.yaml
+++ b/tcp_tests/templates/shared-salt.yaml
@@ -1,3 +1,5 @@
+{# Collection of common macros shared across different deployments #}
+
 {% set SALT_MODELS_BRANCH = os_env('SALT_MODELS_BRANCH','master') %}
 {% set SALT_MODELS_COMMIT = os_env('SALT_MODELS_COMMIT','master') %}
 {# Reference to a patch that should be applied to the model if required, for example: export SALT_MODELS_REF_CHANGE=refs/changes/19/7219/12 #}
@@ -5,6 +7,8 @@
 {# Pin to a specified commit in salt-models/reclass-system #}
 {% set SALT_MODELS_SYSTEM_COMMIT = os_env('SALT_MODELS_SYSTEM_COMMIT','') %}
 
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
 {# Address pools for reclass cluster model are taken in the following order:
  # 1. environment variables,
  # 2. config.underlay.address_pools based on fuel-devops address pools
@@ -20,8 +24,55 @@
 {% set IPV4_NET_TENANT_PREFIX = '.'.join(IPV4_NET_TENANT.split('.')[0:3]) %}
 {% set IPV4_NET_EXTERNAL_PREFIX = '.'.join(IPV4_NET_EXTERNAL.split('.')[0:3]) %}
 
-{# - description: Clone reclass models with submodules #}
-{%- macro MACRO_CLONE_RECLASS_MODELS() %}
+
+{%- macro MACRO_INSTALL_SALT_MASTER() %}
+{######################################}
+- description: Installing salt master on cfg01
+  cmd: eatmydata apt-get install -y reclass git salt-master
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Configure salt-master on cfg01
+  cmd: |
+    cat << 'EOF' >> /etc/salt/master.d/master.conf
+    file_roots:
+      base:
+      - /usr/share/salt-formulas/env
+    pillar_opts: False
+    open_mode: True
+    reclass: &reclass
+      storage_type: yaml_fs
+      inventory_base_uri: /srv/salt/reclass
+    ext_pillar:
+      - reclass: *reclass
+    master_tops:
+      reclass: *reclass
+    EOF
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Configure GIT settings and certificates
+  cmd: touch /root/.git_trusted_certs.pem;
+    for server in github.com; do
+        openssl s_client -showcerts -connect $server:443 </dev/null
+        | openssl x509 -outform PEM
+        >> /root/.git_trusted_certs.pem;
+    done;
+    HOME=/root git config --global http.sslCAInfo /root/.git_trusted_certs.pem;
+    HOME=/root git config --global user.email "tcp-qa@example.com";
+    HOME=/root git config --global user.name "TCP QA";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+{%- endmacro %}
+
+
+{%- macro MACRO_CLONE_RECLASS_MODELS(IS_CONTRAIL_LAB=false) %}
+{############################################################}
+- description: Clone reclass models with submodules
+  cmd: |
     ssh-keyscan -H github.com >> ~/.ssh/known_hosts;
     git clone -b {{ SALT_MODELS_BRANCH }} --recurse-submodules {{ SALT_MODELS_REPOSITORY }} /srv/salt/reclass;
     pushd /srv/salt/reclass && \
@@ -60,6 +111,11 @@
 
     find /srv/salt/reclass/ -type f -exec sed -i 's/apt_mk_version:.*/apt_mk_version: {{ REPOSITORY_SUITE }}/g' {} +
 
+    {%- if IS_CONTRAIL_LAB %}
+    # vSRX images used by tcp-qa have the IP 172.16.10.90 hardcoded
+    find /srv/salt/reclass/ -type f -exec sed -i 's/opencontrail_router01_address:.*/opencontrail_router01_address: 172.16.10.90/g' {} +
+    {%- endif %}
+
     # Disable checking out the model from the remote repository
     cat << 'EOF' >> /srv/salt/reclass/nodes/{{ HOSTNAME_CFG01 }}.yml
     # local storage
@@ -68,4 +124,250 @@
           data_source:
             engine: local
     EOF
+
+    # Show the changes to the console
+    cd /srv/salt/reclass/; git diff
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+{%- endmacro %}
+
+
+{%- macro MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='') %}
+{#######################################################}
+- description: Configure reclass
+  cmd: |
+    FORMULA_PATH=${FORMULA_PATH:-/usr/share/salt-formulas};
+    FORMULA_REPOSITORY=${FORMULA_REPOSITORY:-deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt};
+    FORMULA_GPG=${FORMULA_GPG:-http://apt.mirantis.com/public.gpg};
+    which wget > /dev/null || (apt-get update; apt-get install -y wget);
+    echo "${FORMULA_REPOSITORY}" > /etc/apt/sources.list.d/mcp_salt.list;
+    wget -O - "${FORMULA_GPG}" | apt-key add -;
+    apt-get clean; apt-get update;
+    [ ! -d /srv/salt/reclass/classes/service ] && mkdir -p /srv/salt/reclass/classes/service;
+    declare -a formula_services=({{ FORMULA_SERVICES }});
+    echo -e "\nInstalling all required salt formulas\n";
+    eatmydata apt-get install -y "${formula_services[@]/#/salt-formula-}";
+    for formula_service in "${formula_services[@]}"; do
+      echo -e "\nLink service metadata for formula ${formula_service} ...\n";
+      [ ! -L "/srv/salt/reclass/classes/service/${formula_service}" ] && ln -s ${FORMULA_PATH}/reclass/service/${formula_service} /srv/salt/reclass/classes/service/${formula_service};
+    done;
+    [ ! -d /srv/salt/env ] && mkdir -p /srv/salt/env;
+    [ ! -L /srv/salt/env/prd ] && ln -s ${FORMULA_PATH}/env /srv/salt/env/prd;
+    [ ! -d /etc/reclass ] && mkdir /etc/reclass;
+
+    cat << 'EOF' >> /etc/reclass/reclass-config.yml
+    storage_type: yaml_fs
+    pretty_print: True
+    output: yaml
+    inventory_base_uri: /srv/salt/reclass
+    EOF
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: "*Workaround* remove all cfg01 nodes except {{ HOSTNAME_CFG01 }} to not depend on other clusters in 'reclass --top'"
+  cmd: |
+    # Remove all other nodes except {{ HOSTNAME_CFG01 }} to not rely on them for 'reclass --top'
+    find /srv/salt/reclass/nodes/ -type f -not -name {{ HOSTNAME_CFG01 }}.yml -delete
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Configure salt adapters on cfg01
+  cmd: |
+    ln -s /usr/lib/python2.7/dist-packages/reclass/adapters/salt.py /usr/local/sbin/reclass-salt;
+    chmod +x /usr/lib/python2.7/dist-packages/reclass/adapters/salt.py
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Show reclass-salt --top for cfg01 node
+  cmd: reclass-salt --top
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Restart salt-master service
+  cmd: systemctl restart salt-master;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+{%- endmacro %}
+
+
+{%- macro MACRO_INSTALL_SALT_MINIONS() %}
+{#######################################}
+{% for ssh in config.underlay.ssh %}
+- description: Configure salt-minion on {{ ssh['node_name'] }}
+  cmd: |
+    [ ! -d /etc/salt/minion.d ] && mkdir -p /etc/salt/minion.d;
+    cat << "EOF" >> /etc/salt/minion.d/minion.conf
+    id: {{ ssh['node_name'] }}
+    master: {{ config.salt.salt_master_host }}
+    EOF
+    eatmydata apt-get install -y salt-minion;
+    echo "Check for system info and metadata availability ...";
+    salt-call --no-color grains.items;
+    salt-call --no-color pillar.items;
+  node_name: {{ ssh['node_name'] }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+{% endfor %}
+
+
+- description: Accept salt keys from all the nodes
+  cmd: salt-key -A -y
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+{%- endmacro %}
+
+
+{%- macro MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() %}
+{##################################################}
+
+{# Prepare salt services and nodes settings #}
+
+- description: Run 'linux' formula on cfg01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' state.sls linux;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+- description: Run 'openssh' formula on cfg01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@salt:master' state.sls openssh;
+    salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@salt:master' cmd.run "sed -i 's/PasswordAuthentication no/PasswordAuthentication
+    yes/' /etc/ssh/sshd_config && service ssh reload";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 5}
+  skip_fail: false
+
+- description: '*Workaround* of the bug https://mirantis.jira.com/browse/PROD-7962'
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    '*' cmd.run "echo '    StrictHostKeyChecking no' >> /root/.ssh/config"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Run 'salt.master' formula on cfg01
+  cmd: timeout 120 salt --timeout=120 --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' state.sls salt.master;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 2, delay: 5}
+  skip_fail: false
+
+# TODO(ddmitriev): apply custom patches for formulas here
+
+- description: Refresh pillars on salt-master minion
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' saltutil.refresh_pillar
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Show reclass-salt --top for all generated nodes
+  cmd: reclass-salt --top
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Sync all salt resources on salt-master minion
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' saltutil.sync_all
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Configure linux on master
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' state.sls 'linux.system'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Configure salt.minion on master
+  cmd: salt --timeout=120 --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@salt:master' state.sls salt.minion && sleep 10
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 10}
+  skip_fail: false
+
+- description: Run state 'salt' on master (for salt.api, etc)
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@salt:master' state.sls salt
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 10}
+  skip_fail: false
+{%- endmacro %}
+
+{%- macro MACRO_GENERATE_INVENTORY() %}
+{#####################################}
+- description: Generate inventory for all the nodes under /srv/salt/reclass/nodes/_generated
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@salt:master' state.sls reclass
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Refresh pillars on all minions
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Show reclass-salt --top for all generated nodes
+  cmd: reclass-salt --top
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Sync all salt resources
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+{%- endmacro %}
+
+
+{%- macro MACRO_BOOTSTRAP_ALL_MINIONS() %}
+{########################################}
+# Bootstrap all nodes
+- description: Configure linux on other nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 5, delay: 10}
+  skip_fail: false
+
+- description: Configure openssh on all nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system and not cfg01*' state.sls openssh;
+    salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@linux:system and not cfg01*' cmd.run "sed -i 's/PasswordAuthentication no/PasswordAuthentication
+    yes/' /etc/ssh/sshd_config && service ssh reload"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Configure salt.minion on other nodes
+  cmd: salt --timeout=120 --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system and not cfg01*' state.sls salt.minion &&
+    sleep 10
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 15}
+  skip_fail: false
+
+- description: Check salt minion versions on slaves
+  cmd: salt '*' test.version
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 15}
+  skip_fail: false
+
+- description: Check salt top states on nodes
+  cmd: salt '*' state.show_top
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Configure ntp and rsyslog on nodes
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls ntp,rsyslog
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
 {%- endmacro %}