Update the cicd underlay template to match a fix in the model

In [1], the StackLight services were moved to the cid* nodes.

[1] https://gerrit.mcp.mirantis.net/#/c/7885
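
The StackLight node selector is still taken from the environment, so the old
layout can be restored without editing the templates. A minimal sketch of the
override, assuming the shell that launches the test run:

    # sl.yaml now reads the selector with a 'cid*' default:
    #   {% set SL_NODES_SELECTOR = os_env('SL_NODES_SELECTOR', 'cid*') %}
    # To keep StackLight on dedicated monitoring nodes instead:
    export SL_NODES_SELECTOR='mon*'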

Change-Id: I80ff603a24a06d4903f9e9a549650f45c1990a9c
Reviewed-on: https://review.gerrithub.io/371568
Reviewed-by: Dennis Dmitriev <dis.xcom@gmail.com>
Tested-by: Dennis Dmitriev <dis.xcom@gmail.com>
diff --git a/tcp_tests/templates/virtual-mcp-ocata-cicd/oss.yaml b/tcp_tests/templates/virtual-mcp-ocata-cicd/oss.yaml
index af678dc..9d6ead7 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-cicd/oss.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-cicd/oss.yaml
@@ -77,10 +77,12 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
-- description: Setup salt-minion on docker swarm master
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and {{ CICD_NODES_SELECTOR }}' state.sls salt &&
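+# Refresh grains and the salt mine on the swarm master so its data (e.g. swarm join information) is available to the following docker.swarm steps.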
+- description: Collect grains
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and {{ CICD_NODES_SELECTOR }}' state.sls salt.minion.grains &&
     salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and {{ CICD_NODES_SELECTOR }}' mine.flush &&
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and {{ CICD_NODES_SELECTOR }}' mine.update
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and {{ CICD_NODES_SELECTOR }}' mine.update &&
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and {{ CICD_NODES_SELECTOR }}' saltutil.refresh_modules &&
+    sleep 10
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 3, delay: 15}
   skip_fail: false
@@ -88,7 +90,7 @@
 - description: Install Docker Swarm on other nodes
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and {{ CICD_NODES_SELECTOR }}' state.sls docker.swarm
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
+  retry: {count: 2, delay: 5}
   skip_fail: false
 
 - description: Show Docker Swarm nodes
@@ -145,23 +147,33 @@
 # Aptly
 #------
 
-- description: "Install Aptly and check it's API"
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@aptly:server' state.sls aptly &&
-    timeout 90 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@aptly:server' cmd.run
+- description: "Wait for Aptly to come up in container..."
+  cmd: timeout 300 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@aptly:server' cmd.run
       'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cicd_control_address);
        while true; do curl -sf http://${CICD_CONTROL_ADDRESS}:8084/api/version  && break; sleep 2; done'
   node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 15}
+  skip_fail: false
+
+- description: "Setup Aptly"
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@aptly:server' state.sls aptly
+  node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 3, delay: 10}
   skip_fail: false
 
 # OpenLDAP
 #---------
 
-- description: "Install LDAP and check it's availability"
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@openldap:client' state.sls openldap &&
-    timeout 60 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@openldap:client' cmd.run
+- description: "Waiting for OpenLDAP to come up in container..."
+  cmd: timeout 60 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@openldap:client' cmd.run
       'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cicd_control_address);
-       while true; do curl -sf ldap://${CICD_CONTROL_ADDRESS} >/dev/null && break; sleep 2; done' &&
+       while true; do curl -sf ldap://${CICD_CONTROL_ADDRESS} && break; sleep 2; done'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 10}
+  skip_fail: false
+
+- description: "Setup OpenLDAP"
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@openldap:client' state.sls openldap &&
     sleep 20
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 3, delay: 10}
@@ -170,11 +182,16 @@
 # Gerrit
 #-------
 
-- description: "Install Gerrit and check it's availability"
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gerrit:client' state.sls gerrit &&
-    timeout 60 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gerrit:client' cmd.run
+- description: "Waiting for Gerrit to come up in container..."
+  cmd: timeout 60 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gerrit:client' cmd.run
       'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cicd_control_address);
-       while true; do curl -sf http://${CICD_CONTROL_ADDRESS}:8080 >/dev/null && break; sleep 2; done'
+       while true; do curl -sf http://${CICD_CONTROL_ADDRESS}:8080/config/server/version && break; sleep 2; done'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 10}
+  skip_fail: false
+
+- description: "Setup Gerrit"
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gerrit:client' state.sls gerrit
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 3, delay: 10}
   skip_fail: false
@@ -182,10 +199,8 @@
 # Jenkins
 #--------
 
-
-- description: "Install Jenkins and check it's availability"
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@jenkins:client' state.sls jenkins &&
-    timeout 60 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@jenkins:client' cmd.run
+- description: "Waiting for Jenkins to come up in container..."
+  cmd: timeout 60 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@jenkins:client' cmd.run
       'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cicd_control_address);
        export JENKINS_CLIENT_USER=$(salt-call --out=newline_values_only pillar.get _param:jenkins_client_user);
        export JENKINS_CLIENT_PASSWORD=$(salt-call --out=newline_values_only pillar.get _param:jenkins_client_password);
@@ -197,40 +212,69 @@
   retry: {count: 3, delay: 10}
   skip_fail: false
 
-# Postgres
-#---------
+- description: "Setup Jenkins"
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@jenkins:client' state.sls jenkins
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 10}
+  skip_fail: false
 
-- description: Install PostgreSQL and create databases
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@postgresql:client' state.sls postgresql.client -b 1&&
-    timeout 300 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@postgresql:client' cmd.run
+# Postgres && Pushkin
+#--------------------
+
+- description: "Waiting for postgresql database to come up in container..."
+  cmd: timeout 300 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@postgresql:client' cmd.run
       'while true; do if docker service logs postgresql_db | grep -q "ready to accept"; then break; else sleep 5; fi; done'
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 3, delay: 10}
   skip_fail: false
 
-# Pushkin
-#--------
-
-- description: Check Pushkin API availability
-  cmd: timeout 60 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@postgresql:client' cmd.run
+- description: ("Create PostgreSQL databases, it fails at first run because of known deadlock:\n"
+                "1. State postgresql.client cannot insert values into 'pushkin' database because it is created empty,\n"
+                "2. Container with Pushkin cannot start and fill the database scheme until state postgresql.client created users.")
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@postgresql:client' state.sls postgresql.client -b 1 &&
+    timeout 300 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@postgresql:client' cmd.run
     'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cicd_control_address);
      while true; do curl -sf http://${CICD_CONTROL_ADDRESS}:8887/apps && break; sleep 2; done'
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
+  retry: {count: 3, delay: 100}
   skip_fail: false
 
 # Rundeck
 #--------
 
-- description: Install Rundeck and check availability
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@rundeck:client' state.sls rundeck.client &&
-    timeout 30 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@rundeck:client' cmd.run
+- description: Waiting for Rundeck to come up in container...
+  cmd: timeout 30 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@rundeck:client' cmd.run
       'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cicd_control_address);
        while true; do curl -sf http://${CICD_CONTROL_ADDRESS}:4440 && break; sleep 2; done'
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 3, delay: 10}
   skip_fail: false
 
+- description: Setup Rundeck
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@rundeck:client' state.sls rundeck.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 10}
+  skip_fail: false
+
+# Elasticsearch
+#--------------
+
+- description: 'Waiting for Elasticsearch to come up in container...'
+  cmd: timeout 30 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' cmd.run
+      'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cicd_control_address);
+       while true; do curl -sf http://${CICD_CONTROL_ADDRESS}:9200/?pretty && break; sleep 2; done'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 10}
+  skip_fail: false
+
+- description: Setup Elasticsearch
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 10}
+  skip_fail: false
+
 # Generate docs
 #--------------
 
diff --git a/tcp_tests/templates/virtual-mcp-ocata-cicd/salt.yaml b/tcp_tests/templates/virtual-mcp-ocata-cicd/salt.yaml
index 2d7b520..bb54f8e 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-cicd/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-cicd/salt.yaml
@@ -286,7 +286,7 @@
   skip_fail: false
 
 - description: Configure salt.minion on other nodes
-  cmd: salt --timeout=120 --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system and not cfg01*' state.sls salt.minion
+  cmd: salt --timeout=120 --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system and not cfg01*' state.sls salt.minion && sleep 10
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 3, delay: 15}
   skip_fail: false
@@ -294,7 +294,7 @@
 - description: Check salt minion versions on slaves
   cmd: salt '*' test.version
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
+  retry: {count: 2, delay: 5}
   skip_fail: false
 
 - description: Check salt top states on nodes
diff --git a/tcp_tests/templates/virtual-mcp-ocata-cicd/sl.yaml b/tcp_tests/templates/virtual-mcp-ocata-cicd/sl.yaml
index 7e0faf3..f3d07fc 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-cicd/sl.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-cicd/sl.yaml
@@ -1,9 +1,12 @@
 {% from 'virtual-mcp-ocata-cicd/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
 # Pattern that helps salt to select Stacklight nodes
-{% set SL_NODES_SELECTOR = os_env('SL_NODES_SELECTOR','mon*') %}
+{% set SL_NODES_SELECTOR = os_env('SL_NODES_SELECTOR','cid*') %}
 
+#=====================
 # Install docker swarm
+#=====================
+
 - description: Prepare Docker host
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:host and {{ SL_NODES_SELECTOR }}' state.sls docker.host
   node_name: {{ HOSTNAME_CFG01 }}
@@ -16,16 +19,18 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
-- description: Setup salt-minion on docker swarm master
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and {{ SL_NODES_SELECTOR }}' state.sls salt &&
+- description: Collect grains
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and {{ SL_NODES_SELECTOR }}' state.sls salt.minion.grains &&
     salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and {{ SL_NODES_SELECTOR }}' mine.flush &&
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and {{ SL_NODES_SELECTOR }}' mine.update
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and {{ SL_NODES_SELECTOR }}' mine.update &&
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and {{ SL_NODES_SELECTOR }}' saltutil.refresh_modules &&
+    sleep 5
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 3, delay: 15}
   skip_fail: false
 
 - description: Install Docker Swarm on other nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and {{ SL_NODES_SELECTOR }}' state.sls docker.swarm
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager and {{ SL_NODES_SELECTOR }}' state.sls docker.swarm
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
@@ -36,27 +41,50 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
-# Install slv2 infra
+#===================
+# Install StackLight
+#===================
+
+# Haproxy: installed in common-services
+#--------------------------------------
+
 - description: Install telegraf
   cmd: salt -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-- description: Configure collector
-  cmd: salt -C 'I@heka:log_collector' state.sls heka.log_collector
+- description: Install Prometheus exporters if the 'prometheus:exporters' pillar exists on any server
+  cmd: |
+    if salt -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
+      salt -C 'I@prometheus:exporters' state.sls prometheus
+    fi
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
+# Elasticsearch
+- description: Waiting for Elasticsearch to come up in container...
+  cmd: timeout 60 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' cmd.run
+      'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cicd_control_address);
+       while true; do curl -sf http://${CICD_CONTROL_ADDRESS}:9200 && break; sleep 2; done'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 30}
+  skip_fail: false
+
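+# Apply elasticsearch.server to the first node to bootstrap the cluster, then roll it over all nodes in batches of one.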
 - description: Install elasticsearch server
-  cmd: salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+  cmd: salt -C 'I@elasticsearch:server and *01*' state.sls elasticsearch.server &&
+    salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
 
 - description: Install kibana server
-  cmd: salt -C 'I@kibana:server' state.sls kibana.server -b 1
+  cmd: salt -C 'I@kibana:server and *01*' state.sls kibana.server &&
+    salt -C 'I@kibana:server' state.sls kibana.server -b 1
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
@@ -73,36 +101,25 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
-- description: Check influix db
-  cmd: |
-    INFLUXDB_SERVICE=`salt -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
-    echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
-    if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
-        salt -C 'I@influxdb:server' state.sls influxdb
-    fi
+- description: Install influxdb
+  cmd: salt -C 'I@influxdb:server' state.sls influxdb
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
-  skip_fail: true
-
-# Collect grains needed to configure the services
-
-- description: Get grains
-  cmd: salt -C 'I@salt:minion' state.sls salt.minion.grains
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
   skip_fail: false
 
-- description: Sync modules
-  cmd: salt -C 'I@salt:minion' saltutil.refresh_modules
+# Collect grains. This looks like a redundant step, but it is still present in the pipeline at the moment.
+- description: Collect grains
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and {{ SL_NODES_SELECTOR }}' state.sls salt.minion.grains &&
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and {{ SL_NODES_SELECTOR }}' saltutil.refresh_modules &&
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and {{ SL_NODES_SELECTOR }}' mine.flush &&
+    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and {{ SL_NODES_SELECTOR }}' mine.update &&
+    sleep 5
   node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
+  retry: {count: 3, delay: 15}
   skip_fail: false
 
-- description: Update mine
-  cmd: salt -C 'I@salt:minion' mine.update; sleep 5;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
 
 # Change environment configuration before deploy
 - description: Set SL docker images deploy parameters
@@ -135,12 +152,28 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
+- description: Waiting for the StackLight service on port 15013 to come up in container...
+  cmd: timeout 120 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and {{ SL_NODES_SELECTOR }}' cmd.run
+      'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cicd_control_address);
+       while true; do curl -sf http://${CICD_CONTROL_ADDRESS}:15013 && break; sleep 2; done'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 3, delay: 30}
+  skip_fail: false
+
 - description: Configure Grafana dashboards and datasources
   cmd: sleep 30;  salt -C 'I@grafana:client' state.sls grafana.client
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 2, delay: 10}
   skip_fail: false
 
+- description: Configure collector
+  cmd: salt -C 'I@heka:log_collector' state.sls heka.log_collector
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
 - description: Run salt minion to create cert files
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
   node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-cicd/underlay.yaml b/tcp_tests/templates/virtual-mcp-ocata-cicd/underlay.yaml
index 2f0f861..9887a50 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-cicd/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-cicd/underlay.yaml
@@ -24,9 +24,6 @@
 {% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
 {% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
 
@@ -50,9 +47,6 @@
             default_{{ HOSTNAME_CTL03 }}: +103
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_MON01 }}: +107
-            default_{{ HOSTNAME_MON02 }}: +108
-            default_{{ HOSTNAME_MON03 }}: +109
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
           ip_ranges:
@@ -73,9 +67,6 @@
             default_{{ HOSTNAME_CTL03 }}: +103
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_MON01 }}: +107
-            default_{{ HOSTNAME_MON02 }}: +108
-            default_{{ HOSTNAME_MON03 }}: +109
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
           ip_ranges:
@@ -96,9 +87,6 @@
             default_{{ HOSTNAME_CTL03 }}: +103
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_MON01 }}: +107
-            default_{{ HOSTNAME_MON02 }}: +108
-            default_{{ HOSTNAME_MON03 }}: +109
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
           ip_ranges:
@@ -119,9 +107,6 @@
             default_{{ HOSTNAME_CTL03 }}: +103
             default_{{ HOSTNAME_CMP01 }}: +105
             default_{{ HOSTNAME_CMP02 }}: +106
-            default_{{ HOSTNAME_MON01 }}: +107
-            default_{{ HOSTNAME_MON02 }}: +108
-            default_{{ HOSTNAME_MON03 }}: +109
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
           ip_ranges:
@@ -217,7 +202,7 @@
           - name: {{ HOSTNAME_CID01 }}
             role: salt_minion
             params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
+              vcpu: !os_env SLAVE_NODE_CPU, 3
               memory: !os_env SLAVE_NODE_MEMORY, 8192
               boot:
                 - hd
@@ -255,7 +240,7 @@
           - name: {{ HOSTNAME_CID02 }}
             role: salt_minion
             params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
+              vcpu: !os_env SLAVE_NODE_CPU, 3
               memory: !os_env SLAVE_NODE_MEMORY, 6144
               boot:
                 - hd
@@ -281,7 +266,7 @@
           - name: {{ HOSTNAME_CID03 }}
             role: salt_minion
             params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
+              vcpu: !os_env SLAVE_NODE_CPU, 3
               memory: !os_env SLAVE_NODE_MEMORY, 6144
               boot:
                 - hd
@@ -308,7 +293,7 @@
           - name: {{ HOSTNAME_CTL01 }}
             role: salt_minion
             params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
+              vcpu: !os_env SLAVE_NODE_CPU, 3
               memory: !os_env SLAVE_NODE_MEMORY, 6144
               boot:
                 - hd
@@ -349,7 +334,7 @@
           - name: {{ HOSTNAME_CTL02 }}
             role: salt_minion
             params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
+              vcpu: !os_env SLAVE_NODE_CPU, 3
               memory: !os_env SLAVE_NODE_MEMORY, 6144
               boot:
                 - hd
@@ -378,7 +363,7 @@
           - name: {{ HOSTNAME_CTL03 }}
             role: salt_minion
             params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
+              vcpu: !os_env SLAVE_NODE_CPU, 3
               memory: !os_env SLAVE_NODE_MEMORY, 6144
               boot:
                 - hd
@@ -437,8 +422,8 @@
           - name: {{ HOSTNAME_CMP01 }}
             role: salt_minion
             params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 3072
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 6144
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -488,8 +473,8 @@
           - name: {{ HOSTNAME_CMP02 }}
             role: salt_minion
             params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 3072
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 6144
               boot:
                 - hd
               cloud_init_volume_name: iso
@@ -511,84 +496,6 @@
               interfaces: *all_interfaces
               network_config: *all_network_config
 
-          - name: {{ HOSTNAME_MON01 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 3072
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_openstack
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_MON02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 3072
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_openstack
-
-              interfaces: *interfaces
-              network_config: *network_config
-
-          - name: {{ HOSTNAME_MON03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 3072
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: cloudimage1604
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_openstack
-
-              interfaces: *interfaces
-              network_config: *network_config
-
           - name: {{ HOSTNAME_GTW01 }}
             role: salt_minion
             params: