Fix Contrail deployment
* As a workaround, the k8s API now listens on the ens4 interface (see the
  first sketch below).
* VM image sizes were reduced (smaller system volumes and adjusted
  memory/vCPU allocations).
* Deployment was fixed in accordance with the latest changes in
  Orchestrate.groovy.
* The universe repository was enabled so that python-zmq can be installed
  on Ubuntu 14.04 (see the second sketch below).
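
A minimal sketch of the interface workaround, using the same sed step that is
added to common-services.yaml below; the grep is only a hypothetical sanity
check and is not part of the change:

    # Point the k8s control class at ens4 instead of vhost0 (run on cfg01).
    sed -i "s/vhost0/ens4/g" \
        /srv/salt/reclass/classes/cluster/virtual-mcp11-k8s-contrail/kubernetes/control.yml
    # Hypothetical check that the substitution took effect.
    grep -n "ens4" \
        /srv/salt/reclass/classes/cluster/virtual-mcp11-k8s-contrail/kubernetes/control.yml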
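
A sketch of what the Ubuntu 14.04 user-data change enables; only the
add-apt-repository line comes from this patch, the explicit apt-get commands
are an illustrative assumption:

    # Enable the universe pocket so python-zmq becomes installable on trusty.
    sudo add-apt-repository universe
    sudo apt-get update
    # Illustration only: the package that required universe in the first place.
    sudo apt-get install -y python-zmq
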
Change-Id: I66f80fbdd90c6527ac083fc516ab81ec1d9e8c1c
Signed-off-by: Sergii Golovatiuk <sgolovatiuk@mirantis.com>
Reviewed-on: https://review.gerrithub.io/363529
Reviewed-by: Tatyanka Leontovich <tleontovich@mirantis.com>
Tested-by: Tatyanka Leontovich <tleontovich@mirantis.com>
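
Note: the keepalived install is now split so the VIP is brought up on the
primary controller before the remaining cluster members join; a condensed
sketch of the ordering used in common-services.yaml (the --hard-crash and
--state-output flags are omitted here for brevity):

    salt -C 'I@keepalived:cluster and *01*' state.sls keepalived
    salt -C 'I@keepalived:cluster' state.sls keepalived
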
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/salt.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/salt.yaml
index 4fda035..bd70138 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-calico/salt.yaml
@@ -86,10 +86,10 @@
# Disable checkouting the model from remote repository
cat << 'EOF' >> /srv/salt/reclass/nodes/{{ HOSTNAME_CFG01 }}.yml
# local storage
- reclass:
- storage:
- data_source:
- engine: local
+ reclass:
+ storage:
+ data_source:
+ engine: local
EOF
# Show the changes to the console
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/common-services.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/common-services.yaml
index a637ebf..2f98744 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-contrail/common-services.yaml
@@ -1,10 +1,17 @@
{% from 'virtual-mcp11-k8s-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
+- description: Temporary fix to untie the k8s API from the Opencontrail haproxy
+  so that kube-api is reachable over the private network.
+ cmd: sed -i "s/vhost0/ens4/g" /srv/salt/reclass/classes/cluster/virtual-mcp11-k8s-contrail/kubernetes/control.yml
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
# Install support services
- description: Create and distribute SSL certificates for services using salt state
cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' state.sls salt
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
+ retry: {count: 1, delay: 5}
skip_fail: true
- description: Install glusterfs
@@ -28,13 +35,6 @@
retry: {count: 1, delay: 5}
skip_fail: false
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
- description: Install haproxy
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@haproxy:proxy' state.sls haproxy
@@ -42,13 +42,21 @@
retry: {count: 1, delay: 5}
skip_fail: false
-- description: Install haproxy.service
+- description: Check haproxy service
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@haproxy:proxy' service.status haproxy
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
+- description: Restart rsyslog
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.restart rsyslog
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+# Kubernetes
- description: Install docker
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@docker:host' state.sls docker.host
@@ -77,103 +85,150 @@
retry: {count: 1, delay: 5}
skip_fail: false
-- description: Install k8s
+- description: Install keepalived on primary controller
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@kubernetes:master' state.sls kubernetes.master.kube-addons;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kubernetes:pool' state.sls kubernetes.pool;
+ -C 'I@keepalived:cluster and *01*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Install keepalived on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Install nginx
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nginx:server' state.sls nginx
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
-- description: Setup etcd server
+- description: Install Kubernetes Addons
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@kubernetes:master and *01*' state.sls etcd.server.setup;
+ -C 'I@kubernetes:master' state.sls kubernetes.master.kube-addons
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Install Kubernetes components
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@kubernetes:pool' state.sls kubernetes.pool
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
-- description: Run master without setup
+- description: Setup etcd server on primary controller
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@kubernetes:master and *01*' state.sls etcd.server.setup
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Run Kubernetes master without setup
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@kubernetes:master' state.sls kubernetes exclude=kubernetes.master.setup
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: true
-- description: Run master setup
+- description: Run Kubernetes master setup
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@kubernetes:master and *01*' state.sls kubernetes.master.setup
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: true
-- description: Restart kubelet
+- description: Restart Kubelet
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@kubernetes:pool' service.restart kubelet
+ -C 'I@kubernetes:pool' service.restart 'kubelet'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
+ skip_fail: true
+
+# Opencontrail
+- description: Install RabbitMQ
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
skip_fail: false
-# install contrail
-- description: Install contrail db
+- description: Check RabbitMQ
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' cmd.run "rabbitmqctl cluster_status"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install Opencontrail db on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:database and *01*' state.sls opencontrail.database
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 20}
+ skip_fail: false
+
+- description: Install Opencontrail db on all nodes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@opencontrail:database' state.sls opencontrail.database
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 20}
skip_fail: false
-- description: Install contrail control
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:control' state.sls opencontrail
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install contrail control with exclude client on node 1
+- description: Install Opencontrail control on ctl01
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@opencontrail:control and *01*' state.sls opencontrail exclude=opencontrail.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
-- description: Install contrail control with exclude client on all nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=Falsa
+- description: Install Opencontrail control on all nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@opencontrail:control' state.sls opencontrail exclude=opencontrail.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
-- description: Install contrail collector without contrail client
+- description: Install Opencontrail on collector
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@opencontrail:collector' state.sls opencontrail exclude=opencontrail.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
-- description: Check contrail status
+- description: Test Opencontrail
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:control' cmd.run contrail-status
+ -C 'I@opencontrail:control' cmd.run 'contrail-status'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
-# install contrail computes
-- description: Configure contrail
+- description: Install Opencontrail client
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:database:id:1' state.sls opencontrail.client
+ -C 'I@opencontrail:database:id:1' state.sls 'opencontrail.client'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
-- description: Provision vrouter
+- description: Install Opencontrail client on computes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nova:compute' cmd.run 'exec 0>&-; exec 1>&-; exec 2>&-; nohup bash -c "ip link | grep vhost && echo no_reboot || sleep 5 && reboot & "'
+ -C 'I@opencontrail:compute' state.sls 'opencontrail.client'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
-- description: Compute contrail client on cmp
- cmd: slepp 300; salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@opencontrail:compute' state.sls opencontrail.client
+- description: Install Opencontrail on computes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:compute' state.sls 'opencontrail'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Reboot Opencontrail compute nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@opencontrail:compute' system.reboot
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/salt.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/salt.yaml
index 0a00ccc..3ca2fa7 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-contrail/salt.yaml
@@ -83,10 +83,10 @@
# Disable checkouting the model from remote repository
cat << 'EOF' >> /srv/salt/reclass/nodes/{{ HOSTNAME_CFG01 }}.yml
# local storage
- reclass:
- storage:
- data_source:
- engine: local
+ reclass:
+ storage:
+ data_source:
+ engine: local
EOF
node_name: {{ HOSTNAME_CFG01 }}
@@ -189,20 +189,11 @@
- description: Run 'salt.master' formula on cfg01
cmd: timeout 120 salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@salt:master' state.sls salt.master.service;
+ -C 'I@salt:master' state.sls salt.master;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 5}
skip_fail: false
-- description: Run 'salt' formula on cfg01 with workaround proposed in PROD-10894
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@salt:master' state.sls salt;
- salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@salt:master' saltutil.sync_all
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 5}
- skip_fail: false
-
- description: Generate inventory for all the nodes to the /srv/salt/reclass/nodes/_generated
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@salt:master' state.sls reclass
@@ -216,55 +207,65 @@
retry: {count: 1, delay: 5}
skip_fail: false
-- description: Sync all salt resources
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
- description: Show reclass-salt --top
cmd: reclass-salt --top
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
-- description: Execute salt.minion.cert
- cmd: salt-call --no-color state.sls salt.minion.cert -l info;
+- description: Sync all salt resources
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
+- description: Configure linux on master
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@salt:master' state.sls 'linux.system'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Configure minion on master
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@salt:master' state.sls 'salt.minion'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 10}
+ skip_fail: false
+
# Bootstrap all nodes
- description: Configure linux on other nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not
- cfg01* and not mon*' state.sls linux
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@linux:system' state.sls 'linux'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 3, delay: 5}
skip_fail: false
-- description: Configure linux on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' state.sls linux
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
- description: Configure openssh on all nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not
- cfg01*' state.sls openssh;salt --hard-crash --state-output=mixed --state-verbose=False
- -C '* and not cfg*' cmd.run "sed -i 's/PasswordAuthentication no/PasswordAuthentication
- yes/' /etc/ssh/sshd_config && service ssh restart"
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@linux:system' state.sls openssh;
+ salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@linux:system' cmd.run "sed -i 's/PasswordAuthentication no/PasswordAuthentication
+ yes/' /etc/ssh/sshd_config && service ssh reload"
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
-- description: Configure salt.minion on other nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not
- cfg01*' state.sls salt.minion
+- description: Configure salt.minion on all nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@linux:system' state.sls salt.minion
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 3, delay: 5}
skip_fail: false
+- description: Configure ntp and rsyslog on nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@linux:system' state.sls ntp,rsyslog
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
- description: Check salt minion versions on slaves
cmd: salt '*' test.version
node_name: {{ HOSTNAME_CFG01 }}
@@ -276,9 +277,3 @@
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
-
-- description: Configure ntp and rsyslog on nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' state.sls ntp,rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data1404.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data1404.yaml
index f4a06e4..6445e9c 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data1404.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data1404.yaml
@@ -46,7 +46,7 @@
#- sleep 120
- echo "Preparing base OS"
- which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
+ - sudo add-apt-repository universe
- echo "deb [arch=amd64] http://apt-mk.mirantis.com/trusty nightly salt extra" > /etc/apt/sources.list.d/mcp_salt.list
- wget -O - http://apt-mk.mirantis.com/public.gpg | apt-key add -
- echo "deb http://repo.saltstack.com/apt/ubuntu/14.04/amd64/2016.3 trusty main" > /etc/apt/sources.list.d/saltstack.list
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay.yaml
index 541b829..8187fb9 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay.yaml
@@ -1,4 +1,4 @@
-# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
+# This environment requires 50.5 GB of RAM and 270 GB of storage. Run with caution.
{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'stable') %}
{% import 'virtual-mcp11-k8s-contrail/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
@@ -154,7 +154,7 @@
- name: {{ HOSTNAME_CFG01 }}
role: salt_master
params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
+ vcpu: !os_env SLAVE_NODE_CPU, 2
memory: !os_env SLAVE_NODE_MEMORY, 2048
boot:
- hd
@@ -162,7 +162,7 @@
cloud_init_iface_up: ens3
volumes:
- name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
+ capacity: !os_env NODE_VOLUME_SIZE, 20
backing_store: cloudimage1604
format: qcow2
- name: iso # Volume with name 'iso' will be used
@@ -199,15 +199,15 @@
- name: {{ HOSTNAME_CTL01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 8192
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
boot:
- hd
cloud_init_volume_name: iso
cloud_init_iface_up: ens3
volumes:
- name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
+ capacity: !os_env NODE_VOLUME_SIZE, 20
backing_store: cloudimage1604
format: qcow2
- name: cinder
@@ -246,15 +246,15 @@
- name: {{ HOSTNAME_CTL02 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 8192
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
boot:
- hd
cloud_init_volume_name: iso
cloud_init_iface_up: ens3
volumes:
- name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
+ capacity: !os_env NODE_VOLUME_SIZE, 20
backing_store: cloudimage1604
format: qcow2
- name: cinder
@@ -275,15 +275,15 @@
- name: {{ HOSTNAME_CTL03 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 8192
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
boot:
- hd
cloud_init_volume_name: iso
cloud_init_iface_up: ens3
volumes:
- name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
+ capacity: !os_env NODE_VOLUME_SIZE, 20
backing_store: cloudimage1604
format: qcow2
- name: cinder
@@ -304,7 +304,7 @@
- name: {{ HOSTNAME_CMP01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
+ vcpu: !os_env SLAVE_NODE_CPU, 2
memory: !os_env SLAVE_NODE_MEMORY, 4096
boot:
- hd
@@ -312,7 +312,7 @@
cloud_init_iface_up: ens3
volumes:
- name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
+ capacity: !os_env NODE_VOLUME_SIZE, 20
backing_store: cloudimage1604
format: qcow2
- name: iso # Volume with name 'iso' will be used
@@ -330,7 +330,7 @@
- name: {{ HOSTNAME_CMP02 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
+ vcpu: !os_env SLAVE_NODE_CPU, 2
memory: !os_env SLAVE_NODE_MEMORY, 4096
boot:
- hd
@@ -338,7 +338,7 @@
cloud_init_iface_up: ens3
volumes:
- name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
+ capacity: !os_env NODE_VOLUME_SIZE, 20
backing_store: cloudimage1604
format: qcow2
- name: iso # Volume with name 'iso' will be used
@@ -356,7 +356,7 @@
- name: {{ HOSTNAME_MON01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
+ vcpu: !os_env SLAVE_NODE_CPU, 1
memory: !os_env SLAVE_NODE_MEMORY, 1024
boot:
- hd
@@ -364,7 +364,7 @@
cloud_init_iface_up: ens3
volumes:
- name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
+ capacity: !os_env NODE_VOLUME_SIZE, 20
backing_store: cloudimage1604
format: qcow2
- name: iso # Volume with name 'iso' will be used
@@ -382,7 +382,7 @@
- name: {{ HOSTNAME_MON02 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
+ vcpu: !os_env SLAVE_NODE_CPU, 1
memory: !os_env SLAVE_NODE_MEMORY, 1024
boot:
- hd
@@ -390,7 +390,7 @@
cloud_init_iface_up: ens3
volumes:
- name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
+ capacity: !os_env NODE_VOLUME_SIZE, 20
backing_store: cloudimage1604
format: qcow2
- name: iso # Volume with name 'iso' will be used
@@ -408,7 +408,7 @@
- name: {{ HOSTNAME_MON03 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
+ vcpu: !os_env SLAVE_NODE_CPU, 1
memory: !os_env SLAVE_NODE_MEMORY, 1024
boot:
- hd
@@ -416,7 +416,7 @@
cloud_init_iface_up: ens3
volumes:
- name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
+ capacity: !os_env NODE_VOLUME_SIZE, 20
backing_store: cloudimage1604
format: qcow2
- name: iso # Volume with name 'iso' will be used
@@ -435,14 +435,14 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 1024
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
cloud_init_volume_name: iso
cloud_init_iface_up: ens3
volumes:
- name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
+ capacity: !os_env NODE_VOLUME_SIZE, 20
backing_store: cloudimage1404
format: qcow2
- name: iso # Volume with name 'iso' will be used
@@ -461,14 +461,14 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 1024
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
cloud_init_volume_name: iso
cloud_init_iface_up: ens3
volumes:
- name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
+ capacity: !os_env NODE_VOLUME_SIZE, 20
backing_store: cloudimage1404
format: qcow2
- name: iso # Volume with name 'iso' will be used
@@ -487,14 +487,14 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 1024
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
cloud_init_volume_name: iso
cloud_init_iface_up: ens3
volumes:
- name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
+ capacity: !os_env NODE_VOLUME_SIZE, 20
backing_store: cloudimage1404
format: qcow2
- name: iso # Volume with name 'iso' will be used
@@ -512,7 +512,7 @@
- name: {{ HOSTNAME_PRX01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 4
+ vcpu: !os_env SLAVE_NODE_CPU, 2
memory: !os_env SLAVE_NODE_MEMORY, 1024
boot:
- hd
@@ -520,7 +520,7 @@
cloud_init_iface_up: ens3
volumes:
- name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
+ capacity: !os_env NODE_VOLUME_SIZE, 20
backing_store: cloudimage1604
format: qcow2
- name: iso # Volume with name 'iso' will be used
@@ -539,12 +539,12 @@
role: vsrx
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ memory: !os_env SLAVE_NODE_MEMORY, 512
boot:
- hd
volumes:
- name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
+ capacity: !os_env NODE_VOLUME_SIZE, 10
backing_store: vsrx_image
format: qcow2
#- name: iso