Add templates for mk22-lab-basic
- add a set of templates for mk22-lab-basic
- add the environment variable LAB_CONFIG_NAME to easily select the desired template set
diff --git a/README.md b/README.md
index 0d5a6b0..e8de9fc 100644
--- a/README.md
+++ b/README.md
@@ -29,12 +29,22 @@
Export variables
----------------
-export ENV_NAME=tcpcloud-mk22 # Optional
+Required:
export IMAGE_PATH1404=./trusty-server-cloudimg-amd64.qcow2
export IMAGE_PATH1604=./xenial-server-cloudimg-amd64.qcow2
+
+Optional:
+
+export ENV_NAME=tcpcloud-mk22 # Any environment name can be used
+
+export LAB_CONFIG_NAME=mk22-lab-basic # Prefix for the template file names.
+ # Currently there are two template sets:
+ # mk22-lab-basic
+ # mk22-lab-advanced
+
Run deploy test
---------------
diff --git a/tcp_tests/fixtures/underlay_fixtures.py b/tcp_tests/fixtures/underlay_fixtures.py
index 0637abf..a19b6d1 100644
--- a/tcp_tests/fixtures/underlay_fixtures.py
+++ b/tcp_tests/fixtures/underlay_fixtures.py
@@ -194,7 +194,7 @@
config.underlay.ssh = hardware.get_ssh_data(
roles=config.underlay.roles)
- underlay = underlay_ssh_manager.UnderlaySSHManager(config.underlay.ssh)
+ underlay = underlay_ssh_manager.UnderlaySSHManager(config)
if not config.underlay.lvm:
underlay.enable_lvm(hardware.lvm_storages())
@@ -206,6 +206,6 @@
# 1. hardware environment created and powered on
# 2. config.underlay.ssh contains SSH access to provisioned nodes
# (can be passed from external config with TESTS_CONFIGS variable)
- underlay = underlay_ssh_manager.UnderlaySSHManager(config.underlay.ssh)
+ underlay = underlay_ssh_manager.UnderlaySSHManager(config)
return underlay
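The fixtures now hand the whole settings object to the manager instead of only `config.underlay.ssh`; the manager keeps a reference and reads the SSH data (and, further below, `config.salt.salt_master_host`) itself. A minimal sketch of the new call contract, covering only the attributes touched by this patch; the `SimpleNamespace` stand-in is illustrative and not part of the framework:

```python
from types import SimpleNamespace

from tcp_tests.managers import underlay_ssh_manager

# Hypothetical stand-in for the oslo.config object built by settings_oslo.py;
# only the attributes read by this patch are modelled here.
config = SimpleNamespace(
    underlay=SimpleNamespace(ssh=[], lvm=None),
    salt=SimpleNamespace(salt_master_host='0.0.0.0'),
)

# Before: underlay_ssh_manager.UnderlaySSHManager(config.underlay.ssh)
# After:  the whole config is passed and stored by the manager.
underlay = underlay_ssh_manager.UnderlaySSHManager(config)
```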
diff --git a/tcp_tests/managers/underlay_ssh_manager.py b/tcp_tests/managers/underlay_ssh_manager.py
index b61079b..3185371 100644
--- a/tcp_tests/managers/underlay_ssh_manager.py
+++ b/tcp_tests/managers/underlay_ssh_manager.py
@@ -68,21 +68,23 @@
self.remote(): SSHClient object by a node name (w/wo address pool)
or by a hostname.
"""
+ __config = None
config_ssh = None
config_lvm = None
- def __init__(self, config_ssh):
+ def __init__(self, config):
"""Read config.underlay.ssh object
:param config_ssh: dict
"""
+ self.__config = config
if self.config_ssh is None:
self.config_ssh = []
if self.config_lvm is None:
self.config_lvm = {}
- self.add_config_ssh(config_ssh)
+ self.add_config_ssh(self.__config.underlay.ssh)
def add_config_ssh(self, config_ssh):
@@ -365,7 +367,7 @@
password=ssh_data['password'],
private_keys=ssh_data['keys'])
- def ensure_running_service(self, service_name, node_name, check_cmd,
+ def ensure_running_service(self, service_name, host, check_cmd,
state_running='start/running'):
"""Check if the service_name running or try to restart it
@@ -376,11 +378,11 @@
"""
cmd = "service {0} status | grep -q '{1}'".format(
service_name, state_running)
- with self.remote(node_name=node_name) as remote:
+ with self.remote(host=host) as remote:
result = remote.execute(cmd)
if result.exit_code != 0:
LOG.info("{0} is not in running state on the node {1},"
- " trying to start".format(service_name, node_name))
+ " trying to start".format(service_name, host))
cmd = ("service {0} stop;"
" sleep 3; killall -9 {0};"
"service {0} start; sleep 5;"
@@ -457,18 +459,19 @@
LOG.info(" === RETRY ({0}/{1}) ======================="
.format(x-1, retry_count))
else:
- # Workarounds for crashed services
- self.ensure_running_service(
- "salt-master",
- "cfg01.mk22-lab-advanced.local",
- "salt-call pillar.items",
- 'active (running)') # Hardcoded for now
- self.ensure_running_service(
- "salt-minion",
- "cfg01.mk22-lab-advanced.local",
- "salt 'cfg01*' pillar.items",
- "active (running)") # Hardcoded for now
- break
+ if self.__config.salt.salt_master_host != '0.0.0.0':
+ # Workarounds for crashed services
+ self.ensure_running_service(
+ "salt-master",
+ self.__config.salt.salt_master_host,
+ "salt-call pillar.items",
+ 'active (running)') # Hardcoded for now
+ self.ensure_running_service(
+ "salt-minion",
+ self.__config.salt.salt_master_host,
+ "salt 'cfg01*' pillar.items",
+ "active (running)") # Hardcoded for now
+ break
if x == 1 and skip_fail == False:
# In the last retry iteration, raise an exception
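The salt-service workaround is no longer bound to the hard-coded `cfg01.mk22-lab-advanced.local` name; it uses `config.salt.salt_master_host` and is skipped while that option presumably still holds its default of `0.0.0.0`, i.e. before the master is provisioned. A minimal sketch of the guard as a standalone helper; the function name is hypothetical, the arguments mirror the diff:

```python
def maybe_restart_salt_services(underlay, config):
    """Guarded workaround for crashed salt services, as used inside the retry loop."""
    master = config.salt.salt_master_host
    if master == '0.0.0.0':
        # Assumed default: the salt master is not provisioned yet, nothing to check.
        return
    underlay.ensure_running_service(
        "salt-master", master,
        "salt-call pillar.items", 'active (running)')
    underlay.ensure_running_service(
        "salt-minion", master,
        "salt 'cfg01*' pillar.items", "active (running)")
```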
diff --git a/tcp_tests/settings.py b/tcp_tests/settings.py
index d0005a3..5ec518d 100644
--- a/tcp_tests/settings.py
+++ b/tcp_tests/settings.py
@@ -29,6 +29,9 @@
'TIMESTAT_PATH_YAML', os.path.join(
LOGS_DIR, 'timestat_{}.yaml'.format(time.strftime("%Y%m%d"))))
+LAB_CONFIG_NAME = os.environ.get('LAB_CONFIG_NAME', 'mk22-lab-basic')
+
SSH_LOGIN = os.environ.get('SSH_LOGIN', 'root')
SSH_PASSWORD = os.environ.get('SSH_PASSWORD', 'r00tme')
SSH_NODE_CREDENTIALS = {"login": SSH_LOGIN,
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
index bde95c4..88633a0 100644
--- a/tcp_tests/settings_oslo.py
+++ b/tcp_tests/settings_oslo.py
@@ -24,15 +24,17 @@
_default_conf = pkg_resources.resource_filename(
- __name__, 'templates/underlay/mk22-lab-advanced.yaml')
+ __name__, 'templates/underlay/{0}.yaml'.format(settings.LAB_CONFIG_NAME))
_default_salt_steps = pkg_resources.resource_filename(
- __name__, 'templates/salt/mk22-lab-advanced-salt.yaml')
+ __name__, 'templates/salt/{0}-salt.yaml'.format(settings.LAB_CONFIG_NAME))
_default_common_services_steps = pkg_resources.resource_filename(
__name__,
- 'templates/common-services/mk22-lab-advanced-common-services.yaml')
+ 'templates/common-services/{0}-common-services.yaml'.format(
+ settings.LAB_CONFIG_NAME))
_default_openstack_steps = pkg_resources.resource_filename(
- __name__, 'templates/openstack/mk22-lab-advanced-openstack.yaml')
+ __name__, 'templates/openstack/{0}-openstack.yaml'.format(
+ settings.LAB_CONFIG_NAME))
hardware_opts = [
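For reference, the four template paths that `LAB_CONFIG_NAME` selects can be resolved the same way `settings_oslo.py` does. A minimal sketch; the package name `'tcp_tests'` stands in for the `__name__` used in the module, and the loop is only illustrative:

```python
import pkg_resources

LAB_CONFIG_NAME = 'mk22-lab-basic'  # or 'mk22-lab-advanced', taken from the env variable

templates = (
    'templates/underlay/{0}.yaml',
    'templates/salt/{0}-salt.yaml',
    'templates/common-services/{0}-common-services.yaml',
    'templates/openstack/{0}-openstack.yaml',
)

for template in templates:
    # Resolve the file relative to the tcp_tests package, as settings_oslo.py does.
    print(pkg_resources.resource_filename(
        'tcp_tests', template.format(LAB_CONFIG_NAME)))
```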
diff --git a/tcp_tests/templates/common-services/mk22-lab-basic-common-services.yaml b/tcp_tests/templates/common-services/mk22-lab-basic-common-services.yaml
new file mode 100644
index 0000000..86914f5
--- /dev/null
+++ b/tcp_tests/templates/common-services/mk22-lab-basic-common-services.yaml
@@ -0,0 +1,96 @@
+# Install support services
+- description: Install keepalived on primary controller
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' state.sls
+ keepalived
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Install keepalived on other controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
+ keepalived -b 1
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Check the VIP
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run
+ 'ip a | grep 172.16.10.254' | grep -B1 172.16.10.254
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Install glusterfs on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
+ glusterfs.server.service
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Setup glusterfs on primary controller
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' state.sls
+ glusterfs.server.setup
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Setup glusterfs on other controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
+ glusterfs.server.setup -b 1
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Check the gluster status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ 'gluster peer status; gluster volume status'
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Install RabbitMQ on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
+ rabbitmq
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Check the rabbitmq status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run
+ 'rabbitmqctl cluster_status'
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: '*Workaround* Update salt-formula-galera on config node to the latest
+ version'
+ cmd: apt-get -y --force-yes install salt-formula-galera
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Install Galera on primary controller
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' state.sls
+ galera
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Install Galera on other controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
+ galera
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Check mysql status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' mysql.status | grep -A1 wsrep_incoming_addresses
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: true
+- description: Install haproxy on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
+ haproxy
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Check haproxy status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' service.status
+ haproxy
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Install memcached on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
+ memcached
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
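Each entry in this file (and in the salt/openstack templates below) is a self-describing step: `cmd` is executed on `node_name`, retried according to `retry`, and a failure is fatal unless `skip_fail` is true. The project's actual runner is not part of this patch; the following is a hypothetical sketch of how such a step list could be interpreted, included only to document the meaning of the fields:

```python
import time

import yaml


def run_steps(underlay, steps_path):
    """Hypothetical interpreter for the step fields used in these templates."""
    with open(steps_path) as f:
        steps = yaml.safe_load(f)
    for step in steps:
        retry = step.get('retry', {'count': 1, 'delay': 1})
        for attempt in range(retry['count'], 0, -1):
            with underlay.remote(node_name=step['node_name']) as remote:
                result = remote.execute(step['cmd'])
            if result.exit_code == 0:
                break
            if attempt > 1:
                time.sleep(retry['delay'])
        else:
            if not step.get('skip_fail', False):
                raise RuntimeError(
                    "Step failed: {0}".format(step['description']))
```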
diff --git a/tcp_tests/templates/openstack/mk22-lab-basic-openstack.yaml b/tcp_tests/templates/openstack/mk22-lab-basic-openstack.yaml
new file mode 100644
index 0000000..11bc50b
--- /dev/null
+++ b/tcp_tests/templates/openstack/mk22-lab-basic-openstack.yaml
@@ -0,0 +1,201 @@
+# Install OpenStack control services
+- description: Install keystone on primary controller
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' state.sls
+ keystone
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Install keystone on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
+ keystone -b 1
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Populate keystone services/tenants/admins
+ cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False state.sls
+ keystone.client
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Check keystone service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonerc; keystone service-list'
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Install glance on primary controller
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' state.sls
+ glance
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Install glance on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
+ glance -b 1
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Configure glusterfs.client on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
+ glusterfs.client
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Configure (re-install) keystone on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
+ keystone -b 1
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Check glance image-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonerc; glance image-list'
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Install cinder on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
+ cinder -b 1
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Check cinder list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonerc; cinder list'
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Install nova on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' state.sls
+ nova
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Install nova on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
+ nova
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Check nova service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonerc; nova service-list'
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Install neutron on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' state.sls
+ neutron
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Install neutron on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
+ neutron
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Check neutron agent-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonerc; neutron agent-list'
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Deploy dashboard on prx*
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'prx*' state.apply
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: true
+- description: Deploy nginx proxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cfg*' state.sls
+ nginx
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: true
+
+# Install contrail on controllers
+- description: Install contrail database on controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
+ opencontrail.database -b 1
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Check cassandra status on ctl01
+ cmd: salt 'ctl01*' cmd.run 'nodetool status;nodetool compactionstats;nodetool describecluster;'
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Install opencontrail services on controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
+ opencontrail -b 1
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Check contrail status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonerc; contrail-status; neutron net-list; nova net-list'
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Add contrail bgp router on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '/usr/share/contrail-utils/provision_control.py --oper add --api_server_ip 172.16.10.254
+ --api_server_port 8082 --host_name ctl01 --host_ip 172.16.10.101 --router_asn
+ 64512 --admin_user admin --admin_password workshop --admin_tenant_name admin'
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Add contrail bgp router on ctl02
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl02*' cmd.run
+ '/usr/share/contrail-utils/provision_control.py --oper add --api_server_ip 172.16.10.254
+ --api_server_port 8082 --host_name ctl02 --host_ip 172.16.10.102 --router_asn
+ 64512 --admin_user admin --admin_password workshop --admin_tenant_name admin'
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Add contrail bgp router on ctl03
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl03*' cmd.run
+ '/usr/share/contrail-utils/provision_control.py --oper add --api_server_ip 172.16.10.254
+ --api_server_port 8082 --host_name ctl03 --host_ip 172.16.10.103 --router_asn
+ 64512 --admin_user admin --admin_password workshop --admin_tenant_name admin'
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+
+# Install compute node
+- description: Apply formulas for compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Re-apply (as in the documentation) formulas for the compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Add vrouter for cmp01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '/usr/share/contrail-utils/provision_vrouter.py --oper add --host_name cmp01 --host_ip
+ 172.16.10.105 --api_server_ip 172.16.10.254 --api_server_port 8082 --admin_user
+ admin --admin_password workshop --admin_tenant_name admin'
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Reboot compute nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' system.reboot
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Check IP on computes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+ 'ip a'
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+- description: Check contrail status on computes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+ 'contrail-status'
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
diff --git a/tcp_tests/templates/salt/mk22-lab-basic-salt.yaml b/tcp_tests/templates/salt/mk22-lab-basic-salt.yaml
new file mode 100644
index 0000000..f32daea
--- /dev/null
+++ b/tcp_tests/templates/salt/mk22-lab-basic-salt.yaml
@@ -0,0 +1,231 @@
+# Install salt to the config node
+- description: Configure tcpcloud repository on the cfg01 node
+ cmd: echo 'deb [arch=amd64] http://apt.tcpcloud.eu/nightly/ xenial main security extra tcp tcp-salt' > /etc/apt/sources.list;
+ echo 'deb [arch=amd64] http://apt.tcpcloud.eu/nightly/ trusty tcp-salt' >> /etc/apt/sources.list;
+ wget -O - http://apt.tcpcloud.eu/public.gpg | apt-key add -
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+
+#- description: Configure tcpcloud and saltstack repositories on the rest of nodes
+# cmd: echo 'deb [arch=amd64] http://apt.tcpcloud.eu/nightly/ trusty main security extra tcp tcp-salt' > /etc/apt/sources.list;
+# wget -O - http://apt.tcpcloud.eu/public.gpg | apt-key add - ;
+# echo 'deb http://repo.saltstack.com/apt/ubuntu/14.04/amd64/latest trusty main' > /etc/apt/sources.list.d/saltstack.list;
+# wget -O - https://repo.saltstack.com/apt/ubuntu/14.04/amd64/latest/SALTSTACK-GPG-KEY.pub | apt-key add -
+# node_name: ***
+# retry: {count: 1, delay: 1}
+# skip_fail: false
+
+- description: Update packages on cfg01
+ cmd: apt-get clean; apt-get update && apt-get -y upgrade
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 10}
+ skip_fail: false
+
+- description: Install common packages on cfg01
+ cmd: apt-get install -y python-pip wget curl tmux byobu iputils-ping traceroute htop tree
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 10}
+ skip_fail: false
+
+- description: Install salt formulas, master and minion on cfg01
+ cmd: apt-get install -y salt-formula-* salt-master salt-minion reclass
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 10}
+ skip_fail: false
+
+- description: Configure salt-master on cfg01
+ cmd: |
+ cat << 'EOF' >> /etc/salt/master.d/master.conf
+ file_roots:
+ base:
+ - /usr/share/salt-formulas/env
+ pillar_opts: False
+ open_mode: True
+ reclass: &reclass
+ storage_type: yaml_fs
+ inventory_base_uri: /srv/salt/reclass
+ ext_pillar:
+ - reclass: *reclass
+ master_tops:
+ reclass: *reclass
+ EOF
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+
+- description: Configure GIT settings and certificates
+ cmd: touch /root/.git_trusted_certs.pem;
+ for server in git.tcpcloud.eu github.com; do
+ openssl s_client -showcerts -connect $server:443 </dev/null
+ | openssl x509 -outform PEM
+ >> /root/.git_trusted_certs.pem;
+ done;
+ HOME=/root git config --global http.sslCAInfo /root/.git_trusted_certs.pem;
+ HOME=/root git config --global user.email "tcp-qa@example.com";
+ HOME=/root git config --global user.name "TCP QA";
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+
+- description: Clone reclass models and perform a workaround for https://mirantis.jira.com/browse/PROD-8078
+ cmd: |
+ git clone https://github.com/Mirantis/mk-lab-salt-model.git /srv/salt/reclass -b dash;
+ cat << 'EOF' >> /srv/salt/reclass/nodes/control/cfg01.mk22-lab-basic.local.yml
+ # local storage
+ reclass:
+ storage:
+ data_source:
+ engine: local
+ EOF
+ sed -i '/nagios/d' /srv/salt/reclass/classes/system/salt/master/formula/pkg/stacklight.yml
+ cd /srv/salt/reclass; git add -A;git commit -m"use dash repo";
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+
+- description: Configure reclass
+ cmd: |
+ mkdir -p /srv/salt/reclass/classes/service;
+ for i in /usr/share/salt-formulas/reclass/service/*; do
+ ln -s $i /srv/salt/reclass/classes/service/;
+ done;
+ [ ! -d /etc/reclass ] && mkdir /etc/reclass;
+ cat << 'EOF' >> /etc/reclass/reclass-config.yml
+ storage_type: yaml_fs
+ pretty_print: True
+ output: yaml
+ inventory_base_uri: /srv/salt/reclass
+ EOF
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+
+- description: Configure salt-minion on cfg01
+ cmd: |
+ [ ! -d /etc/salt/minion.d ] && mkdir -p /etc/salt/minion.d;
+ cat << "EOF" >> /etc/salt/minion.d/minion.conf
+ id: cfg01.mk22-lab-basic.local
+ master: localhost
+ EOF
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+
+- description: Restarting salt services with workarounds
+ cmd: service salt-master restart;
+ sleep 60;
+ rm -f /etc/salt/pki/minion/minion_master.pub;
+ service salt-minion restart;
+ reclass -n cfg01.mk22-lab-basic.local;
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 10}
+ skip_fail: false
+
+
+# Prepare salt services and nodes settings
+- description: Run 'linux' formula on cfg01
+ cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False state.sls
+ linux
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+- description: Run 'openssh' formula on cfg01
+ cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False state.sls
+ openssh;sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/' /etc/ssh/sshd_config
+ && service ssh restart
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+- description: '*Workaround* of the bug https://mirantis.jira.com/browse/PROD-7962'
+ cmd: echo ' StrictHostKeyChecking no' >> /root/.ssh/config
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+
+- description: Run 'salt' formula on cfg01
+ cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False state.sls
+ salt
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: true
+
+- description: Accept salt keys from all the nodes
+ cmd: salt-key -A -y
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Generate the inventory for all nodes under /srv/salt/reclass/nodes/_generated
+ cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False state.sls
+ reclass.storage
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+- description: Refresh pillars on all minions
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+# Bootstrap all nodes
+- description: Configure linux on all nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' state.sls
+ linux
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 5, delay: 5}
+ skip_fail: false
+
+- description: Configure openssh on all nodes except the config node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not
+ cfg*' state.sls openssh;salt --hard-crash --state-output=mixed --state-verbose=False
+ -C '* and not cfg*' cmd.run "sed -i 's/PasswordAuthentication no/PasswordAuthentication
+ yes/' /etc/ssh/sshd_config && service ssh restart"
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+- description: '*Workaround* for the bug https://mirantis.jira.com/browse/PROD-8025'
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run 'apt-get
+ update && apt-get -y upgrade'
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+- description: '*Workaround* for the bug https://mirantis.jira.com/browse/PROD-8021'
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run 'apt-get
+ -y install linux-image-extra-$(uname -r)'
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+- description: '*Workaround* for the bug https://mirantis.jira.com/browse/PROD-8025'
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run 'apt-get
+ -y install python-requests'
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+#- description: '*Workaround* of the bug https://mirantis.jira.com/browse/PROD-8063'
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run 'dhclient
+# -r;dhclient'
+# node_name: cfg01.mk22-lab-basic.local
+# retry: {count: 1, delay: 1}
+# skip_fail: false
+
+- description: Configure salt.minion on all nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' state.sls
+ salt.minion
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+- description: Configure ntp on all nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' state.sls
+ ntp
+ node_name: cfg01.mk22-lab-basic.local
+ retry: {count: 5, delay: 10}
+ skip_fail: false
diff --git a/tcp_tests/templates/underlay/mk22-lab-basic--meta-data.yaml b/tcp_tests/templates/underlay/mk22-lab-basic--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/underlay/mk22-lab-basic--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+ instance-id: iid-local1
+ hostname: {hostname}
+ local-hostname: {hostname}
diff --git a/tcp_tests/templates/underlay/mk22-lab-basic--user-data-cfg01.yaml b/tcp_tests/templates/underlay/mk22-lab-basic--user-data-cfg01.yaml
new file mode 100644
index 0000000..ef53c9b
--- /dev/null
+++ b/tcp_tests/templates/underlay/mk22-lab-basic--user-data-cfg01.yaml
@@ -0,0 +1,92 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDGwjUlYn9UsmWmAGSuEA2sICad7WqxgsJR0HKcMbbxi0tn96h4Cq2iGYmzlJ48egLm5R5pxyWnFvL4b/2zb+kKTPCMwRc9nv7xEGosEFNQEoSDd+gYu2CO0dgS2bX/7m2DXmzvhqPjxWQUXXsb0OYAS1r9Es65FE8y4rLaegz8V35xfH45bTCA0W8VSKh264XtGz12hacqsttE/UvyjJTZe+/XV+xJy3WAWxe8J/MuW1VqbqNewTmpTE/LJU8i6pG4msU6+wH99UvsGAOKQOduynUHKWG3VZg5YCjpbbV/t/pfW/vHB3b3jiifQmNhulyiG/CNnSQ5BahtV/7qPsYt vagrant@cfg01
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Block access to SSH while node is preparing
+ - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Prepare network connection
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+ - sudo ifup ens4
+
+
+ ########################################################
+ # Node is ready, allow SSH access
+ - echo "Allow SSH access ..."
+ - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ ########################################################
+
+ write_files:
+ - path: /etc/network/interfaces.d/99-tcp-tests.cfg
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+ auto ens4
+ iface ens4 inet dhcp
+
+ - path: /root/.ssh/id_rsa
+ owner: root:root
+ permissions: '0600'
+ content: |
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpAIBAAKCAQEAxsI1JWJ/VLJlpgBkrhANrCAmne1qsYLCUdBynDG28YtLZ/eo
+ eAqtohmJs5SePHoC5uUeacclpxby+G/9s2/pCkzwjMEXPZ7+8RBqLBBTUBKEg3fo
+ GLtgjtHYEtm1/+5tg15s74aj48VkFF17G9DmAEta/RLOuRRPMuKy2noM/Fd+cXx+
+ OW0wgNFvFUioduuF7Rs9doWnKrLbRP1L8oyU2Xvv11fsSct1gFsXvCfzLltVam6j
+ XsE5qUxPyyVPIuqRuJrFOvsB/fVL7BgDikDnbsp1Bylht1WYOWAo6W21f7f6X1v7
+ xwd2944on0JjYbpcohvwjZ0kOQWobVf+6j7GLQIDAQABAoIBAF0tAAMlmLGY7CQU
+ /R3IctBlRhU1DpZmyTfXc1MbzzqO5Wu44yZbQyjBthcUrdWGEUQy1r4Z2OHq1T54
+ KcPry6DDjuU9Q+rkVXmnC07a3GOmOq7zEEA/3zU01ImJvFNdb8NtCb6ELOKDT7Zo
+ WGUi2h/7M41+OqDzD2m4csYO/3Vvr12sMhn9BfwU4OPpL44A4PJiEryEAw9o5/j/
+ 73eyPvgf6tkC4l0mMtfHB9tg/F++iH8fiEr1SMvHGIc9gZNmFYMrs2XfLkAejPfH
+ XrOyw6eqd+kluqw51gHhdeQYwBx6mfOkbhPHWU79FzpH5M1ikdfImZmPCxVf3Ykj
+ nxLoK9UCgYEA4c9agPb/OFyN00nnUMBxzQt1pErpOf/7QhnvNZThomzSV7PyefxF
+ H6G/VlS3gCcrWBCh7mqOSxGcNQwgudVqzUm7QXruQeg4nWcCGSxg7lGYSEf0MyWL
+ 5wrd+f9MoV/VV8udIPENjp96o5kwQEVRfsTBNwmk54kup2+br5q8re8CgYEA4VT8
+ UeIN+plP6FjZYITI+SO/ou5goKIhfBrqa5gOXXPc2y6sIu9wBWCr+T7FAF/2gGhS
+ rpVx76zcmx05nwkxIlJh58+G3MVyUDFoWnrtL38vdkBSuOGgNfzcBsFpQvFs8WaW
+ otbuTtkPcXbVdYRr32/C620MxXhUO+svo3CLaaMCgYEA1rjlF8NHl+Gy31rkQg5t
+ aIxgFpVBR+zZkNa2d94V3Ozb65fqmALB/D1Dg6VVROB6P+i5AsyCeHHLd0oMCIof
+ YAyfqrlpvHRE+bAM98ESfyxJwVnipYwrh8z2nZYd2UoWxcCRrtRpjtipts2ha0w/
+ HWudS2e5To5NNdxUT9y1VDMCgYEAxkQiE+ZkyGiXv+hVtLCBqX4EA9fdm9msvudr
+ 9qn/kcj9vrntanvlxEWQbCoH61GEsu2YOtdyPiKKpc1sQvwyiHGWhgK7NoxhDiC7
+ IknhYxZ064ajgtu8PWS1MRiDhwypACt1Rej6HNSu2vZl0hZnWF2dU8tLHoHHFEXX
+ T+caNCMCgYBZpD6XBiiEXf0ikXYnXKOmbsyVG80V+yqfLo85qb2RW9TaviOSP43g
+ nB22ReMSHq2cOrs6VTTgfhxefBwzdDFbfKMf6ZU82jCNlpetAZOrhdMHUvcsjSQk
+ XKI6Ldfq6TU3xKujRHfGP+oQ6GLwVCL/kjGxOuSRLFGfRiiqYI3nww==
+ -----END RSA PRIVATE KEY-----
+
+ - path: /root/.ssh/config
+ owner: root:root
+ permissions: '0600'
+ content: |
+ Host *
+ ServerAliveInterval 300
+ ServerAliveCountMax 10
+ StrictHostKeyChecking no
+ UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/underlay/mk22-lab-basic--user-data.yaml b/tcp_tests/templates/underlay/mk22-lab-basic--user-data.yaml
new file mode 100644
index 0000000..b0dee66
--- /dev/null
+++ b/tcp_tests/templates/underlay/mk22-lab-basic--user-data.yaml
@@ -0,0 +1,87 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDGwjUlYn9UsmWmAGSuEA2sICad7WqxgsJR0HKcMbbxi0tn96h4Cq2iGYmzlJ48egLm5R5pxyWnFvL4b/2zb+kKTPCMwRc9nv7xEGosEFNQEoSDd+gYu2CO0dgS2bX/7m2DXmzvhqPjxWQUXXsb0OYAS1r9Es65FE8y4rLaegz8V35xfH45bTCA0W8VSKh264XtGz12hacqsttE/UvyjJTZe+/XV+xJy3WAWxe8J/MuW1VqbqNewTmpTE/LJU8i6pG4msU6+wH99UvsGAOKQOduynUHKWG3VZg5YCjpbbV/t/pfW/vHB3b3jiifQmNhulyiG/CNnSQ5BahtV/7qPsYt vagrant@cfg01
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Block access to SSH while node is preparing
+ - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i'.orig' -e's/without-password/yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Prepare network connection
+ - sudo ifup eth0
+ #- sudo route add default gw {gateway} {interface_name}
+ - sudo ifup eth1
+
+ ############## TCP Cloud cfg01 node ##################
+ #- sleep 120
+ - echo "Preparing base OS"
+ - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
+ - echo "deb [arch=amd64] http://apt.tcpcloud.eu/nightly/ trusty main security extra tcp tcp-salt" > /etc/apt/sources.list
+ - wget -O - http://apt.tcpcloud.eu/public.gpg | apt-key add -
+ # The saltstack repo provides salt-minion of the same version (2016.3.3) for both xenial and trusty
+ - echo "deb http://repo.saltstack.com/apt/ubuntu/14.04/amd64/latest trusty main" > /etc/apt/sources.list.d/saltstack.list
+ - wget -O - https://repo.saltstack.com/apt/ubuntu/14.04/amd64/latest/SALTSTACK-GPG-KEY.pub | apt-key add -
+
+ - apt-get clean
+ - apt-get update
+ - apt-get -y upgrade
+
+ # Install common packages
+ - apt-get install -y python-pip git
+ - apt-get install -y curl tmux byobu iputils-ping traceroute htop tree
+
+ - apt-get install -y salt-minion
+
+ # To be configured from inventory/fuel-devops by operator or autotests
+ - 'echo "id: {hostname}" >> /etc/salt/minion'
+ - 'echo "master: 172.16.10.100" >> /etc/salt/minion'
+
+ - echo "Restarting minion service with workarounds..."
+ - rm -f /etc/salt/pki/minion/minion_master.pub
+ - service salt-minion restart
+ - sleep 5
+ - rm -f /etc/salt/pki/minion/minion_master.pub
+ - service salt-minion restart
+
+ #- echo "Showing node metadata..."
+ #- salt-call pillar.data
+
+ #- echo "Running complete state ..."
+ #- salt-call state.sls linux,openssh,salt
+
+ ########################################################
+ # Node is ready, allow SSH access
+ - echo "Allow SSH access ..."
+ - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ ########################################################
+
+ write_files:
+ - path: /etc/network/interfaces.d/99-tcp-tests.cfg
+ content: |
+ auto eth0
+ iface eth0 inet dhcp
+ auto eth1
+ iface eth1 inet dhcp
diff --git a/tcp_tests/templates/underlay/mk22-lab-basic.yaml b/tcp_tests/templates/underlay/mk22-lab-basic.yaml
new file mode 100644
index 0000000..f23eade
--- /dev/null
+++ b/tcp_tests/templates/underlay/mk22-lab-basic.yaml
@@ -0,0 +1,262 @@
+---
+aliases:
+ dynamic_addresses_pool:
+ - &pool_default !os_env POOL_DEFAULT, 172.16.10.0/24:24
+
+ default_interface_model:
+ - &interface_model !os_env INTERFACE_MODEL, virtio
+
+template:
+ devops_settings:
+ env_name: !os_env ENV_NAME, tcpcloud-mk22
+
+ address_pools:
+ public-pool01:
+ net: 172.16.10.0/24:24
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_cfg01.mk22-lab-basic.local: +100
+ default_ctl01.mk22-lab-basic.local: +101
+ default_ctl02.mk22-lab-basic.local: +102
+ default_ctl03.mk22-lab-basic.local: +103
+ default_prx01.mk22-lab-basic.local: +121
+ default_cmp01.mk22-lab-basic.local: +105
+ ip_ranges:
+ dhcp: [+100, -2]
+
+ private-pool01:
+ net: 192.168.10.0/24:24
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_cfg01.mk22-lab-basic.local: +100
+ default_ctl01.mk22-lab-basic.local: +101
+ default_ctl02.mk22-lab-basic.local: +102
+ default_ctl03.mk22-lab-basic.local: +103
+ default_prx01.mk22-lab-basic.local: +121
+ default_cmp01.mk22-lab-basic.local: +105
+ ip_ranges:
+ dhcp: [+100, -2]
+
+ groups:
+ - name: default
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: !os_env CONNECTION_STRING, qemu:///system
+ storage_pool_name: !os_env STORAGE_POOL_NAME, default
+ stp: False
+ hpet: False
+ enable_acpi: true
+ use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+
+ network_pools:
+ public: public-pool01
+ private: private-pool01
+
+ l2_network_devices:
+ public:
+ address_pool: public-pool01
+ dhcp: true
+# forward:
+# mode: nat
+
+ private:
+ address_pool: private-pool01
+ dhcp: true
+ forward:
+ mode: nat
+
+ group_volumes:
+ - name: cloudimage1404 # This name is used for 'backing_store' option for node volumes.
+ source_image: !os_env IMAGE_PATH1404 # https://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img or
+ # http://apt.tcpcloud.eu/images/ubuntu-14-04-x64-201608231134.qcow2
+ format: qcow2
+ - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
+ source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img or
+ # http://apt.tcpcloud.eu/images/ubuntu-16-04-x64-201608231004.qcow2
+ format: qcow2
+
+ nodes:
+ - name: cfg01.mk22-lab-basic.local
+ role: salt_master
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with the name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: !include mk22-lab-basic--meta-data.yaml
+ cloudinit_user_data: !include mk22-lab-basic--user-data-cfg01.yaml
+
+ interfaces:
+ - label: ens3
+ l2_network_device: private
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: public
+ interface_model: *interface_model
+ network_config:
+ ens3:
+ networks:
+ - private
+ ens4:
+ networks:
+ - public
+
+ - name: ctl01.mk22-lab-basic.local
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 12400
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: eth0
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1404
+ format: qcow2
+ - name: iso # Volume with the name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: !include mk22-lab-basic--meta-data.yaml
+ cloudinit_user_data: !include mk22-lab-basic--user-data.yaml
+
+ interfaces: &interfaces
+ - label: eth0
+ l2_network_device: private
+ interface_model: *interface_model
+ - label: eth1
+ l2_network_device: public
+ interface_model: *interface_model
+ network_config: &network_config
+ eth0:
+ networks:
+ - private
+ eth1:
+ networks:
+ - public
+
+ - name: ctl02.mk22-lab-basic.local
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 12400
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: eth0
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1404
+ format: qcow2
+ - name: iso # Volume with the name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: !include mk22-lab-basic--meta-data.yaml
+ cloudinit_user_data: !include mk22-lab-basic--user-data.yaml
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: ctl03.mk22-lab-basic.local
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 12400
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: eth0
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1404
+ format: qcow2
+ - name: iso # Volume with the name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: !include mk22-lab-basic--meta-data.yaml
+ cloudinit_user_data: !include mk22-lab-basic--user-data.yaml
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: prx01.mk22-lab-basic.local
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: eth0
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1404
+ format: qcow2
+ - name: iso # Volume with the name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: !include mk22-lab-basic--meta-data.yaml
+ cloudinit_user_data: !include mk22-lab-basic--user-data.yaml
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: cmp01.mk22-lab-basic.local
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: eth0
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1404
+ format: qcow2
+ - name: iso # Volume with the name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: !include mk22-lab-basic--meta-data.yaml
+ cloudinit_user_data: !include mk22-lab-basic--user-data.yaml
+
+ interfaces: *interfaces
+ network_config: *network_config