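# Baremetal simulator: install the required distribution and pip packages,
# optionally emulate a VTEP-capable top-of-rack switch with openvswitch-vtep,
# and define libvirt VMs with virtualbmc IPMI endpoints that stand in for
# real baremetal nodes.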
{%- from "baremetal_simulator/map.jinja" import baremetal_simulator, bs_nodes with context %}

{%- if baremetal_simulator.enabled %}

simulator_pkgs:
  pkg.installed:
  - names: {{ baremetal_simulator.pkgs }}

simulator_pip_pkgs:
  pip.installed:
  - names: {{ baremetal_simulator.pip_pkgs }}
  - require:
    - simulator_pkgs
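# Optional top-of-rack (TOR) switch emulation: install openvswitch-vtep and
# manage its init script, defaults file and service.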
{%- if baremetal_simulator.get('tor', {}).get('enabled', False) %}

openvswitch-vtep-pkg:
  pkg.installed:
  - name: openvswitch-vtep

openvswitch-vtep.init:
  file.managed:
  - name: /etc/init.d/openvswitch-vtep
  - source: salt://baremetal_simulator/files/openvswitch/openvswitch-vtep.init
  - require:
    - openvswitch-vtep-pkg

openvswitch-vtep.default:
  file.managed:
  - name: /etc/default/openvswitch-vtep
  - source: salt://baremetal_simulator/files/openvswitch/openvswitch-vtep.default
  - require:
    - openvswitch-vtep-pkg

openvswitch-vtep:
  service.running:
  - watch:
    - file: /etc/init.d/openvswitch-vtep
    - file: /etc/default/openvswitch-vtep
  - require:
    - openvswitch-vtep.init
    - openvswitch-vtep.default
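# Register br-simulator as a VTEP physical switch, point its tunnel and
# management endpoints at the configured tunnel_ips, and launch the ovs-vtep
# helper daemon for it.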
br_simulator_ps_present:
  cmd.run:
  - name: vtep-ctl add-ps br-simulator
  - unless: vtep-ctl ps-exists br-simulator
br_simulator_vtep_tunnel:
  cmd.run:
  - name: vtep-ctl set Physical_Switch br-simulator tunnel_ips={{ baremetal_simulator.tor.tunnel_ips }}
  - unless: vtep-ctl show br-simulator | grep -q "tunnel_ips.*{{ baremetal_simulator.tor.tunnel_ips }}"
  - require:
    - br_simulator_ps_present

br_simulator_vtep_management:
  cmd.run:
  - name: vtep-ctl set Physical_Switch br-simulator management_ips={{ baremetal_simulator.tor.tunnel_ips }}
  - unless: vtep-ctl show br-simulator | grep -q "management_ips.*{{ baremetal_simulator.tor.tunnel_ips }}"
  - require:
    - br_simulator_ps_present
br_simulator_vtep:
  cmd.run:
  - name: /usr/share/openvswitch/scripts/ovs-vtep --log-file=/var/log/openvswitch/ovs-vtep.log --pidfile=/var/run/openvswitch/ovs-vtep.pid --detach br-simulator
  - unless: ps xafu | grep -v grep | grep -q "ovs-vtep .* br-simulator"
  - require:
    - br_simulator_vtep_management
    - br_simulator_vtep_tunnel

{%- endif %}
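# Libvirt prerequisites for the simulated nodes: keep libvirtd running,
# extend the qemu cgroup device ACLs, ensure the 'default' storage pool is
# defined, autostarted and running, and create the console log directory.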
libvirt-bin:
  service.running:
  - watch:
    - file: /etc/libvirt/qemu.conf

cgroup:
  file.append:
  - name: /etc/libvirt/qemu.conf
  - source: salt://baremetal_simulator/files/qemu-cgroup.conf

default_pool:
  cmd.run:
  - name: virsh pool-define-as --name default dir --target /var/lib/libvirt/images && virsh pool-autostart default && virsh pool-start default
  - unless: virsh pool-info default

/var/log/ironic-bm-logs/:
  file.directory:
  - makedirs: true
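# For every simulated node: create its qcow2 volume in the default pool,
# render the libvirt domain XML and (re)define the VM whenever it changes,
# then register and start a virtualbmc IPMI endpoint for it.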
{%- for identity_name, nodes in bs_nodes.items() %}
{%- for node in nodes %}
disk_create_{{ node.name }}:
  cmd.run:
  - name: virsh vol-create-as default {{ node.name }}.qcow2 --capacity {{ node.properties.local_gb }}G --format qcow2
  - unless: test -f /var/lib/libvirt/images/{{ node.name }}.qcow2
vm_{{ node.name }}_present:
  cmd.run:
  - name: virsh undefine {{ node.name }} && sleep 1; virsh define /tmp/{{ node.name }}.xml
  - onchanges:
    - file: /tmp/{{ node.name }}.xml

/tmp/{{ node.name }}.xml:
  file.managed:
  - source: salt://baremetal_simulator/files/vm.xml
  - template: jinja
  - defaults:
      node: {{ node }}
vbmc_add_{{ node.name }}:
  cmd.run:
  - name: vbmc add {{ node.name }} --port {{ node.driver_info.ipmi_port }}
  - unless: vbmc show {{ node.name }}

vbmc_start_{{ node.name }}:
  cmd.run:
  - name: vbmc start {{ node.name }} > /dev/null 2>&1 && sleep 1
  - unless: vbmc show {{ node.name }} | grep status | grep -q running
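# Wire each node port into the simulator bridge: create a tap device and
# add it to br-simulator as an OVS port.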
{%- for port in node.ports %}

{{ node.name }}_tap_device_{{ loop.index }}:
  cmd.run:
  - name: ip tuntap add dev tap-{{ node.name }}i{{ loop.index }} mode tap; ip link set dev tap-{{ node.name }}i{{ loop.index }} up
  - unless: ip link show tap-{{ node.name }}i{{ loop.index }}

{{ node.name }}_tap{{ loop.index }}_in_ovs:
  openvswitch_port.present:
  - name: tap-{{ node.name }}i{{ loop.index }}
  - bridge: br-simulator

{%- endfor %}
{%- endfor %}
{%- endfor %}

{%- endif %}