# -*- coding: utf-8 -*-
'''
Work with virtual machines managed by libvirt

:depends: libvirt Python module
'''
# Special thanks to Michael DeHaan: many of the concepts, and a few of the
# structures, from his virt func module have been used here

# Import python libs
from __future__ import absolute_import
import os
import re
import sys
import shutil
import subprocess
import string  # pylint: disable=deprecated-module
import logging

# Import third party libs
import yaml
import jinja2
import jinja2.exceptions
import salt.ext.six as six
from salt.ext.six.moves import StringIO as _StringIO  # pylint: disable=import-error
from xml.dom import minidom
try:
    import libvirt  # pylint: disable=import-error
    HAS_ALL_IMPORTS = True
except ImportError:
    HAS_ALL_IMPORTS = False

# Import salt libs
import salt.utils
import salt.utils.files
import salt.utils.templates
import salt.utils.validate.net
from salt.exceptions import CommandExecutionError, SaltInvocationError

log = logging.getLogger(__name__)

# Set up template environment
JINJA = jinja2.Environment(
    loader=jinja2.FileSystemLoader(
        os.path.join(salt.utils.templates.TEMPLATE_DIRNAME, 'virt')
    )
)

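# Map libvirt virDomainState enum values (0 = no state through 6 = crashed)
# onto the coarser state names reported by this module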
VIRT_STATE_NAME_MAP = {0: 'running',
                       1: 'running',
                       2: 'running',
                       3: 'paused',
                       4: 'shutdown',
                       5: 'shutdown',
                       6: 'crashed'}

VIRT_DEFAULT_HYPER = 'kvm'


def __virtual__():
    if not HAS_ALL_IMPORTS:
        return False
    return 'virt'


def __get_conn():
    '''
    Detects what type of dom this node is and attempts to connect to the
    correct hypervisor via libvirt.
    '''
    # This has only been tested on kvm and xen, it needs to be expanded to
    # support all vm layers supported by libvirt

    def __esxi_uri():
        '''
        Connect to an ESXi host with a configuration like so:

        .. code-block:: yaml

            libvirt:
              hypervisor: esxi
              connection: esx01

        The connection setting can either be an explicit libvirt URI,
        or a libvirt URI alias as in this example. No, it cannot be
        just a hostname.

        Example libvirt `/etc/libvirt/libvirt.conf`:

        .. code-block::

            uri_aliases = [
              "esx01=esx://10.1.1.101/?no_verify=1&auto_answer=1",
              "esx02=esx://10.1.1.102/?no_verify=1&auto_answer=1",
            ]

        Reference:

         - http://libvirt.org/drvesx.html#uriformat
         - http://libvirt.org/uri.html#URI_config
        '''
        connection = __salt__['config.get']('libvirt:connection', 'esx')
        return connection

    def __esxi_auth():
        '''
        We rely on the credentials being provided to libvirt through
        its built-in mechanisms.

        Example libvirt `/etc/libvirt/auth.conf`:

        .. code-block::

            [credentials-myvirt]
            username=user
            password=secret

            [auth-esx-10.1.1.101]
            credentials=myvirt

            [auth-esx-10.1.1.102]
            credentials=myvirt

        Reference:

          - http://libvirt.org/auth.html#Auth_client_config
        '''
        return [[libvirt.VIR_CRED_EXTERNAL], lambda: 0, None]

    if 'virt.connect' in __opts__:
        conn_str = __opts__['virt.connect']
    else:
        conn_str = 'qemu:///system'

    conn_func = {
        'esxi': [libvirt.openAuth, [__esxi_uri(),
                                    __esxi_auth(),
                                    0]],
        'qemu': [libvirt.open, [conn_str]],
    }

    hypervisor = __salt__['config.get']('libvirt:hypervisor', 'qemu')

    try:
        conn = conn_func[hypervisor][0](*conn_func[hypervisor][1])
    except Exception:
        raise CommandExecutionError(
            'Sorry, {0} failed to open a connection to the hypervisor '
            'software at {1}'.format(
                __grains__['fqdn'],
                conn_func[hypervisor][1][0]
            )
        )
    return conn


def _get_dom(vm_):
    '''
    Return a domain object for the named vm
    '''
    conn = __get_conn()
    if vm_ not in list_vms():
        raise CommandExecutionError('The specified vm is not present')
    return conn.lookupByName(vm_)


def _libvirt_creds():
    '''
    Returns the user and group that the disk images should be owned by
    '''
    g_cmd = 'grep ^\\s*group /etc/libvirt/qemu.conf'
    u_cmd = 'grep ^\\s*user /etc/libvirt/qemu.conf'
    try:
        group = subprocess.Popen(g_cmd,
                                 shell=True,
                                 stdout=subprocess.PIPE).communicate()[0].split('"')[1]
    except IndexError:
        group = 'root'
    try:
        user = subprocess.Popen(u_cmd,
                                shell=True,
                                stdout=subprocess.PIPE).communicate()[0].split('"')[1]
    except IndexError:
        user = 'root'
    return {'user': user, 'group': group}


def _get_migrate_command():
    '''
    Returns the command shared by the different migration types
    '''
    if __salt__['config.option']('virt.tunnel'):
        return ('virsh migrate --p2p --tunnelled --live --persistent '
                '--undefinesource ')
    return 'virsh migrate --live --persistent --undefinesource '


def _get_target(target, ssh):
    proto = 'qemu'
    if ssh:
        proto += '+ssh'
    return ' {0}://{1}/{2}'.format(proto, target, 'system')


def _gen_xml(name,
             cpu,
             mem,
             diskp,
             nicp,
             hypervisor,
             **kwargs):
    '''
    Generate the XML string to define a libvirt vm
    '''
    hypervisor = 'vmware' if hypervisor == 'esxi' else hypervisor
    mem = mem * 1024  # convert MB to KiB
    context = {
        'hypervisor': hypervisor,
        'name': name,
        'cpu': str(cpu),
        'mem': str(mem),
    }
    if hypervisor in ['qemu', 'kvm']:
        context['controller_model'] = False
    elif hypervisor in ['esxi', 'vmware']:
        # TODO: make bus and model parameterized, this works for 64-bit Linux
        context['controller_model'] = 'lsilogic'

    if 'boot_dev' in kwargs:
        context['boot_dev'] = []
        for dev in kwargs['boot_dev'].split():
            context['boot_dev'].append(dev)
    else:
        context['boot_dev'] = ['hd']

    if 'serial_type' in kwargs:
        context['serial_type'] = kwargs['serial_type']
    if 'serial_type' in context and context['serial_type'] == 'tcp':
        if 'telnet_port' in kwargs:
            context['telnet_port'] = kwargs['telnet_port']
        else:
            context['telnet_port'] = 23023  # FIXME: use random unused port
    if 'serial_type' in context:
        if 'console' in kwargs:
            context['console'] = kwargs['console']
        else:
            context['console'] = True
    context['disks'] = {}
    for i, disk in enumerate(diskp):
        for disk_name, args in disk.items():
            context['disks'][disk_name] = {}
            fn_ = '{0}.{1}'.format(disk_name, args['format'])
            context['disks'][disk_name]['file_name'] = fn_
            context['disks'][disk_name]['source_file'] = os.path.join(args['pool'],
                                                                      name,
                                                                      fn_)
            if hypervisor in ['qemu', 'kvm']:
                context['disks'][disk_name]['target_dev'] = 'vd{0}'.format(string.ascii_lowercase[i])
                context['disks'][disk_name]['address'] = False
                context['disks'][disk_name]['driver'] = True
            elif hypervisor in ['esxi', 'vmware']:
                context['disks'][disk_name]['target_dev'] = 'sd{0}'.format(string.ascii_lowercase[i])
                context['disks'][disk_name]['address'] = True
                context['disks'][disk_name]['driver'] = False
            context['disks'][disk_name]['disk_bus'] = args['model']
            context['disks'][disk_name]['type'] = args['format']
            context['disks'][disk_name]['index'] = str(i)

    context['nics'] = nicp

    fn_ = 'libvirt_domain.jinja'
    try:
        template = JINJA.get_template(fn_)
    except jinja2.exceptions.TemplateNotFound:
        log.error('Could not load template {0}'.format(fn_))
        return ''

    return template.render(**context)


def _gen_vol_xml(vmname,
                 diskname,
                 size,
                 hypervisor,
                 **kwargs):
    '''
    Generate the XML string to define a libvirt storage volume
    '''
    size = int(size) * 1024  # convert MB to KiB
    disk_info = _get_image_info(hypervisor, vmname, **kwargs)
    context = {
        'name': vmname,
        'filename': '{0}.{1}'.format(diskname, disk_info['disktype']),
        'volname': diskname,
        'disktype': disk_info['disktype'],
        'size': str(size),
        'pool': disk_info['pool'],
    }
    fn_ = 'libvirt_volume.jinja'
    try:
        template = JINJA.get_template(fn_)
    except jinja2.exceptions.TemplateNotFound:
        log.error('Could not load template {0}'.format(fn_))
        return ''
    return template.render(**context)


def _qemu_image_info(path):
    '''
    Detect information for the image at path
    '''
    ret = {}
    out = __salt__['cmd.run']('qemu-img info {0}'.format(path))

    match_map = {'size': r'virtual size: \w+ \((\d+) byte[s]?\)',
                 'format': r'file format: (\w+)'}

    for info, search in match_map.items():
        try:
            ret[info] = re.search(search, out).group(1)
        except AttributeError:
            continue
    return ret


# TODO: this function is deprecated, should be replaced with
# _qemu_image_info()
def _image_type(vda):
    '''
    Detect what driver needs to be used for the given image
    '''
    out = __salt__['cmd.run']('qemu-img info {0}'.format(vda))
    if 'file format: qcow2' in out:
        return 'qcow2'
    else:
        return 'raw'


# TODO: this function is deprecated, should be merged and replaced
# with _disk_profile()
def _get_image_info(hypervisor, name, **kwargs):
    '''
    Determine disk image info, such as filename, image format and
    storage pool, based on which hypervisor is used
    '''
    ret = {}
    if hypervisor in ['esxi', 'vmware']:
        ret['disktype'] = 'vmdk'
        ret['filename'] = '{0}{1}'.format(name, '.vmdk')
        ret['pool'] = '[{0}] '.format(kwargs.get('pool', '0'))
    elif hypervisor in ['kvm', 'qemu']:
        ret['disktype'] = 'qcow2'
        ret['filename'] = '{0}{1}'.format(name, '.qcow2')
        ret['pool'] = __salt__['config.option']('virt.images')
    return ret


def _disk_profile(profile, hypervisor, **kwargs):
    '''
    Gather the disk profile from the config or apply the default based
    on the active hypervisor

    This is the ``default`` profile for KVM/QEMU, which can be
    overridden in the configuration:

    .. code-block:: yaml

        virt:
          disk:
            default:
              - system:
                  size: 8192
                  format: qcow2
                  model: virtio

    The ``format`` and ``model`` parameters are optional, and will
    default to whatever is best suited to the active hypervisor.
    '''
    default = [
        {'system':
            {'size': '8192'}
        }
    ]
    if hypervisor in ['esxi', 'vmware']:
        overlay = {'format': 'vmdk',
                   'model': 'scsi',
                   'pool': '[{0}] '.format(kwargs.get('pool', '0'))
                   }
    elif hypervisor in ['qemu', 'kvm']:
        overlay = {'format': 'qcow2',
                   'model': 'virtio',
                   'pool': __salt__['config.option']('virt.images')
                   }
    else:
        overlay = {}

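    # Look up the requested profile, falling back to the default, then fill
    # in any overlay key a disk entry does not set explicitly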
    disklist = __salt__['config.get']('virt:disk', {}).get(profile, default)
    for key, val in overlay.items():
        for i, disks in enumerate(disklist):
            for disk in disks:
                if key not in disks[disk]:
                    disklist[i][disk][key] = val
    return disklist


def _nic_profile(profile_name, hypervisor, **kwargs):

    default = [{'eth0': {}}]
    vmware_overlay = {'type': 'bridge', 'source': 'DEFAULT', 'model': 'e1000'}
    kvm_overlay = {'type': 'bridge', 'source': 'br0', 'model': 'virtio'}
    overlays = {
        'kvm': kvm_overlay,
        'qemu': kvm_overlay,
        'esxi': vmware_overlay,
        'vmware': vmware_overlay,
    }

    # support old location
    config_data = __salt__['config.option']('virt.nic', {}).get(
        profile_name, None
    )

    if config_data is None:
        config_data = __salt__['config.get']('virt:nic', {}).get(
            profile_name, default
        )

    interfaces = []

    def append_dict_profile_to_interface_list(profile_dict):
        for interface_name, attributes in profile_dict.items():
            attributes['name'] = interface_name
            interfaces.append(attributes)

    # old style dicts (top-level dicts)
    #
    # virt:
    #   nic:
    #     eth0:
    #       bridge: br0
    #     eth1:
    #       network: test_net
    if isinstance(config_data, dict):
        append_dict_profile_to_interface_list(config_data)

    # new style lists (may contain dicts)
    #
    # virt:
    #   nic:
    #     - eth0:
    #         bridge: br0
    #     - eth1:
    #         network: test_net
    #
    # virt:
    #   nic:
    #     - name: eth0
    #       bridge: br0
    #     - name: eth1
    #       network: test_net
    elif isinstance(config_data, list):
        for interface in config_data:
            if isinstance(interface, dict):
                if len(interface) == 1:
                    append_dict_profile_to_interface_list(interface)
                else:
                    interfaces.append(interface)

    def _normalize_net_types(attributes):
        '''
        Guess which style of definition:

        bridge: br0

        or

        network: net0

        or

        type: network
        source: net0
        '''
        for type_ in ['bridge', 'network']:
            if type_ in attributes:
                attributes['type'] = type_
                # we want to discard the original key
                attributes['source'] = attributes.pop(type_)

        attributes['type'] = attributes.get('type', None)
        attributes['source'] = attributes.get('source', None)

    def _apply_default_overlay(attributes):
        for key, value in overlays[hypervisor].items():
            if key not in attributes or not attributes[key]:
                attributes[key] = value

    def _assign_mac(attributes):
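        # A fixed MAC may be passed for a NIC as kwarg '<nic name>_mac';
        # otherwise a random one is generated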
        dmac = '{0}_mac'.format(attributes['name'])
        if dmac in kwargs:
            dmac = kwargs[dmac]
            if salt.utils.validate.net.mac(dmac):
                attributes['mac'] = dmac
            else:
                msg = 'Malformed MAC address: {0}'.format(dmac)
                raise CommandExecutionError(msg)
        else:
            attributes['mac'] = salt.utils.gen_mac()

    for interface in interfaces:
        _normalize_net_types(interface)
        _assign_mac(interface)
        if hypervisor in overlays:
            _apply_default_overlay(interface)

    return interfaces


def init(name,
         cpu,
         mem,
         image=None,
         nic='default',
         hypervisor=VIRT_DEFAULT_HYPER,
         start=True,  # pylint: disable=redefined-outer-name
         disk='default',
         saltenv='base',
         **kwargs):
    '''
    Initialize a new vm

    CLI Example:

    .. code-block:: bash

        salt 'hypervisor' virt.init vm_name 4 512 salt://path/to/image.raw
        salt 'hypervisor' virt.init vm_name 4 512 nic=profile disk=profile
    '''
    hypervisor = __salt__['config.get']('libvirt:hypervisor', hypervisor)

    nicp = _nic_profile(nic, hypervisor, **kwargs)

    diskp = None
    seedable = False
    if image:  # with disk template image
        # if an image is used, assume only one disk, i.e. the first
        # entry of the disk profile
        # TODO: make it possible to use disk profiles and use the
        # template image as the system disk
        # TCP cloud: use the requested disk profile rather than 'default'
        diskp = _disk_profile(disk, hypervisor, **kwargs)
        # When using a disk profile extract the sole dict key of the first
        # array element as the filename for disk
        disk_name = next(six.iterkeys(diskp[0]))
        disk_type = diskp[0][disk_name]['format']
        disk_file_name = '{0}.{1}'.format(disk_name, disk_type)
        disk_size = diskp[0][disk_name]['size']  # TCP cloud: profile disk size

        if hypervisor in ['esxi', 'vmware']:
            # TODO: we should be copying the image file onto the ESX host
            raise SaltInvocationError('virt.init does not support image '
                                      'templates in conjunction with '
                                      'the esxi hypervisor')
        elif hypervisor in ['qemu', 'kvm']:
            img_dir = __salt__['config.option']('virt.images')
            img_dest = os.path.join(
                img_dir,
                name,
                disk_file_name
            )
            img_dir = os.path.dirname(img_dest)
            sfn = __salt__['cp.cache_file'](image, saltenv)
            if not os.path.isdir(img_dir):
                os.makedirs(img_dir)
            try:
                salt.utils.files.copyfile(sfn, img_dest)
                # read the current umask without changing it
                mask = os.umask(0)
                os.umask(mask)

                # TCP cloud: resize the copied template image to the
                # profile's disk size
                cmd = 'qemu-img resize ' + img_dest + ' ' + str(disk_size) + 'M'
                subprocess.call(cmd, shell=True)

                # apply the umask and remove the exec bit
                mode = (0o0777 ^ mask) & 0o0666
                os.chmod(img_dest, mode)

            except (IOError, OSError) as e:
                raise CommandExecutionError('problem copying image. {0} - {1}'.format(image, e))

            seedable = True
        else:
            log.error('unsupported hypervisor when handling disk image')

    else:
        # no disk template image specified, create disks based on disk profile
        diskp = _disk_profile(disk, hypervisor, **kwargs)
        if hypervisor in ['qemu', 'kvm']:
            # TODO: we should be creating disks in the local filesystem with
            # qemu-img
            raise SaltInvocationError('virt.init does not support disk '
                                      'profiles in conjunction with '
                                      'qemu/kvm at this time, use an image '
                                      'template instead')
        else:
            # assume libvirt manages disks for us
            for disk in diskp:
                for disk_name, args in disk.items():
                    xml = _gen_vol_xml(name,
                                       disk_name,
                                       args['size'],
                                       hypervisor)
                    define_vol_xml_str(xml)

    xml = _gen_xml(name, cpu, mem, diskp, nicp, hypervisor, **kwargs)
    define_xml_str(xml)

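    # Optionally seed the copied template image before first boot (by default
    # via the 'seed.apply' execution module), e.g. to inject a minion config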
    if kwargs.get('seed') and seedable:
        install = kwargs.get('install', True)
        seed_cmd = kwargs.get('seed_cmd', 'seed.apply')

        __salt__[seed_cmd](img_dest,
                           id_=name,
                           config=kwargs.get('config'),
                           install=install)
    if start:
        create(name)

    return True


def list_vms():
    '''
    Return a list of virtual machine names on the minion

    CLI Example:

    .. code-block:: bash

        salt '*' virt.list_vms
    '''
    vms = []
    vms.extend(list_active_vms())
    vms.extend(list_inactive_vms())
    return vms


def list_active_vms():
    '''
    Return a list of names for the active virtual machines on the minion

    CLI Example:

    .. code-block:: bash

        salt '*' virt.list_active_vms
    '''
    conn = __get_conn()
    vms = []
    for id_ in conn.listDomainsID():
        vms.append(conn.lookupByID(id_).name())
    return vms


def list_inactive_vms():
    '''
    Return a list of names for the inactive virtual machines on the minion

    CLI Example:

    .. code-block:: bash

        salt '*' virt.list_inactive_vms
    '''
    conn = __get_conn()
    vms = []
    for id_ in conn.listDefinedDomains():
        vms.append(id_)
    return vms


def vm_info(vm_=None):
    '''
    Return detailed information about the vms on this hyper in a
    dict of dicts:

    .. code-block:: python

        {
            'your-vm': {
                'cpu': <int>,
                'maxMem': <int>,
                'mem': <int>,
                'state': '<state>',
                'cputime': <int>
            },
            ...
        }

    If you pass a VM name in as an argument then it will return info
    for just the named VM, otherwise it will return all VMs.

    CLI Example:

    .. code-block:: bash

        salt '*' virt.vm_info
    '''
    def _info(vm_):
        dom = _get_dom(vm_)
        raw = dom.info()
        return {'cpu': raw[3],
                'cputime': int(raw[4]),
                'disks': get_disks(vm_),
                'graphics': get_graphics(vm_),
                'nics': get_nics(vm_),
                'maxMem': int(raw[1]),
                'mem': int(raw[2]),
                'state': VIRT_STATE_NAME_MAP.get(raw[0], 'unknown')}
    info = {}
    if vm_:
        info[vm_] = _info(vm_)
    else:
        for vm_ in list_vms():
            info[vm_] = _info(vm_)
    return info


def vm_state(vm_=None):
    '''
    Return a dict of all the vms and their state.

    If you pass a VM name in as an argument then it will return info
    for just the named VM, otherwise it will return all VMs.

    CLI Example:

    .. code-block:: bash

        salt '*' virt.vm_state <vm name>
    '''
    def _info(vm_):
        dom = _get_dom(vm_)
        raw = dom.info()
        state = VIRT_STATE_NAME_MAP.get(raw[0], 'unknown')
        return state
    info = {}
    if vm_:
        info[vm_] = _info(vm_)
    else:
        for vm_ in list_vms():
            info[vm_] = _info(vm_)
    return info


def node_info():
    '''
    Return a dict with information about this node

    CLI Example:

    .. code-block:: bash

        salt '*' virt.node_info
    '''
    conn = __get_conn()
    raw = conn.getInfo()
    info = {'cpucores': raw[6],
            'cpumhz': raw[3],
            'cpumodel': str(raw[0]),
            'cpus': raw[2],
            'cputhreads': raw[7],
            'numanodes': raw[4],
            'phymemory': raw[1],
            'sockets': raw[5]}
    return info


def get_nics(vm_):
    '''
    Return info about the network interfaces of a named vm

    CLI Example:

    .. code-block:: bash

        salt '*' virt.get_nics <vm name>
    '''
    nics = {}
    doc = minidom.parse(_StringIO(get_xml(vm_)))
    for node in doc.getElementsByTagName('devices'):
        i_nodes = node.getElementsByTagName('interface')
        for i_node in i_nodes:
            nic = {}
            nic['type'] = i_node.getAttribute('type')
            for v_node in i_node.getElementsByTagName('*'):
                if v_node.tagName == 'mac':
                    nic['mac'] = v_node.getAttribute('address')
                if v_node.tagName == 'model':
                    nic['model'] = v_node.getAttribute('type')
                if v_node.tagName == 'target':
                    nic['target'] = v_node.getAttribute('dev')
                # driver, source, and address can all have optional attributes
                if re.match('(driver|source|address)', v_node.tagName):
                    temp = {}
                    for key, value in v_node.attributes.items():
                        temp[key] = value
                    nic[str(v_node.tagName)] = temp
                # virtualport needs to be handled separately, to pick up the
                # type attribute of the virtualport itself
                if v_node.tagName == 'virtualport':
                    temp = {}
                    temp['type'] = v_node.getAttribute('type')
                    for key, value in v_node.attributes.items():
                        temp[key] = value
                    nic['virtualport'] = temp
            if 'mac' not in nic:
                continue
            nics[nic['mac']] = nic
    return nics


def get_macs(vm_):
    '''
    Return a list of MAC addresses from the named vm

    CLI Example:

    .. code-block:: bash

        salt '*' virt.get_macs <vm name>
    '''
    macs = []
    doc = minidom.parse(_StringIO(get_xml(vm_)))
    for node in doc.getElementsByTagName('devices'):
        i_nodes = node.getElementsByTagName('interface')
        for i_node in i_nodes:
            for v_node in i_node.getElementsByTagName('mac'):
                macs.append(v_node.getAttribute('address'))
    return macs


def get_graphics(vm_):
    '''
    Returns the information on vnc for a given vm

    CLI Example:

    .. code-block:: bash

        salt '*' virt.get_graphics <vm name>
    '''
    out = {'autoport': 'None',
           'keymap': 'None',
           'listen': 'None',
           'port': 'None',
           'type': 'vnc'}
    xml = get_xml(vm_)
    ssock = _StringIO(xml)
    doc = minidom.parse(ssock)
    for node in doc.getElementsByTagName('domain'):
        g_nodes = node.getElementsByTagName('graphics')
        for g_node in g_nodes:
            for key, value in g_node.attributes.items():
                out[key] = value
    return out


def get_disks(vm_):
    '''
    Return the disks of a named vm

    CLI Example:

    .. code-block:: bash

        salt '*' virt.get_disks <vm name>
    '''
    disks = {}
    doc = minidom.parse(_StringIO(get_xml(vm_)))
    for elem in doc.getElementsByTagName('disk'):
        sources = elem.getElementsByTagName('source')
        targets = elem.getElementsByTagName('target')
        if len(sources) > 0:
            source = sources[0]
        else:
            continue
        if len(targets) > 0:
            target = targets[0]
        else:
            continue
        if target.hasAttribute('dev'):
            qemu_target = ''
            if source.hasAttribute('file'):
                qemu_target = source.getAttribute('file')
            elif source.hasAttribute('dev'):
                qemu_target = source.getAttribute('dev')
            elif source.hasAttribute('protocol') and \
                    source.hasAttribute('name'):  # for rbd network disks
                qemu_target = '{0}:{1}'.format(
                    source.getAttribute('protocol'),
                    source.getAttribute('name'))
            if qemu_target:
                disks[target.getAttribute('dev')] = {
                    'file': qemu_target}
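
    # Enrich each disk with metadata from 'qemu-img info' (format, sizes,
    # snapshots); the text output is massaged into YAML below so it can be
    # parsed into a dict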
    for dev in disks:
        try:
            hypervisor = __salt__['config.get']('libvirt:hypervisor', 'kvm')
            if hypervisor not in ['qemu', 'kvm']:
                break

            output = []
            qemu_output = subprocess.Popen(['qemu-img', 'info',
                                            disks[dev]['file']],
                                           shell=False,
                                           stdout=subprocess.PIPE).communicate()[0]
            snapshots = False
            columns = None
            lines = qemu_output.strip().split('\n')
            for line in lines:
                if line.startswith('Snapshot list:'):
                    snapshots = True
                    continue

                # If this is a copy-on-write image, then the backing file
                # represents the base image
                #
                # backing file: base.qcow2 (actual path: /var/shared/base.qcow2)
                elif line.startswith('backing file'):
                    matches = re.match(r'.*\(actual path: (.*?)\)', line)
                    if matches:
                        output.append('backing file: {0}'.format(matches.group(1)))
                    continue

                elif snapshots:
                    if line.startswith('ID'):  # do not parse table headers
                        line = line.replace('VM SIZE', 'VMSIZE')
                        line = line.replace('VM CLOCK', 'TIME VMCLOCK')
                        columns = re.split(r'\s+', line)
                        columns = [c.lower() for c in columns]
                        output.append('snapshots:')
                        continue
                    fields = re.split(r'\s+', line)
                    for i, field in enumerate(fields):
                        sep = ' '
                        if i == 0:
                            sep = '-'
                        output.append(
                            '{0} {1}: "{2}"'.format(
                                sep, columns[i], field
                            )
                        )
                    continue
                output.append(line)
            output = '\n'.join(output)
            disks[dev].update(yaml.safe_load(output))
        except TypeError:
            disks[dev].update(yaml.safe_load('image: Does not exist'))
    return disks


def setmem(vm_, memory, config=False):
    '''
    Changes the amount of memory allocated to a VM. The VM must be shut
    down for this to work.

    memory is to be specified in MB
    If config is True then we ask libvirt to modify the config as well

    CLI Example:

    .. code-block:: bash

        salt '*' virt.setmem myvm 768
    '''
    if vm_state(vm_) != 'shutdown':
        return False

    dom = _get_dom(vm_)

    # libvirt has a funny bitwise system for the flags in that the flag
    # to affect the "current" setting is 0, which means that to set the
    # current setting we have to call it a second time with just 0 set
    flags = libvirt.VIR_DOMAIN_MEM_MAXIMUM
    if config:
        flags = flags | libvirt.VIR_DOMAIN_AFFECT_CONFIG

    ret1 = dom.setMemoryFlags(memory * 1024, flags)
    ret2 = dom.setMemoryFlags(memory * 1024, libvirt.VIR_DOMAIN_AFFECT_CURRENT)

    # return True if both calls succeeded
    return ret1 == ret2 == 0


def setvcpus(vm_, vcpus, config=False):
    '''
    Changes the number of vcpus allocated to a VM. The VM must be shut
    down for this to work.

    vcpus is an int representing the number to be assigned
    If config is True then we ask libvirt to modify the config as well

    CLI Example:

    .. code-block:: bash

        salt '*' virt.setvcpus myvm 2
    '''
    if vm_state(vm_) != 'shutdown':
        return False

    dom = _get_dom(vm_)

    # see notes in setmem
    flags = libvirt.VIR_DOMAIN_VCPU_MAXIMUM
    if config:
        flags = flags | libvirt.VIR_DOMAIN_AFFECT_CONFIG

    ret1 = dom.setVcpusFlags(vcpus, flags)
    ret2 = dom.setVcpusFlags(vcpus, libvirt.VIR_DOMAIN_AFFECT_CURRENT)

    return ret1 == ret2 == 0


def freemem():
    '''
    Return an int representing the amount of memory that has not been given
    to virtual machines on this node

    CLI Example:

    .. code-block:: bash

        salt '*' virt.freemem
    '''
    conn = __get_conn()
    mem = conn.getInfo()[1]
    # Take off just enough to sustain the hypervisor
    mem -= 256
    for vm_ in list_vms():
        dom = _get_dom(vm_)
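        # dom.ID() is -1 for inactive domains; only running vms consume memory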
        if dom.ID() > 0:
            mem -= dom.info()[2] / 1024
    return mem


def freecpu():
    '''
    Return an int representing the number of unallocated cpus on this
    hypervisor

    CLI Example:

    .. code-block:: bash

        salt '*' virt.freecpu
    '''
    conn = __get_conn()
    cpus = conn.getInfo()[2]
    for vm_ in list_vms():
        dom = _get_dom(vm_)
        if dom.ID() > 0:
            cpus -= dom.info()[3]
    return cpus


def full_info():
    '''
    Return the node_info, vm_info and freemem

    CLI Example:

    .. code-block:: bash

        salt '*' virt.full_info
    '''
    return {'freecpu': freecpu(),
            'freemem': freemem(),
            'node_info': node_info(),
            'vm_info': vm_info()}


def get_xml(vm_):
    '''
    Returns the XML for a given vm

    CLI Example:

    .. code-block:: bash

        salt '*' virt.get_xml <vm name>
    '''
    dom = _get_dom(vm_)
    return dom.XMLDesc(0)


def get_profiles(hypervisor=None):
    '''
    Return the virt profiles for hypervisor.

    Currently there are profiles for:

    - nic
    - disk

    CLI Example:

    .. code-block:: bash

        salt '*' virt.get_profiles
        salt '*' virt.get_profiles hypervisor=esxi
    '''
    ret = {}
    if not hypervisor:
        hypervisor = __salt__['config.get']('libvirt:hypervisor', VIRT_DEFAULT_HYPER)
    virtconf = __salt__['config.get']('virt', {})
    for typ in ['disk', 'nic']:
        _func = getattr(sys.modules[__name__], '_{0}_profile'.format(typ))
        ret[typ] = {'default': _func('default', hypervisor)}
        if typ in virtconf:
            ret.setdefault(typ, {})
            for prf in virtconf[typ]:
                ret[typ][prf] = _func(prf, hypervisor)
    return ret


def shutdown(vm_):
    '''
    Send a soft shutdown signal to the named vm

    CLI Example:

    .. code-block:: bash

        salt '*' virt.shutdown <vm name>
    '''
    dom = _get_dom(vm_)
    return dom.shutdown() == 0


def pause(vm_):
    '''
    Pause the named vm

    CLI Example:

    .. code-block:: bash

        salt '*' virt.pause <vm name>
    '''
    dom = _get_dom(vm_)
    return dom.suspend() == 0


def resume(vm_):
    '''
    Resume the named vm

    CLI Example:

    .. code-block:: bash

        salt '*' virt.resume <vm name>
    '''
    dom = _get_dom(vm_)
    return dom.resume() == 0


def create(vm_):
    '''
    Start a defined domain

    CLI Example:

    .. code-block:: bash

        salt '*' virt.create <vm name>
    '''
    dom = _get_dom(vm_)
    return dom.create() == 0


def start(vm_):
    '''
    Alias for the obscurely named 'create' function

    CLI Example:

    .. code-block:: bash

        salt '*' virt.start <vm name>
    '''
    return create(vm_)


def stop(vm_):
    '''
    Alias for the obscurely named 'destroy' function

    CLI Example:

    .. code-block:: bash

        salt '*' virt.stop <vm name>
    '''
    return destroy(vm_)


def reboot(vm_):
    '''
    Reboot a domain via ACPI request

    CLI Example:

    .. code-block:: bash

        salt '*' virt.reboot <vm name>
    '''
    dom = _get_dom(vm_)

    # reboot has a few modes of operation, passing 0 in means the
    # hypervisor will pick the best method for rebooting
    return dom.reboot(0) == 0


def reset(vm_):
    '''
    Reset a VM by emulating the reset button on a physical machine

    CLI Example:

    .. code-block:: bash

        salt '*' virt.reset <vm name>
    '''
    dom = _get_dom(vm_)

    # reset takes a flag, like reboot, but it is not yet used
    # so we just pass in 0
    # see: http://libvirt.org/html/libvirt-libvirt.html#virDomainReset
    return dom.reset(0) == 0


def ctrl_alt_del(vm_):
    '''
    Sends CTRL+ALT+DEL to a VM

    CLI Example:

    .. code-block:: bash

        salt '*' virt.ctrl_alt_del <vm name>
    '''
    dom = _get_dom(vm_)
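    # sendKey(codeset, holdtime, keycodes, nkeycodes, flags): codeset 0 is
    # the linux keycode set, where 29, 56 and 111 are KEY_LEFTCTRL,
    # KEY_LEFTALT and KEY_DELETE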
    return dom.sendKey(0, 0, [29, 56, 111], 3, 0) == 0


def create_xml_str(xml):
    '''
    Start a domain based on the XML passed to the function

    CLI Example:

    .. code-block:: bash

        salt '*' virt.create_xml_str <XML in string format>
    '''
    conn = __get_conn()
    return conn.createXML(xml, 0) is not None


def create_xml_path(path):
    '''
    Start a domain based on the XML-file path passed to the function

    CLI Example:

    .. code-block:: bash

        salt '*' virt.create_xml_path <path to XML file on the node>
    '''
    if not os.path.isfile(path):
        return False
    return create_xml_str(salt.utils.fopen(path, 'r').read())


def define_xml_str(xml):
    '''
    Define a domain based on the XML passed to the function

    CLI Example:

    .. code-block:: bash

        salt '*' virt.define_xml_str <XML in string format>
    '''
    conn = __get_conn()
    return conn.defineXML(xml) is not None


def define_xml_path(path):
    '''
    Define a domain based on the XML-file path passed to the function

    CLI Example:

    .. code-block:: bash

        salt '*' virt.define_xml_path <path to XML file on the node>
    '''
    if not os.path.isfile(path):
        return False
    return define_xml_str(salt.utils.fopen(path, 'r').read())


def define_vol_xml_str(xml):
    '''
    Define a volume based on the XML passed to the function

    CLI Example:

    .. code-block:: bash

        salt '*' virt.define_vol_xml_str <XML in string format>
    '''
    poolname = __salt__['config.get']('libvirt:storagepool', 'default')
    conn = __get_conn()
    pool = conn.storagePoolLookupByName(str(poolname))
    return pool.createXML(xml, 0) is not None


def define_vol_xml_path(path):
    '''
    Define a volume based on the XML-file path passed to the function

    CLI Example:

    .. code-block:: bash

        salt '*' virt.define_vol_xml_path <path to XML file on the node>
    '''
    if not os.path.isfile(path):
        return False
    return define_vol_xml_str(salt.utils.fopen(path, 'r').read())


def migrate_non_shared(vm_, target, ssh=False):
    '''
    Attempt to execute non-shared storage "all" migration

    CLI Example:

    .. code-block:: bash

        salt '*' virt.migrate_non_shared <vm name> <target hypervisor>
    '''
    cmd = _get_migrate_command() + ' --copy-storage-all ' + vm_\
        + _get_target(target, ssh)

    return subprocess.Popen(cmd,
                            shell=True,
                            stdout=subprocess.PIPE).communicate()[0]


def migrate_non_shared_inc(vm_, target, ssh=False):
    '''
    Attempt to execute non-shared storage "inc" (incremental) migration

    CLI Example:

    .. code-block:: bash

        salt '*' virt.migrate_non_shared_inc <vm name> <target hypervisor>
    '''
    cmd = _get_migrate_command() + ' --copy-storage-inc ' + vm_\
        + _get_target(target, ssh)

    return subprocess.Popen(cmd,
                            shell=True,
                            stdout=subprocess.PIPE).communicate()[0]


def migrate(vm_, target, ssh=False):
    '''
    Shared storage migration

    CLI Example:

    .. code-block:: bash

        salt '*' virt.migrate <vm name> <target hypervisor>
    '''
    cmd = _get_migrate_command() + ' ' + vm_\
        + _get_target(target, ssh)

    return subprocess.Popen(cmd,
                            shell=True,
                            stdout=subprocess.PIPE).communicate()[0]


def seed_non_shared_migrate(disks, force=False):
    '''
    Non-shared migration requires that the disks be present on the
    migration destination; pass the disks information via this function
    to the migration destination before executing the migration.

    CLI Example:

    .. code-block:: bash

        salt '*' virt.seed_non_shared_migrate <disks>
    '''
    for _, data in disks.items():
        fn_ = data['file']
        form = data['file format']
        size = data['virtual size'].split()[1][1:]
        if os.path.isfile(fn_) and not force:
            # the target exists, check to see if it is compatible
            pre = yaml.safe_load(subprocess.Popen('qemu-img info ' + fn_,
                                                  shell=True,
                                                  stdout=subprocess.PIPE).communicate()[0])
            if pre['file format'] != data['file format']\
                    and pre['virtual size'] != data['virtual size']:
                return False
        if not os.path.isdir(os.path.dirname(fn_)):
            os.makedirs(os.path.dirname(fn_))
        if os.path.isfile(fn_):
            os.remove(fn_)
        cmd = 'qemu-img create -f ' + form + ' ' + fn_ + ' ' + size
        subprocess.call(cmd, shell=True)
        creds = _libvirt_creds()
        cmd = 'chown ' + creds['user'] + ':' + creds['group'] + ' ' + fn_
        subprocess.call(cmd, shell=True)
    return True


def set_autostart(vm_, state='on'):
    '''
    Set the autostart flag on a VM so that the VM will start with the host
    system on reboot.

    CLI Example:

    .. code-block:: bash

        salt "*" virt.set_autostart <vm name> <on | off>
    '''

    dom = _get_dom(vm_)

    if state == 'on':
        return dom.setAutostart(1) == 0

    elif state == 'off':
        return dom.setAutostart(0) == 0

    else:
        # return False if state is set to something other than on or off
        return False


def destroy(vm_):
    '''
    Hard power down the virtual machine; this is equivalent to pulling
    the power.

    CLI Example:

    .. code-block:: bash

        salt '*' virt.destroy <vm name>
    '''
    dom = _get_dom(vm_)
    return dom.destroy() == 0


def undefine(vm_):
    '''
    Remove a defined vm. This does not purge the virtual machine image,
    and it only works if the vm is powered down.

    CLI Example:

    .. code-block:: bash

        salt '*' virt.undefine <vm name>
    '''
    dom = _get_dom(vm_)
    return dom.undefine() == 0


def purge(vm_, dirs=False):
    '''
    Recursively destroy and delete a virtual machine. Pass dirs=True to
    also delete the directories containing the virtual machine disk images -
    USE WITH EXTREME CAUTION!

    CLI Example:

    .. code-block:: bash

        salt '*' virt.purge <vm name>
    '''
    disks = get_disks(vm_)
    try:
        if not destroy(vm_):
            return False
    except libvirt.libvirtError:
        # This is thrown if the machine is already shut down
        pass
    directories = set()
    for disk in disks:
        os.remove(disks[disk]['file'])
        directories.add(os.path.dirname(disks[disk]['file']))
    if dirs:
        for dir_ in directories:
            shutil.rmtree(dir_)
    undefine(vm_)
    return True


def virt_type():
    '''
    Returns the virtual machine type as a string

    CLI Example:

    .. code-block:: bash

        salt '*' virt.virt_type
    '''
    return __grains__['virtual']


def is_kvm_hyper():
    '''
    Returns a bool whether or not this node is a KVM hypervisor

    CLI Example:

    .. code-block:: bash

        salt '*' virt.is_kvm_hyper
    '''
    try:
        if 'kvm_' not in salt.utils.fopen('/proc/modules').read():
            return False
    except IOError:
        # No /proc/modules? Are we on Windows? Or Solaris?
        return False
    return 'libvirtd' in __salt__['cmd.run'](__grains__['ps'])


def is_xen_hyper():
    '''
    Returns a bool whether or not this node is a XEN hypervisor

    CLI Example:

    .. code-block:: bash

        salt '*' virt.is_xen_hyper
    '''
    try:
        if __grains__['virtual_subtype'] != 'Xen Dom0':
            return False
    except KeyError:
        # virtual_subtype isn't set everywhere.
        return False
    try:
        if 'xen_' not in salt.utils.fopen('/proc/modules').read():
            return False
    except IOError:
        # No /proc/modules? Are we on Windows? Or Solaris?
        return False
    return 'libvirtd' in __salt__['cmd.run'](__grains__['ps'])


def is_hyper():
    '''
    Returns a bool whether or not this node is a hypervisor of any kind

    CLI Example:

    .. code-block:: bash

        salt '*' virt.is_hyper
    '''
    try:
        import libvirt  # pylint: disable=import-error
    except ImportError:
        # not a usable hypervisor without libvirt module
        return False
    return is_xen_hyper() or is_kvm_hyper()


def vm_cputime(vm_=None):
    '''
    Return cputime used by the vms on this hyper in a
    dict of dicts:

    .. code-block:: python

        {
            'your-vm': {
                'cputime': <int>,
                'cputime_percent': <int>
            },
            ...
        }

    If you pass a VM name in as an argument then it will return info
    for just the named VM, otherwise it will return all VMs.

    CLI Example:

    .. code-block:: bash

        salt '*' virt.vm_cputime
    '''
    host_cpus = __get_conn().getInfo()[2]

    def _info(vm_):
        dom = _get_dom(vm_)
        raw = dom.info()
        vcpus = int(raw[3])
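        # raw[4] is the domain's cumulative CPU time in nanoseconds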
        cputime = int(raw[4])
        cputime_percent = 0
        if cputime:
            # Divide by vcpus to always return a number between 0 and 100
            cputime_percent = (1.0e-7 * cputime / host_cpus) / vcpus
        return {
            'cputime': int(raw[4]),
            'cputime_percent': int('{0:.0f}'.format(cputime_percent))
        }
    info = {}
    if vm_:
        info[vm_] = _info(vm_)
    else:
        for vm_ in list_vms():
            info[vm_] = _info(vm_)
    return info


def vm_netstats(vm_=None):
    '''
    Return combined network counters used by the vms on this hyper in a
    dict of dicts:

    .. code-block:: python

        {
            'your-vm': {
                'rx_bytes'   : 0,
                'rx_packets' : 0,
                'rx_errs'    : 0,
                'rx_drop'    : 0,
                'tx_bytes'   : 0,
                'tx_packets' : 0,
                'tx_errs'    : 0,
                'tx_drop'    : 0
            },
            ...
        }

    If you pass a VM name in as an argument then it will return info
    for just the named VM, otherwise it will return all VMs.

    CLI Example:

    .. code-block:: bash

        salt '*' virt.vm_netstats
    '''
    def _info(vm_):
        dom = _get_dom(vm_)
        nics = get_nics(vm_)
        ret = {
            'rx_bytes': 0,
            'rx_packets': 0,
            'rx_errs': 0,
            'rx_drop': 0,
            'tx_bytes': 0,
            'tx_packets': 0,
            'tx_errs': 0,
            'tx_drop': 0
        }
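        # dom.interfaceStats() returns an 8-tuple in the same order as the
        # counters above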
        for attrs in six.itervalues(nics):
            if 'target' in attrs:
                dev = attrs['target']
                stats = dom.interfaceStats(dev)
                ret['rx_bytes'] += stats[0]
                ret['rx_packets'] += stats[1]
                ret['rx_errs'] += stats[2]
                ret['rx_drop'] += stats[3]
                ret['tx_bytes'] += stats[4]
                ret['tx_packets'] += stats[5]
                ret['tx_errs'] += stats[6]
                ret['tx_drop'] += stats[7]

        return ret
    info = {}
    if vm_:
        info[vm_] = _info(vm_)
    else:
        for vm_ in list_vms():
            info[vm_] = _info(vm_)
    return info


def vm_diskstats(vm_=None):
    '''
    Return disk usage counters used by the vms on this hyper in a
    dict of dicts:

    .. code-block:: python

        {
            'your-vm': {
                'rd_req'   : 0,
                'rd_bytes' : 0,
                'wr_req'   : 0,
                'wr_bytes' : 0,
                'errs'     : 0
            },
            ...
        }

    If you pass a VM name in as an argument then it will return info
    for just the named VM, otherwise it will return all VMs.

    CLI Example:

    .. code-block:: bash

        salt '*' virt.vm_diskstats
    '''
    def get_disk_devs(vm_):
        doc = minidom.parse(_StringIO(get_xml(vm_)))
        disks = []
        for elem in doc.getElementsByTagName('disk'):
            targets = elem.getElementsByTagName('target')
            target = targets[0]
            disks.append(target.getAttribute('dev'))
        return disks

    def _info(vm_):
        dom = _get_dom(vm_)
        # Do not use get_disks, since it uses qemu-img and is very slow
        # and unsuitable for any sort of real time statistics
        disks = get_disk_devs(vm_)
        ret = {'rd_req': 0,
               'rd_bytes': 0,
               'wr_req': 0,
               'wr_bytes': 0,
               'errs': 0
               }
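        # dom.blockStats() returns (rd_req, rd_bytes, wr_req, wr_bytes, errs)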
        for disk in disks:
            stats = dom.blockStats(disk)
            ret['rd_req'] += stats[0]
            ret['rd_bytes'] += stats[1]
            ret['wr_req'] += stats[2]
            ret['wr_bytes'] += stats[3]
            ret['errs'] += stats[4]

        return ret
    info = {}
    if vm_:
        info[vm_] = _info(vm_)
    else:
        # Can not run function blockStats on inactive VMs
        for vm_ in list_active_vms():
            info[vm_] = _info(vm_)
    return info