Merge "Add gitfs support"
diff --git a/README.rst b/README.rst
index d18f053..e305eae 100644
--- a/README.rst
+++ b/README.rst
@@ -410,6 +410,16 @@
 
 .. code-block:: yaml
 
+    _param:
+      private-ipv4: &private-ipv4
+      - id: private-ipv4
+        type: ipv4
+        link: ens2
+        netmask: 255.255.255.0
+        routes:
+        - gateway: 192.168.0.1
+          netmask: 0.0.0.0
+          network: 0.0.0.0
     virt:
       disk:
         three_disks:
@@ -456,6 +466,24 @@
             engine: virt
             # Option to set rng globally
             rng: false
+            cloud_init:
+              user_data:
+                disable_ec2_metadata: true
+                resize_rootfs: true
+                timezone: UTC
+                ssh_deletekeys: true
+                ssh_genkeytypes: ['rsa', 'dsa', 'ecdsa']
+                ssh_svcname: ssh
+                locale: en_US.UTF-8
+                disable_root: true
+                apt_preserve_sources_list: false
+                apt:
+                  sources_list: ""
+                  sources:
+                    ubuntu.list:
+                      source: ${linux:system:repo:ubuntu:source}
+                    mcp_saltstack.list:
+                      source: ${linux:system:repo:mcp_saltstack:source}
             node:
               ubuntu1:
                 provider: node01.domain.com
@@ -469,9 +497,23 @@
                   rate:
                     period: '1800'
                     bytes: '1500'
+                # Custom per-node loader definition (e.g. for AArch64 UEFI)
+                loader:
+                  readonly: "yes"
+                  type: pflash
+                  path: /usr/share/AAVMF/AAVMF_CODE.fd
+                machine: virt-2.11  # Custom per-node virt machine type
+                cpu_mode: host-passthrough
                 mac:
                   nic01: AC:DE:48:AA:AA:AA
                   nic02: AC:DE:48:AA:AA:BB
+                # Per-node network_data affects the hostname set during
+                # boot and allows manual interface configuration
+                cloud_init:
+                  network_data:
+                    networks:
+                    - <<: *private-ipv4
+                      ip_address: 192.168.0.161
 
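+The ``private-ipv4`` definition above is reused through a YAML anchor:
+each node's ``network_data`` pulls it in with the merge key
+(``<<: *private-ipv4``) and only overrides ``ip_address``.
+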
To enable the Redis plugin for the Salt caching subsystem, use the
pillar structure below:
diff --git a/_modules/cfgdrive.py b/_modules/cfgdrive.py
new file mode 100644
index 0000000..bc76b77
--- /dev/null
+++ b/_modules/cfgdrive.py
@@ -0,0 +1,114 @@
+# -*- coding: utf-8 -*-
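+'''
+Generate an OpenStack-style config drive: an ISO9660 image (volume label
+``config-2``) that seeds a VM with cloud-init metadata, user data, and
+network configuration.
+'''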
+
+import json
+import logging
+import os
+import shutil
+import tempfile
+
+import six
+import yaml
+
+from oslo_utils import uuidutils
+from oslo_utils import fileutils
+from oslo_concurrency import processutils
+
+class ConfigDriveBuilder(object):
+    """Build config drives, optionally as a context manager."""
+
+    def __init__(self, image_file):
+        self.image_file = image_file
+        self.mdfiles = []
+
+    def __enter__(self):
+        fileutils.delete_if_exists(self.image_file)
+        return self
+
+    def __exit__(self, exctype, excval, exctb):
+        self.make_drive()
+
+    def add_file(self, path, data):
+        self.mdfiles.append((path, data))
+
+    def _add_file(self, basedir, path, data):
+        filepath = os.path.join(basedir, path)
+        dirname = os.path.dirname(filepath)
+        fileutils.ensure_tree(dirname)
+        with open(filepath, 'wb') as f:
+            if isinstance(data, six.text_type):
+                data = data.encode('utf-8')
+            f.write(data)
+
+    def _write_md_files(self, basedir):
+        for data in self.mdfiles:
+            self._add_file(basedir, data[0], data[1])
+
+    def _make_iso9660(self, path, tmpdir):
+        processutils.execute('mkisofs',
+            '-o', path,
+            '-ldots',
+            '-allow-lowercase',
+            '-allow-multidot',
+            '-l',
+            '-V', 'config-2',
+            '-r',
+            '-J',
+            '-quiet',
+            tmpdir,
+            attempts=1,
+            run_as_root=False)
+
+    def make_drive(self):
+        """Make the config drive.
+
+        :raises ProcessExecutionError if a helper process has failed.
+        """
+        tmpdir = tempfile.mkdtemp()
+        try:
+            self._write_md_files(tmpdir)
+            self._make_iso9660(self.image_file, tmpdir)
+        finally:
+            shutil.rmtree(tmpdir)
+
+
+def generate(dst,
+             hostname,
+             domainname,
+             instance_id=None,
+             user_data=None,
+             network_data=None,
+             saltconfig=None):
+
+    '''Generate a config drive.
+
+    :param dst: destination file for the config drive.
+    :param hostname: hostname of the instance.
+    :param domainname: domain of the instance.
+    :param instance_id: UUID of the instance.
+    :param user_data: custom user data dictionary. type: json
+    :param network_data: custom network info dictionary. type: json
+    :param saltconfig: salt minion configuration. type: json
+
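+    CLI Example (illustrative):
+
+    .. code-block:: bash
+
+        salt 'hypervisor01' cfgdrive.generate /tmp/config-2.iso node01 domain.com
+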
+    '''
+
+    instance_md = {}
+    instance_md['uuid'] = instance_id or uuidutils.generate_uuid()
+    instance_md['hostname'] = '%s.%s' % (hostname, domainname)
+    instance_md['name'] = hostname
+
+    if user_data:
+        user_data = '#cloud-config\n\n' + yaml.dump(
+            yaml.safe_load(user_data), default_flow_style=False)
+        if saltconfig:
+            user_data += yaml.dump(
+                yaml.safe_load(str(saltconfig)), default_flow_style=False)
+
+    data = json.dumps(instance_md)
+
+    with ConfigDriveBuilder(dst) as cfgdrive:
+        cfgdrive.add_file('openstack/latest/meta_data.json', data)
+        if user_data:
+            cfgdrive.add_file('openstack/latest/user_data', user_data)
+        if network_data:
+            cfgdrive.add_file('openstack/latest/network_data.json', network_data)
+        cfgdrive.add_file('openstack/latest/vendor_data.json', '{}')
+        cfgdrive.add_file('openstack/latest/vendor_data2.json', '{}')
diff --git a/_modules/seedng.py b/_modules/seedng.py
index 1d93c5d..4d67e27 100644
--- a/_modules/seedng.py
+++ b/_modules/seedng.py
@@ -91,8 +91,16 @@
         __salt__['mount.umount'](mpt, util='qemu_nbd')
 
 
-def apply_(path, id_=None, config=None, approve_key=True, install=True,
-           prep_install=False, pub_key=None, priv_key=None, mount_point=None):
+def apply_(path,
+           id_=None,
+           config=None,
+           approve_key=True,
+           install=True,
+           prep_install=False,
+           pub_key=None,
+           priv_key=None,
+           mount_point=None):
     '''
     Seed a location (disk image, directory, or block device) with the
     minion config, approve the minion's key, and/or install salt-minion.
diff --git a/_modules/virtng.py b/_modules/virtng.py
index ce09508..b405d59 100644
--- a/_modules/virtng.py
+++ b/_modules/virtng.py
@@ -20,6 +20,7 @@
 
 # Import third party libs
 import yaml
+import json
 import jinja2
 import jinja2.exceptions
 import salt.ext.six as six
@@ -530,6 +531,9 @@
          disk='default',
          saltenv='base',
          rng=None,
+         loader=None,
+         machine=None,
+         cpu_mode=None,
          **kwargs):
     '''
     Initialize a new vm
@@ -558,6 +562,9 @@
             diskp[0][disk_name]['image'] = image
 
     # Create multiple disks, empty or from specified images.
+    cloud_init = None
+    cfg_drive = None
+
     for disk in diskp:
         log.debug("Creating disk for VM [ {0} ]: {1}".format(name, disk))
 
@@ -618,13 +625,39 @@
                         raise CommandExecutionError('problem while copying image. {0} - {1}'.format(args['image'], e))
 
                     if kwargs.get('seed'):
-                        install = kwargs.get('install', True)
-                        seed_cmd = kwargs.get('seed_cmd', 'seedng.apply')
+                        seed_cmd = kwargs.get('seed_cmd', 'seedng.apply')
+                        cloud_init = kwargs.get('cloud_init', None)
+                        master = __salt__['config.option']('master')
+                        cfg_drive = os.path.join(img_dir, 'config-2.iso')
 
-                        __salt__[seed_cmd](img_dest,
-                                           id_=name,
-                                           config=kwargs.get('config'),
-                                           install=install)
+                        if cloud_init:
+                            _tmp = name.split('.')
+
+                            try:
+                                user_data = json.dumps(cloud_init["user_data"])
+                            except KeyError:
+                                user_data = None
+
+                            try:
+                                network_data = json.dumps(cloud_init["network_data"])
+                            except KeyError:
+                                network_data = None
+
+                            __salt__["cfgdrive.generate"](
+                                dst=cfg_drive,
+                                hostname=_tmp.pop(0),
+                                domainname='.'.join(_tmp),
+                                user_data=user_data,
+                                network_data=network_data,
+                                saltconfig={"salt_minion": {"conf": {"master": master, "id": name}}},
+                            )
+                        else:
+                            __salt__[seed_cmd](
+                                path=img_dest,
+                                id_=name,
+                                config=kwargs.get('config'),
+                                install=kwargs.get('install', True),
+                            )
                 else:
                     # Create empty disk
                     try:
@@ -649,6 +682,57 @@
 
     xml = _gen_xml(name, cpu, mem, diskp, nicp, hypervisor, **kwargs)
 
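+    # Attach the generated config drive to the domain XML as a read-only
+    # IDE cdrom, since the libvirt_domain.jinja template does not know
+    # about it. The injected element is equivalent to:
+    #   <disk type="file" device="cdrom">
+    #     <readonly/>
+    #     <driver name="qemu" type="raw"/>
+    #     <target dev="hdc" bus="ide"/>
+    #     <source file="/path/to/config-2.iso"/>
+    #   </disk>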
+    if cloud_init and cfg_drive:
+        xml_doc = minidom.parseString(xml)
+        iso_xml = xml_doc.createElement("disk")
+        iso_xml.setAttribute("type", "file")
+        iso_xml.setAttribute("device", "cdrom")
+        iso_xml.appendChild(xml_doc.createElement("readonly"))
+        driver = xml_doc.createElement("driver")
+        driver.setAttribute("name", "qemu")
+        driver.setAttribute("type", "raw")
+        target = xml_doc.createElement("target")
+        target.setAttribute("dev", "hdc")
+        target.setAttribute("bus", "ide")
+        source = xml_doc.createElement("source")
+        source.setAttribute("file", cfg_drive)
+        iso_xml.appendChild(driver)
+        iso_xml.appendChild(target)
+        iso_xml.appendChild(source)
+        xml_doc.getElementsByTagName("domain")[0].getElementsByTagName("devices")[0].appendChild(iso_xml)
+        xml = xml_doc.toxml()
+
+    # TODO: Remove this code and refactor module, when salt-common would have updated libvirt_domain.jinja template
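+    # e.g. cpu_mode: host-passthrough injects <cpu mode="host-passthrough"/>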
+    if cpu_mode:
+        xml_doc = minidom.parseString(xml)
+        cpu_xml = xml_doc.createElement("cpu")
+        cpu_xml.setAttribute('mode', cpu_mode)
+        xml_doc.getElementsByTagName("domain")[0].appendChild(cpu_xml)
+        xml = xml_doc.toxml()
+
+    # TODO: Remove this code and refactor module, when salt-common would have updated libvirt_domain.jinja template
+    if machine:
+        xml_doc = minidom.parseString(xml)
+        os_xml = xml_doc.getElementsByTagName("domain")[0].getElementsByTagName("os")[0]
+        os_xml.getElementsByTagName("type")[0].setAttribute('machine', machine)
+        xml = xml_doc.toxml()
+
+    # TODO: Remove this code and refactor module, when salt-common would have updated libvirt_domain.jinja template
+    if loader and 'path' not in loader:
+        log.info('`path` is a required property of `loader` and was not '
+                 'found. Skipping loader configuration')
+        loader = None
+    elif loader:
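+        # Every loader key except `path` becomes an XML attribute and
+        # `path` becomes the element text, e.g.:
+        #   <loader readonly="yes" type="pflash">/usr/share/AAVMF/AAVMF_CODE.fd</loader>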
+        xml_doc = minidom.parseString(xml)
+        loader_xml = xml_doc.createElement("loader")
+        for key, val in loader.items():
+            if key == 'path':
+                continue
+            loader_xml.setAttribute(key, val)
+        loader_path_xml = xml_doc.createTextNode(loader['path'])
+        loader_xml.appendChild(loader_path_xml)
+        xml_doc.getElementsByTagName("domain")[0].getElementsByTagName("os")[0].appendChild(loader_xml)
+        xml = xml_doc.toxml()
+
     # TODO: Remove this code and refactor module, when salt-common would have updated libvirt_domain.jinja template
     for _nic in nicp:
         if _nic['virtualport']:
@@ -1552,7 +1636,11 @@
         salt '*' virtng.undefine <vm name>
     '''
     dom = _get_dom(vm_)
-    return dom.undefine() == 0
+    if getattr(libvirt, 'VIR_DOMAIN_UNDEFINE_NVRAM', False):
+        # This one is only in 1.2.8+
+        return dom.undefineFlags(libvirt.VIR_DOMAIN_UNDEFINE_NVRAM) == 0
+    else:
+        return dom.undefine() == 0
 
 
 def purge(vm_, dirs=False):
diff --git a/salt/control/virt.sls b/salt/control/virt.sls
index a2e56ff..731efeb 100644
--- a/salt/control/virt.sls
+++ b/salt/control/virt.sls
@@ -42,6 +42,9 @@
 {%- if node.provider == grains.id %}
 
 {%- set size = control.size.get(node.size) %}
+{%- set cluster_cloud_init = cluster.get('cloud_init', {}) %}
+{%- set node_cloud_init = node.get('cloud_init', {}) %}
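+{#- Node-level cloud_init values override the cluster-level defaults #}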
+{%- set cloud_init = salt['grains.filter_by']({'default': cluster_cloud_init}, merge=node_cloud_init) %}
 
 salt_control_virt_{{ cluster_name }}_{{ node_name }}:
   module.run:
@@ -58,7 +61,19 @@
   {%- elif rng is defined %}
   - rng: {{ rng }}
   {%- endif %}
+  {%- if node.loader is defined %}
+  - loader: {{ node.loader }}
+  {%- endif %}
+  {%- if node.machine is defined %}
+  - machine: {{ node.machine }}
+  {%- endif %}
+  {%- if node.cpu_mode is defined %}
+  - cpu_mode: {{ node.cpu_mode }}
+  {%- endif %}
   - kwargs:
+      {%- if cloud_init %}
+      cloud_init: {{ cloud_init }}
+      {%- endif %}
       seed: True
       serial_type: pty
       console: True
diff --git a/tests/pillar/control_virt_custom.sls b/tests/pillar/control_virt_custom.sls
index 71cf37f..833b5a2 100644
--- a/tests/pillar/control_virt_custom.sls
+++ b/tests/pillar/control_virt_custom.sls
@@ -1,3 +1,13 @@
+_param:
+  private-ipv4: &private-ipv4
+  - id: private-ipv4
+    type: ipv4
+    link: ens2
+    netmask: 255.255.255.0
+    routes:
+    - gateway: 192.168.0.1
+      netmask: 0.0.0.0
+      network: 0.0.0.0
 virt:
   disk:
     three_disks:
@@ -57,21 +67,55 @@
         config:
           engine: salt
           host: master.domain.com
+        cloud_init:
+          user_data:
+            disable_ec2_metadata: true
+            resize_rootfs: true
+            timezone: UTC
+            ssh_deletekeys: true
+            ssh_genkeytypes: ['rsa', 'dsa', 'ecdsa']
+            ssh_svcname: ssh
+            locale: en_US.UTF-8
+            disable_root: true
+            apt_preserve_sources_list: false
+            apt:
+              sources_list: ""
+              sources:
+                ubuntu.list:
+                  source: ${linux:system:repo:ubuntu:source}
+                mcp_saltstack.list:
+                  source: ${linux:system:repo:mcp_saltstack:source}
+          network_data:
+            links:
+            - id: ens2
+              type: phy
+              name: ens2
         node:
           ubuntu1:
             provider: node01.domain.com
             image: ubuntu.qcow
             size: medium
             img_dest: /var/lib/libvirt/ssdimages
+            machine: virt-2.11
+            cpu_mode: host-passthrough
           ubuntu2:
             provider: node02.domain.com
             image: bubuntu.qcomw
             size: small
             img_dest: /var/lib/libvirt/hddimages
+            loader:
+              readonly: "yes"
+              type: pflash
+              path: /usr/share/AAVMF/AAVMF_CODE.fd
           ubuntu3:
             provider: node03.domain.com
             image: meowbuntu.qcom2
             size: medium_three_disks
+            cloud_init:
+              network_data:
+                networks:
+                - <<: *private-ipv4
+                  ip_address: 192.168.0.161
             rng:
               backend: /dev/urandom
               model: random