Merge "allow-multiple-ext-pillars-and-reclass-options"
diff --git a/.gitreview b/.gitreview
new file mode 100644
index 0000000..2924158
--- /dev/null
+++ b/.gitreview
@@ -0,0 +1,4 @@
+[gerrit]
+host=gerrit.mcp.mirantis.net
+port=29418
+project=salt-formulas/salt.git
diff --git a/.travis.yml b/.travis.yml
index 3cb3de2..eed1da2 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -17,17 +17,22 @@
   - bundle install
 
 env:
-    - PLATFORM=trevorj/salty-whales:trusty
-    - PLATFORM=trevorj/salty-whales:xenial
-
+  - PLATFORM=trevorj/salty-whales:trusty SUITE=minion-default
+  - PLATFORM=trevorj/salty-whales:xenial SUITE=minion-default
+  - PLATFORM=trevorj/salty-whales:trusty SUITE=master-default
+  - PLATFORM=trevorj/salty-whales:xenial SUITE=master-default
+  - PLATFORM=trevorj/salty-whales:trusty SUITE=control-default
+  - PLATFORM=trevorj/salty-whales:xenial SUITE=control-default
+  - PLATFORM=trevorj/salty-whales:trusty SUITE=minion-multi-master-failover
+  - PLATFORM=trevorj/salty-whales:xenial SUITE=minion-multi-master-failover
 
 before_script:
   - set -o pipefail
   - make test | tail
 
 script:
-  - test ! -e .kitchen.yml || bundle exec kitchen converge || true
-  - test ! -e .kitchen.yml || bundle exec kitchen verify -t tests/integration
+  - test ! -e .kitchen.yml || bundle exec kitchen converge ${SUITE} || true
+  - test ! -e .kitchen.yml || bundle exec kitchen verify ${SUITE} -t tests/integration
 
 notifications:
   webhooks:
diff --git a/README.rst b/README.rst
index e4317ee..d3a64b0 100644
--- a/README.rst
+++ b/README.rst
@@ -150,6 +150,32 @@
               host: 127.0.0.1
               port: 9999
 
+
+Salt engine definition for the saltgraph metadata collector
+
+.. code-block:: yaml
+
+    salt:
+      master:
+        engine:
+          graph_metadata:
+            engine: saltgraph
+            host: 127.0.0.1
+            port: 5432
+            user: salt
+            password: salt
+            database: salt
+
+Salt engine definition for forwarding Docker events to the Salt event bus
+
+.. code-block:: yaml
+
+    salt:
+      master:
+        engine:
+          docker_events:
+            docker_url: unix://var/run/docker.sock
+
 Salt master peer setup for remote certificate signing
 
 .. code-block:: yaml
@@ -195,7 +221,7 @@
 
     salt-call event.send 'salt/minion/install'
 
-Run any orchestration pipeline
+Run any defined orchestration pipeline
 
 .. code-block:: yaml
 
@@ -211,22 +237,50 @@
 
     salt-call event.send 'salt/orchestrate/start' "{'orchestrate': 'salt/orchestrate/infra_install.sls'}"
 
-Classify node after start
+Synchronise modules and pillars on minion start.
 
 .. code-block:: yaml
 
     salt:
       master:
         reactor:
-          reclass/minion/classify:
-          - salt://reclass/reactor/node_register.sls
+          'salt/minion/*/start':
+          - salt://salt/reactor/minion_start.sls
 
-Event to trigger the node classification
+Add and/or remove the minion key
+
+.. code-block:: yaml
+
+    salt:
+      master:
+        reactor:
+          salt/key/create:
+          - salt://salt/reactor/key_create.sls
+          salt/key/remove:
+          - salt://salt/reactor/key_remove.sls
+
+Event to trigger the key creation
 
 .. code-block:: bash
 
-    salt-call event.send 'reclass/minion/classify' "{'node_master_ip': '$config_host', 'node_ip': '${node_ip}', 'node_domain': '$node_domain', 'node_cluster': '$node_cluster', 'node_hostname': '$node_hostname', 'node_os': '$node_os'}"
+    salt-call event.send 'salt/key/create' \
+    > "{'node_id': 'id-of-minion', 'node_host': '172.16.10.100', 'orch_post_create': 'kubernetes.orchestrate.compute_install', 'post_create_pillar': {'node_name': 'id-of-minion'}}"
 
+.. note::
+
+    You can pass additional `orch_pre_create`, `orch_post_create`,
+    `orch_pre_remove` or `orch_post_remove` parameters in the event to call
+    extra orchestrate files. This can be useful, for example, for
+    registering/unregistering nodes in monitoring alarms or dashboards.
+
+    The key creation event needs to be run from a machine other than the
+    one being registered.
+
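+A hypothetical example that also calls a pre-create hook (the orchestrate
+file and pillar shown here are illustrative, not shipped with the formula):
+
+.. code-block:: bash
+
+    salt-call event.send 'salt/key/create' \
+    > "{'node_id': 'id-of-minion', 'node_host': '172.16.10.100', 'orch_pre_create': 'monitoring.orchestrate.register_node', 'orch_pre_create_pillar': {'node_name': 'id-of-minion'}}"
+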
+Event to trigger the key removal
+
+.. code-block:: bash
+
+    salt-call event.send 'salt/key/remove' "{'node_id': 'id-of-minion'}"
 
 
 Encrypted pillars
diff --git a/_engines/saltgraph.py b/_engines/saltgraph.py
new file mode 100644
index 0000000..0287449
--- /dev/null
+++ b/_engines/saltgraph.py
@@ -0,0 +1,217 @@
+# -*- coding: utf-8 -*-
+'''
+Saltgraph engine for catching returns of state runs, parsing them
+and passing them to a flat database of the latest Salt resource runs.
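+
+The engine assumes pre-created Postgres tables (columns inferred from the
+queries below): salt_resources(id, resource_id, host, service, module, fun,
+status, last_ret, alter_time) and salt_resources_meta(id, options, alter_time).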
+'''
+
+# Import python libs
+from __future__ import absolute_import
+import datetime
+import json
+import logging
+
+# Import salt libs
+import salt.utils.event
+
+# Import third party libs
+try:
+    import psycopg2
+    import psycopg2.extras
+    HAS_POSTGRES = True
+except ImportError:
+    HAS_POSTGRES = False
+
+__virtualname__ = 'saltgraph'
+log = logging.getLogger(__name__)
+
+
+def __virtual__():
+    if not HAS_POSTGRES:
+        return False, 'Could not import saltgraph engine. python-psycopg2 is not installed.'
+    return __virtualname__
+
+
+def _get_conn(options={}):
+    '''
+    Return a postgres connection.
+    '''
+    host = options.get('host', '127.0.0.1')
+    user = options.get('user', 'salt')
+    passwd = options.get('passwd', 'salt')
+    datab = options.get('db', 'salt')
+    port = options.get('port', 5432)
+
+    return psycopg2.connect(
+            host=host,
+            user=user,
+            password=passwd,
+            database=datab,
+            port=port)
+
+
+def _close_conn(conn):
+    '''
+    Close the Postgres connection
+    '''
+    conn.commit()
+    conn.close()
+
+
+def _get_lowstate_data(options={}):
+    '''
+    Fetch ``state.show_lowstate`` data from all minions and upsert the
+    resources into the ``salt_resources`` table.
+    '''
+    conn = _get_conn(options)
+    cur = conn.cursor()
+
+    try:
+        # this can only be run on the Salt master's own minion
+        lowstate_req = __salt__['saltutil.cmd']('*', 'state.show_lowstate', **{'timeout': 15, 'concurrent': True, 'queue': True})
+    except Exception:
+        lowstate_req = {}
+
+    for minion, lowstate_ret in lowstate_req.items():
+        if lowstate_ret.get('retcode') != 0:
+            continue
+        for resource in lowstate_ret.get('ret', []):
+            low_sql = '''INSERT INTO salt_resources
+                         (id, resource_id, host, service, module, fun, status)
+                         VALUES (%s, %s, %s, %s, %s, %s, %s)
+                         ON CONFLICT (id) DO UPDATE
+                           SET resource_id = excluded.resource_id,
+                               host = excluded.host,
+                               service = excluded.service,
+                               module = excluded.module,
+                               fun = excluded.fun,
+                               alter_time = current_timestamp'''
+
+            rid = "%s|%s" % (minion, resource.get('__id__'))
+            cur.execute(
+                low_sql, (
+                    rid,
+                    resource.get('__id__'),
+                    minion,
+                    resource.get('__sls__'),
+                    resource.get('state') if 'state' in resource else resource.get('module'),
+                    resource.get('fun'),
+                    'unknown'
+                )
+            )
+            conn.commit()
+
+    if lowstate_req:
+        meta_sql = '''INSERT INTO salt_resources_meta
+                      (id, options)
+                      VALUES (%s, %s)
+                      ON CONFLICT (id) DO UPDATE
+                        SET options = excluded.options,
+                            alter_time = current_timestamp'''
+
+        cur.execute(
+            meta_sql, (
+                'lowstate_data',
+                '{}'
+            )
+        )
+    _close_conn(conn)
+
+
+def _up_to_date(options={}):
+    '''
+    Return True if the stored lowstate data are younger than one day.
+    '''
+    conn = _get_conn(options)
+    cur = conn.cursor()
+    #cur_dict = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
+
+    ret = False
+
+    # if lowstate data are older than 1 day, refresh them
+    cur.execute('SELECT alter_time FROM salt_resources_meta WHERE id = %s', ('lowstate_data',))
+    alter_time = cur.fetchone()
+
+    if alter_time:
+        now = datetime.datetime.utcnow()
+        day = datetime.timedelta(days=1)
+        time_diff = now - alter_time[0].replace(tzinfo=None)
+        if time_diff < day:
+            ret = True
+
+    _close_conn(conn)
+
+    return ret
+
+
+def _update_resources(event, options):
+    '''
+    Update resource statuses in ``salt_resources`` from a state run
+    return event.
+    '''
+    conn = _get_conn(options)
+    cur = conn.cursor()
+
+    cur.execute('SELECT id FROM salt_resources')
+    resources_db = [res[0] for res in cur.fetchall()]
+    resources = event.get('return', {}).values()
+
+    for resource in resources:
+        rid = '%s|%s' % (event.get('id'), resource.get('__id__'))
+        if rid in resources_db:
+            status = 'unknown'
+            if resource.get('result', None) is not None:
+                status = 'success' if resource.get('result') else 'failed'
+
+            resource_sql = '''UPDATE salt_resources SET (status, last_ret, alter_time) = (%s, %s, current_timestamp)
+                                WHERE id = %s'''
+
+            cur.execute(
+                resource_sql, (
+                    status,
+                    repr(resource),
+                    rid
+                )
+            )
+
+            conn.commit()
+
+    _close_conn(conn)
+
+
+def start(host='salt', user='salt', password='salt', database='salt', port=5432, **kwargs):
+    '''
+    Listen to events and parse Salt state returns
+    '''
+    if __opts__['__role'] == 'master':
+        event_bus = salt.utils.event.get_master_event(
+                __opts__,
+                __opts__['sock_dir'],
+                listen=True)
+    else:
+        event_bus = salt.utils.event.get_event(
+            'minion',
+            transport=__opts__['transport'],
+            opts=__opts__,
+            sock_dir=__opts__['sock_dir'],
+            listen=True)
+
+    log.debug('Saltgraph engine started')
+
+    while True:
+        event = event_bus.get_event()
+        supported_funcs = ['state.sls', 'state.apply', 'state.highstate']
+        if event and event.get('fun', None) in supported_funcs:
+            test = 'test=true' in [arg.lower() for arg in event.get('fun_args', [])]
+            if not test:
+                options = {
+                    'host': host,
+                    'user': user,
+                    'passwd': password,
+                    'db': database,
+                    'port': port
+                }
+                is_reclass = [arg for arg in event.get('fun_args', []) if arg.startswith('reclass')]
+                if is_reclass or not _up_to_date(options):
+                    _get_lowstate_data(options)
+
+                _update_resources(event, options)
+
diff --git a/_modules/saltkey.py b/_modules/saltkey.py
new file mode 100644
index 0000000..1645a66
--- /dev/null
+++ b/_modules/saltkey.py
@@ -0,0 +1,109 @@
+from __future__ import absolute_import
+
+# Import python libs
+import logging
+import os
+
+try:
+    import paramiko
+    HAS_PARAMIKO = True
+except ImportError:
+    HAS_PARAMIKO = False
+
+# Import Salt libs
+import salt.config
+import salt.wheel
+
+LOG = logging.getLogger(__name__)
+
+
+def __virtual__():
+    '''
+    Only load if the paramiko library is available.
+    '''
+    if not HAS_PARAMIKO:
+        return (
+            False,
+            'Cannot load module saltkey: paramiko library not found')
+    return True
+
+
+def key_create(id_, host, force=False):
+    '''
+    Generates minion keypair, accepts it on master and injects it to minion via SSH.
+
+    :param id_: expected minion ID of target node
+    :param host: IP address or resolvable hostname/FQDN of target node
+
+    CLI Examples:
+
+    .. code-block:: bash
+
+        salt-call saltkey.key_create <MINION_ID> <MINION_IP_ADDRESS> force=False
+    '''
+    ret = {
+        'retcode': 0,
+        'msg': 'Salt Key for minion %s is already accepted' % id_,
+    }
+
+    opts = salt.config.master_config('/etc/salt/master')
+    wheel = salt.wheel.WheelClient(opts)
+    keys = wheel.cmd('key.gen_accept', arg=[id_], kwarg={'force': force})
+    pub_key = keys.get('pub', None)
+    priv_key = keys.get('priv', None)
+
+    if pub_key and priv_key:
+        ssh = paramiko.SSHClient()
+        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+        # Establish SSH connection to minion
+        try:
+            ssh.connect(host)
+        except paramiko.ssh_exception.AuthenticationException:
+            msg = ('Could not establish SSH connection to minion "%s" on address %s, please ensure '
+                   'that the current user\'s SSH key is present in the minion\'s authorized_keys.') % (id_, host)
+            LOG.error(msg)
+            ret['retcode'] = 1
+            ret['msg'] = msg
+            wheel.cmd_async({'fun': 'key.delete', 'match': id_})
+            return ret
+        except Exception as e:
+            msg = ('Unknown error occurred while establishing SSH connection '
+                   'to minion "%s" on address %s: %s') % (id_, host, repr(e))
+            LOG.error(msg)
+            ret['retcode'] = 1
+            ret['msg'] = msg
+            wheel.cmd_async({'fun': 'key.delete', 'match': id_})
+            return ret
+        # Set up the keys on the minion side the crude way; the cleaner
+        # approach did not work.
+        key_path = '/etc/salt/pki/minion'
+        command = ('echo "%(pub_key)s" > %(pub_path)s && chmod 644 %(pub_path)s && '
+                   'echo "%(priv_key)s" > %(priv_path)s && chmod 400 %(priv_path)s && '
+                   'salt-call --local service.restart salt-minion') % {
+            'pub_path': os.path.join(key_path, 'minion.pub'),
+            'pub_key': pub_key,
+            'priv_path': os.path.join(key_path, 'minion.pem'),
+            'priv_key': priv_key
+        }
+
+        ssh_chan = ssh.get_transport().open_session()
+        ssh_chan.exec_command(command)
+        # Wait for the command to finish and collect its output; note that
+        # stdout/stderr are truncated to the first 1000 bytes.
+        exit_status = ssh_chan.recv_exit_status()
+        stderr = ssh_chan.recv_stderr(1000)
+        stdout = ssh_chan.recv(1000)
+        ssh.close()
+        # Evaluate SSH command exit status
+        if exit_status != 0:
+            msg = 'Keypair injection to Salt minion failed on the target with the following error: %s' % stderr
+            LOG.error(msg)
+            ret['retcode'] = exit_status
+            ret['msg'] = msg
+            return ret
+
+        ret['msg'] = 'Salt Key successfully created'
+
+    return ret
+
diff --git a/_modules/saltresource.py b/_modules/saltresource.py
index f9d0987..bf847d6 100644
--- a/_modules/saltresource.py
+++ b/_modules/saltresource.py
@@ -16,30 +16,30 @@
 except ImportError:
     HAS_POSTGRES = False
 
+__virtualname__ = 'saltresource'
 LOG = logging.getLogger(__name__)
 
 
 def __virtual__():
     if not HAS_POSTGRES:
         return False, 'Could not import saltresource module; psycopg2 is not installed.'
-    return 'saltresource'
+    return __virtualname__
 
 
 def _get_options(ret=None):
     '''
     Get the postgres options from salt.
     '''
-    attrs = {'host': 'host',
-             'user': 'user',
-             'passwd': 'passwd',
-             'db': 'db',
-             'port': 'port'}
+    defaults = {'host': '127.0.0.1',
+                'user': 'salt',
+                'passwd': 'salt',
+                'db': 'salt',
+                'port': '5432'}
 
-    _options = salt.returners.get_returner_options('returner.postgres_graph_db',
-                                                   ret,
-                                                   attrs,
-                                                   __salt__=__salt__,
-                                                   __opts__=__opts__)
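+    # Each option can be overridden via the minion config or pillar,
+    # e.g. ``saltresource.host: 10.0.0.1`` (illustrative value).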
+    _options = {}
+    for key, default in defaults.items():
+        _options[key] = __salt__['config.get']('%s.%s' % (__virtualname__, key), default)
+
     return _options
 
 
diff --git a/_modules/virtng.py b/_modules/virtng.py
index 2bf3766..0a87e56 100644
--- a/_modules/virtng.py
+++ b/_modules/virtng.py
@@ -375,6 +375,24 @@
                   format: qcow2
                   model: virtio
 
+    Example profile for KVM/QEMU with two disks: the first is created
+    from the specified image, the second is empty:
+
+    .. code-block:: yaml
+
+        virt:
+          disk:
+            two_disks:
+              - system:
+                  size: 8192
+                  format: qcow2
+                  model: virtio
+                  image: http://path/to/image.qcow2
+              - lvm:
+                  size: 32768
+                  format: qcow2
+                  model: virtio
+
     The ``format`` and ``model`` parameters are optional, and will
     default to whatever is best suitable for the active hypervisor.
     '''
@@ -538,96 +556,110 @@
         salt 'hypervisor' virt.init vm_name 4 512 salt://path/to/image.raw
         salt 'hypervisor' virt.init vm_name 4 512 nic=profile disk=profile
     '''
+
     hypervisor = __salt__['config.get']('libvirt:hypervisor', hypervisor)
 
     nicp = _nic_profile(nic, hypervisor, **kwargs)
 
-    diskp = None
-    seedable = False
-    if image:  # with disk template image
-        # if image was used, assume only one disk, i.e. the
-        # 'default' disk profile
-        # TODO: make it possible to use disk profiles and use the
-        # template image as the system disk
-        #diskp = _disk_profile('default', hypervisor, **kwargs)
-	#new diskp TCP cloud
-	diskp = _disk_profile(disk, hypervisor, **kwargs)
-        # When using a disk profile extract the sole dict key of the first
-        # array element as the filename for disk
+    diskp = _disk_profile(disk, hypervisor, **kwargs)
+
+    if image:
+        # Backward compatibility: if 'image' is specified in the VM's
+        # arguments instead of in the disk arguments, it will be assigned
+        # to the first disk of the VM.
         disk_name = next(diskp[0].iterkeys())
-        disk_type = diskp[0][disk_name]['format']
-        disk_file_name = '{0}.{1}'.format(disk_name, disk_type)
-	# disk size TCP cloud
-	disk_size = diskp[0][disk_name]['size']
+        if not diskp[0][disk_name].get('image', None):
+            diskp[0][disk_name]['image'] = image
 
+    # Create multiple disks, empty or from specified images.
+    for disk in diskp:
+        log.debug("Creating disk for VM [ {0} ]: {1}".format(name, disk))
 
-        if hypervisor in ['esxi', 'vmware']:
-            # TODO: we should be copying the image file onto the ESX host
-            raise SaltInvocationError('virt.init does not support image '
-                                      'template template in conjunction '
-                                      'with esxi hypervisor')
-        elif hypervisor in ['qemu', 'kvm']:
-            img_dir = __salt__['config.option']('virt.images')
-            img_dest = os.path.join(
-                img_dir,
-                name,
-                disk_file_name
-            )
-            img_dir = os.path.dirname(img_dest)
-            sfn = __salt__['cp.cache_file'](image, saltenv)
-            if not os.path.isdir(img_dir):
-                os.makedirs(img_dir)
-            try:
-                salt.utils.files.copyfile(sfn, img_dest)
-                mask = os.umask(0)
-                os.umask(mask)
-                # Apply umask and remove exec bit
+        for disk_name, args in disk.items():
 
-		# Resizing image TCP cloud
-		cmd = 'qemu-img resize ' + img_dest  + ' ' +  str(disk_size) + 'M'
- 	        subprocess.call(cmd, shell=True)
-		
-                mode = (0o0777 ^ mask) & 0o0666
-                os.chmod(img_dest, mode)
-
-            except (IOError, OSError) as e:
-                raise CommandExecutionError('problem copying image. {0} - {1}'.format(image, e))
-
-            seedable = True
-        else:
-            log.error('unsupported hypervisor when handling disk image')
-
-    else:
-        # no disk template image specified, create disks based on disk profile
-        diskp = _disk_profile(disk, hypervisor, **kwargs)
-        if hypervisor in ['qemu', 'kvm']:
-            # TODO: we should be creating disks in the local filesystem with
-            # qemu-img
-            raise SaltInvocationError('virt.init does not support disk '
-                                      'profiles in conjunction with '
-                                      'qemu/kvm at this time, use image '
-                                      'template instead')
-        else:
-            # assume libvirt manages disks for us
-            for disk in diskp:
-                for disk_name, args in disk.items():
+            if hypervisor in ['esxi', 'vmware']:
+                if 'image' in args:
+                    # TODO: we should be copying the image file onto the ESX host
+                    raise SaltInvocationError('virt.init does not support image '
+                                              'templates in conjunction '
+                                              'with the esxi hypervisor')
+                else:
+                    # assume libvirt manages disks for us
                     xml = _gen_vol_xml(name,
                                        disk_name,
                                        args['size'],
                                        hypervisor)
                     define_vol_xml_str(xml)
 
+            elif hypervisor in ['qemu', 'kvm']:
+
+                disk_type = args['format']
+                disk_file_name = '{0}.{1}'.format(disk_name, disk_type)
+                # disk size TCP cloud
+                disk_size = args['size']
+
+                img_dir = __salt__['config.option']('virt.images')
+                img_dest = os.path.join(
+                    img_dir,
+                    name,
+                    disk_file_name
+                )
+                img_dir = os.path.dirname(img_dest)
+                if not os.path.isdir(img_dir):
+                    os.makedirs(img_dir)
+
+                if 'image' in args:
+                    # Create disk from specified image
+                    sfn = __salt__['cp.cache_file'](args['image'], saltenv)
+                    try:
+                        salt.utils.files.copyfile(sfn, img_dest)
+                        mask = os.umask(0)
+                        os.umask(mask)
+                        # Apply umask and remove exec bit
+
+                        # Resizing image TCP cloud
+                        cmd = 'qemu-img resize ' + img_dest + ' ' + str(disk_size) + 'M'
+                        subprocess.call(cmd, shell=True)
+
+                        mode = (0o0777 ^ mask) & 0o0666
+                        os.chmod(img_dest, mode)
+
+                    except (IOError, OSError) as e:
+                        raise CommandExecutionError('problem while copying image. {0} - {1}'.format(args['image'], e))
+
+                    if kwargs.get('seed'):
+                        install = kwargs.get('install', True)
+                        seed_cmd = kwargs.get('seed_cmd', 'seedng.apply')
+
+                        __salt__[seed_cmd](img_dest,
+                                           id_=name,
+                                           config=kwargs.get('config'),
+                                           install=install)
+                else:
+                    # Create empty disk
+                    try:
+                        mask = os.umask(0)
+                        os.umask(mask)
+                        # Apply umask and remove exec bit
+
+                        # Create empty image
+                        cmd = 'qemu-img create -f ' + disk_type + ' ' + img_dest + ' ' + str(disk_size) + 'M'
+                        subprocess.call(cmd, shell=True)
+
+                        mode = (0o0777 ^ mask) & 0o0666
+                        os.chmod(img_dest, mode)
+
+                    except (IOError, OSError) as e:
+                        raise CommandExecutionError('problem while creating volume {0} - {1}'.format(img_dest, e))
+
+            else:
+                # Unknown hypervisor
+                raise SaltInvocationError('Unsupported hypervisor when handling disk image: {0}'
+                                          .format(hypervisor))
+
     xml = _gen_xml(name, cpu, mem, diskp, nicp, hypervisor, **kwargs)
     define_xml_str(xml)
 
-    if kwargs.get('seed') and seedable:
-        install = kwargs.get('install', True)
-        seed_cmd = kwargs.get('seed_cmd', 'seedng.apply')
-
-        __salt__[seed_cmd](img_dest,
-                           id_=name,
-                           config=kwargs.get('config'),
-                           install=install)
     if start:
         create(name)
 
diff --git a/metadata/service/master/reactor/minion_start.yml b/metadata/service/master/reactor/minion_start.yml
new file mode 100644
index 0000000..cbe41d9
--- /dev/null
+++ b/metadata/service/master/reactor/minion_start.yml
@@ -0,0 +1,6 @@
+parameters:
+  salt:
+    master:
+      reactor:
+        'salt/minion/*/start':
+        - salt://salt/reactor/minion_start.sls
diff --git a/metadata/service/support.yml b/metadata/service/support.yml
index 9beb7a7..6472c6c 100644
--- a/metadata/service/support.yml
+++ b/metadata/service/support.yml
@@ -3,6 +3,10 @@
     _orchestrate:
       priority: 20
     _support:
+      telegraf:
+        enabled: true
+      prometheus:
+        enabled: true
       collectd:
         enabled: false
       heka:
diff --git a/salt/api.sls b/salt/api.sls
index 61e5687..2d7ac5a 100644
--- a/salt/api.sls
+++ b/salt/api.sls
@@ -15,6 +15,20 @@
   - watch_in:
     - service: salt_api_service
 
+{%- if api.get('ssl', {}).authority is defined %}
+
+{%- set cert_file = "/etc/ssl/certs/" + api.ssl.get('name', grains.id) + ".crt" %}
+{%- set ca_file = "/etc/ssl/certs/ca-" + api.ssl.authority + ".crt" %}
+
+salt_api_init_tls:
+  cmd.run:
+  - name: "cat {{ cert_file }} {{ ca_file }} > /etc/ssl/certs/{{ api.ssl.get('name', grains.id) }}-chain.crt"
+  - creates: /etc/ssl/certs/{{ api.ssl.get('name', grains.id) }}-chain.crt
+  - watch_in:
+    - service: salt_api_service
+
+{%- endif %}
+
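+{#- Illustrative pillar enabling the chain file above (pillar structure
+    assumed from this formula's conventions):
+    salt:
+      api:
+        ssl:
+          engine: salt
+          authority: <ca-authority-name>
+-#}
+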
 salt_api_service:
   service.running:
   - name: salt-api
@@ -23,4 +37,4 @@
   - watch:
     - file: /etc/salt/master.d/_api.conf
 
-{%- endif %}
+{%- endif %}
\ No newline at end of file
diff --git a/salt/files/_api.conf b/salt/files/_api.conf
index a1e2368..26856f8 100644
--- a/salt/files/_api.conf
+++ b/salt/files/_api.conf
@@ -9,8 +9,11 @@
   ssl_crt: /etc/letsencrypt/live/{{ api.ssl.name }}/cert.pem
   ssl_key: /etc/letsencrypt/live/{{ api.ssl.name }}/privkey.pem
   {%- elif api.ssl.engine == 'salt' %}
-  ssl_crt: /etc/ssl/certs/{{ system.name }}.{{ system.domain }}.crt
-  ssl_key: /etc/ssl/private/{{ system.name }}.{{ system.domain }}.key
+  ssl_crt: /etc/ssl/certs/{{ api.ssl.get('name', grains.id) }}.crt
+  ssl_key: /etc/ssl/private/{{ api.ssl.get('name', grains.id) }}.key
+  {%- if api.ssl.authority is defined %}
+  ssl_chain: /etc/ssl/certs/{{ api.ssl.get('name', grains.id) }}-chain.crt
+  {%- endif %}
   {%- else %}
   ssl_crt: {{ api.ssl.get('cert_file')|default("/etc/ssl/certs/"+grains.get('fqdn')+".crt") }}
   ssl_key: {{ api.ssl.get('key_file')|default("/etc/ssl/private/"+grains.get('fqdn')+".key") }}
@@ -25,3 +28,4 @@
 {#-
   vim: syntax=jinja
 -#}
+
diff --git a/salt/files/_engine.conf b/salt/files/_engine.conf
new file mode 100644
index 0000000..7d80849
--- /dev/null
+++ b/salt/files/_engine.conf
@@ -0,0 +1,8 @@
+{% from "salt/map.jinja" import master with context %}
+
+engines:
+{%- for engine_name, engine in master.engine.items() %}
+{%- set name = engine.get('engine', engine_name) %}
+{%- if engine.engine is defined %}{%- do engine.pop('engine') %}{% endif %}
+- {{ name }}: {{ engine|yaml }}
+{%- endfor %}
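+
+{#-
+  Illustrative rendered output for the saltgraph engine pillar from the
+  README (values are examples, key order may differ):
+
+  engines:
+  - saltgraph: {host: 127.0.0.1, port: 5432, user: salt, password: salt, database: salt}
+-#}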
diff --git a/salt/map.jinja b/salt/map.jinja
index 8920fa6..ab40acf 100644
--- a/salt/map.jinja
+++ b/salt/map.jinja
@@ -95,9 +95,19 @@
 Debian:
   pkgs:
   - salt-minion
+  dependency_pkgs:
   - python-m2crypto
   - python-psutil
   - python-yaml
+  - python-msgpack
+  - python-oauth
+  dependency_pkgs_pip:
+  - PyYAML
+  - M2Crypto
+  - psutil
+  - oauth
+  cert_pkgs:
+  - ca-certificates
 Gentoo:
   pkgs:
   - app-admin/salt
@@ -107,9 +117,18 @@
 RedHat:
   pkgs:
   - salt-minion
+  dependency_pkgs:
   - m2crypto
   - psutils
   - PyYAML
+  - python-oauth
+  dependency_pkgs_pip:
+  - PyYAML
+  - M2Crypto
+  - psutil
+  - oauth
+  cert_pkgs:
+  - ca-certificates
 {%- endload %}
 
 {%- if pillar.salt.minion is defined %}
diff --git a/salt/master/env.sls b/salt/master/env.sls
index a61d249..4568e20 100644
--- a/salt/master/env.sls
+++ b/salt/master/env.sls
@@ -13,6 +13,7 @@
     - /srv/salt/env/{{ master.system.environment }}/_modules
     - /srv/salt/env/{{ master.system.environment }}/_states
     - /srv/salt/env/{{ master.system.environment }}/_grains
+    - /srv/salt/env/{{ master.system.environment }}/_engines
     - /srv/salt/env/{{ master.system.environment }}
   - makedirs: True
 
@@ -329,6 +330,17 @@
 
 {%- endfor %}
 
+{%- for engine_name, engine in formula.get('engine', {}).iteritems() %}
+
+salt_master_{{ environment_name }}_{{ engine_name }}_state:
+  file.symlink:
+  - name: /srv/salt/env/{{ environment_name }}/_engines/{{ engine_name }}
+  - target: /srv/salt/env/{{ environment_name }}/_formulas/{{ formula_name }}/_engines/{{ engine_name }}
+  - force: True
+  - makedirs: True
+
+{%- endfor %}
+
 {%- endif %}
 
 {%- endif %}
diff --git a/salt/master/service.sls b/salt/master/service.sls
index 5af786e..0c0ce8c 100644
--- a/salt/master/service.sls
+++ b/salt/master/service.sls
@@ -42,6 +42,20 @@
 
 {%- endif %}
 
+{%- if master.engine is defined %}
+
+/etc/salt/master.d/_engine.conf:
+  file.managed:
+  - source: salt://salt/files/_engine.conf
+  - user: root
+  - template: jinja
+  - require:
+    - {{ master.install_state }}
+  - watch_in:
+    - service: salt_master_service
+
+{%- endif %}
+
 {%- if master.peer is defined %}
 
 /etc/salt/master.d/_peer.conf:
diff --git a/salt/master/test.sls b/salt/master/test.sls
new file mode 100644
index 0000000..3f4d376
--- /dev/null
+++ b/salt/master/test.sls
@@ -0,0 +1,18 @@
+{%- from "salt/map.jinja" import master with context %}
+{%- if master.enabled %}
+
+salt_master_test_packages:
+  pkg.latest:
+  - names: {{ master.test_pkgs }}
+
+/etc/salt/roster:
+  file.managed:
+  - source: salt://salt/files/roster
+  - user: root
+  - template: jinja
+  - require:
+    - {{ master.install_state }}
+  - watch_in:
+    - service: salt_master_service
+
+{%- endif %}
\ No newline at end of file
diff --git a/salt/meta/prometheus.yml b/salt/meta/prometheus.yml
new file mode 100644
index 0000000..8575ac9
--- /dev/null
+++ b/salt/meta/prometheus.yml
@@ -0,0 +1,32 @@
+{%- if pillar.salt is defined %}
+{%- if pillar.salt.get('master', {}).get('enabled', False) or pillar.salt.get('minion', {}).get('enabled', False) %}
+server:
+  alert:
+  {%- if pillar.salt.get('master', {}).get('enabled', False)  %}
+    SaltMasterProcessDown:
+      if: >-
+        procstat_running{process_name="salt-master"} == 0
+      {%- raw %}
+      labels:
+        severity: warning
+        service: salt-master
+      annotations:
+        summary: 'Salt-master service is down'
+        description: 'Salt-master service is down on node {{ $labels.host }}'
+      {%- endraw %}
+  {%- endif %}
+  {%- if pillar.salt.get('minion', {}).get('enabled', False)  %}
+    SaltMinionProcessDown:
+      if: >-
+        procstat_running{process_name="salt-minion"} == 0
+      {%- raw %}
+      labels:
+        severity: warning
+        service: salt-minion
+      annotations:
+        summary: 'Salt-minion service is down'
+        description: 'Salt-minion service is down on node {{ $labels.host }}'
+      {%- endraw %}
+  {%- endif %}
+{%- endif %}
+{%- endif %}
diff --git a/salt/meta/sensu.yml b/salt/meta/sensu.yml
index ebf81c0..f5eeb82 100644
--- a/salt/meta/sensu.yml
+++ b/salt/meta/sensu.yml
@@ -1,12 +1,12 @@
 check:
   local_salt_master_proc:
-    command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -C salt-master -u root -c 1:50"
+    command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -a salt-master -u root -c 1:50"
     interval: 60
     occurrences: 1
     subscribers:
     - local-salt-master
   local_salt_minion_proc:
-    command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -C salt-minion -u root -c 1:10"
+    command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -a salt-minion -u root -c 1:10"
     interval: 60
     occurrences: 1
     subscribers:
diff --git a/salt/meta/telegraf.yml b/salt/meta/telegraf.yml
new file mode 100644
index 0000000..77fec3c
--- /dev/null
+++ b/salt/meta/telegraf.yml
@@ -0,0 +1,16 @@
+{%- if pillar.salt is defined %}
+agent:
+  input:
+  {%- if pillar.salt.get('master', {}).get('enabled', False) or pillar.salt.get('minion', {}).get('enabled', False) %}
+    procstat:
+      process:
+        {%- if pillar.salt.get('master', {}).get('enabled', False)  %}
+        salt-master:
+          pattern: salt-master
+        {%- endif %}
+        {%- if pillar.salt.get('minion', {}).get('enabled', False)  %}
+        salt-minion:
+          pattern: salt-minion
+        {%- endif %}
+  {%- endif %}
+{%- endif %}
diff --git a/salt/minion/cert.sls b/salt/minion/cert.sls
index c04215a..09ef9d1 100644
--- a/salt/minion/cert.sls
+++ b/salt/minion/cert.sls
@@ -171,13 +171,7 @@
 
 salt_ca_certificates_packages:
   pkg.installed:
-{%- if grains.os_family == 'Debian' %}
-    - name: ca-certificates
-{%- elif grains.os_family == 'RedHat' %}
-    - name: ca-certificates
-{%- else %}
-    - name: []
-{%- endif %}
+    - names: {{ minion.cert_pkgs }}
 
 salt_update_certificates:
   cmd.wait:
@@ -206,7 +200,6 @@
   x509.pem_managed:
     - name: {{ cacert_file }}
     - text: {{ ca_cert|replace('\n', '') }}
-    - makedirs: True
     - watch_in:
       - file: salt_trust_ca_{{ cacert_file }}_permissions
       - cmd: salt_update_certificates
diff --git a/salt/minion/grains.sls b/salt/minion/grains.sls
index df5205b..9920082 100644
--- a/salt/minion/grains.sls
+++ b/salt/minion/grains.sls
@@ -40,6 +40,8 @@
 salt_minion_grain_{{ service_name }}_{{ name }}_validity_check:
   cmd.wait:
     - name: python -c "import yaml; stream = file('/etc/salt/grains.d/{{ name }}', 'r'); yaml.load(stream); stream.close()"
+    - require:
+      - pkg: salt_minion_dependency_packages
     - watch:
       - file: salt_minion_grain_{{ service_name }}_{{ name }}
     - watch_in:
diff --git a/salt/minion/service.sls b/salt/minion/service.sls
index 7951b2f..36d8aff 100644
--- a/salt/minion/service.sls
+++ b/salt/minion/service.sls
@@ -10,12 +10,20 @@
   - version: {{ minion.source.version }}
   {%- endif %}
 
+salt_minion_dependency_packages:
+  pkg.installed:
+  - pkgs: {{ minion.dependency_pkgs }}
+
 {%- elif minion.source.get('engine', 'pkg') == 'pip' %}
 
 salt_minion_packages:
   pip.installed:
   - name: salt{% if minion.source.version is defined %}=={{ minion.source.version }}{% endif %}
 
+salt_minion_dependency_packages:
+  pkg.installed:
+  - pkgs: {{ minion.dependency_pkgs_pip }}
+
 {%- endif %}
 
 /etc/salt/minion.d/minion.conf:
@@ -57,6 +65,9 @@
   service.running:
     - name: {{ minion.service }}
     - enable: true
+    - require:
+      - pkg: salt_minion_packages
+      - pkg: salt_minion_dependency_packages
     {%- if grains.get('noservices') %}
     - onlyif: /bin/false
     {%- endif %}
@@ -81,5 +92,8 @@
     - name: 'saltutil.sync_all'
     - onchanges:
       - service: salt_minion_service
+    - require:
+      - pkg: salt_minion_packages
+      - pkg: salt_minion_dependency_packages
 
 {%- endif %}
diff --git a/salt/orchestrate/node_install.sls b/salt/orchestrate/node_install.sls
deleted file mode 100644
index c66b4e2..0000000
--- a/salt/orchestrate/node_install.sls
+++ /dev/null
@@ -1,23 +0,0 @@
-{%- set node_name = salt['pillar.get']('event_originator') %}
-
-linux_state:
-  salt.state:
-    - tgt: '{{ node_name }}'
-    - sls: linux
-    - queue: True
-
-salt_state:
-  salt.state:
-    - tgt: '{{ node_name }}'
-    - sls: salt.minion
-    - queue: True
-    - require:
-      - salt: linux_state
-
-misc_states:
-  salt.state:
-    - tgt: '{{ node_name }}'
-    - sls: ntp,openssh
-    - queue: True
-    - require:
-      - salt: salt_state
diff --git a/salt/orchestrate/infra_install.sls b/salt/orchestrate/reactor/infra_install.sls
similarity index 100%
rename from salt/orchestrate/infra_install.sls
rename to salt/orchestrate/reactor/infra_install.sls
diff --git a/salt/orchestrate/reactor/key_create.sls b/salt/orchestrate/reactor/key_create.sls
new file mode 100644
index 0000000..47a85aa
--- /dev/null
+++ b/salt/orchestrate/reactor/key_create.sls
@@ -0,0 +1,13 @@
+{%- set node_id = salt['pillar.get']('node_id') %}
+{%- set node_host = salt['pillar.get']('node_host') %}
+
+salt_key_create_state:
+  salt.state:
+    - tgt: 'salt:master'
+    - tgt_type: pillar
+    - sls: salt.reactor_sls.key_create
+    - queue: True
+    - pillar:
+        node_id: {{ node_id }}
+        node_host: {{ node_host }}
+
diff --git a/salt/orchestrate/reactor/key_remove.sls b/salt/orchestrate/reactor/key_remove.sls
new file mode 100644
index 0000000..9295e7f
--- /dev/null
+++ b/salt/orchestrate/reactor/key_remove.sls
@@ -0,0 +1,11 @@
+{%- set node_id = salt['pillar.get']('node_id') %}
+
+salt_key_remove_state:
+  salt.state:
+    - tgt: 'salt:master'
+    - tgt_type: pillar
+    - sls: salt.reactor_sls.key_remove
+    - queue: True
+    - pillar:
+        node_id: {{ node_id }}
+
diff --git a/salt/orchestrate/master_update.sls b/salt/orchestrate/reactor/master_update.sls
similarity index 99%
rename from salt/orchestrate/master_update.sls
rename to salt/orchestrate/reactor/master_update.sls
index 89876b2..602f314 100644
--- a/salt/orchestrate/master_update.sls
+++ b/salt/orchestrate/reactor/master_update.sls
@@ -14,3 +14,4 @@
     - queue: True
     - require:
       - salt: salt_state_config_node
+
diff --git a/salt/orchestrate/reactor/node_install.sls b/salt/orchestrate/reactor/node_install.sls
new file mode 100644
index 0000000..426a050
--- /dev/null
+++ b/salt/orchestrate/reactor/node_install.sls
@@ -0,0 +1,24 @@
+{%- set node_name = salt['pillar.get']('event_originator') %}
+
+linux_state:
+  salt.state:
+  - tgt: '{{ node_name }}'
+  - sls: linux
+  - queue: True
+
+salt_state:
+  salt.state:
+  - tgt: '{{ node_name }}'
+  - sls: salt.minion
+  - queue: True
+  - require:
+    - salt: linux_state
+
+misc_states:
+  salt.state:
+  - tgt: '{{ node_name }}'
+  - sls: ntp,openssh
+  - queue: True
+  - require:
+    - salt: salt_state
+
diff --git a/salt/reactor/infra_install.sls b/salt/reactor/infra_install.sls
index 17e7d9c..7be16c4 100644
--- a/salt/reactor/infra_install.sls
+++ b/salt/reactor/infra_install.sls
@@ -1,5 +1,6 @@
 
 orchestrate_infra_install:
   runner.state.orchestrate:
-    - mods: salt://salt/orchestrate/infra_install.sls
-    - queue: True
+  - mods: salt.orchestrate.reactor.infra_install
+  - queue: True
+
diff --git a/salt/reactor/key_create.sls b/salt/reactor/key_create.sls
new file mode 100644
index 0000000..c4be57e
--- /dev/null
+++ b/salt/reactor/key_create.sls
@@ -0,0 +1,28 @@
+
+{% if data.data.orch_pre_create is defined %}
+
+orchestrate_node_key_pre_create:
+  runner.state.orchestrate:
+  - mods: {{ data.data.orch_pre_create }}
+  - queue: True
+  - pillar: {{ data.data.get('orch_pre_create_pillar', {}) }}
+
+{% endif %}
+
+node_key_create:
+  runner.state.orchestrate:
+  - mods: salt.orchestrate.reactor.key_create
+  - queue: True
+  - pillar:
+      node_id: {{ data.data['node_id'] }}
+      node_host: {{ data.data['node_host'] }}
+
+{% if data.data.orch_post_create is defined %}
+
+orchestrate_node_key_post_create:
+  runner.state.orchestrate:
+  - mods: {{ data.data.orch_post_create }}
+  - queue: True
+  - pillar: {{ data.data.get('orch_post_create_pillar', {}) }}
+
+{% endif %}
diff --git a/salt/reactor/key_remove.sls b/salt/reactor/key_remove.sls
new file mode 100644
index 0000000..8088476
--- /dev/null
+++ b/salt/reactor/key_remove.sls
@@ -0,0 +1,27 @@
+
+{% if data.data.orch_pre_remove is defined %}
+
+orchestrate_node_key_pre_remove:
+  runner.state.orchestrate:
+  - mods: {{ data.data.orch_pre_remove }}
+  - queue: True
+  - pillar: {{ data.data.get('orch_pre_remove_pillar', {}) }}
+
+{% endif %}
+
+node_key_remove:
+  runner.state.orchestrate:
+  - mods: salt.orchestrate.reactor.key_remove
+  - queue: True
+  - pillar:
+      node_id: {{ data.data['node_id'] }}
+
+{% if data.data.orch_post_remove is defined %}
+
+orchestrate_node_key_post_remove:
+  runner.state.orchestrate:
+  - mods: {{ data.data.orch_post_remove }}
+  - queue: True
+  - pillar: {{ data.data.get('orch_post_remove_pillar', {}) }}
+
+{% endif %}
diff --git a/salt/reactor/minion_start.sls b/salt/reactor/minion_start.sls
new file mode 100644
index 0000000..f267bbf
--- /dev/null
+++ b/salt/reactor/minion_start.sls
@@ -0,0 +1,11 @@
+
+minion_sync_all:
+  local.saltutil.sync_all:
+  - tgt: {{ data.id }}
+  - queue: True
+
+minion_refresh_pillar:
+  local.saltutil.refresh_pillar:
+  - tgt: {{ data.id }}
+  - queue: True
+
diff --git a/salt/reactor/node_install.sls b/salt/reactor/node_install.sls
index 96e3c3b..0a71929 100644
--- a/salt/reactor/node_install.sls
+++ b/salt/reactor/node_install.sls
@@ -1,7 +1,8 @@
 
 orchestrate_node_install:
   runner.state.orchestrate:
-    - mods: salt://salt/orchestrate/node_install.sls
-    - queue: True
-    - pillar:
-        event_originator: {{ data.id }}
+  - mods: salt.orchestrate.reactor.node_install
+  - queue: True
+  - pillar:
+      event_originator: {{ data.id }}
+
diff --git a/salt/reactor/node_start.sls b/salt/reactor/node_start.sls
deleted file mode 100644
index 904822c..0000000
--- a/salt/reactor/node_start.sls
+++ /dev/null
@@ -1,8 +0,0 @@
-
-node_sync_all:
-  local.saltutil.sync_all:
-  - tgt: {{ data.id }}
-
-node_refresh_pillar:
-  local.saltutil.refresh_pillar:
-  - tgt: {{ data.id }}
diff --git a/salt/reactor/orchestrate_start.sls b/salt/reactor/orchestrate_start.sls
index 752dc55..40b4c5a 100644
--- a/salt/reactor/orchestrate_start.sls
+++ b/salt/reactor/orchestrate_start.sls
@@ -1,5 +1,6 @@
 
-orchestrate_orchestrate_start:
+orchestrate_orchestrate_run:
   runner.state.orchestrate:
-    - mods: salt://{{ data.data.orchestrate }}
-    - queue: {{ data.data.get('queue', True) }}
+  - mods: {{ data.data.orchestrate }}
+  - queue: {{ data.data.get('queue', True) }}
+
diff --git a/salt/reactor_sls/key_create.sls b/salt/reactor_sls/key_create.sls
new file mode 100644
index 0000000..14589c1
--- /dev/null
+++ b/salt/reactor_sls/key_create.sls
@@ -0,0 +1,9 @@
+{%- set node_id = salt['pillar.get']('node_id') %}
+{%- set node_host = salt['pillar.get']('node_host') %}
+
+key_create_{{ node_id }}:
+  module.run:
+    saltkey.key_create:
+    - id_: {{ node_id }}
+    - host: {{ node_host }}
+
diff --git a/salt/reactor_sls/key_remove.sls b/salt/reactor_sls/key_remove.sls
new file mode 100644
index 0000000..819d4ae
--- /dev/null
+++ b/salt/reactor_sls/key_remove.sls
@@ -0,0 +1,6 @@
+{%- set node_id = salt['pillar.get']('node_id') %}
+
+key_remove_{{ node_id }}:
+  salt.wheel:
+  - name: key.delete
+  - match: {{ node_id }}
diff --git a/tests/pillar/control_virt.sls b/tests/pillar/control_virt.sls
index e84c5dd..7587594 100644
--- a/tests/pillar/control_virt.sls
+++ b/tests/pillar/control_virt.sls
@@ -1,3 +1,14 @@
+virt:
+  disk:
+    three_disks:
+      - system:
+          size: 4096
+          image: ubuntu.qcow
+      - repository_snapshot:
+          size: 8192
+          image: snapshot.qcow
+      - cinder-volume:
+          size: 2048
 salt:
   minion:
     enabled: true
@@ -16,6 +27,10 @@
       large:
         cpu: 4
         ram: 8
+      medium_three_disks:
+        cpu: 2
+        ram: 4
+        disk_profile: three_disks
     cluster:
       vpc20_infra:
         domain: neco.virt.domain.com
@@ -32,3 +47,7 @@
             provider: node02.domain.com
             image: bubuntu.qcomw
             size: small
+          ubuntu3:
+            provider: node03.domain.com
+            image: meowbuntu.qcom2
+            size: medium_three_disks