Merge tag '2016.4.1' into debian/unstable

2016.4.1
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..6f76275
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,3 @@
+tests/build/
+*.swp
+*.pyc
\ No newline at end of file
diff --git a/.gitreview b/.gitreview
new file mode 100644
index 0000000..0c96f77
--- /dev/null
+++ b/.gitreview
@@ -0,0 +1,4 @@
+[gerrit]
+host=review.openstack.org
+port=29418
+project=openstack/salt-formula-nova.git
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
new file mode 100644
index 0000000..bb073f7
--- /dev/null
+++ b/CHANGELOG.rst
@@ -0,0 +1,10 @@
+nova formula
+============
+
+2016.4.1 (2016-04-15)
+
+- second release
+
+0.0.1 (2015-08-03)
+
+- Initial formula setup
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..68c771a
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,176 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..fc83783
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,26 @@
+DESTDIR=/
+SALTENVDIR=/usr/share/salt-formulas/env
+RECLASSDIR=/usr/share/salt-formulas/reclass
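+# Derive the formula name from the first "name:" entry in metadata.yml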
+FORMULANAME=$(shell grep name: metadata.yml|head -1|cut -d : -f 2|grep -Eo '[a-z0-9\-]*')
+
+all:
+	@echo "make install - Install into DESTDIR"
+	@echo "make test    - Run tests"
+	@echo "make clean   - Cleanup after tests run"
+
+install:
+	# Formula
+	[ -d $(DESTDIR)/$(SALTENVDIR) ] || mkdir -p $(DESTDIR)/$(SALTENVDIR)
+	cp -a $(FORMULANAME) $(DESTDIR)/$(SALTENVDIR)/
+	[ ! -d _modules ] || cp -a _modules $(DESTDIR)/$(SALTENVDIR)/
+	[ ! -d _states ] || cp -a _states $(DESTDIR)/$(SALTENVDIR)/ || true
+	# Metadata
+	[ -d $(DESTDIR)/$(RECLASSDIR)/service/$(FORMULANAME) ] || mkdir -p $(DESTDIR)/$(RECLASSDIR)/service/$(FORMULANAME)
+	cp -a metadata/service/* $(DESTDIR)/$(RECLASSDIR)/service/$(FORMULANAME)
+
+test:
+	[ ! -d tests ] || (cd tests; ./run_tests.sh)
+
+clean:
+	[ ! -d tests/build ] || rm -rf tests/build
+	[ ! -d build ] || rm -rf build
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..87f1d75
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,211 @@
+
+==============
+Nova
+==============
+
+OpenStack Nova provides a cloud computing fabric controller, supporting a wide variety of virtualization technologies, including KVM, Xen, LXC, VMware, and more. In addition to its native API, it includes compatibility with the commonly encountered Amazon EC2 and S3 APIs.
+
+Sample pillars
+==============
+
+Controller nodes
+----------------
+
+Nova services on the controller node
+
+.. code-block:: yaml
+
+    nova:
+      controller:
+        version: juno
+        enabled: true
+        security_group: true
+        cpu_allocation_ratio: 8.0
+        ram_allocation_ratio: 1.0
+        disk_allocation_ratio: 1.0
+        workers: 8
+        bind:
+          public_address: 10.0.0.122
+          public_name: openstack.domain.com
+          novncproxy_port: 6080
+        database:
+          engine: mysql
+          host: 127.0.0.1
+          port: 3306
+          name: nova
+          user: nova
+          password: pwd
+        identity:
+          engine: keystone
+          host: 127.0.0.1
+          port: 35357
+          user: nova
+          password: pwd
+          tenant: service
+        message_queue:
+          engine: rabbitmq
+          host: 127.0.0.1
+          port: 5672
+          user: openstack
+          password: pwd
+          virtual_host: '/openstack'
+        network:
+          engine: neutron
+          host: 127.0.0.1
+          port: 9696
+          identity:
+            engine: keystone
+            host: 127.0.0.1
+            port: 35357
+            user: neutron
+            password: pwd
+            tenant: service
+        metadata:
+          password: password
+
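+
+Nova controller with syslog logging handlers. The same ``logging`` block is
+preseeded by ``metadata/service/control/cluster.yml`` and
+``metadata/service/control/single.yml``:
+
+.. code-block:: yaml
+
+    nova:
+      controller:
+        ....
+        logging:
+        - engine: syslog
+          facility: local0
+          heka:
+            enabled: true
+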
+
+Nova services from custom package repository
+
+.. code-block:: yaml
+
+    nova:
+      controller:
+        version: juno
+        source:
+          engine: pkg
+          address: http://...
+      ....
+
+
+Client-side RabbitMQ HA setup
+
+.. code-block:: yaml
+
+   nova:
+     controller:
+       ....
+       message_queue:
+         engine: rabbitmq
+         members:
+           - host: 10.0.16.1
+           - host: 10.0.16.2
+           - host: 10.0.16.3
+         user: openstack
+         password: pwd
+         virtual_host: '/openstack'
+       ....
+
+
+Compute nodes
+-------------
+
+Nova services on the compute node
+
+.. code-block:: yaml
+
+    nova:
+      compute:
+        version: juno
+        enabled: true
+        virtualization: kvm
+        security_group: true
+        bind:
+          vnc_address: 172.20.0.100
+          vnc_port: 6080
+          vnc_name: openstack.domain.com
+          vnc_protocol: http
+        database:
+          engine: mysql
+          host: 127.0.0.1
+          port: 3306
+          name: nova
+          user: nova
+          password: pwd
+        identity:
+          engine: keystone
+          host: 127.0.0.1
+          port: 35357
+          user: nova
+          password: pwd
+          tenant: service
+        message_queue:
+          engine: rabbitmq
+          host: 127.0.0.1
+          port: 5672
+          user: openstack
+          password: pwd
+          virtual_host: '/openstack'
+        image:
+          engine: glance
+          host: 127.0.0.1
+          port: 9292
+        network:
+          engine: neutron
+          host: 127.0.0.1
+          port: 9696
+          identity:
+            engine: keystone
+            host: 127.0.0.1
+            port: 35357
+            user: neutron
+            password: pwd
+            tenant: service
+        qemu:
+          max_files: 4096
+          max_processes: 4096
+
+
+Nova services on compute node with OpenContrail
+
+.. code-block:: yaml
+
+    nova:
+      compute:
+        enabled: true
+        ...
+        networking: contrail
+
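+
+Nova compute with notification settings. These keys are rendered into
+``nova.conf`` by the ``compute.notification`` branch of the Juno compute
+template; the driver and topic values below are illustrative:
+
+.. code-block:: yaml
+
+    nova:
+      compute:
+        ...
+        notification:
+          driver: messagingv2
+          topics: notifications
+          notify_on:
+            state_change: vm_and_task_state
+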
+
+Nova services on compute node with memcached caching
+
+.. code-block:: yaml
+
+    nova:
+      compute:
+        enabled: true
+        ...
+        cache:
+          engine: memcached
+          members:
+          - host: 127.0.0.1
+            port: 11211
+          - host: 127.0.0.1
+            port: 11211
+
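+
+Nova compute with a dedicated SSH keypair for the nova user, as consumed by
+the ``compute.user`` branch of ``nova/compute.sls`` (the key material below
+is a placeholder):
+
+.. code-block:: yaml
+
+    nova:
+      compute:
+        ...
+        user:
+          public_key: ssh-rsa AAAAB3NzaC...
+          private_key: |
+            -----BEGIN RSA PRIVATE KEY-----
+            ...
+            -----END RSA PRIVATE KEY-----
+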
+
+Client-side RabbitMQ HA setup
+
+.. code-block:: yaml
+
+   nova:
+     compute:
+       ....
+       message_queue:
+         engine: rabbitmq
+         members:
+           - host: 10.0.16.1
+           - host: 10.0.16.2
+           - host: 10.0.16.3
+         user: openstack
+         password: pwd
+         virtual_host: '/openstack'
+       ....
+
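+Nova compute with Ceph-backed ephemeral storage. The ``ceph`` keys are read
+by the ``compute.ceph`` branch of ``nova/compute.sls`` to register the client
+key as a libvirt secret; the UUID and key below are placeholders:
+
+.. code-block:: yaml
+
+    nova:
+      compute:
+        ...
+        ceph:
+          secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337
+          client_cinder_key: AQDOavlVAAAAABAAcGCvXrPp9tAmo8S1zkHlsQ==
+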
+
+
+Read more
+=========
+
+* http://docs.openstack.org/developer/nova/
+* http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-dev
+* http://bugs.launchpad.net/nova
diff --git a/VERSION b/VERSION
new file mode 100644
index 0000000..1bad316
--- /dev/null
+++ b/VERSION
@@ -0,0 +1 @@
+2016.4.1
diff --git a/doc/source/conf.py b/doc/source/conf.py
new file mode 100644
index 0000000..4725b00
--- /dev/null
+++ b/doc/source/conf.py
@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+
+sys.path.insert(0, os.path.abspath('../..'))
+# -- General configuration ----------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = [
+    'sphinx.ext.autodoc',
+]
+
+# autodoc generation is a bit aggressive and a nuisance when doing heavy
+# text edit cycles.
+# execute "export SPHINX_DEBUG=1" in your terminal to disable
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'salt-formula-nova'
+copyright = u'2015, OpenStack Foundation'
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+add_module_names = True
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# -- Options for HTML output --------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  Major themes that come with
+# Sphinx are currently 'default' and 'sphinxdoc'.
+# html_theme_path = ["."]
+# html_theme = '_theme'
+# html_static_path = ['static']
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = '%sdoc' % project
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass
+# [howto/manual]).
+latex_documents = [
+    ('index',
+     '%s.tex' % project,
+     u'%s Documentation' % project,
+     u'OpenStack Foundation', 'manual'),
+]
+
+# Example configuration for intersphinx: refer to the Python standard library.
+# intersphinx_mapping = {'http://docs.python.org/': None}
diff --git a/doc/source/index.rst b/doc/source/index.rst
new file mode 100644
index 0000000..a6210d3
--- /dev/null
+++ b/doc/source/index.rst
@@ -0,0 +1 @@
+.. include:: ../../README.rst
diff --git a/metadata.yml b/metadata.yml
new file mode 100644
index 0000000..81b361e
--- /dev/null
+++ b/metadata.yml
@@ -0,0 +1,3 @@
+name: "nova"
+version: "2016.4.1"
+source: "https://github.com/openstack/salt-formula-nova"
diff --git a/metadata/service/compute/kvm.yml b/metadata/service/compute/kvm.yml
new file mode 100644
index 0000000..7fa0036
--- /dev/null
+++ b/metadata/service/compute/kvm.yml
@@ -0,0 +1,56 @@
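+# Preseeded pillar for KVM compute nodes; the ${_param:...} placeholders are
+# interpolated by reclass from cluster-level parameters.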
+applications:
+- nova
+classes:
+- service.nova.support
+parameters:
+  nova:
+    compute:
+      version: ${_param:nova_version}
+      enabled: true
+      virtualization: kvm
+      heal_instance_info_cache_interval: 60
+      vncproxy_url: http://${_param:cluster_vip_address}:6080
+      bind:
+        vnc_address: ${_param:cluster_local_address}
+        vnc_port: 6080
+        vnc_name: 0.0.0.0
+      database:
+        engine: mysql
+        host: ${_param:cluster_vip_address}
+        port: 3306
+        name: nova
+        user: nova
+        password: ${_param:mysql_nova_password}
+      identity:
+        engine: keystone
+        region: RegionOne
+        host: ${_param:cluster_vip_address}
+        port: 35357
+        user: nova
+        password: ${_param:keystone_nova_password}
+        tenant: service
+      message_queue:
+        engine: rabbitmq
+        host: ${_param:cluster_vip_address}
+        port: 5672
+        user: openstack
+        password: ${_param:rabbitmq_openstack_password}
+        virtual_host: '/openstack'
+      image:
+        engine: glance
+        host: ${_param:cluster_vip_address}
+        port: 9292
+      network:
+        engine: neutron
+        region: RegionOne
+        host: ${_param:cluster_vip_address}
+        port: 9696
+      cache:
+        engine: memcached
+        members:
+        - host: ${_param:cluster_node01_address}
+          port: 11211
+        - host: ${_param:cluster_node02_address}
+          port: 11211
+        - host: ${_param:cluster_node03_address}
+          port: 11211
diff --git a/metadata/service/control/cluster.yml b/metadata/service/control/cluster.yml
new file mode 100644
index 0000000..0bc715e
--- /dev/null
+++ b/metadata/service/control/cluster.yml
@@ -0,0 +1,74 @@
+applications:
+- nova
+classes:
+- service.nova.support
+parameters:
+  _param:
+    nova_vncproxy_url: http://${_param:single_address}:6080
+    nova_networking: default
+  nova:
+    controller:
+      enabled: true
+      networking: ${_param:nova_networking}
+      version: ${_param:nova_version}
+      vncproxy_url: ${_param:nova_vncproxy_url}
+      security_group: false
+      dhcp_domain: novalocal
+      scheduler_default_filters: "DifferentHostFilter,RetryFilter,AvailabilityZoneFilter,RamFilter,CoreFilter,DiskFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter"
+      cpu_allocation_ratio: 16.0
+      ram_allocation_ratio: 1.5
+      disk_allocation_ratio: 1.0
+      workers: 8
+      logging:
+      - engine: syslog
+        facility: local0
+        heka:
+          enabled: true
+      bind:
+        private_address: ${_param:cluster_local_address}
+        public_address: ${_param:cluster_vip_address}
+        public_name: ${_param:cluster_vip_address}
+        novncproxy_port: 6080
+      database:
+        engine: mysql
+        host: ${_param:cluster_vip_address}
+        port: 3306
+        name: nova
+        user: nova
+        password: ${_param:mysql_nova_password}
+      identity:
+        engine: keystone
+        region: RegionOne
+        host: ${_param:cluster_vip_address}
+        port: 35357
+        user: nova
+        password: ${_param:keystone_nova_password}
+        tenant: service
+      message_queue:
+        engine: rabbitmq
+        host: ${_param:cluster_vip_address}
+        port: 5672
+        user: openstack
+        password: ${_param:rabbitmq_openstack_password}
+        virtual_host: '/openstack'
+        ha_queues: true
+      glance:
+        host: ${_param:cluster_vip_address}
+        port: 9292
+      network:
+        engine: neutron
+        region: RegionOne
+        host: ${_param:cluster_vip_address}
+        port: 9696
+        mtu: 1500
+      metadata:
+        password: metadataPass
+      cache:
+        engine: memcached
+        members:
+        - host: ${_param:cluster_node01_address}
+          port: 11211
+        - host: ${_param:cluster_node02_address}
+          port: 11211
+        - host: ${_param:cluster_node03_address}
+          port: 11211
diff --git a/metadata/service/control/single.yml b/metadata/service/control/single.yml
new file mode 100644
index 0000000..b4add44
--- /dev/null
+++ b/metadata/service/control/single.yml
@@ -0,0 +1,69 @@
+applications:
+- nova
+classes:
+- service.nova.support
+parameters:
+  _param:
+    nova_vncproxy_url: http://${_param:single_address}:6080
+    nova_networking: default
+  nova:
+    controller:
+      enabled: true
+      networking: ${_param:nova_networking}
+      version: ${_param:nova_version}
+      security_group: false
+      vncproxy_url: ${_param:nova_vncproxy_url}
+      dhcp_domain: novalocal
+      scheduler_default_filters: "DifferentHostFilter,RetryFilter,AvailabilityZoneFilter,RamFilter,CoreFilter,DiskFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter"
+      cpu_allocation_ratio: 16.0
+      ram_allocation_ratio: 1.5
+      disk_allocation_ratio: 1.0
+      workers: 1
+      logging:
+      - engine: syslog
+        facility: local0
+        heka:
+          enabled: true
+      bind:
+        private_address: ${_param:single_address}
+        public_address: ${_param:single_address}
+        public_name: ${_param:single_address}
+        novncproxy_port: 6080
+      database:
+        engine: mysql
+        host: localhost
+        port: 3306
+        name: nova
+        user: nova
+        password: ${_param:mysql_nova_password}
+      identity:
+        engine: keystone
+        region: RegionOne
+        host: ${_param:single_address}
+        port: 35357
+        user: nova
+        password: ${_param:keystone_nova_password}
+        tenant: service
+      message_queue:
+        engine: rabbitmq
+        host: ${_param:single_address}
+        port: 5672
+        user: openstack
+        password: ${_param:rabbitmq_openstack_password}
+        virtual_host: '/openstack'
+      glance:
+        host: ${_param:single_address}
+        port: 9292
+      network:
+        engine: neutron
+        region: RegionOne
+        host: ${_param:single_address}
+        port: 9696
+        mtu: 1500
+      metadata:
+        password: metadataPass
+      cache:
+        engine: memcached
+        members:
+        - host: ${_param:single_address}
+          port: 11211
diff --git a/metadata/service/support.yml b/metadata/service/support.yml
new file mode 100644
index 0000000..fa72b58
--- /dev/null
+++ b/metadata/service/support.yml
@@ -0,0 +1,11 @@
+parameters:
+  nova:
+    _support:
+      collectd:
+        enabled: true
+      heka:
+        enabled: true
+      sensu:
+        enabled: true
+      sphinx:
+        enabled: true
diff --git a/nova/compute.sls b/nova/compute.sls
new file mode 100644
index 0000000..f6a68fb
--- /dev/null
+++ b/nova/compute.sls
@@ -0,0 +1,187 @@
+{%- from "nova/map.jinja" import compute with context %}
+{%- if compute.enabled %}
+
+nova_compute_packages:
+  pkg.installed:
+  - names: {{ compute.pkgs }}
+
+/var/log/nova:
+  file.directory:
+  {%- if compute.log_dir_perms is defined %}
+  - mode: {{ compute.log_dir_perms }}
+  {%- else %}
+  - mode: 750
+  {%- endif %}
+  - user: nova
+  - group: nova
+  - require:
+     - pkg: nova_compute_packages
+  - require_in:
+     - service: nova_compute_services
+
+{%- if compute.vm_swappiness is defined %}
+vm.swappiness:
+  sysctl.present:
+  - value: {{ compute.vm_swappiness }}
+  - require:
+    - pkg: nova_compute_packages
+  - require_in:
+    - service: nova_compute_services
+{%- endif %}
+
+
+{%- if not salt['user.info']('nova') %}
+user_nova:
+  user.present:
+  - name: nova
+  - home: /var/lib/nova
+  {%- if compute.user is defined %}
+  - shell: /bin/bash
+  {%- else %}
+  - shell: /bin/false
+  {%- endif %}
+  - uid: 303
+  - gid: 303
+  - system: True
+  - groups:
+    {%- if salt['group.info']('libvirtd') %}
+    - libvirtd
+    {%- endif %}
+    - nova
+  - require_in:
+    - pkg: nova_compute_packages
+    {%- if compute.user is defined %}
+    - file: /var/lib/nova/.ssh/id_rsa
+    {%- endif %}
+
+group_nova:
+  group.present:
+    - name: nova
+    - gid: 303
+    - system: True
+    - require_in:
+      - user: user_nova
+{%- endif %}
+
+{%- if compute.user is defined %}
+
+nova_auth_keys:
+  ssh_auth.present:
+  - user: nova
+  - names:
+    - {{ compute.user.public_key }}
+
+user_nova_bash:
+  user.present:
+  - name: nova
+  - home: /var/lib/nova
+  - shell: /bin/bash
+  - groups:
+    - libvirtd
+
+/var/lib/nova/.ssh/id_rsa:
+  file.managed:
+  - user: nova
+  - contents_pillar: nova:compute:user:private_key
+  - mode: 400
+  - require:
+    - pkg: nova_compute_packages
+
+/var/lib/nova/.ssh/config:
+  file.managed:
+  - user: nova
+  - contents: StrictHostKeyChecking no
+  - mode: 400
+  - require:
+    - pkg: nova_compute_packages
+
+{%- endif %}
+
+{%- if pillar.nova.controller is not defined %}
+
+/etc/nova/nova.conf:
+  file.managed:
+  - source: salt://nova/files/{{ compute.version }}/nova-compute.conf.{{ grains.os_family }}
+  - template: jinja
+  - require:
+    - pkg: nova_compute_packages
+
+nova_compute_services:
+  service.running:
+  - enable: true
+  - names: {{ compute.services }}
+  - watch:
+    - file: /etc/nova/nova.conf
+
+{%- if compute.virtualization == 'kvm' %}
+
+{% if compute.ceph is defined %}
+
+ceph_package:
+  pkg.installed:
+  - name: ceph-common
+
+/etc/secret.xml:
+  file.managed:
+  - source: salt://nova/files/secret.xml
+  - template: jinja
+
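+# Register the Ceph client key as a libvirt secret; the "unless" checks keep
+# both commands idempotent across repeated state runs.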
+ceph_virsh_secret_define:
+  cmd.run:
+  - name: "virsh secret-define --file /etc/secret.xml"
+  - unless: "virsh secret-list | grep {{ compute.ceph.secret_uuid }}"
+  - require:
+    - file: /etc/secret.xml
+
+ceph_virsh_secret_set_value:
+  cmd.run:
+  - name: "virsh secret-set-value --secret {{ compute.ceph.secret_uuid }} --base64 {{ compute.ceph.client_cinder_key }} "
+  - unless: "virsh secret-get-value {{ compute.ceph.secret_uuid }} | grep {{ compute.ceph.client_cinder_key }}"
+  - require:
+    - cmd: ceph_virsh_secret_define
+
+{% endif %}
+
+{{ compute.libvirt_bin }}:
+  file.managed:
+  - source: salt://nova/files/{{ compute.version }}/libvirt.{{ grains.os_family }}
+  - template: jinja
+  - require:
+    - pkg: nova_compute_packages
+
+/etc/libvirt/qemu.conf:
+  file.managed:
+  - source: salt://nova/files/{{ compute.version }}/qemu.conf.{{ grains.os_family }}
+  - template: jinja
+  - require:
+    - pkg: nova_compute_packages
+
+/etc/libvirt/{{ compute.libvirt_config }}:
+  file.managed:
+  - source: salt://nova/files/{{ compute.version }}/libvirtd.conf.{{ grains.os_family }}
+  - template: jinja
+  - require:
+    - pkg: nova_compute_packages
+
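+# Tear down libvirt's default network so it does not interfere with
+# Nova-managed networking; "onlyif" skips the call once it is gone.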
+virsh net-undefine default:
+  cmd.run:
+  - name: "virsh net-destroy default"
+  - require:
+    - pkg: nova_compute_packages
+  - onlyif: "virsh net-list | grep default"
+
+{{ compute.libvirt_service }}:
+  service.running:
+  - enable: true
+  - require:
+    - pkg: nova_compute_packages
+    - cmd: virsh net-undefine default
+  - watch:
+    - file: /etc/libvirt/{{ compute.libvirt_config }}
+    - file: {{ compute.libvirt_bin }}
+
+{%- endif %}
+
+{%- endif %}
+
+{%- endif %}
diff --git a/nova/controller.sls b/nova/controller.sls
new file mode 100644
index 0000000..608ff3c
--- /dev/null
+++ b/nova/controller.sls
@@ -0,0 +1,74 @@
+{% from "nova/map.jinja" import controller with context %}
+
+{%- if controller.enabled %}
+
+nova_controller_packages:
+  pkg.installed:
+  - names: {{ controller.pkgs }}
+
+{%- if not salt['user.info']('nova') %}
+user_nova:
+  user.present:
+  - name: nova
+  - home: /var/lib/nova
+  - shell: /bin/false
+  - uid: 303
+  - gid: 303
+  - system: True
+  - require_in:
+    - pkg: nova_controller_packages
+
+group_nova:
+  group.present:
+    - name: nova
+    - gid: 303
+    - system: True
+    - require_in:
+      - user: user_nova
+{%- endif %}
+
+{%- if controller.get('networking', 'default') == "contrail" and controller.version == "juno" %}
+
+contrail_nova_packages:
+  pkg.installed:
+  - names:
+    - contrail-nova-driver
+    - contrail-nova-networkapi
+
+{%- endif %}
+
+/etc/nova/nova.conf:
+  file.managed:
+  - source: salt://nova/files/{{ controller.version }}/nova-controller.conf.{{ grains.os_family }}
+  - template: jinja
+  - require:
+    - pkg: nova_controller_packages
+
+/etc/nova/api-paste.ini:
+  file.managed:
+  - source: salt://nova/files/{{ controller.version }}/api-paste.ini.{{ grains.os_family }}
+  - template: jinja
+  - require:
+    - pkg: nova_controller_packages
+
+nova_controller_syncdb:
+  cmd.run:
+  - names:
+    - nova-manage db sync
+    {%- if controller.version == "mitaka" %}
+    - nova-manage api_db sync
+    {%- endif %}
+  - require:
+    - file: /etc/nova/nova.conf
+
+nova_controller_services:
+  service.running:
+  - enable: true
+  - names: {{ controller.services }}
+  - require:
+    - cmd: nova_controller_syncdb
+  - watch:
+    - file: /etc/nova/nova.conf
+    - file: /etc/nova/api-paste.ini
+
+{%- endif %}
diff --git a/nova/files/collectd_libvirt.conf b/nova/files/collectd_libvirt.conf
new file mode 100644
index 0000000..e88266b
--- /dev/null
+++ b/nova/files/collectd_libvirt.conf
@@ -0,0 +1,10 @@
+LoadPlugin libvirt
+<Plugin "libvirt">
+  Connection "qemu:///system"
+  RefreshInterval 60
+  # Domain "nova"
+  # BlockDevice "name:device"
+  # InterfaceDevice "name:interface"
+  # IgnoreSelected false
+  HostnameFormat "uuid"
+</Plugin>
diff --git a/nova/files/collectd_nova.conf b/nova/files/collectd_nova.conf
new file mode 100644
index 0000000..3548712
--- /dev/null
+++ b/nova/files/collectd_nova.conf
@@ -0,0 +1,17 @@
+<LoadPlugin "python">
+  Globals true
+</LoadPlugin>
+
+<Plugin "python">
+  ModulePath "/usr/lib/collectd-plugins/openstack"
+
+  Import "openstack_nova"
+
+  <Module "openstack_nova">
+    Username "admin"
+    Password "123456"
+    TenantName "openstack"
+    AuthURL "https://api.example.com:5000/v2.0"
+    Verbose "False"
+  </Module>
+</Plugin>
\ No newline at end of file
diff --git a/nova/files/heka.toml b/nova/files/heka.toml
new file mode 100644
index 0000000..e15af73
--- /dev/null
+++ b/nova/files/heka.toml
@@ -0,0 +1,13 @@
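+# Tail all Nova service logs and parse them with the shared OpenStack Lua
+# decoder defined below.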
+[logstreamer_nova]
+type = "LogstreamerInput"
+log_directory = "/var/log/nova"
+file_match = '(?P<Service>.+)\.log\.?(?P<Seq>\d*)$'
+differentiator = ['nova','_','Service']
+priority = ["^Index"]
+decoder = "openstack"
+
+[openstack]
+type = "SandboxDecoder"
+filename = "lua_modules/decoders/openstack.lua"
+module_directory = "/usr/share/heka/lua_modules;/usr/share/heka/lua_modules/common"
+
diff --git a/nova/files/juno/api-paste.ini.Debian b/nova/files/juno/api-paste.ini.Debian
new file mode 100644
index 0000000..f16e36c
--- /dev/null
+++ b/nova/files/juno/api-paste.ini.Debian
@@ -0,0 +1,127 @@
+############
+# Metadata #
+############
+[composite:metadata]
+use = egg:Paste#urlmap
+/: meta
+
+[pipeline:meta]
+pipeline = ec2faultwrap logrequest metaapp
+
+[app:metaapp]
+paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory
+
+#######
+# EC2 #
+#######
+
+[composite:ec2]
+use = egg:Paste#urlmap
+/services/Cloud: ec2cloud
+
+[composite:ec2cloud]
+use = call:nova.api.auth:pipeline_factory
+noauth = ec2faultwrap logrequest ec2noauth cloudrequest validator ec2executor
+keystone = ec2faultwrap logrequest ec2keystoneauth cloudrequest validator ec2executor
+
+[filter:ec2faultwrap]
+paste.filter_factory = nova.api.ec2:FaultWrapper.factory
+
+[filter:logrequest]
+paste.filter_factory = nova.api.ec2:RequestLogging.factory
+
+[filter:ec2lockout]
+paste.filter_factory = nova.api.ec2:Lockout.factory
+
+[filter:ec2keystoneauth]
+paste.filter_factory = nova.api.ec2:EC2KeystoneAuth.factory
+
+[filter:ec2noauth]
+paste.filter_factory = nova.api.ec2:NoAuth.factory
+
+[filter:cloudrequest]
+controller = nova.api.ec2.cloud.CloudController
+paste.filter_factory = nova.api.ec2:Requestify.factory
+
+[filter:authorizer]
+paste.filter_factory = nova.api.ec2:Authorizer.factory
+
+[filter:validator]
+paste.filter_factory = nova.api.ec2:Validator.factory
+
+[app:ec2executor]
+paste.app_factory = nova.api.ec2:Executor.factory
+
+#############
+# OpenStack #
+#############
+
+[composite:osapi_compute]
+use = call:nova.api.openstack.urlmap:urlmap_factory
+/: oscomputeversions
+/v1.1: openstack_compute_api_v2
+/v2: openstack_compute_api_v2
+/v2.1: openstack_compute_api_v21
+/v3: openstack_compute_api_v3
+
+[composite:openstack_compute_api_v2]
+use = call:nova.api.auth:pipeline_factory
+noauth = compute_req_id faultwrap sizelimit noauth ratelimit osapi_compute_app_v2
+keystone = compute_req_id faultwrap sizelimit authtoken keystonecontext ratelimit osapi_compute_app_v2
+keystone_nolimit = compute_req_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v2
+
+[composite:openstack_compute_api_v21]
+use = call:nova.api.auth:pipeline_factory_v21
+noauth = request_id faultwrap sizelimit noauth osapi_compute_app_v21
+keystone = request_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v21
+
+[composite:openstack_compute_api_v3]
+use = call:nova.api.auth:pipeline_factory_v21
+noauth = request_id faultwrap sizelimit noauth_v3 osapi_compute_app_v3
+keystone = request_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v3
+
+[filter:request_id]
+paste.filter_factory = nova.openstack.common.middleware.request_id:RequestIdMiddleware.factory
+
+[filter:compute_req_id]
+paste.filter_factory = nova.api.compute_req_id:ComputeReqIdMiddleware.factory
+
+[filter:faultwrap]
+paste.filter_factory = nova.api.openstack:FaultWrapper.factory
+
+[filter:noauth]
+paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
+
+[filter:noauth_v3]
+paste.filter_factory = nova.api.openstack.auth:NoAuthMiddlewareV3.factory
+
+[filter:ratelimit]
+paste.filter_factory = nova.api.openstack.compute.limits:RateLimitingMiddleware.factory
+
+[filter:sizelimit]
+paste.filter_factory = nova.api.sizelimit:RequestBodySizeLimiter.factory
+
+[app:osapi_compute_app_v2]
+paste.app_factory = nova.api.openstack.compute:APIRouter.factory
+
+[app:osapi_compute_app_v21]
+paste.app_factory = nova.api.openstack.compute:APIRouterV21.factory
+
+[app:osapi_compute_app_v3]
+paste.app_factory = nova.api.openstack.compute:APIRouterV3.factory
+
+[pipeline:oscomputeversions]
+pipeline = faultwrap oscomputeversionapp
+
+[app:oscomputeversionapp]
+paste.app_factory = nova.api.openstack.compute.versions:Versions.factory
+
+##########
+# Shared #
+##########
+
+[filter:keystonecontext]
+paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
diff --git a/nova/files/juno/api-paste.ini.RedHat b/nova/files/juno/api-paste.ini.RedHat
new file mode 120000
index 0000000..08fd76a
--- /dev/null
+++ b/nova/files/juno/api-paste.ini.RedHat
@@ -0,0 +1 @@
+api-paste.ini.Debian
\ No newline at end of file
diff --git a/nova/files/juno/libvirt.Debian b/nova/files/juno/libvirt.Debian
new file mode 100644
index 0000000..1704dc4
--- /dev/null
+++ b/nova/files/juno/libvirt.Debian
@@ -0,0 +1,12 @@
+# Defaults for libvirt-bin initscript (/etc/init.d/libvirt-bin)
+# This is a POSIX shell fragment
+
+# Start libvirtd to handle qemu/kvm:
+start_libvirtd="yes"
+
+# options passed to libvirtd, add "-l" to listen on tcp
+libvirtd_opts="-d -l"
+LIBVIRTD_ARGS="--listen"
+
+# pass in location of kerberos keytab
+#export KRB5_KTNAME=/etc/libvirt/libvirt.keytab
\ No newline at end of file
diff --git a/nova/files/juno/libvirt.RedHat b/nova/files/juno/libvirt.RedHat
new file mode 120000
index 0000000..f8f6638
--- /dev/null
+++ b/nova/files/juno/libvirt.RedHat
@@ -0,0 +1 @@
+libvirt.Debian
\ No newline at end of file
diff --git a/nova/files/juno/libvirtd.conf.Debian b/nova/files/juno/libvirtd.conf.Debian
new file mode 100644
index 0000000..8333dcb
--- /dev/null
+++ b/nova/files/juno/libvirtd.conf.Debian
@@ -0,0 +1,402 @@
+# Master libvirt daemon configuration file
+#
+# For further information consult http://libvirt.org/format.html
+#
+# NOTE: the tests/daemon-conf regression test script requires
+# that each "PARAMETER = VALUE" line in this file have the parameter
+# name just after a leading "#".
+
+#################################################################
+#
+# Network connectivity controls
+#
+
+# Flag listening for secure TLS connections on the public TCP/IP port.
+# NB, must pass the --listen flag to the libvirtd process for this to
+# have any effect.
+#
+# It is necessary to setup a CA and issue server certificates before
+# using this capability.
+#
+# This is enabled by default, uncomment this to disable it
+#listen_tls = 0
+
+
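+# Values managed by the nova formula: allow plain, unauthenticated TCP
+# connections (commonly used for live migration between compute hosts).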
+listen_tls = 0
+listen_tcp = 1
+auth_tcp = "none"
+
+
+# Listen for unencrypted TCP connections on the public TCP/IP port.
+# NB, must pass the --listen flag to the libvirtd process for this to
+# have any effect.
+#
+# Using the TCP socket requires SASL authentication by default. Only
+# SASL mechanisms which support data encryption are allowed. This is
+# DIGEST_MD5 and GSSAPI (Kerberos5)
+#
+# This is disabled by default, uncomment this to enable it.
+#listen_tcp = 1
+
+
+
+# Override the port for accepting secure TLS connections
+# This can be a port number, or service name
+#
+#tls_port = "16514"
+
+# Override the port for accepting insecure TCP connections
+# This can be a port number, or service name
+#
+#tcp_port = "16509"
+
+
+# Override the default configuration which binds to all network
+# interfaces. This can be a numeric IPv4/6 address, or hostname
+#
+#listen_addr = "192.168.0.1"
+
+
+# Flag toggling mDNS advertisement of the libvirt service.
+#
+# Alternatively can disable for all services on a host by
+# stopping the Avahi daemon
+#
+# This is disabled by default, uncomment this to enable it
+#mdns_adv = 1
+
+# Override the default mDNS advertisement name. This must be
+# unique on the immediate broadcast network.
+#
+# The default is "Virtualization Host HOSTNAME", where HOSTNAME
+# is substituted for the short hostname of the machine (without domain)
+#
+#mdns_name = "Virtualization Host Joe Demo"
+
+
+#################################################################
+#
+# UNIX socket access controls
+#
+
+# Set the UNIX domain socket group ownership. This can be used to
+# allow a 'trusted' set of users access to management capabilities
+# without becoming root.
+#
+# This is restricted to 'root' by default.
+unix_sock_group = "libvirtd"
+
+# Set the UNIX socket permissions for the R/O socket. This is used
+# for monitoring VM status only
+#
+# Default allows any user. If setting group ownership, you may want to
+# restrict this to:
+#unix_sock_ro_perms = "0777"
+
+# Set the UNIX socket permissions for the R/W socket. This is used
+# for full management of VMs
+#
+# Default allows only root. If PolicyKit is enabled on the socket,
+# the default will change to allow everyone (eg, 0777)
+#
+# If not using PolicyKit and setting group ownership for access
+# control then you may want to relax this to:
+unix_sock_rw_perms = "0770"
+
+# Set the name of the directory in which sockets will be found/created.
+#unix_sock_dir = "/var/run/libvirt"
+
+#################################################################
+#
+# Authentication.
+#
+#  - none: do not perform auth checks. If you can connect to the
+#          socket you are allowed. This is suitable if there are
+#          restrictions on connecting to the socket (eg, UNIX
+#          socket permissions), or if there is a lower layer in
+#          the network providing auth (eg, TLS/x509 certificates)
+#
+#  - sasl: use SASL infrastructure. The actual auth scheme is then
+#          controlled from /etc/sasl2/libvirt.conf. For the TCP
+#          socket only GSSAPI & DIGEST-MD5 mechanisms will be used.
+#          For non-TCP or TLS sockets,  any scheme is allowed.
+#
+#  - polkit: use PolicyKit to authenticate. This is only suitable
+#            for use on the UNIX sockets. The default policy will
+#            require a user to supply their own password to gain
+#            full read/write access (aka sudo like), while anyone
+#            is allowed read/only access.
+#
+# Set an authentication scheme for UNIX read-only sockets
+# By default socket permissions allow anyone to connect
+#
+# To restrict monitoring of domains you may wish to enable
+# an authentication mechanism here
+auth_unix_ro = "none"
+
+# Set an authentication scheme for UNIX read-write sockets
+# By default socket permissions only allow root. If PolicyKit
+# support was compiled into libvirt, the default will be to
+# use 'polkit' auth.
+#
+# If the unix_sock_rw_perms are changed you may wish to enable
+# an authentication mechanism here
+auth_unix_rw = "none"
+
+# Change the authentication scheme for TCP sockets.
+#
+# If you don't enable SASL, then all TCP traffic is cleartext.
+# Don't do this outside of a dev/test scenario. For real world
+# use, always enable SASL and use the GSSAPI or DIGEST-MD5
+# mechanism in /etc/sasl2/libvirt.conf
+#auth_tcp = "sasl"
+#auth_tcp = "none"
+
+# Change the authentication scheme for TLS sockets.
+#
+# TLS sockets already have encryption provided by the TLS
+# layer, and limited authentication is done by certificates
+#
+# It is possible to make use of any SASL authentication
+# mechanism as well, by using 'sasl' for this option
+#auth_tls = "none"
+
+
+
+#################################################################
+#
+# TLS x509 certificate configuration
+#
+
+
+# Override the default server key file path
+#
+#key_file = "/etc/pki/libvirt/private/serverkey.pem"
+
+# Override the default server certificate file path
+#
+#cert_file = "/etc/pki/libvirt/servercert.pem"
+
+# Override the default CA certificate path
+#
+#ca_file = "/etc/pki/CA/cacert.pem"
+
+# Specify a certificate revocation list.
+#
+# Defaults to not using a CRL, uncomment to enable it
+#crl_file = "/etc/pki/CA/crl.pem"
+
+
+
+#################################################################
+#
+# Authorization controls
+#
+
+
+# Flag to disable verification of our own server certificates
+#
+# When libvirtd starts it performs some sanity checks against
+# its own certificates.
+#
+# Default is to always run sanity checks. Uncommenting this
+# will disable sanity checks which is not a good idea
+#tls_no_sanity_certificate = 1
+
+# Flag to disable verification of client certificates
+#
+# Client certificate verification is the primary authentication mechanism.
+# Any client which does not present a certificate signed by the CA
+# will be rejected.
+#
+# Default is to always verify. Uncommenting this will disable
+# verification - make sure an IP whitelist is set
+#tls_no_verify_certificate = 1
+
+
+# A whitelist of allowed x509 Distinguished Names
+# This list may contain wildcards such as
+#
+#    "C=GB,ST=London,L=London,O=Red Hat,CN=*"
+#
+# See the POSIX fnmatch function for the format of the wildcards.
+#
+# NB If this is an empty list, no client can connect, so comment out
+# entirely rather than using empty list to disable these checks
+#
+# By default, no DNs are checked
+#tls_allowed_dn_list = ["DN1", "DN2"]
+
+
+# A whitelist of allowed SASL usernames. The format for usernames
+# depends on the SASL authentication mechanism. Kerberos usernames
+# look like username@REALM
+#
+# This list may contain wildcards such as
+#
+#    "*@EXAMPLE.COM"
+#
+# See the POSIX fnmatch function for the format of the wildcards.
+#
+# NB If this is an empty list, no client can connect, so comment out
+# entirely rather than using empty list to disable these checks
+#
+# By default, no usernames are checked
+#sasl_allowed_username_list = ["joe@EXAMPLE.COM", "fred@EXAMPLE.COM" ]
+
+
+
+#################################################################
+#
+# Processing controls
+#
+
+# The maximum number of concurrent client connections to allow
+# over all sockets combined.
+#max_clients = 20
+
+
+# The minimum limit sets the number of workers to start up
+# initially. If the number of active clients exceeds this,
+# then more threads are spawned, up to the max_workers limit.
+# Typically you'd want max_workers to equal the maximum number
+# of clients allowed
+#min_workers = 5
+#max_workers = 20
+
+
+# The number of priority workers. If all workers from the above
+# pool are stuck, some calls marked as high priority
+# (notably domainDestroy) can be executed in this pool.
+#prio_workers = 5
+
+# Total global limit on concurrent RPC calls. Should be
+# at least as large as max_workers. Beyond this, RPC requests
+# will be read into memory and queued. This directly impacts
+# memory usage; currently each request requires 256 KB of
+# memory, so by default up to 5 MB of memory is used
+#
+# XXX this isn't actually enforced yet, only the per-client
+# limit is used so far
+#max_requests = 20
+
+# Limit on concurrent requests from a single client
+# connection. To avoid one client monopolizing the server
+# this should be a small fraction of the global max_requests
+# and max_workers parameter
+#max_client_requests = 5
+
+#################################################################
+#
+# Logging controls
+#
+
+# Logging level: 4 errors, 3 warnings, 2 information, 1 debug
+# basically 1 will log everything possible
+#log_level = 3
+
+# Logging filters:
+# A filter allows selecting a different logging level for a given category
+# of logs
+# The format for a filter is one of:
+#    x:name
+#    x:+name
+#      where name is a string which is matched against source file name,
+#      e.g., "remote", "qemu", or "util/json", the optional "+" prefix
+#      tells libvirt to log stack trace for each message matching name,
+#      and x is the minimal level where matching messages should be logged:
+#    1: DEBUG
+#    2: INFO
+#    3: WARNING
+#    4: ERROR
+#
+# Multiple filters can be defined in a single @filters; they just need to be
+# separated by spaces.
+#
+# e.g. to only get warning or errors from the remote layer and only errors
+# from the event layer:
+#log_filters="3:remote 4:event"
+
+# Logging outputs:
+# An output is one of the places to save logging information
+# The format for an output can be:
+#    x:stderr
+#      output goes to stderr
+#    x:syslog:name
+#      use syslog for the output and use the given name as the ident
+#    x:file:file_path
+#      output to a file, with the given filepath
+# In all cases the x prefix is the minimal level, acting as a filter
+#    1: DEBUG
+#    2: INFO
+#    3: WARNING
+#    4: ERROR
+#
+# Multiple outputs can be defined; they just need to be separated by spaces.
+# e.g. to log all warnings and errors to syslog under the libvirtd ident:
+#log_outputs="3:syslog:libvirtd"
+#
+
+# Log debug buffer size: default 64
+# The daemon keeps an internal debug log buffer which will be dumped in case
+# of crash or upon receiving a SIGUSR2 signal. This setting allows overriding
+# the default buffer size in kilobytes.
+# If value is 0 or less the debug log buffer is deactivated
+#log_buffer_size = 64
+
+
+##################################################################
+#
+# Auditing
+#
+# This setting allows usage of the auditing subsystem to be altered:
+#
+#   audit_level == 0  -> disable all auditing
+#   audit_level == 1  -> enable auditing, only if enabled on host (default)
+#   audit_level == 2  -> enable auditing, and exit if disabled on host
+#
+#audit_level = 2
+#
+# If set to 1, then audit messages will also be sent
+# via libvirt logging infrastructure. Defaults to 0
+#
+#audit_logging = 1
+
+###################################################################
+# UUID of the host:
+# Provide the UUID of the host here in case the command
+# 'dmidecode -s system-uuid' does not provide a valid uuid. In case
+# 'dmidecode' does not provide a valid UUID and none is provided here, a
+# temporary UUID will be generated.
+# Keep the format of the example UUID below. The UUID must not have all
+# digits the same.
+
+# NB This default all-zeros UUID will not work. Replace
+# it with the output of the 'uuidgen' command and then
+# uncomment this entry
+#host_uuid = "00000000-0000-0000-0000-000000000000"
+
+###################################################################
+# Keepalive protocol:
+# This allows libvirtd to detect broken client connections or even
+# dead clients.  A keepalive message is sent to a client after
+# keepalive_interval seconds of inactivity to check if the client is
+# still responding; keepalive_count is a maximum number of keepalive
+# messages that are allowed to be sent to the client without getting
+# any response before the connection is considered broken.  In other
+# words, the connection is automatically closed approximately after
+# keepalive_interval * (keepalive_count + 1) seconds since the last
+# message received from the client.  If keepalive_interval is set to
+# -1, libvirtd will never send keepalive requests; however clients
+# can still send them and the daemon will send responses.  When
+# keepalive_count is set to 0, connections will be automatically
+# closed after keepalive_interval seconds of inactivity without
+# sending any keepalive messages.
+#
+#keepalive_interval = 5
+#keepalive_count = 5
+#
+# If set to 1, libvirtd will refuse to talk to clients that do not
+# support keepalive protocol.  Defaults to 0.
+#
+#keepalive_required = 1
\ No newline at end of file
diff --git a/nova/files/juno/libvirtd.conf.RedHat b/nova/files/juno/libvirtd.conf.RedHat
new file mode 120000
index 0000000..2a7b101
--- /dev/null
+++ b/nova/files/juno/libvirtd.conf.RedHat
@@ -0,0 +1 @@
+libvirtd.conf.Debian
\ No newline at end of file
diff --git a/nova/files/juno/nova-compute.conf.Debian b/nova/files/juno/nova-compute.conf.Debian
new file mode 100644
index 0000000..08e5e10
--- /dev/null
+++ b/nova/files/juno/nova-compute.conf.Debian
@@ -0,0 +1,129 @@
+{%- from "nova/map.jinja" import compute with context %}
+[DEFAULT]
+dhcpbridge_flagfile=/etc/nova/nova.conf
+dhcpbridge=/usr/bin/nova-dhcpbridge
+logdir=/var/log/nova
+state_path=/var/lib/nova
+lock_path=/var/lock/nova
+force_dhcp_release=True
+iscsi_helper=tgtadm
+libvirt_use_virtio_for_bridges=True
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+verbose=True
+ec2_private_dns_show_ip=False
+api_paste_config=/etc/nova/api-paste.ini
+volumes_path=/var/lib/nova/volumes
+enabled_apis=ec2,osapi_compute,metadata
+
+allow_resize_to_same_host=True
+
+{%- if compute.reserved_host_memory_mb is defined %}
+reserved_host_memory_mb = {{ compute.reserved_host_memory_mb }}
+{%- endif %}
+
+live_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE
+live_migration_retry_count=30
+
+#Neutron
+neutron_admin_username={{ compute.network.user }}
+neutron_admin_password={{ compute.network.password }}
+neutron_admin_tenant_name={{ compute.identity.tenant }}
+neutron_admin_auth_url=http://{{ compute.identity.host }}:{{ compute.identity.port }}/v2.0
+neutron_url=http://{{ compute.network.host }}:{{ compute.network.port }}
+#neutron_url = http://10.0.102.35:9696/
+
+
+auth_strategy = keystone
+libvirt_nonblocking = True
+libvirt_inject_partition = -1
+
+#RabbitMQ
+rabbit_host = {{ compute.message_queue.host }}
+rabbit_port = {{ compute.message_queue.port }}
+rabbit_hosts={{ compute.message_queue.host }}:{{ compute.message_queue.port }}
+rabbit_userid = {{ compute.message_queue.user }}
+rabbit_password = {{ compute.message_queue.password }}
+rabbit_virtual_host = {{ compute.message_queue.virtual_host }}
+
+glance_host = {{ compute.image.host }}
+
+{%- if compute.image.use_cow is defined %}
+use_cow_images = {{ compute.image.use_cow }}
+{%- endif %}
+
+neutron_url_timeout = 300
+network_api_class = nova_contrail_vif.contrailvif.ContrailNetworkAPI
+compute_driver = libvirt.LibvirtDriver
+
+heal_instance_info_cache_interval = {{ compute.heal_instance_info_cache_interval }}
+
+#NoVNC
+vnc_enabled=true
+vncserver_enabled = true
+vncserver_listen=0.0.0.0
+vncserver_proxyclient_address={{ compute.bind.vnc_address }}
+novncproxy_base_url={{ compute.vncproxy_url }}/vnc_auto.html
+novncproxy_port={{ compute.bind.vnc_port }}
+
+{%- if compute.cache is defined %}
+memcached_servers={%- for member in compute.cache.members %}{{ member.host }}:11211{% if not loop.last %},{% endif %}{%- endfor %}
+{%- endif %}
+
+security_group_api = neutron
+libvirt_cpu_mode = host-passthrough
+image_cache_manager_interval = 0
+libvirt_vif_driver = nova_contrail_vif.contrailvif.VRouterVIFDriver
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+glance_port = 9292
+glance_num_retries = 10
+
+rabbit_retry_interval = 1
+rabbit_retry_backoff = 2
+
+rpc_cast_timeout = 30
+rpc_conn_pool_size = 300
+rpc_response_timeout = 3600
+rpc_thread_pool_size = 70
+report_interval = 5
+
+block_device_allocate_retries=600
+block_device_allocate_retries_interval=10
+
+resume_guests_state_on_host_boot = True
+service_down_time = 90
+
+{% if pillar.ceilometer is defined %}
+
+instance_usage_audit = True
+instance_usage_audit_period = hour
+notify_on_state_change = vm_and_task_state
+notification_driver = messagingv2
+
+{% endif %}
+
+{%- if compute.notification is defined %}
+notification_driver = {{ compute.notification.driver }}
+
+{%- if compute.notification.topics is defined %}
+notification_topics = {{ compute.notification.topics }}
+{%- endif %}
+
+{%- if compute.notification.notify_on is defined %}
+{%- for key, value in compute.notification.notify_on.iteritems() %}
+notify_on_{{ key }} = {{ value }}
+{%- endfor %}
+{%- endif %}
+
+{%- endif %}
+
+[keystone_authtoken]
+signing_dirname=/tmp/keystone-signing-nova
+admin_tenant_name = {{ compute.identity.tenant }}
+admin_user = {{ compute.identity.user }}
+admin_password = {{ compute.identity.password }}
+auth_host = {{ compute.identity.host }}
+auth_port = {{ compute.identity.port }}
+auth_protocol=http
+auth_uri=http://{{ compute.identity.host }}:5000
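
The compute template above takes all of its values from the `compute`
dictionary imported from nova/map.jinja, which merges formula defaults with
pillar data. A minimal pillar sketch covering the keys referenced in this
file might look as follows; hosts, ports, and credentials are hypothetical,
and the authoritative schema lives in map.jinja, which is not part of this
diff:

    nova:
      compute:
        heal_instance_info_cache_interval: 60
        vncproxy_url: http://10.0.0.10:6080
        bind:
          vnc_address: 10.0.0.11
          vnc_port: 6080
        identity:
          host: 10.0.0.10
          port: 35357
          tenant: service
          user: nova
          password: secret
        network:
          host: 10.0.0.10
          port: 9696
          user: neutron
          password: secret
        image:
          host: 10.0.0.10
        message_queue:
          host: 10.0.0.10
          port: 5672
          user: openstack
          password: secret
          virtual_host: '/openstack'
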
diff --git a/nova/files/juno/nova-compute.conf.Redhat b/nova/files/juno/nova-compute.conf.Redhat
new file mode 120000
index 0000000..ad4c8f6
--- /dev/null
+++ b/nova/files/juno/nova-compute.conf.Redhat
@@ -0,0 +1 @@
+nova-compute.conf.Debian
\ No newline at end of file
diff --git a/nova/files/juno/nova-controller.conf.Debian b/nova/files/juno/nova-controller.conf.Debian
new file mode 100644
index 0000000..d861cfc
--- /dev/null
+++ b/nova/files/juno/nova-controller.conf.Debian
@@ -0,0 +1,153 @@
+{%- from "nova/map.jinja" import controller with context %}
+[DEFAULT]
+logdir = /var/log/nova
+verbose = True
+{%- if controller.debug %}
+debug = True
+{%- else %}
+debug = False
+{%- endif %}
+state_path = /var/lib/nova
+lock_path = /var/lib/nova/tmp
+volumes_dir = /etc/nova/volumes
+dhcpbridge = /usr/bin/nova-dhcpbridge
+dhcpbridge_flagfile = /etc/nova/nova.conf
+dhcp_domain = {{ controller.dhcp_domain }}
+force_dhcp_release = True
+injected_network_template = /usr/share/nova/interfaces.template
+libvirt_nonblocking = True
+libvirt_use_virtio_for_bridges=True
+libvirt_inject_partition = -1
+vif_plugging_is_fatal = False
+vif_plugging_timeout = 0
+
+allow_resize_to_same_host = True
+
+logdir=/var/log/nova
+iscsi_helper=tgtadm
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+ec2_private_dns_show_ip=True
+api_paste_config=/etc/nova/api-paste.ini
+volumes_path=/var/lib/nova/volumes
+
+sql_connection = {{ controller.database.engine }}://{{ controller.database.user }}:{{ controller.database.password }}@{{ controller.database.host }}/{{ controller.database.name }}
+
+{%- if controller.get('networking', 'default') == "contrail" %}
+libvirt_vif_driver = nova_contrail_vif.contrailvif.VRouterVIFDriver
+network_api_class = nova_contrail_vif.contrailvif.ContrailNetworkAPI
+{%- else %}
+network_api_class = nova.network.neutronv2.api.API
+{%- endif %}
+
+compute_driver = libvirt.LibvirtDriver
+libvirt_type=kvm
+rootwrap_config = /etc/nova/rootwrap.conf
+auth_strategy = keystone
+firewall_driver=nova.virt.firewall.NoopFirewallDriver
+enabled_apis = ec2,osapi_compute,metadata
+
+my_ip={{ controller.bind.private_address }}
+
+
+
+neutron_auth_strategy = keystone
+neutron_admin_auth_url = http://{{ controller.identity.host }}:35357/v2.0
+{% if pillar.neutron is defined %}
+neutron_admin_password={{ pillar.neutron.server.identity.password }}
+neutron_admin_tenant_name={{ pillar.neutron.server.identity.tenant }}
+neutron_admin_username={{ pillar.neutron.server.identity.user }}
+{%- else %}
+neutron_admin_password={{ controller.network.password }}
+neutron_admin_tenant_name={{ controller.network.tenant }}
+neutron_admin_username={{ controller.network.user }}
+{%- endif %}
+neutron_url=http://{{ controller.network.host }}:{{ controller.network.port }}
+
+
+linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+security_group_api = neutron
+
+rpc_backend = nova.rpc.impl_kombu
+start_guests_on_host_boot=true
+
+{%- if controller.cache is defined %}
+memcached_servers={%- for member in controller.cache.members %}{{ member.host }}:11211{% if not loop.last %},{% endif %}{%- endfor %}
+{%- endif %}
+
+#RabbitMQ
+rabbit_host = {{ controller.message_queue.host }}
+rabbit_port = {{ controller.message_queue.port }}
+rabbit_hosts={{ controller.message_queue.host }}:{{ controller.message_queue.port }}
+rabbit_userid = {{ controller.message_queue.user }}
+rabbit_password = {{ controller.message_queue.password }}
+rabbit_virtual_host = {{ controller.message_queue.virtual_host }}
+
+rabbit_retry_interval = 1
+rabbit_retry_backoff = 2
+
+volume_api_class=nova.volume.cinder.API
+ec2_listen={{ controller.bind.private_address }}
+osapi_volume_listen={{ controller.bind.private_address }}
+osapi_compute_listen={{ controller.bind.private_address }}
+metadata_listen={{ controller.bind.private_address }}
+glance_host = {{ controller.glance.host }}
+#metadata_host = {{ controller.glance.host }}
+osapi_compute_workers=8
+
+#NoVNC
+vnc_enabled=true
+{%- if pillar.nova.compute is defined %}
+vncserver_listen={{ controller.bind.private_address }}
+vncserver_proxyclient_address={{ controller.bind.private_address }}
+{%- else %}
+vncserver_listen=0.0.0.0
+{%- endif %}
+novncproxy_base_url={{ controller.vncproxy_url }}/vnc_auto.html
+novncproxy_port={{ controller.bind.get('vncproxy_port', '6080') }}
+
+#metadata_listen_port = 8755
+service_neutron_metadata_proxy = True
+{%- if controller.get('networking', 'default') != "contrail" %}
+neutron_metadata_proxy_shared_secret={{ controller.metadata.password }}
+{%- endif %}
+
+allow_resize_to_same_host=True
+start_guests_on_host_boot=true
+
+rpc_cast_timeout = 30
+rpc_conn_pool_size = 300
+rpc_response_timeout = 3600
+rpc_thread_pool_size = 70
+report_interval = 5
+
+block_device_allocate_retries=600
+block_device_allocate_retries_interval=10
+
+[keystone_authtoken]
+signing_dirname=/tmp/keystone-signing-nova
+signing_dir=/tmp/keystone-signing-nova
+admin_tenant_name = {{ controller.identity.tenant }}
+admin_user = {{ controller.identity.user }}
+admin_password = {{ controller.identity.password }}
+auth_host = {{ controller.identity.host }}
+auth_port = {{ controller.identity.port }}
+auth_protocol=http
+auth_uri=http://{{ controller.identity.host }}:5000
+
+[conductor]
+workers = 8
+
+[database]
+idle_timeout = 180
+min_pool_size = 100
+max_pool_size = 700
+max_overflow = 100
+retry_interval = 5
+max_retries = -1
+db_max_retries = 3
+db_retry_interval = 1
+connection_debug = 10
+pool_timeout = 120
+connection = {{ controller.database.engine }}://{{ controller.database.user }}:{{ controller.database.password }}@{{ controller.database.host }}/{{ controller.database.name }}
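
The `networking` key is what switches this controller template between the
Contrail VIF driver and the stock Neutron API class in the conditional near
the top of the file. A hypothetical pillar toggle:

    nova:
      controller:
        networking: contrail   # any other value, or omitting the key,
                               # renders nova.network.neutronv2.api.API
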
diff --git a/nova/files/juno/nova-controller.conf.RedHat b/nova/files/juno/nova-controller.conf.RedHat
new file mode 120000
index 0000000..09c7524
--- /dev/null
+++ b/nova/files/juno/nova-controller.conf.RedHat
@@ -0,0 +1 @@
+nova-controller.conf.Debian
\ No newline at end of file
diff --git a/nova/files/juno/qemu.conf.Debian b/nova/files/juno/qemu.conf.Debian
new file mode 100644
index 0000000..5c5722b
--- /dev/null
+++ b/nova/files/juno/qemu.conf.Debian
@@ -0,0 +1,479 @@
+{%- from "nova/map.jinja" import compute with context %}
+# Master configuration file for the QEMU driver.
+# All settings described here are optional - if omitted, sensible
+# defaults are used.
+
+# VNC is configured to listen on 127.0.0.1 by default.
+# To make it listen on all public interfaces, uncomment
+# this next option.
+#
+# NB, strong recommendation to enable TLS + x509 certificate
+# verification when allowing public access
+#
+#vnc_listen = "0.0.0.0"
+
+# Enable this option to have VNC served over an automatically created
+# unix socket. This prevents unprivileged access from users on the
+# host machine, though most VNC clients do not support it.
+#
+# This will only be enabled for VNC configurations that do not have
+# a hardcoded 'listen' or 'socket' value. This setting takes precedence
+# over vnc_listen.
+#
+#vnc_auto_unix_socket = 1
+
+# Enable use of TLS encryption on the VNC server. This requires
+# a VNC client which supports the VeNCrypt protocol extension.
+# Examples include vinagre, virt-viewer, virt-manager and vencrypt
+# itself. UltraVNC, RealVNC, TightVNC do not support this
+#
+# It is necessary to setup CA and issue a server certificate
+# before enabling this.
+#
+#vnc_tls = 1
+
+
+# Use of TLS requires that x509 certificates be issued. The
+# default is to keep them in /etc/pki/libvirt-vnc. This directory
+# must contain
+#
+#  ca-cert.pem - the CA master certificate
+#  server-cert.pem - the server certificate signed with ca-cert.pem
+#  server-key.pem  - the server private key
+#
+# This option allows the certificate directory to be changed
+#
+#vnc_tls_x509_cert_dir = "/etc/pki/libvirt-vnc"
+
+
+# The default TLS configuration only uses certificates for the server
+# allowing the client to verify the server's identity and establish
+# an encrypted channel.
+#
+# It is possible to use x509 certificates for authentication too, by
+# issuing a x509 certificate to every client who needs to connect.
+#
+# Enabling this option will reject any client who does not have a
+# certificate signed by the CA in /etc/pki/libvirt-vnc/ca-cert.pem
+#
+#vnc_tls_x509_verify = 1
+
+
+# The default VNC password. Only 8 bytes are significant for
+# VNC passwords. This parameter is only used if the per-domain
+# XML config does not already provide a password. To allow
+# access without passwords, leave this commented out. An empty
+# string will still enable passwords, but be rejected by QEMU,
+# effectively preventing any use of VNC. Obviously change this
+# example here before you set this.
+#
+#vnc_password = "XYZ12345"
+
+
+# Enable use of SASL encryption on the VNC server. This requires
+# a VNC client which supports the SASL protocol extension.
+# Examples include vinagre, virt-viewer and virt-manager
+# itself. UltraVNC, RealVNC, TightVNC do not support this
+#
+# It is necessary to configure /etc/sasl2/qemu.conf to choose
+# the desired SASL plugin (eg, GSSPI for Kerberos)
+#
+#vnc_sasl = 1
+
+
+# The default SASL configuration file is located in /etc/sasl2/
+# When running libvirtd unprivileged, it may be desirable to
+# override the configs in this location. Set this parameter to
+# point to the directory, and create a qemu.conf in that location
+#
+#vnc_sasl_dir = "/some/directory/sasl2"
+
+
+# QEMU implements an extension for providing audio over a VNC connection,
+# though if your VNC client does not support it, your only chance for getting
+# sound output is through regular audio backends. By default, libvirt will
+# disable all QEMU sound backends if using VNC, since they can cause
+# permissions issues. Enabling this option will make libvirtd honor the
+# QEMU_AUDIO_DRV environment variable when using VNC.
+#
+#vnc_allow_host_audio = 0
+
+
+
+# SPICE is configured to listen on 127.0.0.1 by default.
+# To make it listen on all public interfaces, uncomment
+# this next option.
+#
+# NB, strong recommendation to enable TLS + x509 certificate
+# verification when allowing public access
+#
+#spice_listen = "0.0.0.0"
+
+
+# Enable use of TLS encryption on the SPICE server.
+#
+# It is necessary to setup CA and issue a server certificate
+# before enabling this.
+#
+#spice_tls = 1
+
+
+# Use of TLS requires that x509 certificates be issued. The
+# default is to keep them in /etc/pki/libvirt-spice. This directory
+# must contain
+#
+#  ca-cert.pem - the CA master certificate
+#  server-cert.pem - the server certificate signed with ca-cert.pem
+#  server-key.pem  - the server private key
+#
+# This option allows the certificate directory to be changed.
+#
+#spice_tls_x509_cert_dir = "/etc/pki/libvirt-spice"
+
+
+# The default SPICE password. This parameter is only used if the
+# per-domain XML config does not already provide a password. To
+# allow access without passwords, leave this commented out. An
+# empty string will still enable passwords, but be rejected by
+# QEMU, effectively preventing any use of SPICE. Obviously change
+# this example here before you set this.
+#
+#spice_password = "XYZ12345"
+
+
+# Enable use of SASL encryption on the SPICE server. This requires
+# a SPICE client which supports the SASL protocol extension.
+#
+# It is necessary to configure /etc/sasl2/qemu.conf to choose
+# the desired SASL plugin (eg, GSSPI for Kerberos)
+#
+#spice_sasl = 1
+
+# The default SASL configuration file is located in /etc/sasl2/
+# When running libvirtd unprivileged, it may be desirable to
+# override the configs in this location. Set this parameter to
+# point to the directory, and create a qemu.conf in that location
+#
+#spice_sasl_dir = "/some/directory/sasl2"
+
+
+# By default, if no graphical front end is configured, libvirt will disable
+# QEMU audio output since directly talking to alsa/pulseaudio may not work
+# with various security settings. If you know what you're doing, enable
+# the setting below and libvirt will pass through the QEMU_AUDIO_DRV
+# environment variable when using nographics.
+#
+#nographics_allow_host_audio = 1
+
+
+# Override the port for creating both VNC and SPICE sessions (min).
+# This defaults to 5900 and increases for consecutive sessions
+# or when ports are occupied, until it hits the maximum.
+#
+# Minimum must be greater than or equal to 5900, as a lower number would
+# result in a negative VNC display number.
+#
+# Maximum must be less than 65536, because higher numbers do not make
+# sense as a port number.
+#
+#remote_display_port_min = 5900
+#remote_display_port_max = 65535
+
+# VNC WebSocket port policies, same rules apply as with remote display
+# ports.  VNC WebSockets use similar display <-> port mappings, with
+# the exception being that ports start from 5700 instead of 5900.
+#
+#remote_websocket_port_min = 5700
+#remote_websocket_port_max = 65535
+
+# The default security driver is SELinux. If SELinux is disabled
+# on the host, then the security driver will automatically disable
+# itself. If you wish to disable QEMU SELinux security driver while
+# leaving SELinux enabled for the host in general, then set this
+# to 'none' instead. It's also possible to use more than one security
+# driver at the same time; for this, use a list of names separated by
+# commas and delimited by square brackets. For example:
+#
+#       security_driver = [ "selinux", "apparmor" ]
+#
+# Notes: The DAC security driver is always enabled; as a result, the
+# value of security_driver cannot contain "dac".  The value "none" is
+# a special value; security_driver can be set to that value in
+# isolation, but it cannot appear in a list of drivers.
+#
+#security_driver = "selinux"
+
+# If set to non-zero, then the default security labeling
+# will make guests confined. If set to zero, then guests
+# will be unconfined by default. Defaults to 1.
+#security_default_confined = 1
+
+# If set to non-zero, then attempts to create unconfined
+# guests will be blocked. Defaults to 0.
+#security_require_confined = 1
+
+# The user for QEMU processes run by the system instance. It can be
+# specified as a user name or as a user id. The qemu driver will try to
+# parse this value first as a name and then, if the name doesn't exist,
+# as a user id.
+#
+# Since a sequence of digits is a valid user name, a leading plus sign
+# can be used to ensure that a user id will not be interpreted as a user
+# name.
+#
+# Some examples of valid values are:
+#
+#       user = "qemu"   # A user named "qemu"
+#       user = "+0"     # Super user (uid=0)
+#       user = "100"    # A user named "100" or a user with uid=100
+#
+#user = "root"
+
+# The group for QEMU processes run by the system instance. It can be
+# specified in a similar way to user.
+#group = "root"
+
+# Whether libvirt should dynamically change file ownership
+# to match the configured user/group above. Defaults to 1.
+# Set to 0 to disable file ownership changes.
+#dynamic_ownership = 1
+
+
+# What cgroup controllers to make use of with QEMU guests
+#
+#  - 'cpu' - use for scheduler tunables
+#  - 'devices' - use for device whitelisting
+#  - 'memory' - use for memory tunables
+#  - 'blkio' - use for block devices I/O tunables
+#  - 'cpuset' - use for CPUs and memory nodes
+#  - 'cpuacct' - use for CPUs statistics.
+#
+# NB, even if configured here, they won't be used unless
+# the administrator has mounted cgroups, e.g.:
+#
+#  mkdir /dev/cgroup
+#  mount -t cgroup -o devices,cpu,memory,blkio,cpuset none /dev/cgroup
+#
+# They can be mounted anywhere, and different controllers
+# can be mounted in different locations. libvirt will detect
+# where they are located.
+#
+#cgroup_controllers = [ "cpu", "devices", "memory", "blkio", "cpuset", "cpuacct" ]
+
+# This is the basic set of devices allowed / required by
+# all virtual machines.
+#
+# As well as this, any configured block backed disks,
+# all sound devices, and all PTY devices are allowed.
+#
+# This will only need setting if newer QEMU suddenly
+# wants some device we don't already know about.
+#
+#cgroup_device_acl = [
+#    "/dev/null", "/dev/full", "/dev/zero",
+#    "/dev/random", "/dev/urandom",
+#    "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
+#    "/dev/rtc","/dev/hpet", "/dev/vfio/vfio"
+#]
+
+
+# The default format for Qemu/KVM guest save images is raw; that is, the
+# memory from the domain is dumped out directly to a file.  If you have
+# guests with a large amount of memory, however, this can take up quite
+# a bit of space.  If you would like to compress the images while they
+# are being saved to disk, you can also set "lzop", "gzip", "bzip2", or "xz"
+# for save_image_format.  Note that this means you slow down the process of
+# saving a domain in order to save disk space; the list above is in descending
+# order by performance and ascending order by compression ratio.
+#
+# save_image_format is used when you use 'virsh save' or 'virsh managedsave'
+# at scheduled saving, and it is an error if the specified save_image_format
+# is not valid, or the requested compression program can't be found.
+#
+# dump_image_format is used when you use 'virsh dump' at emergency
+# crashdump, and if the specified dump_image_format is not valid, or
+# the requested compression program can't be found, this falls
+# back to "raw" compression.
+#
+# snapshot_image_format specifies the compression algorithm of the memory save
+# image when an external snapshot of a domain is taken. This does not apply
+# on disk image format. It is an error if the specified format isn't valid,
+# or the requested compression program can't be found.
+#
+#save_image_format = "raw"
+#dump_image_format = "raw"
+#snapshot_image_format = "raw"
+
+# When a domain is configured to be auto-dumped when libvirtd receives a
+# watchdog event from qemu guest, libvirtd will save dump files in directory
+# specified by auto_dump_path. Default value is /var/lib/libvirt/qemu/dump
+#
+#auto_dump_path = "/var/lib/libvirt/qemu/dump"
+
+# When a domain is configured to be auto-dumped, enabling this flag
+# has the same effect as using the VIR_DUMP_BYPASS_CACHE flag with the
+# virDomainCoreDump API.  That is, the system will avoid using the
+# file system cache while writing the dump file, but may cause
+# slower operation.
+#
+#auto_dump_bypass_cache = 0
+
+# When a domain is configured to be auto-started, enabling this flag
+# has the same effect as using the VIR_DOMAIN_START_BYPASS_CACHE flag
+# with the virDomainCreateWithFlags API.  That is, the system will
+# avoid using the file system cache when restoring any managed state
+# file, but may cause slower operation.
+#
+#auto_start_bypass_cache = 0
+
+# If provided by the host and a hugetlbfs mount point is configured,
+# a guest may request huge page backing.  When this mount point is
+# unspecified here, determination of a host mount point in /proc/mounts
+# will be attempted.  Specifying an explicit mount overrides detection
+# of the same in /proc/mounts.  Setting the mount point to "" will
+# disable guest hugepage backing.
+#
+# NB, within this mount point, guests will create memory backing files
+# in a location of $MOUNTPOINT/libvirt/qemu
+#
+#hugetlbfs_mount = "/dev/hugepages"
+
+
+# Path to the setuid helper for creating tap devices.  This executable
+# is used to create <source type='bridge'> interfaces when libvirtd is
+# running unprivileged.  libvirt invokes the helper directly, instead
+# of using "-netdev bridge", for security reasons.
+#bridge_helper = "/usr/libexec/qemu-bridge-helper"
+
+
+
+# If clear_emulator_capabilities is enabled, libvirt will drop all
+# privileged capabilities of the QEmu/KVM emulator. This is enabled by
+# default.
+#
+# Warning: Disabling this option means that a compromised guest can
+# exploit the privileges and possibly do damage to the host.
+#
+#clear_emulator_capabilities = 1
+
+
+# If enabled, libvirt will have QEMU set its process name to
+# "qemu:VM_NAME", where VM_NAME is the name of the VM. The QEMU
+# process will appear as "qemu:VM_NAME" in process listings and
+# other system monitoring tools. By default, QEMU does not set
+# its process title, so the complete QEMU command (emulator and
+# its arguments) appear in process listings.
+#
+#set_process_name = 1
+
+
+# If max_processes is set to a positive integer, libvirt will use
+# it to set the maximum number of processes that can be run by the qemu
+# user. This can be used to override the default value set by the host OS.
+# The same applies to max_files which sets the limit on the maximum
+# number of opened files.
+#
+{%- if compute.qemu is defined %}
+
+{%- if compute.qemu.max_processes is defined %}
+max_processes = {{ compute.qemu.max_processes }}
+{%- endif %}
+
+{%- if compute.qemu.max_files is defined %}
+max_files = {{ compute.qemu.max_files }}
+{%- endif %}
+
+{%- endif %}
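+# Both limits above are rendered only when the corresponding pillar keys
+# exist, e.g. a hypothetical nova:compute:qemu pillar with
+# max_processes: 4096 and max_files: 1024.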
+
+# mac_filter enables MAC addressed based filtering on bridge ports.
+# This currently requires ebtables to be installed.
+#
+#mac_filter = 1
+
+
+# By default, PCI devices below a non-ACS switch are not allowed to be assigned
+# to guests. By setting relaxed_acs_check to 1, such devices will be allowed to
+# be assigned to guests.
+#
+#relaxed_acs_check = 1
+
+
+# If allow_disk_format_probing is enabled, libvirt will probe disk
+# images to attempt to identify their format, when not otherwise
+# specified in the XML. This is disabled by default.
+#
+# WARNING: Enabling probing is a security hole in almost all
+# deployments. It is strongly recommended that users update their
+# guest XML <disk> elements to include  <driver type='XXXX'/>
+# elements instead of enabling this option.
+#
+#allow_disk_format_probing = 1
+
+
+# To enable 'Sanlock' project based locking of the file
+# content (to prevent two VMs writing to the same
+# disk), uncomment this
+#
+#lock_manager = "sanlock"
+
+
+
+# Set the limit on the maximum number of API calls queued on one domain.
+# All other APIs over this threshold will fail to acquire the job lock.
+# Setting it to zero turns this feature off.
+# Note that the job lock is per domain.
+#
+#max_queued = 0
+
+###################################################################
+# Keepalive protocol:
+# This allows qemu driver to detect broken connections to remote
+# libvirtd during peer-to-peer migration.  A keepalive message is
+# sent to the daemon after keepalive_interval seconds of inactivity
+# to check if the daemon is still responding; keepalive_count is a
+# maximum number of keepalive messages that are allowed to be sent
+# to the daemon without getting any response before the connection
+# is considered broken.  In other words, the connection is
+# automatically closed approximately after
+# keepalive_interval * (keepalive_count + 1) seconds since the last
+# message received from the daemon.  If keepalive_interval is set to
+# -1, qemu driver will not send keepalive requests during
+# peer-to-peer migration; however, the remote libvirtd can still
+# send them and source libvirtd will send responses.  When
+# keepalive_count is set to 0, connections will be automatically
+# closed after keepalive_interval seconds of inactivity without
+# sending any keepalive messages.
+#
+#keepalive_interval = 5
+#keepalive_count = 5
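+#
+# With the default values above, a broken connection is thus detected
+# after approximately 5 * (5 + 1) = 30 seconds.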
+
+
+
+# Use seccomp syscall whitelisting in QEMU.
+# 1 = on, 0 = off, -1 = use QEMU default
+# Defaults to -1.
+#
+#seccomp_sandbox = 1
+
+
+
+# Override the listen address for all incoming migrations. Defaults to
+# 0.0.0.0, or :: if both host and qemu are capable of IPv6.
+#migration_address = "127.0.0.1"
+
+
+# Override the port range used for incoming migrations.
+#
+# Minimum must be greater than 0, however when QEMU is not running as root,
+# setting the minimum to be lower than 1024 will not work.
+#
+# Maximum must not be greater than 65535.
+#
+#migration_port_min = 49152
+#migration_port_max = 49215
+cgroup_device_acl = [
+    "/dev/null", "/dev/full", "/dev/zero",
+    "/dev/random", "/dev/urandom",
+    "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
+    "/dev/rtc", "/dev/hpet","/dev/net/tun",
+]
diff --git a/nova/files/juno/qemu.conf.RedHat b/nova/files/juno/qemu.conf.RedHat
new file mode 120000
index 0000000..1d23f19
--- /dev/null
+++ b/nova/files/juno/qemu.conf.RedHat
@@ -0,0 +1 @@
+qemu.conf.Debian
\ No newline at end of file
diff --git a/nova/files/kilo/api-paste.ini.Debian b/nova/files/kilo/api-paste.ini.Debian
new file mode 100644
index 0000000..1a87f0c
--- /dev/null
+++ b/nova/files/kilo/api-paste.ini.Debian
@@ -0,0 +1,134 @@
+############
+# Metadata #
+############
+[composite:metadata]
+use = egg:Paste#urlmap
+/: meta
+
+[pipeline:meta]
+pipeline = ec2faultwrap logrequest metaapp
+
+[app:metaapp]
+paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory
+
+#######
+# EC2 #
+#######
+
+[composite:ec2]
+use = egg:Paste#urlmap
+/: ec2cloud
+
+[composite:ec2cloud]
+use = call:nova.api.auth:pipeline_factory
+noauth = ec2faultwrap logrequest ec2noauth cloudrequest validator ec2executor
+noauth2 = ec2faultwrap logrequest ec2noauth cloudrequest validator ec2executor
+keystone = ec2faultwrap logrequest ec2keystoneauth cloudrequest validator ec2executor
+
+[filter:ec2faultwrap]
+paste.filter_factory = nova.api.ec2:FaultWrapper.factory
+
+[filter:logrequest]
+paste.filter_factory = nova.api.ec2:RequestLogging.factory
+
+[filter:ec2lockout]
+paste.filter_factory = nova.api.ec2:Lockout.factory
+
+[filter:ec2keystoneauth]
+paste.filter_factory = nova.api.ec2:EC2KeystoneAuth.factory
+
+[filter:ec2noauth]
+paste.filter_factory = nova.api.ec2:NoAuth.factory
+
+[filter:cloudrequest]
+controller = nova.api.ec2.cloud.CloudController
+paste.filter_factory = nova.api.ec2:Requestify.factory
+
+[filter:authorizer]
+paste.filter_factory = nova.api.ec2:Authorizer.factory
+
+[filter:validator]
+paste.filter_factory = nova.api.ec2:Validator.factory
+
+[app:ec2executor]
+paste.app_factory = nova.api.ec2:Executor.factory
+
+#############
+# OpenStack #
+#############
+
+[composite:osapi_compute]
+use = call:nova.api.openstack.urlmap:urlmap_factory
+/: oscomputeversions
+/v1.1: openstack_compute_api_v2
+/v2: openstack_compute_api_v2
+/v2.1: openstack_compute_api_v21
+/v3: openstack_compute_api_v3
+
+[composite:openstack_compute_api_v2]
+use = call:nova.api.auth:pipeline_factory
+noauth = compute_req_id faultwrap sizelimit noauth ratelimit osapi_compute_app_v2
+noauth2 = compute_req_id faultwrap sizelimit noauth2 ratelimit osapi_compute_app_v2
+keystone = compute_req_id faultwrap sizelimit authtoken keystonecontext ratelimit osapi_compute_app_v2
+keystone_nolimit = compute_req_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v2
+
+[composite:openstack_compute_api_v21]
+use = call:nova.api.auth:pipeline_factory_v21
+noauth = compute_req_id faultwrap sizelimit noauth osapi_compute_app_v21
+noauth2 = compute_req_id faultwrap sizelimit noauth2 osapi_compute_app_v21
+keystone = compute_req_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v21
+
+[composite:openstack_compute_api_v3]
+use = call:nova.api.auth:pipeline_factory_v21
+noauth = request_id faultwrap sizelimit noauth_v3 osapi_compute_app_v3
+noauth2 = request_id faultwrap sizelimit noauth_v3 osapi_compute_app_v3
+keystone = request_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v3
+
+[filter:request_id]
+paste.filter_factory = oslo.middleware:RequestId.factory
+
+[filter:compute_req_id]
+paste.filter_factory = nova.api.compute_req_id:ComputeReqIdMiddleware.factory
+
+[filter:faultwrap]
+paste.filter_factory = nova.api.openstack:FaultWrapper.factory
+
+[filter:noauth]
+paste.filter_factory = nova.api.openstack.auth:NoAuthMiddlewareOld.factory
+
+[filter:noauth2]
+paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
+
+[filter:noauth_v3]
+paste.filter_factory = nova.api.openstack.auth:NoAuthMiddlewareV3.factory
+
+[filter:ratelimit]
+paste.filter_factory = nova.api.openstack.compute.limits:RateLimitingMiddleware.factory
+
+[filter:sizelimit]
+paste.filter_factory = oslo.middleware:RequestBodySizeLimiter.factory
+
+[app:osapi_compute_app_v2]
+paste.app_factory = nova.api.openstack.compute:APIRouter.factory
+
+[app:osapi_compute_app_v21]
+paste.app_factory = nova.api.openstack.compute:APIRouterV21.factory
+
+[app:osapi_compute_app_v3]
+paste.app_factory = nova.api.openstack.compute:APIRouterV3.factory
+
+[pipeline:oscomputeversions]
+pipeline = faultwrap oscomputeversionapp
+
+[app:oscomputeversionapp]
+paste.app_factory = nova.api.openstack.compute.versions:Versions.factory
+
+##########
+# Shared #
+##########
+
+[filter:keystonecontext]
+paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
diff --git a/nova/files/kilo/api-paste.ini.RedHat b/nova/files/kilo/api-paste.ini.RedHat
new file mode 120000
index 0000000..08fd76a
--- /dev/null
+++ b/nova/files/kilo/api-paste.ini.RedHat
@@ -0,0 +1 @@
+api-paste.ini.Debian
\ No newline at end of file
diff --git a/nova/files/kilo/libvirt.Debian b/nova/files/kilo/libvirt.Debian
new file mode 100644
index 0000000..1704dc4
--- /dev/null
+++ b/nova/files/kilo/libvirt.Debian
@@ -0,0 +1,12 @@
+# Defaults for libvirt-bin initscript (/etc/init.d/libvirt-bin)
+# This is a POSIX shell fragment
+
+# Start libvirtd to handle qemu/kvm:
+start_libvirtd="yes"
+
+# options passed to libvirtd, add "-l" to listen on tcp
+libvirtd_opts="-d -l"
+LIBVIRTD_ARGS="--listen"
+
+# pass in location of kerberos keytab
+#export KRB5_KTNAME=/etc/libvirt/libvirt.keytab
\ No newline at end of file
diff --git a/nova/files/kilo/libvirt.RedHat b/nova/files/kilo/libvirt.RedHat
new file mode 100644
index 0000000..0d42d81
--- /dev/null
+++ b/nova/files/kilo/libvirt.RedHat
@@ -0,0 +1,10 @@
+# Defaults for libvirt-bin initscript (/etc/init.d/libvirt-bin)
+# This is a POSIX shell fragment
+
+# Start libvirtd to handle qemu/kvm:
+start_libvirtd="yes"
+
+# options passed to libvirtd, add "-l" to listen on tcp
+libvirtd_opts="-d -l"
+# pass in location of kerberos keytab
+#export KRB5_KTNAME=/etc/libvirt/libvirt.keytab
diff --git a/nova/files/kilo/libvirtd.conf.Debian b/nova/files/kilo/libvirtd.conf.Debian
new file mode 100644
index 0000000..8333dcb
--- /dev/null
+++ b/nova/files/kilo/libvirtd.conf.Debian
@@ -0,0 +1,402 @@
+# Master libvirt daemon configuration file
+#
+# For further information consult http://libvirt.org/format.html
+#
+# NOTE: the tests/daemon-conf regression test script requires
+# that each "PARAMETER = VALUE" line in this file have the parameter
+# name just after a leading "#".
+
+#################################################################
+#
+# Network connectivity controls
+#
+
+# Flag listening for secure TLS connections on the public TCP/IP port.
+# NB, must pass the --listen flag to the libvirtd process for this to
+# have any effect.
+#
+# It is necessary to setup a CA and issue server certificates before
+# using this capability.
+#
+# This is enabled by default, uncomment this to disable it
+#listen_tls = 0
+
+
+listen_tls = 0
+listen_tcp = 1
+auth_tcp = "none"
+
+
+# Listen for unencrypted TCP connections on the public TCP/IP port.
+# NB, must pass the --listen flag to the libvirtd process for this to
+# have any effect.
+#
+# Using the TCP socket requires SASL authentication by default. Only
+# SASL mechanisms which support data encryption are allowed. These are
+# DIGEST_MD5 and GSSAPI (Kerberos5)
+#
+# This is disabled by default, uncomment this to enable it.
+#listen_tcp = 1
+
+
+
+# Override the port for accepting secure TLS connections
+# This can be a port number, or service name
+#
+#tls_port = "16514"
+
+# Override the port for accepting insecure TCP connections
+# This can be a port number, or service name
+#
+#tcp_port = "16509"
+
+
+# Override the default configuration which binds to all network
+# interfaces. This can be a numeric IPv4/6 address, or hostname
+#
+#listen_addr = "192.168.0.1"
+
+
+# Flag toggling mDNS advertisement of the libvirt service.
+#
+# Alternatively can disable for all services on a host by
+# stopping the Avahi daemon
+#
+# This is disabled by default, uncomment this to enable it
+#mdns_adv = 1
+
+# Override the default mDNS advertisement name. This must be
+# unique on the immediate broadcast network.
+#
+# The default is "Virtualization Host HOSTNAME", where HOSTNAME
+# is substituted for the short hostname of the machine (without domain)
+#
+#mdns_name = "Virtualization Host Joe Demo"
+
+
+#################################################################
+#
+# UNIX socket access controls
+#
+
+# Set the UNIX domain socket group ownership. This can be used to
+# allow a 'trusted' set of users access to management capabilities
+# without becoming root.
+#
+# This is restricted to 'root' by default.
+unix_sock_group = "libvirtd"
+
+# Set the UNIX socket permissions for the R/O socket. This is used
+# for monitoring VM status only
+#
+# Default allows any user. If setting group ownership, you may want to
+# restrict this to:
+#unix_sock_ro_perms = "0777"
+
+# Set the UNIX socket permissions for the R/W socket. This is used
+# for full management of VMs
+#
+# Default allows only root. If PolicyKit is enabled on the socket,
+# the default will change to allow everyone (eg, 0777)
+#
+# If not using PolicyKit and setting group ownership for access
+# control then you may want to relax this to:
+unix_sock_rw_perms = "0770"
+
+# Set the name of the directory in which sockets will be found/created.
+#unix_sock_dir = "/var/run/libvirt"
+
+#################################################################
+#
+# Authentication.
+#
+#  - none: do not perform auth checks. If you can connect to the
+#          socket you are allowed. This is suitable if there are
+#          restrictions on connecting to the socket (eg, UNIX
+#          socket permissions), or if there is a lower layer in
+#          the network providing auth (eg, TLS/x509 certificates)
+#
+#  - sasl: use SASL infrastructure. The actual auth scheme is then
+#          controlled from /etc/sasl2/libvirt.conf. For the TCP
+#          socket only GSSAPI & DIGEST-MD5 mechanisms will be used.
+#          For non-TCP or TLS sockets,  any scheme is allowed.
+#
+#  - polkit: use PolicyKit to authenticate. This is only suitable
+#            for use on the UNIX sockets. The default policy will
+#            require a user to supply their own password to gain
+#            full read/write access (aka sudo like), while anyone
+#            is allowed read/only access.
+#
+# Set an authentication scheme for UNIX read-only sockets
+# By default socket permissions allow anyone to connect
+#
+# To restrict monitoring of domains you may wish to enable
+# an authentication mechanism here
+auth_unix_ro = "none"
+
+# Set an authentication scheme for UNIX read-write sockets
+# By default socket permissions only allow root. If PolicyKit
+# support was compiled into libvirt, the default will be to
+# use 'polkit' auth.
+#
+# If the unix_sock_rw_perms are changed you may wish to enable
+# an authentication mechanism here
+auth_unix_rw = "none"
+
+# Change the authentication scheme for TCP sockets.
+#
+# If you don't enable SASL, then all TCP traffic is cleartext.
+# Don't do this outside of a dev/test scenario. For real world
+# use, always enable SASL and use the GSSAPI or DIGEST-MD5
+# mechanism in /etc/sasl2/libvirt.conf
+#auth_tcp = "sasl"
+#auth_tcp = "none"
+
+# Change the authentication scheme for TLS sockets.
+#
+# TLS sockets already have encryption provided by the TLS
+# layer, and limited authentication is done by certificates
+#
+# It is possible to make use of any SASL authentication
+# mechanism as well, by using 'sasl' for this option
+#auth_tls = "none"
+
+
+
+#################################################################
+#
+# TLS x509 certificate configuration
+#
+
+
+# Override the default server key file path
+#
+#key_file = "/etc/pki/libvirt/private/serverkey.pem"
+
+# Override the default server certificate file path
+#
+#cert_file = "/etc/pki/libvirt/servercert.pem"
+
+# Override the default CA certificate path
+#
+#ca_file = "/etc/pki/CA/cacert.pem"
+
+# Specify a certificate revocation list.
+#
+# Defaults to not using a CRL, uncomment to enable it
+#crl_file = "/etc/pki/CA/crl.pem"
+
+
+
+#################################################################
+#
+# Authorization controls
+#
+
+
+# Flag to disable verification of our own server certificates
+#
+# When libvirtd starts it performs some sanity checks against
+# its own certificates.
+#
+# Default is to always run sanity checks. Uncommenting this
+# will disable sanity checks, which is not a good idea
+#tls_no_sanity_certificate = 1
+
+# Flag to disable verification of client certificates
+#
+# Client certificate verification is the primary authentication mechanism.
+# Any client which does not present a certificate signed by the CA
+# will be rejected.
+#
+# Default is to always verify. Uncommenting this will disable
+# verification - make sure an IP whitelist is set
+#tls_no_verify_certificate = 1
+
+
+# A whitelist of allowed x509  Distinguished Names
+# This list may contain wildcards such as
+#
+#    "C=GB,ST=London,L=London,O=Red Hat,CN=*"
+#
+# See the POSIX fnmatch function for the format of the wildcards.
+#
+# NB If this is an empty list, no client can connect, so comment out
+# entirely rather than using an empty list to disable these checks
+#
+# By default, no DNs are checked
+#tls_allowed_dn_list = ["DN1", "DN2"]
+
+
+# A whitelist of allowed SASL usernames. The format for usernames
+# depends on the SASL authentication mechanism. Kerberos usernames
+# look like username@REALM
+#
+# This list may contain wildcards such as
+#
+#    "*@EXAMPLE.COM"
+#
+# See the POSIX fnmatch function for the format of the wildcards.
+#
+# NB If this is an empty list, no client can connect, so comment out
+# entirely rather than using an empty list to disable these checks
+#
+# By default, no usernames are checked
+#sasl_allowed_username_list = ["joe@EXAMPLE.COM", "fred@EXAMPLE.COM" ]
+
+
+
+#################################################################
+#
+# Processing controls
+#
+
+# The maximum number of concurrent client connections to allow
+# over all sockets combined.
+#max_clients = 20
+
+
+# The minimum limit sets the number of workers to start up
+# initially. If the number of active clients exceeds this,
+# then more threads are spawned, up to the max_workers limit.
+# Typically you'd want max_workers to equal the maximum number
+# of clients allowed
+#min_workers = 5
+#max_workers = 20
+
+
+# The number of priority workers. If all workers from the pool
+# above are stuck, calls marked as high priority
+# (notably domainDestroy) can be executed in this pool.
+#prio_workers = 5
+
+# Total global limit on concurrent RPC calls. Should be
+# at least as large as max_workers. Beyond this, RPC requests
+# will be read into memory and queued. This directly impacts
+# memory usage; currently each request requires 256 KB of
+# memory, so by default up to 5 MB of memory is used
+#
+# XXX this isn't actually enforced yet, only the per-client
+# limit is used so far
+#max_requests = 20
+
+# Limit on concurrent requests from a single client
+# connection. To avoid one client monopolizing the server
+# this should be a small fraction of the global max_requests
+# and max_workers parameter
+#max_client_requests = 5
+
+#################################################################
+#
+# Logging controls
+#
+
+# Logging level: 4 errors, 3 warnings, 2 information, 1 debug
+# basically 1 will log everything possible
+#log_level = 3
+
+# Logging filters:
+# A filter allows selecting a different logging level for a given category
+# of logs
+# The format for a filter is one of:
+#    x:name
+#    x:+name
+#      where name is a string which is matched against source file name,
+#      e.g., "remote", "qemu", or "util/json", the optional "+" prefix
+#      tells libvirt to log stack trace for each message matching name,
+#      and x is the minimal level where matching messages should be logged:
+#    1: DEBUG
+#    2: INFO
+#    3: WARNING
+#    4: ERROR
+#
+# Multiple filters can be defined in a single @filters; they just need to be
+# separated by spaces.
+#
+# e.g. to only get warning or errors from the remote layer and only errors
+# from the event layer:
+#log_filters="3:remote 4:event"
+
+# Logging outputs:
+# An output is one of the places to save logging information
+# The format for an output can be:
+#    x:stderr
+#      output goes to stderr
+#    x:syslog:name
+#      use syslog for the output and use the given name as the ident
+#    x:file:file_path
+#      output to a file, with the given filepath
+# In all cases the x prefix is the minimal level, acting as a filter
+#    1: DEBUG
+#    2: INFO
+#    3: WARNING
+#    4: ERROR
+#
+# Multiple outputs can be defined; they just need to be separated by spaces.
+# e.g. to log all warnings and errors to syslog under the libvirtd ident:
+#log_outputs="3:syslog:libvirtd"
+#
+
+# Log debug buffer size: default 64
+# The daemon keeps an internal debug log buffer which will be dumped in case
+# of a crash or upon receiving a SIGUSR2 signal. This setting allows overriding
+# the default buffer size in kilobytes.
+# If the value is 0 or less, the debug log buffer is deactivated
+#log_buffer_size = 64
+
+
+##################################################################
+#
+# Auditing
+#
+# This setting allows usage of the auditing subsystem to be altered:
+#
+#   audit_level == 0  -> disable all auditing
+#   audit_level == 1  -> enable auditing, only if enabled on host (default)
+#   audit_level == 2  -> enable auditing, and exit if disabled on host
+#
+#audit_level = 2
+#
+# If set to 1, then audit messages will also be sent
+# via libvirt logging infrastructure. Defaults to 0
+#
+#audit_logging = 1
+
+###################################################################
+# UUID of the host:
+# Provide the UUID of the host here in case the command
+# 'dmidecode -s system-uuid' does not provide a valid uuid. In case
+# 'dmidecode' does not provide a valid UUID and none is provided here, a
+# temporary UUID will be generated.
+# Keep the format of the example UUID below. UUID must not have all digits
+# be the same.
+
+# NB This default all-zeros UUID will not work. Replace
+# it with the output of the 'uuidgen' command and then
+# uncomment this entry
+#host_uuid = "00000000-0000-0000-0000-000000000000"
+
+###################################################################
+# Keepalive protocol:
+# This allows libvirtd to detect broken client connections or even
+# dead client.  A keepalive message is sent to a client after
+# keepalive_interval seconds of inactivity to check if the client is
+# still responding; keepalive_count is a maximum number of keepalive
+# messages that are allowed to be sent to the client without getting
+# any response before the connection is considered broken.  In other
+# words, the connection is automatically closed approximately after
+# keepalive_interval * (keepalive_count + 1) seconds since the last
+# message received from the client.  If keepalive_interval is set to
+# -1, libvirtd will never send keepalive requests; however clients
+# can still send them and the daemon will send responses.  When
+# keepalive_count is set to 0, connections will be automatically
+# closed after keepalive_interval seconds of inactivity without
+# sending any keepalive messages.
+#
+#keepalive_interval = 5
+#keepalive_count = 5
+#
+# If set to 1, libvirtd will refuse to talk to clients that do not
+# support keepalive protocol.  Defaults to 0.
+#
+#keepalive_required = 1
\ No newline at end of file
diff --git a/nova/files/kilo/libvirtd.conf.RedHat b/nova/files/kilo/libvirtd.conf.RedHat
new file mode 120000
index 0000000..2a7b101
--- /dev/null
+++ b/nova/files/kilo/libvirtd.conf.RedHat
@@ -0,0 +1 @@
+libvirtd.conf.Debian
\ No newline at end of file
diff --git a/nova/files/kilo/nova-compute.conf.Debian b/nova/files/kilo/nova-compute.conf.Debian
new file mode 100644
index 0000000..9c4a352
--- /dev/null
+++ b/nova/files/kilo/nova-compute.conf.Debian
@@ -0,0 +1,172 @@
+{%- from "nova/map.jinja" import compute with context %}
+
+[DEFAULT]
+dhcpbridge_flagfile=/etc/nova/nova.conf
+dhcpbridge=/usr/bin/nova-dhcpbridge
+logdir=/var/log/nova
+state_path=/var/lib/nova
+lock_path=/var/lock/nova
+force_dhcp_release=True
+iscsi_helper=tgtadm
+libvirt_use_virtio_for_bridges=True
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+verbose=True
+ec2_private_dns_show_ip=False
+api_paste_config=/etc/nova/api-paste.ini
+volumes_path=/var/lib/nova/volumes
+enabled_apis=ec2,osapi_compute,metadata
+
+{%- if compute.image.use_cow is defined %}
+use_cow_images = {{ compute.image.use_cow }}
+{%- endif %}
+
+{%- if compute.reserved_host_memory_mb is defined %}
+reserved_host_memory_mb = {{ compute.reserved_host_memory_mb }}
+{%- endif %}
+
+allow_resize_to_same_host=True
+
+live_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE
+live_migration_retry_count=30
+
+#Neutron
+neutron_admin_username={{ compute.network.user }}
+neutron_admin_password={{ compute.network.password }}
+neutron_admin_tenant_name={{ compute.identity.tenant }}
+neutron_admin_auth_url = http://{{ compute.identity.host }}:35357/v{% if compute.identity.get('version', 2) == 2 %}2.0{% else %}3{% endif %}
+neutron_url=http://{{ compute.network.host }}:{{ compute.network.port }}
+neutron_region_name= {{ compute.network.region }}
+#neutron_url = http://10.0.102.35:9696/
+
+
+auth_strategy = keystone
+libvirt_nonblocking = True
+libvirt_inject_partition = -1
+
+neutron_url_timeout = 300
+network_api_class=nova.network.neutronv2.api.API
+compute_driver = libvirt.LibvirtDriver
+
+#NoVNC
+vnc_enabled=true
+vncserver_enabled = true
+vncserver_listen=0.0.0.0
+vncserver_proxyclient_address={{ compute.bind.vnc_address }}
+novncproxy_base_url={{ compute.vncproxy_url }}/vnc_auto.html
+novncproxy_port={{ compute.bind.vnc_port }}
+
+heal_instance_info_cache_interval = {{ compute.heal_instance_info_cache_interval }}
+
+{%- if compute.cache is defined %}
+memcached_servers={%- for member in compute.cache.members %}{{ member.host }}:11211{% if not loop.last %},{% endif %}{%- endfor %}
+{%- endif %}
+
+security_group_api = neutron
+#heal_instance_info_cache_interval = 0
+libvirt_cpu_mode = host-passthrough
+image_cache_manager_interval = 0
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+glance_port = 9292
+glance_num_retries = 10
+
+rabbit_retry_interval = 1
+rabbit_retry_backoff = 2
+
+rpc_cast_timeout = 30
+rpc_conn_pool_size = 300
+rpc_response_timeout = 3600
+rpc_thread_pool_size = 70
+report_interval = 5
+
+block_device_allocate_retries=600
+block_device_allocate_retries_interval=10
+
+resume_guests_state_on_host_boot = True
+service_down_time = 90
+
+{% if pillar.ceilometer is defined %}
+
+instance_usage_audit = True
+instance_usage_audit_period = hour
+notify_on_state_change = vm_and_task_state
+
+{% endif %}
+
+{%- if compute.notification is defined %}
+notification_driver = {{ compute.notification.driver }}
+
+{%- if compute.notification.topics is defined %}
+notification_topics = {{ compute.notification.topics }}
+{%- endif %}
+
+{%- if compute.notification.notify_on is defined %}
+{%- for key, value in compute.notification.notify_on.iteritems() %}
+notify_on_{{ key }} = {{ value }}
+{%- endfor %}
+{%- endif %}
+
+{%- endif %}
+
+{%- if compute.identity.get('version', 2) == 2 %}
+
+[keystone_authtoken]
+signing_dirname=/tmp/keystone-signing-nova
+admin_tenant_name = {{ compute.identity.tenant }}
+admin_user = {{ compute.identity.user }}
+admin_password = {{ compute.identity.password }}
+auth_host = {{ compute.identity.host }}
+auth_port = {{ compute.identity.port }}
+auth_protocol=http
+auth_uri=http://{{ compute.identity.host }}:5000
+
+{%- else %}
+
+[keystone_authtoken]
+signing_dirname=/tmp/keystone-signing-nova
+identity_uri = http://{{ compute.identity.host }}:35357/v3
+revocation_cache_time = 10
+auth_section = generic_password
+
+[generic_password]
+auth_plugin = password
+user_domain_id = {{ compute.identity.get('domain', 'default') }}
+project_domain_id = {{ compute.identity.get('domain', 'default') }}
+project_name = {{ compute.identity.tenant }}
+username = {{ compute.identity.user }}
+password = {{ compute.identity.password }}
+auth_uri=http://{{ compute.identity.host }}:5000/v3
+auth_url=http://{{ compute.identity.host }}:35357/v3
+
+{%- endif %}
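+# Which keystone_authtoken variant above gets rendered follows the
+# identity version in pillar; e.g. a hypothetical
+# nova:compute:identity:version: 3 selects the generic_password section.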
+
+[oslo_messaging_rabbit]
+{%- if compute.message_queue.members is defined %}
+rabbit_hosts = {% for member in compute.message_queue.members -%}
+                   {{ member.host }}:{{ member.get('port', 5672) }}
+                   {%- if not loop.last -%},{%- endif -%}
+               {%- endfor -%}
+{%- else %}
+rabbit_host = {{ compute.message_queue.host }}
+rabbit_port = {{ compute.message_queue.port }}
+{%- endif %}
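+# With a hypothetical two-member message_queue pillar this renders as,
+# e.g., rabbit_hosts = 10.0.0.21:5672,10.0.0.22:5672; otherwise the
+# single rabbit_host/rabbit_port pair is used.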
+
+rabbit_userid = {{ compute.message_queue.user }}
+rabbit_password = {{ compute.message_queue.password }}
+rabbit_virtual_host = {{ compute.message_queue.virtual_host }}
+
+[glance]
+
+host = {{ compute.image.host }}
+
+[neutron]
+auth_strategy = keystone
+admin_username={{ compute.network.user }}
+admin_password={{ compute.network.password }}
+admin_tenant_name={{ compute.identity.tenant }}
+admin_auth_url=http://{{ compute.identity.host }}:{{ compute.identity.port }}/v{% if compute.identity.get('version', 2) == 2 %}2.0{% else %}3{% endif %}
+url=http://{{ compute.network.host }}:{{ compute.network.port }}
+region_name= {{ compute.network.region }}
+
+[cinder]
+os_region_name = {{ compute.identity.region }}
diff --git a/nova/files/kilo/nova-compute.conf.Redhat b/nova/files/kilo/nova-compute.conf.Redhat
new file mode 120000
index 0000000..ad4c8f6
--- /dev/null
+++ b/nova/files/kilo/nova-compute.conf.Redhat
@@ -0,0 +1 @@
+nova-compute.conf.Debian
\ No newline at end of file
diff --git a/nova/files/kilo/nova-controller.conf.Debian b/nova/files/kilo/nova-controller.conf.Debian
new file mode 100644
index 0000000..fe39363
--- /dev/null
+++ b/nova/files/kilo/nova-controller.conf.Debian
@@ -0,0 +1,210 @@
+{%- from "nova/map.jinja" import controller with context %}
+[DEFAULT]
+logdir = /var/log/nova
+verbose = True
+{%- if controller.debug %}
+debug = True
+{%- else %}
+debug = False
+{%- endif %}
+state_path = /var/lib/nova
+lock_path = /var/lib/nova/tmp
+volumes_dir = /etc/nova/volumes
+dhcpbridge = /usr/bin/nova-dhcpbridge
+dhcpbridge_flagfile = /etc/nova/nova.conf
+dhcp_domain = {{ controller.dhcp_domain }}
+force_dhcp_release = True
+injected_network_template = /usr/share/nova/interfaces.template
+libvirt_nonblocking = True
+libvirt_use_virtio_for_bridges=True
+libvirt_inject_partition = -1
+vif_plugging_is_fatal = False
+vif_plugging_timeout = 0
+cpu_allocation_ratio = {{ controller.cpu_allocation_ratio }}
+ram_allocation_ratio = {{ controller.ram_allocation_ratio }}
+disk_allocation_ratio = {{ controller.disk_allocation_ratio }}
+scheduler_default_filters = {{ controller.scheduler_default_filters }}
+
+allow_resize_to_same_host = True
+
+logdir=/var/log/nova
+iscsi_helper=tgtadm
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+ec2_private_dns_show_ip=True
+api_paste_config=/etc/nova/api-paste.ini
+volumes_path=/var/lib/nova/volumes
+
+sql_connection = {{ controller.database.engine }}://{{ controller.database.user }}:{{ controller.database.password }}@{{ controller.database.host }}/{{ controller.database.name }}
+
+network_api_class = nova.network.neutronv2.api.API
+
+compute_driver = libvirt.LibvirtDriver
+libvirt_type=kvm
+rootwrap_config = /etc/nova/rootwrap.conf
+auth_strategy = keystone
+firewall_driver=nova.virt.firewall.NoopFirewallDriver
+enabled_apis = ec2,osapi_compute,metadata
+
+my_ip={{ controller.bind.private_address }}
+
+
+
+neutron_auth_strategy = keystone
+neutron_admin_auth_url = http://{{ controller.identity.host }}:35357/v{% if controller.identity.get('version', 2) == 2 %}2.0{% else %}3{% endif %}
+{% if pillar.neutron is defined %}
+neutron_admin_password={{ pillar.neutron.server.identity.password }}
+neutron_admin_tenant_name={{ pillar.neutron.server.identity.tenant }}
+neutron_admin_username={{ pillar.neutron.server.identity.user }}
+neutron_region_name={{ pillar.neutron.server.identity.region }}
+{%- else %}
+neutron_admin_password={{ controller.network.password }}
+neutron_admin_tenant_name={{ controller.network.tenant }}
+neutron_admin_username={{ controller.network.user }}
+neutron_region_name={{ controller.network.region }}
+{%- endif %}
+neutron_url=http://{{ controller.network.host }}:{{ controller.network.port }}
+
+
+linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+security_group_api = neutron
+
+rpc_backend = nova.rpc.impl_kombu
+start_guests_on_host_boot=true
+
+{%- if controller.cache is defined %}
+memcached_servers={%- for member in controller.cache.members %}{{ member.host }}:11211{% if not loop.last %},{% endif %}{%- endfor %}
+{%- endif %}
+
+
+volume_api_class=nova.volume.cinder.API
+ec2_listen={{ controller.bind.private_address }}
+osapi_volume_listen={{ controller.bind.private_address }}
+osapi_compute_listen={{ controller.bind.private_address }}
+metadata_listen={{ controller.bind.private_address }}
+glance_host = {{ controller.glance.host }}
+osapi_compute_workers = {{ controller.workers }}
+
+#NoVNC
+vnc_enabled=true
+{%- if pillar.nova.compute is defined %}
+vncserver_listen={{ controller.bind.private_address }}
+vncserver_proxyclient_address={{ controller.bind.private_address }}
+{%- else %}
+vncserver_listen=0.0.0.0
+{%- endif %}
+novncproxy_base_url={{ controller.vncproxy_url }}/vnc_auto.html
+novncproxy_port={{ controller.bind.get('vncproxy_port', '6080') }}
+
+{%- if controller.get('networking', 'default') != "contrail" %}
+neutron_metadata_proxy_shared_secret={{ controller.metadata.password }}
+{%- endif %}
+
+allow_resize_to_same_host=True
+start_guests_on_host_boot=true
+
+rpc_cast_timeout = 30
+rpc_conn_pool_size = 300
+rpc_response_timeout = 3600
+rpc_thread_pool_size = 70
+report_interval = 5
+
+block_device_allocate_retries=600
+block_device_allocate_retries_interval=10
+
+[oslo_concurrency]
+
+lock_path = /var/lib/nova/tmp
+
+[oslo_messaging_rabbit]
+{%- if controller.message_queue.members is defined %}
+rabbit_hosts = {% for member in controller.message_queue.members -%}
+                   {{ member.host }}:{{ member.get('port', 5672) }}
+                   {%- if not loop.last -%},{%- endif -%}
+               {%- endfor -%}
+{%- else %}
+rabbit_host = {{ controller.message_queue.host }}
+rabbit_port = {{ controller.message_queue.port }}
+{%- endif %}
+
+rabbit_userid = {{ controller.message_queue.user }}
+rabbit_password = {{ controller.message_queue.password }}
+rabbit_virtual_host = {{ controller.message_queue.virtual_host }}
+
+rabbit_retry_interval = 1
+rabbit_retry_backoff = 2
+
+{%- if controller.identity.get('version', 2) == 2 %}
+
+[keystone_authtoken]
+signing_dirname=/tmp/keystone-signing-nova
+signing_dir=/tmp/keystone-signing-nova
+admin_tenant_name = {{ controller.identity.tenant }}
+admin_user = {{ controller.identity.user }}
+admin_password = {{ controller.identity.password }}
+auth_host = {{ controller.identity.host }}
+auth_port = {{ controller.identity.port }}
+auth_protocol=http
+auth_uri=http://{{ controller.identity.host }}:5000
+
+{%- else %}
+
+[keystone_authtoken]
+signing_dirname=/tmp/keystone-signing-nova
+identity_uri = http://{{ controller.identity.host }}:35357/v3
+revocation_cache_time = 10
+auth_section = generic_password
+
+[generic_password]
+auth_plugin = password
+user_domain_id = {{ controller.identity.get('domain', 'default') }}
+project_domain_id = {{ controller.identity.get('domain', 'default') }}
+project_name = {{ controller.identity.tenant }}
+username = {{ controller.identity.user }}
+password = {{ controller.identity.password }}
+auth_uri=http://{{ controller.identity.host }}:5000/v3
+auth_url=http://{{ controller.identity.host }}:35357/v3
+
+{%- endif %}
+
+[conductor]
+workers = {{ controller.workers }}
+
+[database]
+idle_timeout = 180
+min_pool_size = 100
+max_pool_size = 700
+max_overflow = 100
+retry_interval = 5
+max_retries = -1
+db_max_retries = 3
+db_retry_interval = 1
+connection_debug = 10
+pool_timeout = 120
+connection = {{ controller.database.engine }}://{{ controller.database.user }}:{{ controller.database.password }}@{{ controller.database.host }}/{{ controller.database.name }}
+
+[glance]
+
+host = {{ controller.glance.host }}
+
+[neutron]
+auth_strategy = keystone
+admin_auth_url = http://{{ controller.identity.host }}:35357/v{% if controller.identity.get('version', 2) == 2 %}2.0{% else %}3{% endif %}
+{% if pillar.neutron is defined %}
+admin_password={{ pillar.neutron.server.identity.password }}
+admin_tenant_name={{ pillar.neutron.server.identity.tenant }}
+admin_username={{ pillar.neutron.server.identity.user }}
+region_name = {{ pillar.neutron.server.identity.region }}
+{%- else %}
+admin_password={{ controller.network.password }}
+admin_tenant_name={{ controller.network.tenant }}
+admin_username={{ controller.network.user }}
+region_name = {{ controller.network.region }}
+{%- endif %}
+url=http://{{ controller.network.host }}:{{ controller.network.port }}
+
+service_metadata_proxy=True
+
+[cinder]
+os_region_name = {{ controller.identity.region }}
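
The controller template above renders one of two Keystone sections depending on controller.identity.version. As an illustration, a minimal pillar sketch (key names inferred from the template; the mapping of nova:controller through nova/map.jinja is assumed, and all values are placeholders):

    nova:
      controller:
        identity:
          host: 10.0.0.10
          port: 35357
          tenant: service
          user: nova
          password: secretpass

With the default version 2, the [keystone_authtoken] section renders as:

    [keystone_authtoken]
    signing_dirname=/tmp/keystone-signing-nova
    signing_dir=/tmp/keystone-signing-nova
    admin_tenant_name = service
    admin_user = nova
    admin_password = secretpass
    auth_host = 10.0.0.10
    auth_port = 35357
    auth_protocol=http
    auth_uri=http://10.0.0.10:5000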
diff --git a/nova/files/kilo/nova-controller.conf.RedHat b/nova/files/kilo/nova-controller.conf.RedHat
new file mode 120000
index 0000000..09c7524
--- /dev/null
+++ b/nova/files/kilo/nova-controller.conf.RedHat
@@ -0,0 +1 @@
+nova-controller.conf.Debian
\ No newline at end of file
diff --git a/nova/files/kilo/qemu.conf.Debian b/nova/files/kilo/qemu.conf.Debian
new file mode 100644
index 0000000..5c5722b
--- /dev/null
+++ b/nova/files/kilo/qemu.conf.Debian
@@ -0,0 +1,479 @@
+{%- from "nova/map.jinja" import compute with context %}
+# Master configuration file for the QEMU driver.
+# All settings described here are optional - if omitted, sensible
+# defaults are used.
+
+# VNC is configured to listen on 127.0.0.1 by default.
+# To make it listen on all public interfaces, uncomment
+# this next option.
+#
+# NB, strong recommendation to enable TLS + x509 certificate
+# verification when allowing public access
+#
+#vnc_listen = "0.0.0.0"
+
+# Enable this option to have VNC served over an automatically created
+# unix socket. This prevents unprivileged access from users on the
+# host machine, though most VNC clients do not support it.
+#
+# This will only be enabled for VNC configurations that do not have
+# a hardcoded 'listen' or 'socket' value. This setting takes preference
+# over vnc_listen.
+#
+#vnc_auto_unix_socket = 1
+
+# Enable use of TLS encryption on the VNC server. This requires
+# a VNC client which supports the VeNCrypt protocol extension.
+# Examples include vinagre, virt-viewer, virt-manager and vencrypt
+# itself. UltraVNC, RealVNC, TightVNC do not support this
+#
+# It is necessary to setup CA and issue a server certificate
+# before enabling this.
+#
+#vnc_tls = 1
+
+
+# Use of TLS requires that x509 certificates be issued. The
+# default is to keep them in /etc/pki/libvirt-vnc. This directory
+# must contain
+#
+#  ca-cert.pem - the CA master certificate
+#  server-cert.pem - the server certificate signed with ca-cert.pem
+#  server-key.pem  - the server private key
+#
+# This option allows the certificate directory to be changed
+#
+#vnc_tls_x509_cert_dir = "/etc/pki/libvirt-vnc"
+
+
+# The default TLS configuration only uses certificates for the server
+# allowing the client to verify the server's identity and establish
+# an encrypted channel.
+#
+# It is possible to use x509 certificates for authentication too, by
+# issuing a x509 certificate to every client who needs to connect.
+#
+# Enabling this option will reject any client who does not have a
+# certificate signed by the CA in /etc/pki/libvirt-vnc/ca-cert.pem
+#
+#vnc_tls_x509_verify = 1
+
+
+# The default VNC password. Only 8 bytes are significant for
+# VNC passwords. This parameter is only used if the per-domain
+# XML config does not already provide a password. To allow
+# access without passwords, leave this commented out. An empty
+# string will still enable passwords, but be rejected by QEMU,
+# effectively preventing any use of VNC. Obviously change this
+# example here before you set this.
+#
+#vnc_password = "XYZ12345"
+
+
+# Enable use of SASL encryption on the VNC server. This requires
+# a VNC client which supports the SASL protocol extension.
+# Examples include vinagre, virt-viewer and virt-manager
+# itself. UltraVNC, RealVNC, TightVNC do not support this
+#
+# It is necessary to configure /etc/sasl2/qemu.conf to choose
+# the desired SASL plugin (eg, GSSAPI for Kerberos)
+#
+#vnc_sasl = 1
+
+
+# The default SASL configuration file is located in /etc/sasl2/
+# When running libvirtd unprivileged, it may be desirable to
+# override the configs in this location. Set this parameter to
+# point to the directory, and create a qemu.conf in that location
+#
+#vnc_sasl_dir = "/some/directory/sasl2"
+
+
+# QEMU implements an extension for providing audio over a VNC connection,
+# though if your VNC client does not support it, your only chance for getting
+# sound output is through regular audio backends. By default, libvirt will
+# disable all QEMU sound backends if using VNC, since they can cause
+# permissions issues. Enabling this option will make libvirtd honor the
+# QEMU_AUDIO_DRV environment variable when using VNC.
+#
+#vnc_allow_host_audio = 0
+
+
+
+# SPICE is configured to listen on 127.0.0.1 by default.
+# To make it listen on all public interfaces, uncomment
+# this next option.
+#
+# NB, strong recommendation to enable TLS + x509 certificate
+# verification when allowing public access
+#
+#spice_listen = "0.0.0.0"
+
+
+# Enable use of TLS encryption on the SPICE server.
+#
+# It is necessary to setup CA and issue a server certificate
+# before enabling this.
+#
+#spice_tls = 1
+
+
+# Use of TLS requires that x509 certificates be issued. The
+# default is to keep them in /etc/pki/libvirt-spice. This directory
+# must contain
+#
+#  ca-cert.pem - the CA master certificate
+#  server-cert.pem - the server certificate signed with ca-cert.pem
+#  server-key.pem  - the server private key
+#
+# This option allows the certificate directory to be changed.
+#
+#spice_tls_x509_cert_dir = "/etc/pki/libvirt-spice"
+
+
+# The default SPICE password. This parameter is only used if the
+# per-domain XML config does not already provide a password. To
+# allow access without passwords, leave this commented out. An
+# empty string will still enable passwords, but be rejected by
+# QEMU, effectively preventing any use of SPICE. Obviously change
+# this example here before you set this.
+#
+#spice_password = "XYZ12345"
+
+
+# Enable use of SASL encryption on the SPICE server. This requires
+# a SPICE client which supports the SASL protocol extension.
+#
+# It is necessary to configure /etc/sasl2/qemu.conf to choose
+# the desired SASL plugin (eg, GSSAPI for Kerberos)
+#
+#spice_sasl = 1
+
+# The default SASL configuration file is located in /etc/sasl2/
+# When running libvirtd unprivileged, it may be desirable to
+# override the configs in this location. Set this parameter to
+# point to the directory, and create a qemu.conf in that location
+#
+#spice_sasl_dir = "/some/directory/sasl2"
+
+
+# By default, if no graphical front end is configured, libvirt will disable
+# QEMU audio output since directly talking to alsa/pulseaudio may not work
+# with various security settings. If you know what you're doing, enable
+# the setting below and libvirt will passthrough the QEMU_AUDIO_DRV
+# environment variable when using nographics.
+#
+#nographics_allow_host_audio = 1
+
+
+# Override the port for creating both VNC and SPICE sessions (min).
+# This defaults to 5900 and increases for consecutive sessions
+# or when ports are occupied, until it hits the maximum.
+#
+# Minimum must be greater than or equal to 5900 as lower number would
+# result in a negative VNC display number.
+#
+# Maximum must be less than 65536, because higher numbers do not make
+# sense as a port number.
+#
+#remote_display_port_min = 5900
+#remote_display_port_max = 65535
+
+# VNC WebSocket port policies, same rules apply as with remote display
+# ports.  VNC WebSockets use similar display <-> port mappings, with
+# the exception being that ports start from 5700 instead of 5900.
+#
+#remote_websocket_port_min = 5700
+#remote_websocket_port_max = 65535
+
+# The default security driver is SELinux. If SELinux is disabled
+# on the host, then the security driver will automatically disable
+# itself. If you wish to disable QEMU SELinux security driver while
+# leaving SELinux enabled for the host in general, then set this
+# to 'none' instead. It's also possible to use more than one security
+# driver at the same time; for this, use a list of names separated by
+# commas and delimited by square brackets. For example:
+#
+#       security_driver = [ "selinux", "apparmor" ]
+#
+# Notes: The DAC security driver is always enabled; as a result, the
+# value of security_driver cannot contain "dac".  The value "none" is
+# a special value; security_driver can be set to that value in
+# isolation, but it cannot appear in a list of drivers.
+#
+#security_driver = "selinux"
+
+# If set to non-zero, then the default security labeling
+# will make guests confined. If set to zero, then guests
+# will be unconfined by default. Defaults to 1.
+#security_default_confined = 1
+
+# If set to non-zero, then attempts to create unconfined
+# guests will be blocked. Defaults to 0.
+#security_require_confined = 1
+
+# The user for QEMU processes run by the system instance. It can be
+# specified as a user name or as a user id. The qemu driver will try to
+# parse this value first as a name and then, if the name doesn't exist,
+# as a user id.
+#
+# Since a sequence of digits is a valid user name, a leading plus sign
+# can be used to ensure that a user id will not be interpreted as a user
+# name.
+#
+# Some examples of valid values are:
+#
+#       user = "qemu"   # A user named "qemu"
+#       user = "+0"     # Super user (uid=0)
+#       user = "100"    # A user named "100" or a user with uid=100
+#
+#user = "root"
+
+# The group for QEMU processes run by the system instance. It can be
+# specified in a similar way to user.
+#group = "root"
+
+# Whether libvirt should dynamically change file ownership
+# to match the configured user/group above. Defaults to 1.
+# Set to 0 to disable file ownership changes.
+#dynamic_ownership = 1
+
+
+# What cgroup controllers to make use of with QEMU guests
+#
+#  - 'cpu' - use for scheduler tunables
+#  - 'devices' - use for device whitelisting
+#  - 'memory' - use for memory tunables
+#  - 'blkio' - use for block devices I/O tunables
+#  - 'cpuset' - use for CPUs and memory nodes
+#  - 'cpuacct' - use for CPUs statistics.
+#
+# NB, even if configured here, they won't be used unless
+# the administrator has mounted cgroups, e.g.:
+#
+#  mkdir /dev/cgroup
+#  mount -t cgroup -o devices,cpu,memory,blkio,cpuset none /dev/cgroup
+#
+# They can be mounted anywhere, and different controllers
+# can be mounted in different locations. libvirt will detect
+# where they are located.
+#
+#cgroup_controllers = [ "cpu", "devices", "memory", "blkio", "cpuset", "cpuacct" ]
+
+# This is the basic set of devices allowed / required by
+# all virtual machines.
+#
+# As well as this, any configured block-backed disks,
+# all sound devices, and all PTY devices are allowed.
+#
+# This will only need setting if newer QEMU suddenly
+# wants some device we don't already know about.
+#
+#cgroup_device_acl = [
+#    "/dev/null", "/dev/full", "/dev/zero",
+#    "/dev/random", "/dev/urandom",
+#    "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
+#    "/dev/rtc","/dev/hpet", "/dev/vfio/vfio"
+#]
+
+
+# The default format for Qemu/KVM guest save images is raw; that is, the
+# memory from the domain is dumped out directly to a file.  If you have
+# guests with a large amount of memory, however, this can take up quite
+# a bit of space.  If you would like to compress the images while they
+# are being saved to disk, you can also set "lzop", "gzip", "bzip2", or "xz"
+# for save_image_format.  Note that this means you slow down the process of
+# saving a domain in order to save disk space; the list above is in descending
+# order by performance and ascending order by compression ratio.
+#
+# save_image_format is used when you use 'virsh save' or 'virsh managedsave'
+# at scheduled saving, and it is an error if the specified save_image_format
+# is not valid, or the requested compression program can't be found.
+#
+# dump_image_format is used when you use 'virsh dump' at emergency
+# crashdump, and if the specified dump_image_format is not valid, or
+# the requested compression program can't be found, this falls
+# back to "raw" compression.
+#
+# snapshot_image_format specifies the compression algorithm of the memory save
+# image when an external snapshot of a domain is taken. This does not apply
+# on disk image format. It is an error if the specified format isn't valid,
+# or the requested compression program can't be found.
+#
+#save_image_format = "raw"
+#dump_image_format = "raw"
+#snapshot_image_format = "raw"
+
+# When a domain is configured to be auto-dumped when libvirtd receives a
+# watchdog event from qemu guest, libvirtd will save dump files in directory
+# specified by auto_dump_path. Default value is /var/lib/libvirt/qemu/dump
+#
+#auto_dump_path = "/var/lib/libvirt/qemu/dump"
+
+# When a domain is configured to be auto-dumped, enabling this flag
+# has the same effect as using the VIR_DUMP_BYPASS_CACHE flag with the
+# virDomainCoreDump API.  That is, the system will avoid using the
+# file system cache while writing the dump file, but may cause
+# slower operation.
+#
+#auto_dump_bypass_cache = 0
+
+# When a domain is configured to be auto-started, enabling this flag
+# has the same effect as using the VIR_DOMAIN_START_BYPASS_CACHE flag
+# with the virDomainCreateWithFlags API.  That is, the system will
+# avoid using the file system cache when restoring any managed state
+# file, but may cause slower operation.
+#
+#auto_start_bypass_cache = 0
+
+# If provided by the host and a hugetlbfs mount point is configured,
+# a guest may request huge page backing.  When this mount point is
+# unspecified here, determination of a host mount point in /proc/mounts
+# will be attempted.  Specifying an explicit mount overrides detection
+# of the same in /proc/mounts.  Setting the mount point to "" will
+# disable guest hugepage backing.
+#
+# NB, within this mount point, guests will create memory backing files
+# in a location of $MOUNTPOINT/libvirt/qemu
+#
+#hugetlbfs_mount = "/dev/hugepages"
+
+
+# Path to the setuid helper for creating tap devices.  This executable
+# is used to create <source type='bridge'> interfaces when libvirtd is
+# running unprivileged.  libvirt invokes the helper directly, instead
+# of using "-netdev bridge", for security reasons.
+#bridge_helper = "/usr/libexec/qemu-bridge-helper"
+
+
+
+# If clear_emulator_capabilities is enabled, libvirt will drop all
+# privileged capabilities of the QEmu/KVM emulator. This is enabled by
+# default.
+#
+# Warning: Disabling this option means that a compromised guest can
+# exploit the privileges and possibly do damage to the host.
+#
+#clear_emulator_capabilities = 1
+
+
+# If enabled, libvirt will have QEMU set its process name to
+# "qemu:VM_NAME", where VM_NAME is the name of the VM. The QEMU
+# process will appear as "qemu:VM_NAME" in process listings and
+# other system monitoring tools. By default, QEMU does not set
+# its process title, so the complete QEMU command (emulator and
+# its arguments) appear in process listings.
+#
+#set_process_name = 1
+
+
+# If max_processes is set to a positive integer, libvirt will use
+# it to set the maximum number of processes that can be run by the
+# qemu user. This can be used to override the default value set by the host OS.
+# The same applies to max_files which sets the limit on the maximum
+# number of opened files.
+#
+{%- if compute.qemu is defined %}
+
+{%- if compute.qemu.max_processes is defined %}
+max_processes = {{ compute.qemu.max_processes }}
+{%- endif %}
+
+{%- if compute.qemu.max_files is defined %}
+max_files = {{ compute.qemu.max_files }}
+{%- endif %}
+
+{%- endif %}
+
+# mac_filter enables MAC addressed based filtering on bridge ports.
+# This currently requires ebtables to be installed.
+#
+#mac_filter = 1
+
+
+# By default, PCI devices below a non-ACS switch are not allowed to be assigned
+# to guests. By setting relaxed_acs_check to 1 such devices will be allowed to
+# be assigned to guests.
+#
+#relaxed_acs_check = 1
+
+
+# If allow_disk_format_probing is enabled, libvirt will probe disk
+# images to attempt to identify their format, when not otherwise
+# specified in the XML. This is disabled by default.
+#
+# WARNING: Enabling probing is a security hole in almost all
+# deployments. It is strongly recommended that users update their
+# guest XML <disk> elements to include  <driver type='XXXX'/>
+# elements instead of enabling this option.
+#
+#allow_disk_format_probing = 1
+
+
+# To enable 'Sanlock' project based locking of the file
+# content (to prevent two VMs writing to the same
+# disk), uncomment this
+#
+#lock_manager = "sanlock"
+
+
+
+# Set a limit on the maximum number of API calls queued on one domain.
+# Calls over this threshold will fail to acquire the job lock. In
+# particular, setting it to zero turns this feature off.
+# Note, that job lock is per domain.
+#
+#max_queued = 0
+
+###################################################################
+# Keepalive protocol:
+# This allows the qemu driver to detect broken connections to remote
+# libvirtd during peer-to-peer migration.  A keepalive message is
+# sent to the daemon after keepalive_interval seconds of inactivity
+# to check if the daemon is still responding; keepalive_count is a
+# maximum number of keepalive messages that are allowed to be sent
+# to the daemon without getting any response before the connection
+# is considered broken.  In other words, the connection is
+# automatically closed approximately after
+# keepalive_interval * (keepalive_count + 1) seconds since the last
+# message received from the daemon.  If keepalive_interval is set to
+# -1, qemu driver will not send keepalive requests during
+# peer-to-peer migration; however, the remote libvirtd can still
+# send them and source libvirtd will send responses.  When
+# keepalive_count is set to 0, connections will be automatically
+# closed after keepalive_interval seconds of inactivity without
+# sending any keepalive messages.
+#
+#keepalive_interval = 5
+#keepalive_count = 5
+
+
+
+# Use seccomp syscall whitelisting in QEMU.
+# 1 = on, 0 = off, -1 = use QEMU default
+# Defaults to -1.
+#
+#seccomp_sandbox = 1
+
+
+
+# Override the listen address for all incoming migrations. Defaults to
+# 0.0.0.0, or :: if both host and qemu are capable of IPv6.
+#migration_address = "127.0.0.1"
+
+
+# Override the port range used for incoming migrations.
+#
+# Minimum must be greater than 0, however when QEMU is not running as root,
+# setting the minimum to be lower than 1024 will not work.
+#
+# Maximum must not be greater than 65535.
+#
+#migration_port_min = 49152
+#migration_port_max = 49215
+cgroup_device_acl = [
+    "/dev/null", "/dev/full", "/dev/zero",
+    "/dev/random", "/dev/urandom",
+    "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
+    "/dev/rtc", "/dev/hpet","/dev/net/tun",
+]
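
The compute.qemu guard near the end of this template only emits process and file limits when the pillar supplies them. A hypothetical pillar fragment (assuming compute maps to the nova:compute pillar via nova/map.jinja):

    nova:
      compute:
        qemu:
          max_processes: 4096
          max_files: 32768

renders:

    max_processes = 4096
    max_files = 32768

If nova:compute:qemu is absent, neither option is written and the host defaults apply.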
diff --git a/nova/files/kilo/qemu.conf.RedHat b/nova/files/kilo/qemu.conf.RedHat
new file mode 120000
index 0000000..1d23f19
--- /dev/null
+++ b/nova/files/kilo/qemu.conf.RedHat
@@ -0,0 +1 @@
+qemu.conf.Debian
\ No newline at end of file
diff --git a/nova/files/liberty/api-paste.ini.Debian b/nova/files/liberty/api-paste.ini.Debian
new file mode 100644
index 0000000..cb5ea67
--- /dev/null
+++ b/nova/files/liberty/api-paste.ini.Debian
@@ -0,0 +1,140 @@
+############
+# Metadata #
+############
+[composite:metadata]
+use = egg:Paste#urlmap
+/: meta
+
+[pipeline:meta]
+pipeline = ec2faultwrap logrequest metaapp
+
+[app:metaapp]
+paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory
+
+#######
+# EC2 #
+#######
+
+# NOTE: this is now deprecated in favor of https://github.com/stackforge/ec2-api
+[composite:ec2]
+use = egg:Paste#urlmap
+/: ec2cloud
+
+[composite:ec2cloud]
+use = call:nova.api.auth:pipeline_factory
+noauth2 = ec2faultwrap logrequest ec2noauth cloudrequest validator ec2executor
+keystone = ec2faultwrap logrequest ec2keystoneauth cloudrequest validator ec2executor
+
+[filter:ec2faultwrap]
+paste.filter_factory = nova.api.ec2:FaultWrapper.factory
+
+[filter:logrequest]
+paste.filter_factory = nova.api.ec2:RequestLogging.factory
+
+[filter:ec2lockout]
+paste.filter_factory = nova.api.ec2:Lockout.factory
+
+[filter:ec2keystoneauth]
+paste.filter_factory = nova.api.ec2:EC2KeystoneAuth.factory
+
+[filter:ec2noauth]
+paste.filter_factory = nova.api.ec2:NoAuth.factory
+
+[filter:cloudrequest]
+controller = nova.api.ec2.cloud.CloudController
+paste.filter_factory = nova.api.ec2:Requestify.factory
+
+[filter:authorizer]
+paste.filter_factory = nova.api.ec2:Authorizer.factory
+
+[filter:validator]
+paste.filter_factory = nova.api.ec2:Validator.factory
+
+[app:ec2executor]
+paste.app_factory = nova.api.ec2:Executor.factory
+
+#############
+# OpenStack #
+#############
+
+[composite:osapi_compute]
+use = call:nova.api.openstack.urlmap:urlmap_factory
+/: oscomputeversions
+# starting in Liberty the v21 implementation replaces the v2
+# implementation and it is suggested that you use it as the default. If
+# this causes issues with your clients you can rollback to the
+# *frozen* v2 api by commenting out the above stanza and using the
+# following instead::
+# /v1.1: openstack_compute_api_legacy_v2
+# /v2: openstack_compute_api_legacy_v2
+# if rolling back to v2 fixes your issue please file a critical bug
+# at - https://bugs.launchpad.net/nova/+bugs
+#
+# v21 is an exact feature match for v2, except it has more stringent
+# input validation on the wsgi surface (prevents fuzzing early on the
+# API). It also provides new features via API microversions which are
+# opt-in for clients. Unaware clients will receive the same frozen
+# v2 API feature set, but with some relaxed validation
+/v1.1: openstack_compute_api_v21_legacy_v2_compatible
+/v2: openstack_compute_api_v21_legacy_v2_compatible
+/v2.1: openstack_compute_api_v21
+
+# NOTE: this is deprecated in favor of openstack_compute_api_v21_legacy_v2_compatible
+[composite:openstack_compute_api_legacy_v2]
+use = call:nova.api.auth:pipeline_factory
+noauth2 = compute_req_id faultwrap sizelimit noauth2 legacy_ratelimit osapi_compute_app_legacy_v2
+keystone = compute_req_id faultwrap sizelimit authtoken keystonecontext legacy_ratelimit osapi_compute_app_legacy_v2
+keystone_nolimit = compute_req_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_legacy_v2
+
+[composite:openstack_compute_api_v21]
+use = call:nova.api.auth:pipeline_factory_v21
+noauth2 = compute_req_id faultwrap sizelimit noauth2 osapi_compute_app_v21
+keystone = compute_req_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v21
+
+[composite:openstack_compute_api_v21_legacy_v2_compatible]
+use = call:nova.api.auth:pipeline_factory_v21
+noauth2 = compute_req_id faultwrap sizelimit noauth2 legacy_v2_compatible osapi_compute_app_v21
+keystone = compute_req_id faultwrap sizelimit authtoken keystonecontext legacy_v2_compatible osapi_compute_app_v21
+
+[filter:request_id]
+paste.filter_factory = oslo_middleware:RequestId.factory
+
+[filter:compute_req_id]
+paste.filter_factory = nova.api.compute_req_id:ComputeReqIdMiddleware.factory
+
+[filter:faultwrap]
+paste.filter_factory = nova.api.openstack:FaultWrapper.factory
+
+[filter:noauth2]
+paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
+
+[filter:legacy_ratelimit]
+paste.filter_factory = nova.api.openstack.compute.limits:RateLimitingMiddleware.factory
+
+[filter:sizelimit]
+paste.filter_factory = oslo_middleware:RequestBodySizeLimiter.factory
+
+[filter:legacy_v2_compatible]
+paste.filter_factory = nova.api.openstack:LegacyV2CompatibleWrapper.factory
+
+[app:osapi_compute_app_legacy_v2]
+paste.app_factory = nova.api.openstack.compute:APIRouter.factory
+
+[app:osapi_compute_app_v21]
+paste.app_factory = nova.api.openstack.compute:APIRouterV21.factory
+
+[pipeline:oscomputeversions]
+pipeline = faultwrap oscomputeversionapp
+
+[app:oscomputeversionapp]
+paste.app_factory = nova.api.openstack.compute.versions:Versions.factory
+
+##########
+# Shared #
+##########
+
+[filter:keystonecontext]
+paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
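
To trace how this file composes the compute API: with auth_strategy = keystone in nova.conf, the pipeline_factory_v21 call in [composite:openstack_compute_api_v21] selects its keystone line, so a request to /v2.1 passes through the filters in order:

    compute_req_id -> faultwrap -> sizelimit -> authtoken -> keystonecontext -> osapi_compute_app_v21

The authtoken filter (keystonemiddleware) validates the token before anything reaches the compute app; the noauth2 variant replaces it with NoAuthMiddleware and is only suitable for testing.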
diff --git a/nova/files/liberty/api-paste.ini.RedHat b/nova/files/liberty/api-paste.ini.RedHat
new file mode 120000
index 0000000..08fd76a
--- /dev/null
+++ b/nova/files/liberty/api-paste.ini.RedHat
@@ -0,0 +1 @@
+api-paste.ini.Debian
\ No newline at end of file
diff --git a/nova/files/liberty/libvirt.Debian b/nova/files/liberty/libvirt.Debian
new file mode 100644
index 0000000..1704dc4
--- /dev/null
+++ b/nova/files/liberty/libvirt.Debian
@@ -0,0 +1,12 @@
+# Defaults for libvirt-bin initscript (/etc/init.d/libvirt-bin)
+# This is a POSIX shell fragment
+
+# Start libvirtd to handle qemu/kvm:
+start_libvirtd="yes"
+
+# options passed to libvirtd, add "-l" to listen on tcp
+libvirtd_opts="-d -l"
+LIBVIRTD_ARGS="--listen"
+
+# pass in location of kerberos keytab
+#export KRB5_KTNAME=/etc/libvirt/libvirt.keytab
\ No newline at end of file
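
Since libvirtd_opts passes -l (listen), and the libvirtd.conf template later in this changeset sets listen_tcp = 1 with auth_tcp = "none", libvirtd will accept unauthenticated TCP connections, which qemu+tcp live migration relies on. One way to verify connectivity from a peer, assuming a compute host named cmp01 (a placeholder name):

    virsh -c qemu+tcp://cmp01/system list --all

This combination is only appropriate on a trusted management network; otherwise the TLS or SASL options documented in libvirtd.conf should be used instead.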
diff --git a/nova/files/liberty/libvirt.RedHat b/nova/files/liberty/libvirt.RedHat
new file mode 100644
index 0000000..f63cdab
--- /dev/null
+++ b/nova/files/liberty/libvirt.RedHat
@@ -0,0 +1,11 @@
+# Defaults for libvirt-bin initscript (/etc/init.d/libvirt-bin)
+# This is a POSIX shell fragment
+
+# Start libvirtd to handle qemu/kvm:
+start_libvirtd="yes"
+
+# options passed to libvirtd, add "-l" to listen on tcp
+libvirtd_opts="-d -l"
+
+# pass in location of kerberos keytab
+#export KRB5_KTNAME=/etc/libvirt/libvirt.keytab
diff --git a/nova/files/liberty/libvirtd.conf.Debian b/nova/files/liberty/libvirtd.conf.Debian
new file mode 100644
index 0000000..8333dcb
--- /dev/null
+++ b/nova/files/liberty/libvirtd.conf.Debian
@@ -0,0 +1,402 @@
+# Master libvirt daemon configuration file
+#
+# For further information consult http://libvirt.org/format.html
+#
+# NOTE: the tests/daemon-conf regression test script requires
+# that each "PARAMETER = VALUE" line in this file have the parameter
+# name just after a leading "#".
+
+#################################################################
+#
+# Network connectivity controls
+#
+
+# Flag listening for secure TLS connections on the public TCP/IP port.
+# NB, must pass the --listen flag to the libvirtd process for this to
+# have any effect.
+#
+# It is necessary to setup a CA and issue server certificates before
+# using this capability.
+#
+# This is enabled by default, uncomment this to disable it
+#listen_tls = 0
+
+
+listen_tls = 0
+listen_tcp = 1
+auth_tcp = "none"
+
+
+# Listen for unencrypted TCP connections on the public TCP/IP port.
+# NB, must pass the --listen flag to the libvirtd process for this to
+# have any effect.
+#
+# Using the TCP socket requires SASL authentication by default. Only
+# SASL mechanisms which support data encryption are allowed. This is
+# DIGEST-MD5 and GSSAPI (Kerberos5)
+#
+# This is disabled by default, uncomment this to enable it.
+#listen_tcp = 1
+
+
+
+# Override the port for accepting secure TLS connections
+# This can be a port number, or service name
+#
+#tls_port = "16514"
+
+# Override the port for accepting insecure TCP connections
+# This can be a port number, or service name
+#
+#tcp_port = "16509"
+
+
+# Override the default configuration which binds to all network
+# interfaces. This can be a numeric IPv4/6 address, or hostname
+#
+#listen_addr = "192.168.0.1"
+
+
+# Flag toggling mDNS advertisement of the libvirt service.
+#
+# Alternatively can disable for all services on a host by
+# stopping the Avahi daemon
+#
+# This is disabled by default, uncomment this to enable it
+#mdns_adv = 1
+
+# Override the default mDNS advertisement name. This must be
+# unique on the immediate broadcast network.
+#
+# The default is "Virtualization Host HOSTNAME", where HOSTNAME
+# is substituted for the short hostname of the machine (without domain)
+#
+#mdns_name = "Virtualization Host Joe Demo"
+
+
+#################################################################
+#
+# UNIX socket access controls
+#
+
+# Set the UNIX domain socket group ownership. This can be used to
+# allow a 'trusted' set of users access to management capabilities
+# without becoming root.
+#
+# This is restricted to 'root' by default.
+unix_sock_group = "libvirtd"
+
+# Set the UNIX socket permissions for the R/O socket. This is used
+# for monitoring VM status only
+#
+# Default allows any user. If setting group ownership, you may want to
+# restrict this to:
+#unix_sock_ro_perms = "0777"
+
+# Set the UNIX socket permissions for the R/W socket. This is used
+# for full management of VMs
+#
+# Default allows only root. If PolicyKit is enabled on the socket,
+# the default will change to allow everyone (eg, 0777)
+#
+# If not using PolicyKit and setting group ownership for access
+# control then you may want to relax this to:
+unix_sock_rw_perms = "0770"
+
+# Set the name of the directory in which sockets will be found/created.
+#unix_sock_dir = "/var/run/libvirt"
+
+#################################################################
+#
+# Authentication.
+#
+#  - none: do not perform auth checks. If you can connect to the
+#          socket you are allowed. This is suitable if there are
+#          restrictions on connecting to the socket (eg, UNIX
+#          socket permissions), or if there is a lower layer in
+#          the network providing auth (eg, TLS/x509 certificates)
+#
+#  - sasl: use SASL infrastructure. The actual auth scheme is then
+#          controlled from /etc/sasl2/libvirt.conf. For the TCP
+#          socket only GSSAPI & DIGEST-MD5 mechanisms will be used.
+#          For non-TCP or TLS sockets,  any scheme is allowed.
+#
+#  - polkit: use PolicyKit to authenticate. This is only suitable
+#            for use on the UNIX sockets. The default policy will
+#            require a user to supply their own password to gain
+#            full read/write access (aka sudo like), while anyone
+#            is allowed read/only access.
+#
+# Set an authentication scheme for UNIX read-only sockets
+# By default socket permissions allow anyone to connect
+#
+# To restrict monitoring of domains you may wish to enable
+# an authentication mechanism here
+auth_unix_ro = "none"
+
+# Set an authentication scheme for UNIX read-write sockets
+# By default socket permissions only allow root. If PolicyKit
+# support was compiled into libvirt, the default will be to
+# use 'polkit' auth.
+#
+# If the unix_sock_rw_perms are changed you may wish to enable
+# an authentication mechanism here
+auth_unix_rw = "none"
+
+# Change the authentication scheme for TCP sockets.
+#
+# If you don't enable SASL, then all TCP traffic is cleartext.
+# Don't do this outside of a dev/test scenario. For real world
+# use, always enable SASL and use the GSSAPI or DIGEST-MD5
+# mechanism in /etc/sasl2/libvirt.conf
+#auth_tcp = "sasl"
+#auth_tcp = "none"
+
+# Change the authentication scheme for TLS sockets.
+#
+# TLS sockets already have encryption provided by the TLS
+# layer, and limited authentication is done by certificates
+#
+# It is possible to make use of any SASL authentication
+# mechanism as well, by using 'sasl' for this option
+#auth_tls = "none"
+
+
+
+#################################################################
+#
+# TLS x509 certificate configuration
+#
+
+
+# Override the default server key file path
+#
+#key_file = "/etc/pki/libvirt/private/serverkey.pem"
+
+# Override the default server certificate file path
+#
+#cert_file = "/etc/pki/libvirt/servercert.pem"
+
+# Override the default CA certificate path
+#
+#ca_file = "/etc/pki/CA/cacert.pem"
+
+# Specify a certificate revocation list.
+#
+# Defaults to not using a CRL, uncomment to enable it
+#crl_file = "/etc/pki/CA/crl.pem"
+
+
+
+#################################################################
+#
+# Authorization controls
+#
+
+
+# Flag to disable verification of our own server certificates
+#
+# When libvirtd starts it performs some sanity checks against
+# its own certificates.
+#
+# Default is to always run sanity checks. Uncommenting this
+# will disable sanity checks which is not a good idea
+#tls_no_sanity_certificate = 1
+
+# Flag to disable verification of client certificates
+#
+# Client certificate verification is the primary authentication mechanism.
+# Any client which does not present a certificate signed by the CA
+# will be rejected.
+#
+# Default is to always verify. Uncommenting this will disable
+# verification - make sure an IP whitelist is set
+#tls_no_verify_certificate = 1
+
+
+# A whitelist of allowed x509  Distinguished Names
+# This list may contain wildcards such as
+#
+#    "C=GB,ST=London,L=London,O=Red Hat,CN=*"
+#
+# See the POSIX fnmatch function for the format of the wildcards.
+#
+# NB If this is an empty list, no client can connect, so comment out
+# entirely rather than using empty list to disable these checks
+#
+# By default, no DNs are checked
+#tls_allowed_dn_list = ["DN1", "DN2"]
+
+
+# A whitelist of allowed SASL usernames. The format for usernames
+# depends on the SASL authentication mechanism. Kerberos usernames
+# look like username@REALM
+#
+# This list may contain wildcards such as
+#
+#    "*@EXAMPLE.COM"
+#
+# See the POSIX fnmatch function for the format of the wildcards.
+#
+# NB If this is an empty list, no client can connect, so comment out
+# entirely rather than using empty list to disable these checks
+#
+# By default, no usernames are checked
+#sasl_allowed_username_list = ["joe@EXAMPLE.COM", "fred@EXAMPLE.COM" ]
+
+
+
+#################################################################
+#
+# Processing controls
+#
+
+# The maximum number of concurrent client connections to allow
+# over all sockets combined.
+#max_clients = 20
+
+
+# The minimum limit sets the number of workers to start up
+# initially. If the number of active clients exceeds this,
+# then more threads are spawned, up to the max_workers limit.
+# Typically you'd want max_workers to equal the maximum number
+# of clients allowed
+#min_workers = 5
+#max_workers = 20
+
+
+# The number of priority workers. If all workers from the above
+# pool are stuck, some calls marked as high priority
+# (notably domainDestroy) can be executed in this pool.
+#prio_workers = 5
+
+# Total global limit on concurrent RPC calls. Should be
+# at least as large as max_workers. Beyond this, RPC requests
+# will be read into memory and queued. This directly impacts
+# memory usage; currently each request requires 256 KB of
+# memory, so by default up to 5 MB of memory is used
+#
+# XXX this isn't actually enforced yet, only the per-client
+# limit is used so far
+#max_requests = 20
+
+# Limit on concurrent requests from a single client
+# connection. To avoid one client monopolizing the server
+# this should be a small fraction of the global max_requests
+# and max_workers parameters
+#max_client_requests = 5
+
+#################################################################
+#
+# Logging controls
+#
+
+# Logging level: 4 errors, 3 warnings, 2 information, 1 debug
+# basically 1 will log everything possible
+#log_level = 3
+
+# Logging filters:
+# A filter allows selecting a different logging level for a given category
+# of logs
+# The format for a filter is one of:
+#    x:name
+#    x:+name
+#      where name is a string which is matched against source file name,
+#      e.g., "remote", "qemu", or "util/json", the optional "+" prefix
+#      tells libvirt to log stack trace for each message matching name,
+#      and x is the minimal level where matching messages should be logged:
+#    1: DEBUG
+#    2: INFO
+#    3: WARNING
+#    4: ERROR
+#
+# Multiple filters can be defined in a single @filters; they just need to be
+# separated by spaces.
+#
+# e.g. to only get warning or errors from the remote layer and only errors
+# from the event layer:
+#log_filters="3:remote 4:event"
+
+# Logging outputs:
+# An output is one of the places to save logging information
+# The format for an output can be:
+#    x:stderr
+#      output goes to stderr
+#    x:syslog:name
+#      use syslog for the output and use the given name as the ident
+#    x:file:file_path
+#      output to a file, with the given filepath
+# In all cases the x prefix is the minimal level, acting as a filter
+#    1: DEBUG
+#    2: INFO
+#    3: WARNING
+#    4: ERROR
+#
+# Multiple outputs can be defined; they just need to be separated by spaces.
+# e.g. to log all warnings and errors to syslog under the libvirtd ident:
+#log_outputs="3:syslog:libvirtd"
+#
+
+# Log debug buffer size: default 64
+# The daemon keeps an internal debug log buffer which will be dumped in case
+# of crash or upon receiving a SIGUSR2 signal. This setting allows
+# overriding the default buffer size in kilobytes.
+# If the value is 0 or less, the debug log buffer is deactivated
+#log_buffer_size = 64
+
+
+##################################################################
+#
+# Auditing
+#
+# This setting allows usage of the auditing subsystem to be altered:
+#
+#   audit_level == 0  -> disable all auditing
+#   audit_level == 1  -> enable auditing, only if enabled on host (default)
+#   audit_level == 2  -> enable auditing, and exit if disabled on host
+#
+#audit_level = 2
+#
+# If set to 1, then audit messages will also be sent
+# via libvirt logging infrastructure. Defaults to 0
+#
+#audit_logging = 1
+
+###################################################################
+# UUID of the host:
+# Provide the UUID of the host here in case the command
+# 'dmidecode -s system-uuid' does not provide a valid uuid. In case
+# 'dmidecode' does not provide a valid UUID and none is provided here, a
+# temporary UUID will be generated.
+# Keep the format of the example UUID below. UUID must not have all digits
+# be the same.
+
+# NB This default all-zeros UUID will not work. Replace
+# it with the output of the 'uuidgen' command and then
+# uncomment this entry
+#host_uuid = "00000000-0000-0000-0000-000000000000"
+
+###################################################################
+# Keepalive protocol:
+# This allows libvirtd to detect broken client connections or even
+# dead clients.  A keepalive message is sent to a client after
+# keepalive_interval seconds of inactivity to check if the client is
+# still responding; keepalive_count is a maximum number of keepalive
+# messages that are allowed to be sent to the client without getting
+# any response before the connection is considered broken.  In other
+# words, the connection is automatically closed approximately after
+# keepalive_interval * (keepalive_count + 1) seconds since the last
+# message received from the client.  If keepalive_interval is set to
+# -1, libvirtd will never send keepalive requests; however clients
+# can still send them and the daemon will send responses.  When
+# keepalive_count is set to 0, connections will be automatically
+# closed after keepalive_interval seconds of inactivity without
+# sending any keepalive messages.
+#
+#keepalive_interval = 5
+#keepalive_count = 5
+#
+# If set to 1, libvirtd will refuse to talk to clients that do not
+# support keepalive protocol.  Defaults to 0.
+#
+#keepalive_required = 1
\ No newline at end of file
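
As a worked example of the timeout formula in the comment above: with keepalive_interval = 5 and keepalive_count = 5 (the defaults shown), an unresponsive client is disconnected after approximately 5 * (5 + 1) = 30 seconds, while keepalive_interval = -1 disables the probes from the daemon side entirely.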
diff --git a/nova/files/liberty/libvirtd.conf.RedHat b/nova/files/liberty/libvirtd.conf.RedHat
new file mode 120000
index 0000000..2a7b101
--- /dev/null
+++ b/nova/files/liberty/libvirtd.conf.RedHat
@@ -0,0 +1 @@
+libvirtd.conf.Debian
\ No newline at end of file
diff --git a/nova/files/liberty/nova-compute.conf.Debian b/nova/files/liberty/nova-compute.conf.Debian
new file mode 100644
index 0000000..85ed88b
--- /dev/null
+++ b/nova/files/liberty/nova-compute.conf.Debian
@@ -0,0 +1,172 @@
+{%- from "nova/map.jinja" import compute with context %}
+
+[DEFAULT]
+dhcpbridge_flagfile=/etc/nova/nova.conf
+dhcpbridge=/usr/bin/nova-dhcpbridge
+logdir=/var/log/nova
+state_path=/var/lib/nova
+lock_path=/var/lock/nova
+force_dhcp_release=True
+iscsi_helper=tgtadm
+libvirt_use_virtio_for_bridges=True
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+verbose=True
+ec2_private_dns_show_ip=False
+api_paste_config=/etc/nova/api-paste.ini
+volumes_path=/var/lib/nova/volumes
+enabled_apis=ec2,osapi_compute,metadata
+
+{%- if compute.image.use_cow is defined %}
+use_cow_images = {{ compute.image.use_cow }}
+{%- endif %}
+
+{%- if compute.reserved_host_memory_mb is defined %}
+reserved_host_memory_mb = {{ compute.reserved_host_memory_mb }}
+{%- endif %}
+
+allow_resize_to_same_host=True
+
+live_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE
+live_migration_retry_count=30
+
+#Neutron
+neutron_admin_username={{ compute.network.user }}
+neutron_admin_password={{ compute.network.password }}
+neutron_admin_tenant_name={{ compute.identity.tenant }}
+neutron_admin_auth_url=http://{{ compute.identity.host }}:{{ compute.identity.port }}/v{% if compute.identity.get('version', 2) == 2 %}2.0{% else %}3{% endif %}
+neutron_url=http://{{ compute.network.host }}:{{ compute.network.port }}
+neutron_region_name = {{ compute.network.region }}
+#neutron_url = http://10.0.102.35:9696/
+
+
+auth_strategy = keystone
+libvirt_nonblocking = True
+libvirt_inject_partition = -1
+
+neutron_url_timeout = 300
+network_api_class=nova.network.neutronv2.api.API
+compute_driver = libvirt.LibvirtDriver
+
+#NoVNC
+vnc_enabled=true
+vncserver_enabled = true
+vncserver_listen=0.0.0.0
+vncserver_proxyclient_address={{ compute.bind.vnc_address }}
+novncproxy_base_url={{ compute.vncproxy_url }}/vnc_auto.html
+novncproxy_port={{ compute.bind.vnc_port }}
+
+heal_instance_info_cache_interval = {{ compute.heal_instance_info_cache_interval }}
+
+{%- if compute.cache is defined %}
+memcached_servers={%- for member in compute.cache.members %}{{ member.host }}:11211{% if not loop.last %},{% endif %}{%- endfor %}
+{%- endif %}
+
+security_group_api = neutron
+heal_instance_info_cache_interval = 0
+libvirt_cpu_mode = host-passthrough
+image_cache_manager_interval = 0
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+glance_port = 9292
+glance_num_retries = 10
+
+rabbit_retry_interval = 1
+rabbit_retry_backoff = 2
+
+rpc_cast_timeout = 30
+rpc_conn_pool_size = 300
+rpc_response_timeout = 3600
+rpc_thread_pool_size = 70
+report_interval = 5
+
+block_device_allocate_retries=600
+block_device_allocate_retries_interval=10
+
+resume_guests_state_on_host_boot = True
+service_down_time = 90
+
+{% if pillar.ceilometer is defined %}
+
+instance_usage_audit = True
+instance_usage_audit_period = hour
+notify_on_state_change = vm_and_task_state
+
+{% endif %}
+
+{%- if compute.notification is defined %}
+notification_driver = {{ compute.notification.driver }}
+
+{%- if compute.notification.topics is defined %}
+notification_topics = {{ compute.notification.topics }}
+{%- endif %}
+
+{%- if compute.notification.notify_on is defined %}
+{%- for key, value in compute.notification.notify_on.iteritems() %}
+notify_on_{{ key }} = {{ value }}
+{%- endfor %}
+{%- endif %}
+
+{%- endif %}
+
+{%- if compute.identity.get('version', 2) == 2 %}
+
+[keystone_authtoken]
+signing_dirname=/tmp/keystone-signing-nova
+admin_tenant_name = {{ compute.identity.tenant }}
+admin_user = {{ compute.identity.user }}
+admin_password = {{ compute.identity.password }}
+auth_host = {{ compute.identity.host }}
+auth_port = {{ compute.identity.port }}
+auth_protocol=http
+auth_uri=http://{{ compute.identity.host }}:5000
+
+{%- else %}
+
+[keystone_authtoken]
+signing_dirname=/tmp/keystone-signing-nova
+identity_uri = http://{{ compute.identity.host }}:35357/v3
+revocation_cache_time = 10
+auth_section = generic_password
+
+[generic_password]
+auth_plugin = password
+user_domain_id = {{ compute.identity.get('domain', 'default') }}
+project_domain_id = {{ compute.identity.get('domain', 'default') }}
+project_name = {{ compute.identity.tenant }}
+username = {{ compute.identity.user }}
+password = {{ compute.identity.password }}
+auth_uri=http://{{ compute.identity.host }}:5000/v3
+auth_url=http://{{ compute.identity.host }}:35357/v3
+
+{%- endif %}
+
+[oslo_messaging_rabbit]
+{%- if compute.message_queue.members is defined %}
+rabbit_hosts = {% for member in compute.message_queue.members -%}
+                   {{ member.host }}:{{ member.get('port', 5672) }}
+                   {%- if not loop.last -%},{%- endif -%}
+               {%- endfor -%}
+{%- else %}
+rabbit_host = {{ compute.message_queue.host }}
+rabbit_port = {{ compute.message_queue.port }}
+{%- endif %}
+
+rabbit_userid = {{ compute.message_queue.user }}
+rabbit_password = {{ compute.message_queue.password }}
+rabbit_virtual_host = {{ compute.message_queue.virtual_host }}
+
+[glance]
+
+host = {{ compute.image.host }}
+
+[neutron]
+auth_strategy = keystone
+admin_username={{ compute.network.user }}
+admin_password={{ compute.network.password }}
+admin_tenant_name={{ compute.identity.tenant }}
+admin_auth_url=http://{{ compute.identity.host }}:{{ compute.identity.port }}/v{% if compute.identity.get('version', 2) == 2 %}2.0{% else %}3{% endif %}
+url=http://{{ compute.network.host }}:{{ compute.network.port }}
+region_name = {{ compute.network.region }}
+
+[cinder]
+os_region_name = {{ compute.identity.region }}
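
The notification block in this template turns each key of compute.notification.notify_on into a notify_on_* option. A hypothetical pillar fragment (again assuming the nova:compute mapping via nova/map.jinja):

    nova:
      compute:
        notification:
          driver: messagingv2
          topics: notifications
          notify_on:
            state_change: vm_and_task_state

renders:

    notification_driver = messagingv2
    notification_topics = notifications
    notify_on_state_change = vm_and_task_state

Note that iteritems() ties the template to a Python 2 renderer, which was the Salt default at the time of this release.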
diff --git a/nova/files/liberty/nova-compute.conf.Redhat b/nova/files/liberty/nova-compute.conf.Redhat
new file mode 120000
index 0000000..ad4c8f6
--- /dev/null
+++ b/nova/files/liberty/nova-compute.conf.Redhat
@@ -0,0 +1 @@
+nova-compute.conf.Debian
\ No newline at end of file
diff --git a/nova/files/liberty/nova-controller.conf.Debian b/nova/files/liberty/nova-controller.conf.Debian
new file mode 100644
index 0000000..d0b970b
--- /dev/null
+++ b/nova/files/liberty/nova-controller.conf.Debian
@@ -0,0 +1,209 @@
+{%- from "nova/map.jinja" import controller with context %}
+[DEFAULT]
+logdir = /var/log/nova
+verbose = True
+{%- if controller.debug %}
+debug = True
+{%- else %}
+debug = False
+{%- endif %}
+state_path = /var/lib/nova
+lock_path = /var/lib/nova/tmp
+volumes_dir = /etc/nova/volumes
+dhcpbridge = /usr/bin/nova-dhcpbridge
+dhcpbridge_flagfile = /etc/nova/nova.conf
+force_dhcp_release = True
+injected_network_template = /usr/share/nova/interfaces.template
+libvirt_nonblocking = True
+libvirt_use_virtio_for_bridges=True
+libvirt_inject_partition = -1
+vif_plugging_is_fatal = False
+vif_plugging_timeout = 0
+cpu_allocation_ratio = {{ controller.cpu_allocation_ratio }}
+ram_allocation_ratio = {{ controller.ram_allocation_ratio }}
+disk_allocation_ratio = {{ controller.disk_allocation_ratio }}
+scheduler_default_filters = {{ controller.scheduler_default_filters }}
+
+allow_resize_to_same_host = True
+
+logdir=/var/log/nova
+iscsi_helper=tgtadm
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+ec2_private_dns_show_ip=True
+api_paste_config=/etc/nova/api-paste.ini
+volumes_path=/var/lib/nova/volumes
+
+sql_connection = {{ controller.database.engine }}://{{ controller.database.user }}:{{ controller.database.password }}@{{ controller.database.host }}/{{ controller.database.name }}
+
+network_api_class = nova.network.neutronv2.api.API
+
+compute_driver = libvirt.LibvirtDriver
+libvirt_type=kvm
+rootwrap_config = /etc/nova/rootwrap.conf
+auth_strategy = keystone
+firewall_driver=nova.virt.firewall.NoopFirewallDriver
+enabled_apis = ec2,osapi_compute,metadata
+
+my_ip={{ controller.bind.private_address }}
+
+
+
+neutron_auth_strategy = keystone
+neutron_admin_auth_url = http://{{ controller.identity.host }}:35357/v{% if controller.identity.get('version', 2) == 2 %}2.0{% else %}3{% endif %}
+{% if pillar.neutron is defined %}
+neutron_admin_password={{ pillar.neutron.server.identity.password }}
+neutron_admin_tenant_name={{ pillar.neutron.server.identity.tenant }}
+neutron_admin_username={{ pillar.neutron.server.identity.user }}
+neutron_region_name={{ pillar.neutron.server.identity.region }}
+{%- else %}
+neutron_admin_password={{ controller.network.password }}
+neutron_admin_tenant_name={{ controller.network.tenant }}
+neutron_admin_username={{ controller.network.user }}
+neutron_region_name={{ controller.network.region }}
+{%- endif %}
+neutron_url=http://{{ controller.network.host }}:{{ controller.network.port }}
+
+
+linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+security_group_api = neutron
+
+rpc_backend = nova.rpc.impl_kombu
+start_guests_on_host_boot=true
+
+{%- if controller.cache is defined %}
+memcached_servers={%- for member in controller.cache.members %}{{ member.host }}:11211{% if not loop.last %},{% endif %}{%- endfor %}
+{%- endif %}
+
+
+volume_api_class=nova.volume.cinder.API
+ec2_listen={{ controller.bind.private_address }}
+osapi_volume_listen={{ controller.bind.private_address }}
+osapi_compute_listen={{ controller.bind.private_address }}
+metadata_listen={{ controller.bind.private_address }}
+glance_host = {{ controller.glance.host }}
+osapi_compute_workers = {{ controller.workers }}
+
+#NoVNC
+vnc_enabled=true
+{%- if pillar.nova.compute is defined %}
+vncserver_listen={{ controller.bind.private_address }}
+vncserver_proxyclient_address={{ controller.bind.private_address }}
+{%- else %}
+vncserver_listen=0.0.0.0
+{%- endif %}
+novncproxy_base_url={{ controller.vncproxy_url }}/vnc_auto.html
+novncproxy_port={{ controller.bind.get('vncproxy_port', '6080') }}
+
+{%- if controller.get('networking', 'default') != "contrail" %}
+neutron_metadata_proxy_shared_secret={{ controller.metadata.password }}
+{%- endif %}
+
+allow_resize_to_same_host=True
+start_guests_on_host_boot=true
+
+rpc_cast_timeout = 30
+rpc_conn_pool_size = 300
+rpc_response_timeout = 3600
+rpc_thread_pool_size = 70
+report_interval = 5
+
+block_device_allocate_retries=600
+block_device_allocate_retries_interval=10
+
+[oslo_concurrency]
+
+lock_path = /var/lib/nova/tmp
+
+[oslo_messaging_rabbit]
+{%- if controller.message_queue.members is defined %}
+rabbit_hosts = {% for member in controller.message_queue.members -%}
+                   {{ member.host }}:{{ member.get('port', 5672) }}
+                   {%- if not loop.last -%},{%- endif -%}
+               {%- endfor -%}
+{%- else %}
+rabbit_host = {{ controller.message_queue.host }}
+rabbit_port = {{ controller.message_queue.port }}
+{%- endif %}
+
+rabbit_userid = {{ controller.message_queue.user }}
+rabbit_password = {{ controller.message_queue.password }}
+rabbit_virtual_host = {{ controller.message_queue.virtual_host }}
+
+rabbit_retry_interval = 1
+rabbit_retry_backoff = 2
+
+{%- if controller.identity.get('version', 2) == 2 %}
+
+[keystone_authtoken]
+signing_dirname=/tmp/keystone-signing-nova
+signing_dir=/tmp/keystone-signing-nova
+admin_tenant_name = {{ controller.identity.tenant }}
+admin_user = {{ controller.identity.user }}
+admin_password = {{ controller.identity.password }}
+auth_host = {{ controller.identity.host }}
+auth_port = {{ controller.identity.port }}
+auth_protocol=http
+auth_uri=http://{{ controller.identity.host }}:5000
+
+{%- else %}
+
+[keystone_authtoken]
+signing_dirname=/tmp/keystone-signing-nova
+identity_uri = http://{{ controller.identity.host }}:35357/v3
+revocation_cache_time = 10
+auth_section = generic_password
+
+[generic_password]
+auth_plugin = password
+user_domain_id = {{ controller.identity.get('domain', 'default') }}
+project_domain_id = {{ controller.identity.get('domain', 'default') }}
+project_name = {{ controller.identity.tenant }}
+username = {{ controller.identity.user }}
+password = {{ controller.identity.password }}
+auth_uri=http://{{ controller.identity.host }}:5000/v3
+auth_url=http://{{ controller.identity.host }}:35357/v3
+
+{%- endif %}
+
+[conductor]
+workers = {{ controller.workers }}
+
+[database]
+idle_timeout = 180
+min_pool_size = 100
+max_pool_size = 700
+max_overflow = 100
+retry_interval = 5
+max_retries = -1
+db_max_retries = 3
+db_retry_interval = 1
+connection_debug = 10
+pool_timeout = 120
+connection = {{ controller.database.engine }}://{{ controller.database.user }}:{{ controller.database.password }}@{{ controller.database.host }}/{{ controller.database.name }}
+
+[glance]
+
+host = {{ controller.glance.host }}
+
+[neutron]
+auth_strategy = keystone
+admin_auth_url = http://{{ controller.identity.host }}:35357/v{% if controller.identity.get('version', 2) == 2 %}2.0{% else %}3{% endif %}
+{% if pillar.neutron is defined %}
+admin_password={{ pillar.neutron.server.identity.password }}
+admin_tenant_name={{ pillar.neutron.server.identity.tenant }}
+admin_username={{ pillar.neutron.server.identity.user }}
+region_name = {{ pillar.neutron.server.identity.region }}
+{%- else %}
+admin_password={{ controller.network.password }}
+admin_tenant_name={{ controller.network.tenant }}
+admin_username={{ controller.network.user }}
+region_name = {{ controller.network.region }}
+{%- endif %}
+url=http://{{ controller.network.host }}:{{ controller.network.port }}
+
+service_metadata_proxy=True
+
+[cinder]
+os_region_name = {{ controller.identity.region }}
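
The [oslo_messaging_rabbit] block renders either a clustered rabbit_hosts list or a single host/port pair. A hypothetical pillar fragment with two members, one on a non-default port:

    nova:
      controller:
        message_queue:
          members:
            - host: 10.0.0.21
            - host: 10.0.0.22
              port: 5673
          user: openstack
          password: secretpass
          virtual_host: '/openstack'

renders:

    rabbit_hosts = 10.0.0.21:5672,10.0.0.22:5673

member.get('port', 5672) supplies the default AMQP port, and the whitespace-control markers ({%- ... -%}) collapse the loop onto a single line.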
diff --git a/nova/files/liberty/nova-controller.conf.RedHat b/nova/files/liberty/nova-controller.conf.RedHat
new file mode 120000
index 0000000..09c7524
--- /dev/null
+++ b/nova/files/liberty/nova-controller.conf.RedHat
@@ -0,0 +1 @@
+nova-controller.conf.Debian
\ No newline at end of file
diff --git a/nova/files/liberty/qemu.conf.Debian b/nova/files/liberty/qemu.conf.Debian
new file mode 100644
index 0000000..5c5722b
--- /dev/null
+++ b/nova/files/liberty/qemu.conf.Debian
@@ -0,0 +1,479 @@
+{%- from "nova/map.jinja" import compute with context %}
+# Master configuration file for the QEMU driver.
+# All settings described here are optional - if omitted, sensible
+# defaults are used.
+
+# VNC is configured to listen on 127.0.0.1 by default.
+# To make it listen on all public interfaces, uncomment
+# this next option.
+#
+# NB, strong recommendation to enable TLS + x509 certificate
+# verification when allowing public access
+#
+#vnc_listen = "0.0.0.0"
+
+# Enable this option to have VNC served over an automatically created
+# unix socket. This prevents unprivileged access from users on the
+# host machine, though most VNC clients do not support it.
+#
+# This will only be enabled for VNC configurations that do not have
+# a hardcoded 'listen' or 'socket' value. This setting takes preference
+# over vnc_listen.
+#
+#vnc_auto_unix_socket = 1
+
+# Enable use of TLS encryption on the VNC server. This requires
+# a VNC client which supports the VeNCrypt protocol extension.
+# Examples include vinagre, virt-viewer, virt-manager and vencrypt
+# itself. UltraVNC, RealVNC, TightVNC do not support this
+#
+# It is necessary to setup CA and issue a server certificate
+# before enabling this.
+#
+#vnc_tls = 1
+
+
+# Use of TLS requires that x509 certificates be issued. The
+# default is to keep them in /etc/pki/libvirt-vnc. This directory
+# must contain
+#
+#  ca-cert.pem - the CA master certificate
+#  server-cert.pem - the server certificate signed with ca-cert.pem
+#  server-key.pem  - the server private key
+#
+# This option allows the certificate directory to be changed
+#
+#vnc_tls_x509_cert_dir = "/etc/pki/libvirt-vnc"
+
+
+# The default TLS configuration only uses certificates for the server
+# allowing the client to verify the server's identity and establish
+# an encrypted channel.
+#
+# It is possible to use x509 certificates for authentication too, by
+# issuing a x509 certificate to every client who needs to connect.
+#
+# Enabling this option will reject any client who does not have a
+# certificate signed by the CA in /etc/pki/libvirt-vnc/ca-cert.pem
+#
+#vnc_tls_x509_verify = 1
+
+
+# The default VNC password. Only 8 bytes are significant for
+# VNC passwords. This parameter is only used if the per-domain
+# XML config does not already provide a password. To allow
+# access without passwords, leave this commented out. An empty
+# string will still enable passwords, but be rejected by QEMU,
+# effectively preventing any use of VNC. Obviously change this
+# example here before you set this.
+#
+#vnc_password = "XYZ12345"
+
+
+# Enable use of SASL encryption on the VNC server. This requires
+# a VNC client which supports the SASL protocol extension.
+# Examples include vinagre, virt-viewer and virt-manager
+# itself. UltraVNC, RealVNC, TightVNC do not support this
+#
+# It is necessary to configure /etc/sasl2/qemu.conf to choose
+# the desired SASL plugin (eg, GSSAPI for Kerberos)
+#
+#vnc_sasl = 1
+
+
+# The default SASL configuration file is located in /etc/sasl2/
+# When running libvirtd unprivileged, it may be desirable to
+# override the configs in this location. Set this parameter to
+# point to the directory, and create a qemu.conf in that location
+#
+#vnc_sasl_dir = "/some/directory/sasl2"
+
+
+# QEMU implements an extension for providing audio over a VNC connection,
+# though if your VNC client does not support it, your only chance for getting
+# sound output is through regular audio backends. By default, libvirt will
+# disable all QEMU sound backends if using VNC, since they can cause
+# permissions issues. Enabling this option will make libvirtd honor the
+# QEMU_AUDIO_DRV environment variable when using VNC.
+#
+#vnc_allow_host_audio = 0
+
+
+
+# SPICE is configured to listen on 127.0.0.1 by default.
+# To make it listen on all public interfaces, uncomment
+# this next option.
+#
+# NB, strong recommendation to enable TLS + x509 certificate
+# verification when allowing public access
+#
+#spice_listen = "0.0.0.0"
+
+
+# Enable use of TLS encryption on the SPICE server.
+#
+# It is necessary to setup CA and issue a server certificate
+# before enabling this.
+#
+#spice_tls = 1
+
+
+# Use of TLS requires that x509 certificates be issued. The
+# default is to keep them in /etc/pki/libvirt-spice. This directory
+# must contain
+#
+#  ca-cert.pem - the CA master certificate
+#  server-cert.pem - the server certificate signed with ca-cert.pem
+#  server-key.pem  - the server private key
+#
+# This option allows the certificate directory to be changed.
+#
+#spice_tls_x509_cert_dir = "/etc/pki/libvirt-spice"
+
+
+# The default SPICE password. This parameter is only used if the
+# per-domain XML config does not already provide a password. To
+# allow access without passwords, leave this commented out. An
+# empty string will still enable passwords, but be rejected by
+# QEMU, effectively preventing any use of SPICE. Obviously change
+# this example here before you set this.
+#
+#spice_password = "XYZ12345"
+
+
+# Enable use of SASL encryption on the SPICE server. This requires
+# a SPICE client which supports the SASL protocol extension.
+#
+# It is necessary to configure /etc/sasl2/qemu.conf to choose
+# the desired SASL plugin (eg, GSSAPI for Kerberos)
+#
+#spice_sasl = 1
+
+# The default SASL configuration file is located in /etc/sasl2/
+# When running libvirtd unprivileged, it may be desirable to
+# override the configs in this location. Set this parameter to
+# point to the directory, and create a qemu.conf in that location
+#
+#spice_sasl_dir = "/some/directory/sasl2"
+
+
+# By default, if no graphical front end is configured, libvirt will disable
+# QEMU audio output since directly talking to alsa/pulseaudio may not work
+# with various security settings. If you know what you're doing, enable
+# the setting below and libvirt will pass through the QEMU_AUDIO_DRV
+# environment variable when using nographics.
+#
+#nographics_allow_host_audio = 1
+
+
+# Override the port for creating both VNC and SPICE sessions (min).
+# This defaults to 5900 and increases for consecutive sessions
+# or when ports are occupied, until it hits the maximum.
+#
+# Minimum must be greater than or equal to 5900, as a lower number would
+# result in a negative VNC display number.
+#
+# Maximum must be less than 65536, because higher numbers do not make
+# sense as a port number.
+#
+#remote_display_port_min = 5900
+#remote_display_port_max = 65535
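+# For example, with the defaults above the first VNC/SPICE session gets
+# port 5900 (display :0), the next free one 5901 (display :1), and so
+# on, until remote_display_port_max is reached.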
+
+# VNC WebSocket port policies, same rules apply as with remote display
+# ports.  VNC WebSockets use similar display <-> port mappings, with
+# the exception being that ports start from 5700 instead of 5900.
+#
+#remote_websocket_port_min = 5700
+#remote_websocket_port_max = 65535
+
+# The default security driver is SELinux. If SELinux is disabled
+# on the host, then the security driver will automatically disable
+# itself. If you wish to disable QEMU SELinux security driver while
+# leaving SELinux enabled for the host in general, then set this
+# to 'none' instead. It's also possible to use more than one security
+# driver at the same time; for this, use a list of names separated by
+# comma and delimited by square brackets. For example:
+#
+#       security_driver = [ "selinux", "apparmor" ]
+#
+# Notes: The DAC security driver is always enabled; as a result, the
+# value of security_driver cannot contain "dac".  The value "none" is
+# a special value; security_driver can be set to that value in
+# isolation, but it cannot appear in a list of drivers.
+#
+#security_driver = "selinux"
+
+# If set to non-zero, then the default security labeling
+# will make guests confined. If set to zero, then guests
+# will be unconfined by default. Defaults to 1.
+#security_default_confined = 1
+
+# If set to non-zero, then attempts to create unconfined
+# guests will be blocked. Defaults to 0.
+#security_require_confined = 1
+
+# The user for QEMU processes run by the system instance. It can be
+# specified as a user name or as a user id. The qemu driver will try to
+# parse this value first as a name and then, if the name doesn't exist,
+# as a user id.
+#
+# Since a sequence of digits is a valid user name, a leading plus sign
+# can be used to ensure that a user id will not be interpreted as a user
+# name.
+#
+# Some examples of valid values are:
+#
+#       user = "qemu"   # A user named "qemu"
+#       user = "+0"     # Super user (uid=0)
+#       user = "100"    # A user named "100" or a user with uid=100
+#
+#user = "root"
+
+# The group for QEMU processes run by the system instance. It can be
+# specified in a similar way to user.
+#group = "root"
+
+# Whether libvirt should dynamically change file ownership
+# to match the configured user/group above. Defaults to 1.
+# Set to 0 to disable file ownership changes.
+#dynamic_ownership = 1
+
+
+# What cgroup controllers to make use of with QEMU guests
+#
+#  - 'cpu' - use for scheduler tunables
+#  - 'devices' - use for device whitelisting
+#  - 'memory' - use for memory tunables
+#  - 'blkio' - use for block devices I/O tunables
+#  - 'cpuset' - use for CPUs and memory nodes
+#  - 'cpuacct' - use for CPUs statistics.
+#
+# NB, even if configured here, they won't be used unless
+# the administrator has mounted cgroups, e.g.:
+#
+#  mkdir /dev/cgroup
+#  mount -t cgroup -o devices,cpu,memory,blkio,cpuset none /dev/cgroup
+#
+# They can be mounted anywhere, and different controllers
+# can be mounted in different locations. libvirt will detect
+# where they are located.
+#
+#cgroup_controllers = [ "cpu", "devices", "memory", "blkio", "cpuset", "cpuacct" ]
+
+# This is the basic set of devices allowed / required by
+# all virtual machines.
+#
+# As well as this, any configured block backed disks,
+# all sound devices, and all PTY devices are allowed.
+#
+# This will only need setting if newer QEMU suddenly
+# wants some device we don't already know about.
+#
+#cgroup_device_acl = [
+#    "/dev/null", "/dev/full", "/dev/zero",
+#    "/dev/random", "/dev/urandom",
+#    "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
+#    "/dev/rtc","/dev/hpet", "/dev/vfio/vfio"
+#]
+
+
+# The default format for Qemu/KVM guest save images is raw; that is, the
+# memory from the domain is dumped out directly to a file.  If you have
+# guests with a large amount of memory, however, this can take up quite
+# a bit of space.  If you would like to compress the images while they
+# are being saved to disk, you can also set "lzop", "gzip", "bzip2", or "xz"
+# for save_image_format.  Note that this means you slow down the process of
+# saving a domain in order to save disk space; the list above is in descending
+# order by performance and ascending order by compression ratio.
+#
+# save_image_format is used when you use 'virsh save' or 'virsh managedsave'
+# at scheduled saving, and it is an error if the specified save_image_format
+# is not valid, or the requested compression program can't be found.
+#
+# dump_image_format is used when you use 'virsh dump' at emergency
+# crashdump, and if the specified dump_image_format is not valid, or
+# the requested compression program can't be found, this falls
+# back to "raw" compression.
+#
+# snapshot_image_format specifies the compression algorithm of the memory save
+# image when an external snapshot of a domain is taken. This does not apply
+# to the disk image format. It is an error if the specified format isn't valid,
+# or the requested compression program can't be found.
+#
+#save_image_format = "raw"
+#dump_image_format = "raw"
+#snapshot_image_format = "raw"
+
+# When a domain is configured to be auto-dumped when libvirtd receives a
+# watchdog event from qemu guest, libvirtd will save dump files in directory
+# specified by auto_dump_path. Default value is /var/lib/libvirt/qemu/dump
+#
+#auto_dump_path = "/var/lib/libvirt/qemu/dump"
+
+# When a domain is configured to be auto-dumped, enabling this flag
+# has the same effect as using the VIR_DUMP_BYPASS_CACHE flag with the
+# virDomainCoreDump API.  That is, the system will avoid using the
+# file system cache while writing the dump file, but may cause
+# slower operation.
+#
+#auto_dump_bypass_cache = 0
+
+# When a domain is configured to be auto-started, enabling this flag
+# has the same effect as using the VIR_DOMAIN_START_BYPASS_CACHE flag
+# with the virDomainCreateWithFlags API.  That is, the system will
+# avoid using the file system cache when restoring any managed state
+# file, but may cause slower operation.
+#
+#auto_start_bypass_cache = 0
+
+# If provided by the host and a hugetlbfs mount point is configured,
+# a guest may request huge page backing.  When this mount point is
+# unspecified here, determination of a host mount point in /proc/mounts
+# will be attempted.  Specifying an explicit mount overrides detection
+# of the same in /proc/mounts.  Setting the mount point to "" will
+# disable guest hugepage backing.
+#
+# NB, within this mount point, guests will create memory backing files
+# in a location of $MOUNTPOINT/libvirt/qemu
+#
+#hugetlbfs_mount = "/dev/hugepages"
+
+
+# Path to the setuid helper for creating tap devices.  This executable
+# is used to create <source type='bridge'> interfaces when libvirtd is
+# running unprivileged.  libvirt invokes the helper directly, instead
+# of using "-netdev bridge", for security reasons.
+#bridge_helper = "/usr/libexec/qemu-bridge-helper"
+
+
+
+# If clear_emulator_capabilities is enabled, libvirt will drop all
+# privileged capabilities of the QEmu/KVM emulator. This is enabled by
+# default.
+#
+# Warning: Disabling this option means that a compromised guest can
+# exploit the privileges and possibly do damage to the host.
+#
+#clear_emulator_capabilities = 1
+
+
+# If enabled, libvirt will have QEMU set its process name to
+# "qemu:VM_NAME", where VM_NAME is the name of the VM. The QEMU
+# process will appear as "qemu:VM_NAME" in process listings and
+# other system monitoring tools. By default, QEMU does not set
+# its process title, so the complete QEMU command (emulator and
+# its arguments) appears in process listings.
+#
+#set_process_name = 1
+
+
+# If max_processes is set to a positive integer, libvirt will use
+# it to set the maximum number of processes that can be run by the
+# qemu user. This can be used to override the default value set by
+# the host OS. The same applies to max_files, which sets the limit
+# on the maximum number of open files.
+#
+{%- if compute.qemu is defined %}
+
+{%- if compute.qemu.max_processes is defined %}
+max_processes = {{ compute.qemu.max_processes }}
+{%- endif %}
+
+{%- if compute.qemu.max_files is defined %}
+max_files = {{ compute.qemu.max_files }}
+{%- endif %}
+
+{%- endif %}
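+# A minimal pillar sketch that would render the two settings above (the
+# nova:compute:qemu keys are an assumption for illustration, matching the
+# compute.qemu.* lookups in this template):
+#
+#   nova:
+#     compute:
+#       qemu:
+#         max_processes: 4096
+#         max_files: 1024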
+
+# mac_filter enables MAC addressed based filtering on bridge ports.
+# This currently requires ebtables to be installed.
+#
+#mac_filter = 1
+
+
+# By default, PCI devices below non-ACS switch are not allowed to be assigned
+# to guests. By setting relaxed_acs_check to 1 such devices will be allowed to
+# be assigned to guests.
+#
+#relaxed_acs_check = 1
+
+
+# If allow_disk_format_probing is enabled, libvirt will probe disk
+# images to attempt to identify their format, when not otherwise
+# specified in the XML. This is disabled by default.
+#
+# WARNING: Enabling probing is a security hole in almost all
+# deployments. It is strongly recommended that users update their
+# guest XML <disk> elements to include  <driver type='XXXX'/>
+# elements instead of enabling this option.
+#
+#allow_disk_format_probing = 1
+
+
+# To enable 'Sanlock' project based locking of the file
+# content (to prevent two VMs writing to the same
+# disk), uncomment this
+#
+#lock_manager = "sanlock"
+
+
+
+# Set a limit on the maximum number of API calls queued on one domain.
+# All further calls over this threshold will fail to acquire the job
+# lock. Setting this to zero turns the feature off.
+# Note that the job lock is per domain.
+#
+#max_queued = 0
+
+###################################################################
+# Keepalive protocol:
+# This allows qemu driver to detect broken connections to remote
+# libvirtd during peer-to-peer migration.  A keepalive message is
+# sent to the daemon after keepalive_interval seconds of inactivity
+# to check if the daemon is still responding; keepalive_count is a
+# maximum number of keepalive messages that are allowed to be sent
+# to the daemon without getting any response before the connection
+# is considered broken.  In other words, the connection is
+# automatically closed approximately after
+# keepalive_interval * (keepalive_count + 1) seconds since the last
+# message received from the daemon.  If keepalive_interval is set to
+# -1, qemu driver will not send keepalive requests during
+# peer-to-peer migration; however, the remote libvirtd can still
+# send them and source libvirtd will send responses.  When
+# keepalive_count is set to 0, connections will be automatically
+# closed after keepalive_interval seconds of inactivity without
+# sending any keepalive messages.
+#
+#keepalive_interval = 5
+#keepalive_count = 5
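+# Worked example: with the default keepalive_interval = 5 and
+# keepalive_count = 5 above, a broken connection is closed approximately
+# 5 * (5 + 1) = 30 seconds after the last message from the daemon.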
+
+
+
+# Use seccomp syscall whitelisting in QEMU.
+# 1 = on, 0 = off, -1 = use QEMU default
+# Defaults to -1.
+#
+#seccomp_sandbox = 1
+
+
+
+# Override the listen address for all incoming migrations. Defaults to
+# 0.0.0.0, or :: if both host and qemu are capable of IPv6.
+#migration_address = "127.0.0.1"
+
+
+# Override the port range used for incoming migrations.
+#
+# Minimum must be greater than 0, however when QEMU is not running as root,
+# setting the minimum to be lower than 1024 will not work.
+#
+# Maximum must not be greater than 65535.
+#
+#migration_port_min = 49152
+#migration_port_max = 49215
+cgroup_device_acl = [
+    "/dev/null", "/dev/full", "/dev/zero",
+    "/dev/random", "/dev/urandom",
+    "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
+    "/dev/rtc", "/dev/hpet","/dev/net/tun",
+]
diff --git a/nova/files/liberty/qemu.conf.RedHat b/nova/files/liberty/qemu.conf.RedHat
new file mode 120000
index 0000000..1d23f19
--- /dev/null
+++ b/nova/files/liberty/qemu.conf.RedHat
@@ -0,0 +1 @@
+qemu.conf.Debian
\ No newline at end of file
diff --git a/nova/files/mitaka/api-paste.ini.Debian b/nova/files/mitaka/api-paste.ini.Debian
new file mode 100644
index 0000000..951ae72
--- /dev/null
+++ b/nova/files/mitaka/api-paste.ini.Debian
@@ -0,0 +1,100 @@
+############
+# Metadata #
+############
+[composite:metadata]
+use = egg:Paste#urlmap
+/: meta
+
+[pipeline:meta]
+pipeline = cors metaapp
+
+[app:metaapp]
+paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory
+
+#############
+# OpenStack #
+#############
+
+[composite:osapi_compute]
+use = call:nova.api.openstack.urlmap:urlmap_factory
+/: oscomputeversions
+# starting in Liberty the v21 implementation replaces the v2
+# implementation, and it is suggested that you use it as the default. If
+# this causes issues with your clients, you can roll back to the
+# *frozen* v2 api by commenting out the above stanza and using the
+# following instead::
+# /v2: openstack_compute_api_legacy_v2
+# if rolling back to v2 fixes your issue please file a critical bug
+# at - https://bugs.launchpad.net/nova/+bugs
+#
+# v21 is an exact feature match for v2, except it has more stringent
+# input validation on the wsgi surface (prevents fuzzing early on the
+# API). It also provides new features via API microversions which
+# clients can opt into. Unaware clients will receive the same frozen
+# v2 API feature set, but with some relaxed validation
+/v2: openstack_compute_api_v21_legacy_v2_compatible
+/v2.1: openstack_compute_api_v21
+
+# NOTE: this is deprecated in favor of openstack_compute_api_v21_legacy_v2_compatible
+[composite:openstack_compute_api_legacy_v2]
+use = call:nova.api.auth:pipeline_factory
+noauth2 = cors compute_req_id faultwrap sizelimit noauth2 legacy_ratelimit osapi_compute_app_legacy_v2
+keystone = cors compute_req_id faultwrap sizelimit authtoken keystonecontext legacy_ratelimit osapi_compute_app_legacy_v2
+keystone_nolimit = cors compute_req_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_legacy_v2
+
+[composite:openstack_compute_api_v21]
+use = call:nova.api.auth:pipeline_factory_v21
+noauth2 = cors compute_req_id faultwrap sizelimit noauth2 osapi_compute_app_v21
+keystone = cors compute_req_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v21
+
+[composite:openstack_compute_api_v21_legacy_v2_compatible]
+use = call:nova.api.auth:pipeline_factory_v21
+noauth2 = cors compute_req_id faultwrap sizelimit noauth2 legacy_v2_compatible osapi_compute_app_v21
+keystone = cors compute_req_id faultwrap sizelimit authtoken keystonecontext legacy_v2_compatible osapi_compute_app_v21
+
+[filter:request_id]
+paste.filter_factory = oslo_middleware:RequestId.factory
+
+[filter:compute_req_id]
+paste.filter_factory = nova.api.compute_req_id:ComputeReqIdMiddleware.factory
+
+[filter:faultwrap]
+paste.filter_factory = nova.api.openstack:FaultWrapper.factory
+
+[filter:noauth2]
+paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
+
+[filter:legacy_ratelimit]
+paste.filter_factory = nova.api.openstack.compute.limits:RateLimitingMiddleware.factory
+
+[filter:sizelimit]
+paste.filter_factory = oslo_middleware:RequestBodySizeLimiter.factory
+
+[filter:legacy_v2_compatible]
+paste.filter_factory = nova.api.openstack:LegacyV2CompatibleWrapper.factory
+
+[app:osapi_compute_app_legacy_v2]
+paste.app_factory = nova.api.openstack.compute:APIRouter.factory
+
+[app:osapi_compute_app_v21]
+paste.app_factory = nova.api.openstack.compute:APIRouterV21.factory
+
+[pipeline:oscomputeversions]
+pipeline = faultwrap oscomputeversionapp
+
+[app:oscomputeversionapp]
+paste.app_factory = nova.api.openstack.compute.versions:Versions.factory
+
+##########
+# Shared #
+##########
+
+[filter:cors]
+paste.filter_factory = oslo_middleware.cors:filter_factory
+oslo_config_project = nova
+
+[filter:keystonecontext]
+paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
diff --git a/nova/files/mitaka/api-paste.ini.RedHat b/nova/files/mitaka/api-paste.ini.RedHat
new file mode 120000
index 0000000..08fd76a
--- /dev/null
+++ b/nova/files/mitaka/api-paste.ini.RedHat
@@ -0,0 +1 @@
+api-paste.ini.Debian
\ No newline at end of file
diff --git a/nova/files/mitaka/libvirt.Debian b/nova/files/mitaka/libvirt.Debian
new file mode 100644
index 0000000..1704dc4
--- /dev/null
+++ b/nova/files/mitaka/libvirt.Debian
@@ -0,0 +1,12 @@
+# Defaults for libvirt-bin initscript (/etc/init.d/libvirt-bin)
+# This is a POSIX shell fragment
+
+# Start libvirtd to handle qemu/kvm:
+start_libvirtd="yes"
+
+# options passed to libvirtd, add "-l" to listen on tcp
+libvirtd_opts="-d -l"
+LIBVIRTD_ARGS="--listen"
+
+# pass in location of kerberos keytab
+#export KRB5_KTNAME=/etc/libvirt/libvirt.keytab
\ No newline at end of file
diff --git a/nova/files/mitaka/libvirt.RedHat b/nova/files/mitaka/libvirt.RedHat
new file mode 100644
index 0000000..f63cdab
--- /dev/null
+++ b/nova/files/mitaka/libvirt.RedHat
@@ -0,0 +1,11 @@
+# Defaults for libvirt-bin initscript (/etc/init.d/libvirt-bin)
+# This is a POSIX shell fragment
+
+# Start libvirtd to handle qemu/kvm:
+start_libvirtd="yes"
+
+# options passed to libvirtd, add "-l" to listen on tcp
+libvirtd_opts="-d -l"
+
+# pass in location of kerberos keytab
+#export KRB5_KTNAME=/etc/libvirt/libvirt.keytab
diff --git a/nova/files/mitaka/libvirtd.conf.Debian b/nova/files/mitaka/libvirtd.conf.Debian
new file mode 100644
index 0000000..8333dcb
--- /dev/null
+++ b/nova/files/mitaka/libvirtd.conf.Debian
@@ -0,0 +1,402 @@
+# Master libvirt daemon configuration file
+#
+# For further information consult http://libvirt.org/format.html
+#
+# NOTE: the tests/daemon-conf regression test script requires
+# that each "PARAMETER = VALUE" line in this file have the parameter
+# name just after a leading "#".
+
+#################################################################
+#
+# Network connectivity controls
+#
+
+# Flag listening for secure TLS connections on the public TCP/IP port.
+# NB, must pass the --listen flag to the libvirtd process for this to
+# have any effect.
+#
+# It is necessary to setup a CA and issue server certificates before
+# using this capability.
+#
+# This is enabled by default, uncomment this to disable it
+#listen_tls = 0
+
+
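+# NOTE: the three settings below enable unauthenticated cleartext TCP
+# access to libvirtd (as used for live migration between hosts). As the
+# auth_tcp guidance further down warns, this is only appropriate on a
+# trusted management network; otherwise enable SASL or TLS.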
+listen_tls = 0
+listen_tcp = 1
+auth_tcp = "none"
+
+
+# Listen for unencrypted TCP connections on the public TCP/IP port.
+# NB, must pass the --listen flag to the libvirtd process for this to
+# have any effect.
+#
+# Using the TCP socket requires SASL authentication by default. Only
+# SASL mechanisms which support data encryption are allowed. This is
+# DIGEST_MD5 and GSSAPI (Kerberos5)
+#
+# This is disabled by default, uncomment this to enable it.
+#listen_tcp = 1
+
+
+
+# Override the port for accepting secure TLS connections
+# This can be a port number, or service name
+#
+#tls_port = "16514"
+
+# Override the port for accepting insecure TCP connections
+# This can be a port number, or service name
+#
+#tcp_port = "16509"
+
+
+# Override the default configuration which binds to all network
+# interfaces. This can be a numeric IPv4/6 address, or hostname
+#
+#listen_addr = "192.168.0.1"
+
+
+# Flag toggling mDNS advertisement of the libvirt service.
+#
+# Alternatively can disable for all services on a host by
+# stopping the Avahi daemon
+#
+# This is disabled by default, uncomment this to enable it
+#mdns_adv = 1
+
+# Override the default mDNS advertisement name. This must be
+# unique on the immediate broadcast network.
+#
+# The default is "Virtualization Host HOSTNAME", where HOSTNAME
+# is substituted for the short hostname of the machine (without domain)
+#
+#mdns_name = "Virtualization Host Joe Demo"
+
+
+#################################################################
+#
+# UNIX socket access controls
+#
+
+# Set the UNIX domain socket group ownership. This can be used to
+# allow a 'trusted' set of users access to management capabilities
+# without becoming root.
+#
+# This is restricted to 'root' by default.
+unix_sock_group = "libvirtd"
+
+# Set the UNIX socket permissions for the R/O socket. This is used
+# for monitoring VM status only
+#
+# Default allows any user. If setting group ownership, you may want to
+# restrict this to:
+#unix_sock_ro_perms = "0777"
+
+# Set the UNIX socket permissions for the R/W socket. This is used
+# for full management of VMs
+#
+# Default allows only root. If PolicyKit is enabled on the socket,
+# the default will change to allow everyone (eg, 0777)
+#
+# If not using PolicyKit and setting group ownership for access
+# control then you may want to relax this to:
+unix_sock_rw_perms = "0770"
+
+# Set the name of the directory in which sockets will be found/created.
+#unix_sock_dir = "/var/run/libvirt"
+
+#################################################################
+#
+# Authentication.
+#
+#  - none: do not perform auth checks. If you can connect to the
+#          socket you are allowed. This is suitable if there are
+#          restrictions on connecting to the socket (eg, UNIX
+#          socket permissions), or if there is a lower layer in
+#          the network providing auth (eg, TLS/x509 certificates)
+#
+#  - sasl: use SASL infrastructure. The actual auth scheme is then
+#          controlled from /etc/sasl2/libvirt.conf. For the TCP
+#          socket only GSSAPI & DIGEST-MD5 mechanisms will be used.
+#          For non-TCP or TLS sockets, any scheme is allowed.
+#
+#  - polkit: use PolicyKit to authenticate. This is only suitable
+#            for use on the UNIX sockets. The default policy will
+#            require a user to supply their own password to gain
+#            full read/write access (aka sudo like), while anyone
+#            is allowed read/only access.
+#
+# Set an authentication scheme for UNIX read-only sockets
+# By default socket permissions allow anyone to connect
+#
+# To restrict monitoring of domains you may wish to enable
+# an authentication mechanism here
+auth_unix_ro = "none"
+
+# Set an authentication scheme for UNIX read-write sockets
+# By default socket permissions only allow root. If PolicyKit
+# support was compiled into libvirt, the default will be to
+# use 'polkit' auth.
+#
+# If the unix_sock_rw_perms are changed you may wish to enable
+# an authentication mechanism here
+auth_unix_rw = "none"
+
+# Change the authentication scheme for TCP sockets.
+#
+# If you don't enable SASL, then all TCP traffic is cleartext.
+# Don't do this outside of a dev/test scenario. For real world
+# use, always enable SASL and use the GSSAPI or DIGEST-MD5
+# mechanism in /etc/sasl2/libvirt.conf
+#auth_tcp = "sasl"
+#auth_tcp = "none"
+
+# Change the authentication scheme for TLS sockets.
+#
+# TLS sockets already have encryption provided by the TLS
+# layer, and limited authentication is done by certificates
+#
+# It is possible to make use of any SASL authentication
+# mechanism as well, by using 'sasl' for this option
+#auth_tls = "none"
+
+
+
+#################################################################
+#
+# TLS x509 certificate configuration
+#
+
+
+# Override the default server key file path
+#
+#key_file = "/etc/pki/libvirt/private/serverkey.pem"
+
+# Override the default server certificate file path
+#
+#cert_file = "/etc/pki/libvirt/servercert.pem"
+
+# Override the default CA certificate path
+#
+#ca_file = "/etc/pki/CA/cacert.pem"
+
+# Specify a certificate revocation list.
+#
+# Defaults to not using a CRL, uncomment to enable it
+#crl_file = "/etc/pki/CA/crl.pem"
+
+
+
+#################################################################
+#
+# Authorization controls
+#
+
+
+# Flag to disable verification of our own server certificates
+#
+# When libvirtd starts it performs some sanity checks against
+# its own certificates.
+#
+# Default is to always run sanity checks. Uncommenting this
+# will disable sanity checks, which is not a good idea
+#tls_no_sanity_certificate = 1
+
+# Flag to disable verification of client certificates
+#
+# Client certificate verification is the primary authentication mechanism.
+# Any client which does not present a certificate signed by the CA
+# will be rejected.
+#
+# Default is to always verify. Uncommenting this will disable
+# verification - make sure an IP whitelist is set
+#tls_no_verify_certificate = 1
+
+
+# A whitelist of allowed x509 Distinguished Names
+# This list may contain wildcards such as
+#
+#    "C=GB,ST=London,L=London,O=Red Hat,CN=*"
+#
+# See the POSIX fnmatch function for the format of the wildcards.
+#
+# NB If this is an empty list, no client can connect, so comment out
+# entirely rather than using an empty list to disable these checks
+#
+# By default, no DNs are checked
+#tls_allowed_dn_list = ["DN1", "DN2"]
+
+
+# A whitelist of allowed SASL usernames. The format for usernames
+# depends on the SASL authentication mechanism. Kerberos usernames
+# look like username@REALM
+#
+# This list may contain wildcards such as
+#
+#    "*@EXAMPLE.COM"
+#
+# See the POSIX fnmatch function for the format of the wildcards.
+#
+# NB If this is an empty list, no client can connect, so comment out
+# entirely rather than using an empty list to disable these checks
+#
+# By default, no usernames are checked
+#sasl_allowed_username_list = ["joe@EXAMPLE.COM", "fred@EXAMPLE.COM" ]
+
+
+
+#################################################################
+#
+# Processing controls
+#
+
+# The maximum number of concurrent client connections to allow
+# over all sockets combined.
+#max_clients = 20
+
+
+# The minimum limit sets the number of workers to start up
+# initially. If the number of active clients exceeds this,
+# then more threads are spawned, up to the max_workers limit.
+# Typically you'd want max_workers to equal the maximum number
+# of clients allowed.
+#min_workers = 5
+#max_workers = 20
+
+
+# The number of priority workers. If all workers from the above
+# pool are stuck, some calls marked as high priority
+# (notably domainDestroy) can still be executed in this pool.
+#prio_workers = 5
+
+# Total global limit on concurrent RPC calls. Should be
+# at least as large as max_workers. Beyond this, RPC requests
+# will be read into memory and queued. This directly impacts
+# memory usage; currently each request requires 256 KB of
+# memory, so by default up to 5 MB of memory is used.
+#
+# XXX this isn't actually enforced yet, only the per-client
+# limit is used so far
+#max_requests = 20
+
+# Limit on concurrent requests from a single client
+# connection. To avoid one client monopolizing the server
+# this should be a small fraction of the global max_requests
+# and max_workers parameters
+#max_client_requests = 5
+
+#################################################################
+#
+# Logging controls
+#
+
+# Logging level: 4 errors, 3 warnings, 2 information, 1 debug
+# basically 1 will log everything possible
+#log_level = 3
+
+# Logging filters:
+# A filter allows selecting a different logging level for a given category
+# of logs
+# The format for a filter is one of:
+#    x:name
+#    x:+name
+#      where name is a string which is matched against source file name,
+#      e.g., "remote", "qemu", or "util/json", the optional "+" prefix
+#      tells libvirt to log stack trace for each message matching name,
+#      and x is the minimal level where matching messages should be logged:
+#    1: DEBUG
+#    2: INFO
+#    3: WARNING
+#    4: ERROR
+#
+# Multiple filters can be defined in a single @filters; they just need to be
+# separated by spaces.
+#
+# e.g. to only get warning or errors from the remote layer and only errors
+# from the event layer:
+#log_filters="3:remote 4:event"
+
+# Logging outputs:
+# An output is one of the places to save logging information
+# The format for an output can be:
+#    x:stderr
+#      output goes to stderr
+#    x:syslog:name
+#      use syslog for the output and use the given name as the ident
+#    x:file:file_path
+#      output to a file, with the given filepath
+# In all cases the x prefix is the minimal level, acting as a filter
+#    1: DEBUG
+#    2: INFO
+#    3: WARNING
+#    4: ERROR
+#
+# Multiple outputs can be defined; they just need to be separated by spaces.
+# e.g. to log all warnings and errors to syslog under the libvirtd ident:
+#log_outputs="3:syslog:libvirtd"
+#
+
+# Log debug buffer size: default 64
+# The daemon keeps an internal debug log buffer which will be dumped in case
+# of crash or upon receiving a SIGUSR2 signal. This setting allows overriding
+# the default buffer size in kilobytes.
+# If the value is 0 or less, the debug log buffer is deactivated
+#log_buffer_size = 64
+
+
+##################################################################
+#
+# Auditing
+#
+# This setting allows usage of the auditing subsystem to be altered:
+#
+#   audit_level == 0  -> disable all auditing
+#   audit_level == 1  -> enable auditing, only if enabled on host (default)
+#   audit_level == 2  -> enable auditing, and exit if disabled on host
+#
+#audit_level = 2
+#
+# If set to 1, then audit messages will also be sent
+# via libvirt logging infrastructure. Defaults to 0
+#
+#audit_logging = 1
+
+###################################################################
+# UUID of the host:
+# Provide the UUID of the host here in case the command
+# 'dmidecode -s system-uuid' does not provide a valid uuid. In case
+# 'dmidecode' does not provide a valid UUID and none is provided here, a
+# temporary UUID will be generated.
+# Keep the format of the example UUID below. The UUID must not consist of
+# all identical digits.
+
+# NB This default all-zeros UUID will not work. Replace
+# it with the output of the 'uuidgen' command and then
+# uncomment this entry
+#host_uuid = "00000000-0000-0000-0000-000000000000"
+
+###################################################################
+# Keepalive protocol:
+# This allows libvirtd to detect broken client connections or even
+# dead clients.  A keepalive message is sent to a client after
+# keepalive_interval seconds of inactivity to check if the client is
+# still responding; keepalive_count is a maximum number of keepalive
+# messages that are allowed to be sent to the client without getting
+# any response before the connection is considered broken.  In other
+# words, the connection is automatically closed approximately after
+# keepalive_interval * (keepalive_count + 1) seconds since the last
+# message received from the client.  If keepalive_interval is set to
+# -1, libvirtd will never send keepalive requests; however clients
+# can still send them and the daemon will send responses.  When
+# keepalive_count is set to 0, connections will be automatically
+# closed after keepalive_interval seconds of inactivity without
+# sending any keepalive messages.
+#
+#keepalive_interval = 5
+#keepalive_count = 5
+#
+# If set to 1, libvirtd will refuse to talk to clients that do not
+# support keepalive protocol.  Defaults to 0.
+#
+#keepalive_required = 1
\ No newline at end of file
diff --git a/nova/files/mitaka/libvirtd.conf.RedHat b/nova/files/mitaka/libvirtd.conf.RedHat
new file mode 120000
index 0000000..2a7b101
--- /dev/null
+++ b/nova/files/mitaka/libvirtd.conf.RedHat
@@ -0,0 +1 @@
+libvirtd.conf.Debian
\ No newline at end of file
diff --git a/nova/files/mitaka/nova-compute.conf.Debian b/nova/files/mitaka/nova-compute.conf.Debian
new file mode 100644
index 0000000..ef8c184
--- /dev/null
+++ b/nova/files/mitaka/nova-compute.conf.Debian
@@ -0,0 +1,145 @@
+{%- from "nova/map.jinja" import compute with context %}
+
+[DEFAULT]
+dhcpbridge_flagfile=/etc/nova/nova.conf
+dhcpbridge=/usr/bin/nova-dhcpbridge
+logdir=/var/log/nova
+state_path=/var/lib/nova
+force_dhcp_release=True
+iscsi_helper=tgtadm
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+verbose=True
+api_paste_config=/etc/nova/api-paste.ini
+volumes_path=/var/lib/nova/volumes
+enabled_apis=osapi_compute,metadata
+
+use_neutron = True
+
+{%- if compute.image.use_cow is defined %}
+use_cow_images = {{ compute.image.use_cow }}
+{%- endif %}
+
+{%- if compute.reserved_host_memory_mb is defined %}
+reserved_host_memory_mb = {{ compute.reserved_host_memory_mb }}
+{%- endif %}
+
+allow_resize_to_same_host=True
+
+live_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE
+live_migration_retry_count=30
+
+auth_strategy = keystone
+libvirt_nonblocking = True
+
+neutron_url_timeout = 300
+compute_driver = libvirt.LibvirtDriver
+
+heal_instance_info_cache_interval = {{ compute.heal_instance_info_cache_interval }}
+
+image_cache_manager_interval = 0
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+glance_port = 9292
+glance_num_retries = 10
+
+
+rpc_cast_timeout = 30
+rpc_response_timeout = 3600
+executor_thread_pool_size = 70
+report_interval = 5
+
+block_device_allocate_retries=600
+block_device_allocate_retries_interval=10
+
+resume_guests_state_on_host_boot = True
+service_down_time = 90
+
+{% if pillar.ceilometer is defined %}
+
+instance_usage_audit = True
+instance_usage_audit_period = hour
+notify_on_state_change = vm_and_task_state
+
+{% endif %}
+
+[oslo_concurrency]
+lock_path = /var/lib/nova/tmp
+
+[oslo_messaging_notifications]
+{%- if compute.notification is defined %}
+driver = messagingv2
+{%- endif %}
+
+[vnc]
+enabled = true
+novncproxy_base_url={{ compute.vncproxy_url }}/vnc_auto.html
+novncproxy_port={{ compute.bind.vnc_port }}
+vncserver_listen=0.0.0.0
+vncserver_proxyclient_address={{ compute.bind.vnc_address }}
+
+[spice]
+enabled = false
+html5proxy_base_url = {{ compute.vncproxy_url }}/spice_auto.html
+
+[cache]
+{%- if compute.cache is defined %}
+enabled = true
+memcached_servers={%- for member in compute.cache.members %}{{ member.host }}:11211{% if not loop.last %},{% endif %}{%- endfor %}
+{%- endif %}
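+# For illustration: given a hypothetical compute.cache.members list with
+# hosts 10.0.0.11 and 10.0.0.12, the loop above renders
+# memcached_servers=10.0.0.11:11211,10.0.0.12:11211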
+
+[libvirt]
+inject_partition = -1
+use_usb_tablet = True
+cpu_mode = host-passthrough
+virt_type = kvm
+use_virtio_for_bridges = True
+
+[keystone_authtoken]
+signing_dir=/tmp/keystone-signing-nova
+revocation_cache_time = 10
+auth_type = password
+user_domain_id = {{ compute.identity.get('domain', 'default') }}
+project_domain_id = {{ compute.identity.get('domain', 'default') }}
+project_name = {{ compute.identity.tenant }}
+username = {{ compute.identity.user }}
+password = {{ compute.identity.password }}
+auth_uri=http://{{ compute.identity.host }}:5000
+auth_url=http://{{ compute.identity.host }}:35357
+{%- if compute.cache is defined %}
+memcached_servers={%- for member in compute.cache.members %}{{ member.host }}:11211{% if not loop.last %},{% endif %}{%- endfor %}
+{%- endif %}
+
+[oslo_messaging_rabbit]
+{%- if compute.message_queue.members is defined %}
+rabbit_hosts = {% for member in compute.message_queue.members -%}
+                   {{ member.host }}:{{ member.get('port', 5672) }}
+                   {%- if not loop.last -%},{%- endif -%}
+               {%- endfor -%}
+{%- else %}
+rabbit_host = {{ compute.message_queue.host }}
+rabbit_port = {{ compute.message_queue.port }}
+{%- endif %}
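+# For illustration: with a hypothetical message_queue.members list of
+# 10.0.0.21 (default port) and 10.0.0.22 (port 5673), the loop above
+# renders roughly rabbit_hosts = 10.0.0.21:5672,10.0.0.22:5673; without
+# members, the single rabbit_host/rabbit_port pair is used instead.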
+
+rabbit_userid = {{ compute.message_queue.user }}
+rabbit_password = {{ compute.message_queue.password }}
+rabbit_virtual_host = {{ compute.message_queue.virtual_host }}
+rpc_conn_pool_size = 300
+rabbit_retry_interval = 1
+rabbit_retry_backoff = 2
+
+[glance]
+
+host = {{ compute.image.host }}
+
+[neutron]
+auth_strategy = keystone
+admin_username={{ compute.network.user }}
+admin_password={{ compute.network.password }}
+admin_tenant_name={{ compute.identity.tenant }}
+admin_auth_url=http://{{ compute.identity.host }}:{{ compute.identity.port }}/v{% if compute.identity.get('version', 2) == 2 %}2.0{% else %}3{% endif %}
+url=http://{{ compute.network.host }}:{{ compute.network.port }}
+region_name = {{ compute.network.region }}
+
+[cinder]
+os_region_name = {{ compute.identity.region }}
diff --git a/nova/files/mitaka/nova-compute.conf.RedHat b/nova/files/mitaka/nova-compute.conf.RedHat
new file mode 120000
index 0000000..ad4c8f6
--- /dev/null
+++ b/nova/files/mitaka/nova-compute.conf.RedHat
@@ -0,0 +1 @@
+nova-compute.conf.Debian
\ No newline at end of file
diff --git a/nova/files/mitaka/nova-controller.conf.Debian b/nova/files/mitaka/nova-controller.conf.Debian
new file mode 100644
index 0000000..37a72a3
--- /dev/null
+++ b/nova/files/mitaka/nova-controller.conf.Debian
@@ -0,0 +1,187 @@
+{%- from "nova/map.jinja" import controller with context %}
+[DEFAULT]
+verbose = True
+log-dir = /var/log/nova
+{%- if controller.debug %}
+debug = True
+{%- else %}
+debug = False
+{%- endif %}
+state_path = /var/lib/nova
+volumes_dir = /etc/nova/volumes
+dhcpbridge = /usr/bin/nova-dhcpbridge
+dhcpbridge_flagfile = /etc/nova/nova.conf
+force_dhcp_release = True
+injected_network_template = /usr/share/nova/interfaces.template
+libvirt_nonblocking = True
+vif_plugging_is_fatal = False
+vif_plugging_timeout = 0
+cpu_allocation_ratio = {{ controller.cpu_allocation_ratio }}
+ram_allocation_ratio = {{ controller.ram_allocation_ratio }}
+disk_allocation_ratio = {{ controller.disk_allocation_ratio }}
+scheduler_default_filters = {{ controller.scheduler_default_filters }}
+scheduler_driver = filter_scheduler
+allow_resize_to_same_host = True
+
+iscsi_helper=tgtadm
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+api_paste_config=/etc/nova/api-paste.ini
+
+compute_driver = libvirt.LibvirtDriver
+rootwrap_config = /etc/nova/rootwrap.conf
+auth_strategy = keystone
+firewall_driver=nova.virt.firewall.NoopFirewallDriver
+enabled_apis = osapi_compute,metadata
+
+my_ip={{ controller.bind.private_address }}
+
+
+use_neutron = True
+
+rpc_backend = rabbit
+
+osapi_volume_listen={{ controller.bind.private_address }}
+osapi_compute_listen={{ controller.bind.private_address }}
+metadata_listen={{ controller.bind.private_address }}
+glance_host = {{ controller.glance.host }}
+osapi_compute_workers = {{ controller.workers }}
+metadata_workers = {{ controller.workers }}
+
+
+{%- if controller.get('networking', 'default') != "contrail" %}
+neutron_metadata_proxy_shared_secret={{ controller.metadata.password }}
+{%- endif %}
+
+start_guests_on_host_boot=true
+
+rpc_cast_timeout = 30
+rpc_response_timeout = 3600
+rpc_thread_pool_size = 70
+report_interval = 5
+
+block_device_allocate_retries=600
+block_device_allocate_retries_interval=10
+
+[vnc]
+enabled = true
+novncproxy_host = 0.0.0.0
+novncproxy_base_url = {{ controller.vncproxy_url }}/vnc_auto.html
+novncproxy_port={{ controller.bind.get('vncproxy_port', '6080') }}
+{%- if pillar.nova.compute is defined %}
+vncserver_listen={{ controller.bind.private_address }}
+vncserver_proxyclient_address={{ controller.bind.private_address }}
+{%- else %}
+vncserver_listen=0.0.0.0
+{%- endif %}
+
+[spice]
+enabled = false
+html5proxy_base_url = {{ controller.vncproxy_url }}/spice_auto.html
+
+[libvirt]
+inject_partition = -1
+use_usb_tablet = True
+cpu_mode = host-passthrough
+virt_type = kvm
+use_virtio_for_bridges = True
+
+[oslo_concurrency]
+
+lock_path = /var/lib/nova/tmp
+
+[oslo_messaging_rabbit]
+{%- if controller.message_queue.members is defined %}
+rabbit_hosts = {% for member in controller.message_queue.members -%}
+                   {{ member.host }}:{{ member.get('port', 5672) }}
+                   {%- if not loop.last -%},{%- endif -%}
+               {%- endfor -%}
+{%- else %}
+rabbit_host = {{ controller.message_queue.host }}
+rabbit_port = {{ controller.message_queue.port }}
+{%- endif %}
+
+rabbit_userid = {{ controller.message_queue.user }}
+rabbit_password = {{ controller.message_queue.password }}
+rabbit_virtual_host = {{ controller.message_queue.virtual_host }}
+
+rabbit_retry_interval = 1
+rabbit_retry_backoff = 2
+rpc_conn_pool_size = 300
+
+[cache]
+{%- if controller.cache is defined %}
+enabled = true
+memcached_servers={%- for member in controller.cache.members %}{{ member.host }}:11211{% if not loop.last %},{% endif %}{%- endfor %}
+{%- endif %}
+
+[keystone_authtoken]
+revocation_cache_time = 10
+signing_dir=/tmp/keystone-signing-nova
+auth_type = password
+user_domain_id = {{ controller.identity.get('domain', 'default') }}
+project_domain_id = {{ controller.identity.get('domain', 'default') }}
+project_name = {{ controller.identity.tenant }}
+username = {{ controller.identity.user }}
+password = {{ controller.identity.password }}
+auth_uri=http://{{ controller.identity.host }}:5000
+auth_url=http://{{ controller.identity.host }}:35357
+{%- if controller.cache is defined %}
+memcached_servers={%- for member in controller.cache.members %}{{ member.host }}:11211{% if not loop.last %},{% endif %}{%- endfor %}
+{%- endif %}
+
+[conductor]
+workers = {{ controller.workers }}
+
+[database]
+idle_timeout = 180
+min_pool_size = 100
+max_pool_size = 700
+max_overflow = 100
+retry_interval = 5
+max_retries = -1
+db_max_retries = 3
+db_retry_interval = 1
+connection_debug = 10
+pool_timeout = 120
+connection = {{ controller.database.engine }}+pymysql://{{ controller.database.user }}:{{ controller.database.password }}@{{ controller.database.host }}/{{ controller.database.name }}
+
+[api_database]
+idle_timeout = 180
+min_pool_size = 100
+max_pool_size = 700
+max_overflow = 100
+retry_interval = 5
+max_retries = -1
+db_max_retries = 3
+db_retry_interval = 1
+connection_debug = 10
+pool_timeout = 120
+connection = {{ controller.database.engine }}+pymysql://{{ controller.database.user }}:{{ controller.database.password }}@{{ controller.database.host }}/{{ controller.database.name }}_api
+
+[glance]
+
+host = {{ controller.glance.host }}
+
+[neutron]
+auth_strategy = keystone
+admin_auth_url = http://{{ controller.identity.host }}:35357/v{% if controller.identity.get('version', 2) == 2 %}2.0{% else %}3{% endif %}
+{% if pillar.neutron is defined %}
+admin_password={{ pillar.neutron.server.identity.password }}
+admin_tenant_name={{ pillar.neutron.server.identity.tenant }}
+admin_username={{ pillar.neutron.server.identity.user }}
+region_name = {{ pillar.neutron.server.identity.region }}
+{%- else %}
+admin_password={{ controller.network.password }}
+admin_tenant_name={{ controller.network.tenant }}
+admin_username={{ controller.network.user }}
+region_name = {{ controller.network.region }}
+{%- endif %}
+url=http://{{ controller.network.host }}:{{ controller.network.port }}
+
+service_metadata_proxy=True
+
+[cinder]
+os_region_name = {{ controller.identity.region }}
diff --git a/nova/files/mitaka/nova-controller.conf.RedHat b/nova/files/mitaka/nova-controller.conf.RedHat
new file mode 120000
index 0000000..09c7524
--- /dev/null
+++ b/nova/files/mitaka/nova-controller.conf.RedHat
@@ -0,0 +1 @@
+nova-controller.conf.Debian
\ No newline at end of file
diff --git a/nova/files/mitaka/qemu.conf.Debian b/nova/files/mitaka/qemu.conf.Debian
new file mode 100644
index 0000000..5c5722b
--- /dev/null
+++ b/nova/files/mitaka/qemu.conf.Debian
@@ -0,0 +1,479 @@
+{%- from "nova/map.jinja" import compute with context %}
+# Master configuration file for the QEMU driver.
+# All settings described here are optional - if omitted, sensible
+# defaults are used.
+
+# VNC is configured to listen on 127.0.0.1 by default.
+# To make it listen on all public interfaces, uncomment
+# this next option.
+#
+# NB, strong recommendation to enable TLS + x509 certificate
+# verification when allowing public access
+#
+#vnc_listen = "0.0.0.0"
+
+# Enable this option to have VNC served over an automatically created
+# unix socket. This prevents unprivileged access from users on the
+# host machine, though most VNC clients do not support it.
+#
+# This will only be enabled for VNC configurations that do not have
+# a hardcoded 'listen' or 'socket' value. This setting takes precedence
+# over vnc_listen.
+#
+#vnc_auto_unix_socket = 1
+
+# Enable use of TLS encryption on the VNC server. This requires
+# a VNC client which supports the VeNCrypt protocol extension.
+# Examples include vinagre, virt-viewer, virt-manager and vencrypt
+# itself. UltraVNC, RealVNC, TightVNC do not support this
+#
+# It is necessary to setup CA and issue a server certificate
+# before enabling this.
+#
+#vnc_tls = 1
+
+
+# Use of TLS requires that x509 certificates be issued. The
+# default is to keep them in /etc/pki/libvirt-vnc. This directory
+# must contain
+#
+#  ca-cert.pem - the CA master certificate
+#  server-cert.pem - the server certificate signed with ca-cert.pem
+#  server-key.pem  - the server private key
+#
+# This option allows the certificate directory to be changed
+#
+#vnc_tls_x509_cert_dir = "/etc/pki/libvirt-vnc"
+
+
+# The default TLS configuration only uses certificates for the server
+# allowing the client to verify the server's identity and establish
+# an encrypted channel.
+#
+# It is possible to use x509 certificates for authentication too, by
+# issuing a x509 certificate to every client who needs to connect.
+#
+# Enabling this option will reject any client who does not have a
+# certificate signed by the CA in /etc/pki/libvirt-vnc/ca-cert.pem
+#
+#vnc_tls_x509_verify = 1
+
+
+# The default VNC password. Only 8 bytes are significant for
+# VNC passwords. This parameter is only used if the per-domain
+# XML config does not already provide a password. To allow
+# access without passwords, leave this commented out. An empty
+# string will still enable passwords, but be rejected by QEMU,
+# effectively preventing any use of VNC. Obviously change this
+# example here before you set this.
+#
+#vnc_password = "XYZ12345"
+
+
+# Enable use of SASL encryption on the VNC server. This requires
+# a VNC client which supports the SASL protocol extension.
+# Examples include vinagre, virt-viewer and virt-manager
+# itself. UltraVNC, RealVNC, TightVNC do not support this
+#
+# It is necessary to configure /etc/sasl2/qemu.conf to choose
+# the desired SASL plugin (eg, GSSAPI for Kerberos)
+#
+#vnc_sasl = 1
+
+
+# The default SASL configuration file is located in /etc/sasl2/
+# When running libvirtd unprivileged, it may be desirable to
+# override the configs in this location. Set this parameter to
+# point to the directory, and create a qemu.conf in that location
+#
+#vnc_sasl_dir = "/some/directory/sasl2"
+
+
+# QEMU implements an extension for providing audio over a VNC connection,
+# though if your VNC client does not support it, your only chance for getting
+# sound output is through regular audio backends. By default, libvirt will
+# disable all QEMU sound backends if using VNC, since they can cause
+# permissions issues. Enabling this option will make libvirtd honor the
+# QEMU_AUDIO_DRV environment variable when using VNC.
+#
+#vnc_allow_host_audio = 0
+
+
+
+# SPICE is configured to listen on 127.0.0.1 by default.
+# To make it listen on all public interfaces, uncomment
+# this next option.
+#
+# NB, strong recommendation to enable TLS + x509 certificate
+# verification when allowing public access
+#
+#spice_listen = "0.0.0.0"
+
+
+# Enable use of TLS encryption on the SPICE server.
+#
+# It is necessary to setup CA and issue a server certificate
+# before enabling this.
+#
+#spice_tls = 1
+
+
+# Use of TLS requires that x509 certificates be issued. The
+# default is to keep them in /etc/pki/libvirt-spice. This directory
+# must contain
+#
+#  ca-cert.pem - the CA master certificate
+#  server-cert.pem - the server certificate signed with ca-cert.pem
+#  server-key.pem  - the server private key
+#
+# This option allows the certificate directory to be changed.
+#
+#spice_tls_x509_cert_dir = "/etc/pki/libvirt-spice"
+
+
+# The default SPICE password. This parameter is only used if the
+# per-domain XML config does not already provide a password. To
+# allow access without passwords, leave this commented out. An
+# empty string will still enable passwords, but be rejected by
+# QEMU, effectively preventing any use of SPICE. Obviously change
+# this example here before you set this.
+#
+#spice_password = "XYZ12345"
+
+
+# Enable use of SASL encryption on the SPICE server. This requires
+# a SPICE client which supports the SASL protocol extension.
+#
+# It is necessary to configure /etc/sasl2/qemu.conf to choose
+# the desired SASL plugin (eg, GSSAPI for Kerberos)
+#
+#spice_sasl = 1
+
+# The default SASL configuration file is located in /etc/sasl2/
+# When running libvirtd unprivileged, it may be desirable to
+# override the configs in this location. Set this parameter to
+# point to the directory, and create a qemu.conf in that location
+#
+#spice_sasl_dir = "/some/directory/sasl2"
+
+
+# By default, if no graphical front end is configured, libvirt will disable
+# QEMU audio output since directly talking to alsa/pulseaudio may not work
+# with various security settings. If you know what you're doing, enable
+# the setting below and libvirt will pass through the QEMU_AUDIO_DRV
+# environment variable when using nographics.
+#
+#nographics_allow_host_audio = 1
+
+
+# Override the port for creating both VNC and SPICE sessions (min).
+# This defaults to 5900 and increases for consecutive sessions
+# or when ports are occupied, until it hits the maximum.
+#
+# Minimum must be greater than or equal to 5900, as a lower number would
+# result in a negative VNC display number.
+#
+# Maximum must be less than 65536, because higher numbers do not make
+# sense as a port number.
+#
+#remote_display_port_min = 5900
+#remote_display_port_max = 65535
+
+# VNC WebSocket port policies, same rules apply as with remote display
+# ports.  VNC WebSockets use similar display <-> port mappings, with
+# the exception being that ports start from 5700 instead of 5900.
+#
+#remote_websocket_port_min = 5700
+#remote_websocket_port_max = 65535
+
+# The default security driver is SELinux. If SELinux is disabled
+# on the host, then the security driver will automatically disable
+# itself. If you wish to disable QEMU SELinux security driver while
+# leaving SELinux enabled for the host in general, then set this
+# to 'none' instead. It's also possible to use more than one security
+# driver at the same time; for this, use a list of names separated by
+# comma and delimited by square brackets. For example:
+#
+#       security_driver = [ "selinux", "apparmor" ]
+#
+# Notes: The DAC security driver is always enabled; as a result, the
+# value of security_driver cannot contain "dac".  The value "none" is
+# a special value; security_driver can be set to that value in
+# isolation, but it cannot appear in a list of drivers.
+#
+#security_driver = "selinux"
+
+# If set to non-zero, then the default security labeling
+# will make guests confined. If set to zero, then guests
+# will be unconfined by default. Defaults to 1.
+#security_default_confined = 1
+
+# If set to non-zero, then attempts to create unconfined
+# guests will be blocked. Defaults to 0.
+#security_require_confined = 1
+
+# The user for QEMU processes run by the system instance. It can be
+# specified as a user name or as a user id. The qemu driver will try to
+# parse this value first as a name and then, if the name doesn't exist,
+# as a user id.
+#
+# Since a sequence of digits is a valid user name, a leading plus sign
+# can be used to ensure that a user id will not be interpreted as a user
+# name.
+#
+# Some examples of valid values are:
+#
+#       user = "qemu"   # A user named "qemu"
+#       user = "+0"     # Super user (uid=0)
+#       user = "100"    # A user named "100" or a user with uid=100
+#
+#user = "root"
+
+# The group for QEMU processes run by the system instance. It can be
+# specified in a similar way to user.
+#group = "root"
+
+# Whether libvirt should dynamically change file ownership
+# to match the configured user/group above. Defaults to 1.
+# Set to 0 to disable file ownership changes.
+#dynamic_ownership = 1
+
+
+# What cgroup controllers to make use of with QEMU guests
+#
+#  - 'cpu' - use for scheduler tunables
+#  - 'devices' - use for device whitelisting
+#  - 'memory' - use for memory tunables
+#  - 'blkio' - use for block devices I/O tunables
+#  - 'cpuset' - use for CPUs and memory nodes
+#  - 'cpuacct' - use for CPU statistics.
+#
+# NB, even if configured here, they won't be used unless
+# the administrator has mounted cgroups, e.g.:
+#
+#  mkdir /dev/cgroup
+#  mount -t cgroup -o devices,cpu,memory,blkio,cpuset none /dev/cgroup
+#
+# They can be mounted anywhere, and different controllers
+# can be mounted in different locations. libvirt will detect
+# where they are located.
+#
+#cgroup_controllers = [ "cpu", "devices", "memory", "blkio", "cpuset", "cpuacct" ]
+
+# This is the basic set of devices allowed / required by
+# all virtual machines.
+#
+# In addition to these, any configured block-backed disks,
+# all sound devices, and all PTY devices are allowed.
+#
+# This will only need setting if newer QEMU suddenly
+# wants some device we don't already know about.
+#
+#cgroup_device_acl = [
+#    "/dev/null", "/dev/full", "/dev/zero",
+#    "/dev/random", "/dev/urandom",
+#    "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
+#    "/dev/rtc","/dev/hpet", "/dev/vfio/vfio"
+#]
+
+
+# The default format for Qemu/KVM guest save images is raw; that is, the
+# memory from the domain is dumped out directly to a file.  If you have
+# guests with a large amount of memory, however, this can take up quite
+# a bit of space.  If you would like to compress the images while they
+# are being saved to disk, you can also set "lzop", "gzip", "bzip2", or "xz"
+# for save_image_format.  Note that this means you slow down the process of
+# saving a domain in order to save disk space; the list above is in descending
+# order by performance and ascending order by compression ratio.
+#
+# save_image_format is used when you use 'virsh save' or 'virsh managedsave'
+# for scheduled saving; it is an error if the specified save_image_format
+# is not valid, or the requested compression program can't be found.
+#
+# dump_image_format is used when you use 'virsh dump' for an emergency
+# crash dump; if the specified dump_image_format is not valid, or
+# the requested compression program can't be found, this falls
+# back to "raw" compression.
+#
+# snapshot_image_format specifies the compression algorithm of the memory save
+# image when an external snapshot of a domain is taken. This does not apply
+# to the disk image format. It is an error if the specified format isn't valid,
+# or the requested compression program can't be found.
+#
+#save_image_format = "raw"
+#dump_image_format = "raw"
+#snapshot_image_format = "raw"
+
+# When a domain is configured to be auto-dumped when libvirtd receives a
+# watchdog event from a qemu guest, libvirtd will save dump files in the
+# directory specified by auto_dump_path. The default is /var/lib/libvirt/qemu/dump.
+#
+#auto_dump_path = "/var/lib/libvirt/qemu/dump"
+
+# When a domain is configured to be auto-dumped, enabling this flag
+# has the same effect as using the VIR_DUMP_BYPASS_CACHE flag with the
+# virDomainCoreDump API.  That is, the system will avoid using the
+# file system cache while writing the dump file, although this may
+# slow the operation.
+#
+#auto_dump_bypass_cache = 0
+
+# When a domain is configured to be auto-started, enabling this flag
+# has the same effect as using the VIR_DOMAIN_START_BYPASS_CACHE flag
+# with the virDomainCreateWithFlags API.  That is, the system will
+# avoid using the file system cache when restoring any managed state
+# file, although this may slow the operation.
+#
+#auto_start_bypass_cache = 0
+
+# If provided by the host and a hugetlbfs mount point is configured,
+# a guest may request huge page backing.  When this mount point is
+# unspecified here, determination of a host mount point in /proc/mounts
+# will be attempted.  Specifying an explicit mount overrides detection
+# of the same in /proc/mounts.  Setting the mount point to "" will
+# disable guest hugepage backing.
+#
+# NB, within this mount point, guests will create memory backing files
+# in a location of $MOUNTPOINT/libvirt/qemu
+#
+#hugetlbfs_mount = "/dev/hugepages"
+
+
+# Path to the setuid helper for creating tap devices.  This executable
+# is used to create <source type='bridge'> interfaces when libvirtd is
+# running unprivileged.  libvirt invokes the helper directly, instead
+# of using "-netdev bridge", for security reasons.
+#bridge_helper = "/usr/libexec/qemu-bridge-helper"
+
+
+
+# If clear_emulator_capabilities is enabled, libvirt will drop all
+# privileged capabilities of the QEmu/KVM emulator. This is enabled by
+# default.
+#
+# Warning: Disabling this option means that a compromised guest can
+# exploit the privileges and possibly do damage to the host.
+#
+#clear_emulator_capabilities = 1
+
+
+# If enabled, libvirt will have QEMU set its process name to
+# "qemu:VM_NAME", where VM_NAME is the name of the VM. The QEMU
+# process will appear as "qemu:VM_NAME" in process listings and
+# other system monitoring tools. By default, QEMU does not set
+# its process title, so the complete QEMU command (emulator and
+# its arguments) appears in process listings.
+#
+#set_process_name = 1
+
+
+# If max_processes is set to a positive integer, libvirt will use
+# it to set the maximum number of processes that can be run by the
+# qemu user. This can be used to override the default value set by
+# the host OS. The same applies to max_files, which limits the
+# maximum number of open files.
+#
+{%- if compute.qemu is defined %}
+
+{%- if compute.qemu.max_processes is defined %}
+max_processes = {{ compute.qemu.max_processes }}
+{%- endif %}
+
+{%- if compute.qemu.max_files is defined %}
+max_files = {{ compute.qemu.max_files }}
+{%- endif %}
+
+{%- endif %}
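+# A hypothetical pillar example (keys resolved through map.jinja's
+# nova:compute merge; the values here are illustrative only) that would
+# render the two settings above:
+#
+#   nova:
+#     compute:
+#       qemu:
+#         max_processes: 4096
+#         max_files: 1024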
+
+# mac_filter enables MAC address based filtering on bridge ports.
+# This currently requires ebtables to be installed.
+#
+#mac_filter = 1
+
+
+# By default, PCI devices below a non-ACS switch are not allowed to be
+# assigned to guests. Setting relaxed_acs_check to 1 allows such devices
+# to be assigned to guests.
+#
+#relaxed_acs_check = 1
+
+
+# If allow_disk_format_probing is enabled, libvirt will probe disk
+# images to attempt to identify their format, when not otherwise
+# specified in the XML. This is disabled by default.
+#
+# WARNING: Enabling probing is a security hole in almost all
+# deployments. It is strongly recommended that users update their
+# guest XML <disk> elements to include <driver type='XXXX'/>
+# elements instead of enabling this option.
+#
+#allow_disk_format_probing = 1
+
+
+# To enable the 'Sanlock' project's locking of file
+# content (to prevent two VMs from writing to the same
+# disk), uncomment this
+#
+#lock_manager = "sanlock"
+
+
+
+# Set the limit on the maximum number of API calls queued on one domain.
+# Any further calls over this threshold will fail to acquire the job lock.
+# Setting this to zero turns the feature off.
+# Note that the job lock is per domain.
+#
+#max_queued = 0
+
+###################################################################
+# Keepalive protocol:
+# This allows the qemu driver to detect broken connections to a remote
+# libvirtd during peer-to-peer migration.  A keepalive message is
+# sent to the daemon after keepalive_interval seconds of inactivity
+# to check if the daemon is still responding; keepalive_count is the
+# maximum number of keepalive messages that are allowed to be sent
+# to the daemon without getting any response before the connection
+# is considered broken.  In other words, the connection is
+# automatically closed approximately
+# keepalive_interval * (keepalive_count + 1) seconds after the last
+# message received from the daemon.  If keepalive_interval is set to
+# -1, the qemu driver will not send keepalive requests during
+# peer-to-peer migration; however, the remote libvirtd can still
+# send them and the source libvirtd will send responses.  When
+# keepalive_count is set to 0, connections will be automatically
+# closed after keepalive_interval seconds of inactivity without
+# sending any keepalive messages.
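+#
+# For example, with the default values below, a broken connection is closed
+# roughly 5 * (5 + 1) = 30 seconds after the last message received from the
+# daemon.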
+#
+#keepalive_interval = 5
+#keepalive_count = 5
+
+
+
+# Use seccomp syscall whitelisting in QEMU.
+# 1 = on, 0 = off, -1 = use QEMU default
+# Defaults to -1.
+#
+#seccomp_sandbox = 1
+
+
+
+# Override the listen address for all incoming migrations. Defaults to
+# 0.0.0.0, or :: if both host and qemu are capable of IPv6.
+#migration_address = "127.0.0.1"
+
+
+# Override the port range used for incoming migrations.
+#
+# Minimum must be greater than 0; however, when QEMU is not running as root,
+# setting the minimum lower than 1024 will not work.
+#
+# Maximum must not be greater than 65535.
+#
+#migration_port_min = 49152
+#migration_port_max = 49215
+
+cgroup_device_acl = [
+    "/dev/null", "/dev/full", "/dev/zero",
+    "/dev/random", "/dev/urandom",
+    "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
+    "/dev/rtc", "/dev/hpet", "/dev/net/tun"
+]
diff --git a/nova/files/mitaka/qemu.conf.RedHat b/nova/files/mitaka/qemu.conf.RedHat
new file mode 120000
index 0000000..1d23f19
--- /dev/null
+++ b/nova/files/mitaka/qemu.conf.RedHat
@@ -0,0 +1 @@
+qemu.conf.Debian
\ No newline at end of file
diff --git a/nova/files/secret.xml b/nova/files/secret.xml
new file mode 100644
index 0000000..9816590
--- /dev/null
+++ b/nova/files/secret.xml
@@ -0,0 +1,7 @@
+{%- from "nova/map.jinja" import compute with context %}
+<secret ephemeral='no' private='no'>
+  <uuid>{{ compute.ceph.secret_uuid }}</uuid>
+  <usage type='ceph'>
+    <name>client.cinder secret</name>
+  </usage>
+</secret>
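+<!--
+  Illustrative usage, not part of this template: the rendered file is
+  typically registered with libvirt via "virsh secret-define secret.xml",
+  after which the Ceph client.cinder key is attached with
+  "virsh secret-set-value".
+-->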
diff --git a/nova/files/sensu.conf b/nova/files/sensu.conf
new file mode 100644
index 0000000..a2cff27
--- /dev/null
+++ b/nova/files/sensu.conf
@@ -0,0 +1,75 @@
+{%- from "nova/map.jinja" import server with context -%}
+local_nova_api_proc:
+  command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -C nova-api -u nova -c 1:30"
+  interval: 60
+  occurrences: 1
+  subscribers:
+  - local-nova-controller
+local_nova_scheduler_proc:
+  command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -C nova-scheduler -u nova -c 1:4"
+  interval: 60
+  occurrences: 1
+  subscribers:
+  - local-nova-controller
+local_nova_novncproxy_proc:
+  command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -C nova-novncproxy -u nova -c 1:5"
+  interval: 60
+  occurrences: 2
+  subscribers:
+  - local-nova-controller
+local_nova_cert_proc:
+  command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -C nova-cert -u nova -c 1:4"
+  interval: 60
+  occurrences: 1
+  subscribers:
+  - local-nova-controller
+local_nova_conductor_proc:
+  command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -C nova-conductor -u nova -c 1:10"
+  interval: 60
+  occurrences: 1
+  subscribers:
+  - local-nova-controller
+local_nova_consoleauth_proc:
+  command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -C nova-consoleaut -u nova -c 1:1"
+  interval: 60
+  occurrences: 1
+  subscribers:
+  - local-nova-controller
+#local_nova_api_rabbitmq_logs:
+#  command: "PATH=$PATH:/etc/sensu/plugins check_log.sh -p /var/log/nova/nova-api.log -l 10 -r 'Queue not found' -c 5"
+#  interval: 60
+#  occurrences: 1
+#  subscribers:
+#  - local-nova-controller
+local_nova_compute_proc:
+  command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -C nova-compute -u nova -c 1:10"
+  interval: 60
+  occurrences: 1
+  asset: asset01
+  customer: cloudlab
+  subscribers:
+  - local-nova-compute
+local_linux_storage_nova_instances_usage:
+  command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_disk -w 15% -c 5% -p /var/lib/nova/instances"
+  interval: 60
+  occurrences: 1
+  subscribers:
+  - local-nova-compute
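+# The :::key::: tokens in the remote checks below are Sensu command token
+# substitutions, resolved from client attributes (e.g. openstack.user) when
+# the check is executed.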
+remote_openstack_nova_instance:
+  command: "PATH=$PATH:/usr/local/bin oschecks-check_nova_instance --auth_url='http://:::openstack.host:::::::openstack.port:::/v2.0' --username :::openstack.user::: --password :::openstack.password::: --tenant :::openstack.tenant:::"
+  interval: 300
+  occurrences: 1
+  subscribers:
+  - remote-network
+remote_openstack_nova_api:
+  command: "PATH=$PATH:/usr/local/bin oschecks-check_nova_api --os-auth-url 'http://:::openstack.host:::::::openstack.port:::/v2.0' --os-username :::openstack.user::: --os-password :::openstack.password::: --os-tenant-name :::openstack.tenant:::"
+  interval: 60
+  occurrences: 1
+  subscribers:
+  - remote-network
+remote_nova_services:
+  command: "PATH=$PATH:/etc/sensu/plugins check_nova_services.sh -u :::openstack.user::: -p :::openstack.password::: -t :::openstack.tenant::: -h 'http://:::openstack.host:::::::openstack.port:::/v2.0'"
+  interval: 60
+  occurrences: 1
+  subscribers:
+  - remote-network
\ No newline at end of file
diff --git a/nova/init.sls b/nova/init.sls
new file mode 100644
index 0000000..61c4322
--- /dev/null
+++ b/nova/init.sls
@@ -0,0 +1,8 @@
+
+include:
+{%- if pillar.nova.controller is defined %}
+- nova.controller
+{%- endif %}
+{%- if pillar.nova.compute is defined %}
+- nova.compute
+{%- endif %}
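+
+# For example, a node whose pillar defines nova:compute (as in
+# tests/pillar/compute_single.sls) will include only nova.compute.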
diff --git a/nova/map.jinja b/nova/map.jinja
new file mode 100644
index 0000000..3c1aa98
--- /dev/null
+++ b/nova/map.jinja
@@ -0,0 +1,54 @@
+{% set compute_bind_defaults = {
+    'vnc_address': '10.0.0.10',
+    'vnc_port': '6080',
+    'vnc_name': 'cloud.domain.com',
+    'vnc_protocol': 'http',
+} %}
+
+{% set controller = salt['grains.filter_by']({
+    'Debian': {
+        'pkgs': ['nova-novncproxy', 'novnc', 'nova-api', 'nova-ajax-console-proxy', 'nova-cert', 'nova-conductor', 'nova-consoleauth', 'nova-doc', 'nova-scheduler', 'python-novaclient', 'python-memcache'],
+        'services': ['nova-api', 'nova-cert', 'nova-consoleauth', 'nova-scheduler', 'nova-conductor', 'nova-novncproxy'],
+        'debug': false,
+    },
+    'RedHat': {
+        'pkgs': ['openstack-nova-novncproxy', 'python-nova', 'openstack-nova-api', 'openstack-nova-console', 'openstack-nova-scheduler', 'python-novaclient', 'openstack-nova-common', 'openstack-nova-conductor', 'openstack-nova-cert'],
+        'services': ['openstack-nova-api', 'openstack-nova-cert', 'openstack-nova-consoleauth', 'openstack-nova-scheduler', 'openstack-nova-conductor', 'openstack-nova-novncproxy'],
+        'debug': false,
+    },
+}, merge=salt['pillar.get']('nova:controller')) %}
+
+{% set compute = salt['grains.filter_by']({
+    'Debian': {
+        'pkgs': ['nova-compute-kvm', 'python-novaclient', 'pm-utils', 'sysfsutils', 'sg3-utils', 'libvirt-bin', 'python-memcache', 'qemu-kvm','python-guestfs'],
+        'services': ['nova-compute', 'libvirt-bin'],
+        'libvirt_config': 'libvirtd.conf',
+        'libvirt_bin': '/etc/default/libvirt-bin',
+        'libvirt_service': 'libvirt-bin',
+        'bind': compute_bind_defaults,
+        'debug': false,
+        'identity': {
+            'region': 'RegionOne'
+        },
+        'network': {
+            'region': 'RegionOne'
+        },
+        'heal_instance_info_cache_interval': '60',
+    },
+    'RedHat': {
+        'pkgs': ['openstack-nova-compute', 'python-novaclient', 'python-nova', 'sysfsutils', 'sg3_utils'],
+        'services': ['messagebus', 'openstack-nova-compute', 'libvirtd'],
+        'libvirt_config': 'libvirt.conf',
+        'libvirt_bin': '/etc/sysconfig/libvirtd',
+        'libvirt_service': 'libvirtd',
+        'bind': compute_bind_defaults,
+        'debug': false,
+        'identity': {
+            'region': 'RegionOne'
+        },
+        'network': {
+            'region': 'RegionOne'
+        },
+        'heal_instance_info_cache_interval': '60',
+    },
+}, merge=salt['pillar.get']('nova:compute')) %}
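+
+{#-
+  A hypothetical pillar override: any key set under nova:compute is merged
+  over the OS-family defaults above, e.g.:
+
+  nova:
+    compute:
+      heal_instance_info_cache_interval: '120'
+-#}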
diff --git a/nova/meta/collectd.yml b/nova/meta/collectd.yml
new file mode 100644
index 0000000..b258fc3
--- /dev/null
+++ b/nova/meta/collectd.yml
@@ -0,0 +1,14 @@
+{%- if pillar.nova.compute is defined %}
+plugin:
+  nova_compute_libvirt:
+    plugin: interface
+    interval: 60
+    template: nova/files/collectd_libvirt.conf
+{%- endif %}
+{%- if pillar.nova.controller is defined %}
+plugin: {}
+#  nova_controller_nova:
+#    plugin: nova
+#    interval: 60
+#    template: nova/files/collectd_nova.conf
+{%- endif %}
\ No newline at end of file
diff --git a/nova/meta/heka.yml b/nova/meta/heka.yml
new file mode 100644
index 0000000..9e847fd
--- /dev/null
+++ b/nova/meta/heka.yml
@@ -0,0 +1,54 @@
+input:
+  {%- if pillar.nova.compute is defined %}
+  nova_compute:
+    engine: logstreamer
+    log_directory: /var/log/nova
+    file_match: nova-compute\.log\.?(?P<Index>\d+)?(.gz)?
+    priority: ["^Index"]
+    decoder: multidecoder_openstack
+  libvirtd:
+    engine: logstreamer
+    log_directory: /var/log/libvirt
+    file_match: libvirtd\.log\.?(?P<Index>\d+)?(.gz)?
+    priority: ['^Index']
+    decoder: multidecoder_libvirtd
+  {%- endif %}
+  {%- if pillar.nova.controller is defined %}
+  nova_api:
+    engine: logstreamer
+    log_directory: /var/log/nova
+    file_match: nova-api\.log\.?(?P<Index>\d+)?(.gz)?
+    priority: ["^Index"]
+    decoder: multidecoder_openstack
+  {%- endif %}
+decoder:
+  openstack:
+    engine: multidecoder
+    subs: [ 'Payloadregex_OpenStackLog', 'Payloadregex_OpenStackLogTrace' ]
+    cascade_strategy: first-wins
+    log_sub_errors: "false"
+  OpenStackLog:
+    engine: payloadregex
+    match_regex: ^(?P<timestamp>\d{4}-\d{2}-\d{2}\s+\d{2}:\d{2}:\d{2}.\d{3})\s(?P<pid>\d+)\s(?P<severity>\w+)\s(?P<programname>\D+)\s\[(?P<id>.+)\]\s(?P<payload>.*)
+    timestamp_layout: "2015-10-06 11:34:37.243"
+  OpenStackLogTrace:
+    engine: payloadregex
+    match_regex: ^(?P<timestamp>\d{4}-\d{2}-\d{2}\s+\d{2}:\d{2}:\d{2}.\d{3})\s(?P<pid>\d+)\s(?P<severity>TRACE)\s(?P<programname>.\S+)\s(?P<payload>.*)
+    timestamp_layout: "2015-10-06 11:34:37.243"
+  openstacksandbox:
+    engine: sandbox
+    file_name: "/etc/heka/plugins.d/decoders/openstack.lua"
+    module_directory: "/etc/heka/plugins.d/common;/usr/share/heka/lua_modules"
+  libvirtd:
+    engine: multidecoder
+    subs: [ 'Payloadregex_libvirtcommon', 'Payloadregex_libvirtaudit' ]
+    cascade_strategy: first-wins
+    log_sub_errors: "false"
+  libvirtcommon:
+    engine: payloadregex
+    match_regex: ^(?P<timestamp>\d{4}-\d{2}-\d{2}\s+\d{2}:\d{2}:\d{2}.\d{3}\+\d{4}):\s+(?P<pid>\d+):\s+(?P<severity>\w+)\s+:\s+(?P<qemutrace>\w+:\d+)\s+:\s+(?P<payload>.*)
+    timestamp_layout: "2015-09-01 15:56:14.675+0000"
+  libvirtaudit:
+    engine: payloadregex
+    match_regex: ^(?P<timestamp>\d{4}-\d{2}-\d{2}\s+\d{2}:\d{2}:\d{2}.\d{3}\+\d{4}):\s+(?P<pid>\d+):\s+(?P<severity>\w+)\s+:\s+(?P<message>.+):\s(?P<payload>.+)
+    timestamp_layout: "2015-09-01 15:56:14.675+0000"
diff --git a/nova/meta/sensu.yml b/nova/meta/sensu.yml
new file mode 100644
index 0000000..d5e1df0
--- /dev/null
+++ b/nova/meta/sensu.yml
@@ -0,0 +1,75 @@
+check:
+  local_nova_api_proc:
+    command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -C nova-api -u nova -c 1:30"
+    interval: 60
+    occurrences: 1
+    subscribers:
+    - local-nova-controller
+  local_nova_scheduler_proc:
+    command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -C nova-scheduler -u nova -c 1:4"
+    interval: 60
+    occurrences: 1
+    subscribers:
+    - local-nova-controller
+  local_nova_novncproxy_proc:
+    command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -C nova-novncproxy -u nova -c 1:5"
+    interval: 60
+    occurrences: 2
+    subscribers:
+    - local-nova-controller
+  local_nova_cert_proc:
+    command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -C nova-cert -u nova -c 1:4"
+    interval: 60
+    occurrences: 1
+    subscribers:
+    - local-nova-controller
+  local_nova_conductor_proc:
+    command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -C nova-conductor -u nova -c 1:10"
+    interval: 60
+    occurrences: 1
+    subscribers:
+    - local-nova-controller
+  local_nova_consoleauth_proc:
+    command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -C nova-consoleaut -u nova -c 1:1"
+    interval: 60
+    occurrences: 1
+    subscribers:
+    - local-nova-controller
+  #local_nova_api_rabbitmq_logs:
+  #  command: "PATH=$PATH:/etc/sensu/plugins check_log.sh -p /var/log/nova/nova-api.log -l 10 -r 'Queue not found' -c 5"
+  #  interval: 60
+  #  occurrences: 1
+  #  subscribers:
+  #  - local-nova-controller
+  local_nova_compute_proc:
+    command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -C nova-compute -u nova -c 1:10"
+    interval: 60
+    occurrences: 1
+    asset: asset01
+    customer: cloudlab
+    subscribers:
+    - local-nova-compute
+  local_linux_storage_nova_instances_usage:
+    command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_disk -w 15% -c 5% -p /var/lib/nova/instances"
+    interval: 60
+    occurrences: 1
+    subscribers:
+    - local-nova-compute
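+  # The :::key::: tokens in the remote checks below are Sensu command token
+  # substitutions, resolved from client attributes (e.g. openstack.user)
+  # when the check is executed.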
+  remote_openstack_nova_instance:
+    command: "PATH=$PATH:/usr/local/bin oschecks-check_nova_instance --auth_url='http://:::openstack.host:::::::openstack.port:::/v2.0' --username :::openstack.user::: --password :::openstack.password::: --tenant :::openstack.tenant:::"
+    interval: 300
+    occurrences: 1
+    subscribers:
+    - remote-network
+  remote_openstack_nova_api:
+    command: "PATH=$PATH:/usr/local/bin oschecks-check_nova_api --os-auth-url 'http://:::openstack.host:::::::openstack.port:::/v2.0' --os-username :::openstack.user::: --os-password :::openstack.password::: --os-tenant-name :::openstack.tenant:::"
+    interval: 60
+    occurrences: 1
+    subscribers:
+    - remote-network
+  remote_nova_services:
+    command: "PATH=$PATH:/etc/sensu/plugins check_nova_services.sh -u :::openstack.user::: -p :::openstack.password::: -t :::openstack.tenant::: -h 'http://:::openstack.host:::::::openstack.port:::/v2.0'"
+    interval: 60
+    occurrences: 1
+    subscribers:
+    - remote-network
\ No newline at end of file
diff --git a/nova/meta/sphinx.yml b/nova/meta/sphinx.yml
new file mode 100644
index 0000000..c4f67ed
--- /dev/null
+++ b/nova/meta/sphinx.yml
@@ -0,0 +1,125 @@
+doc:
+  name: Nova
+  description: OpenStack Nova provides a cloud computing fabric controller, supporting a wide variety of virtualization technologies, including KVM, Xen, LXC, VMware, and more.
+  role:
+  {%- if pillar.nova.compute is defined %}
+  {%- from "nova/map.jinja" import compute with context %}
+    compute:
+      name: compute
+      param:
+        version:
+          name: "Version"
+          value: {{ compute.version }}
+        virtualization:
+          name: "Virtualization type"
+          value: {{ compute.virtualization }}
+        vncproxy_url:
+          name: "VNC proxy URL"
+          value: {{ compute.vncproxy_url }}
+        reserved_host_memory_mb:
+          name: "Reserved Host Memmory"
+          value: {{ compute.reserved_host_memory_mb }}
+        database_host:
+          name: "Database"
+          value: {{ compute.database.user }}@{{ compute.database.host }}:{{ compute.database.port }}/{{ compute.database.name }}
+        network_host:
+          name: "Network service"
+          value: {{ compute.network.host }}:{{ compute.network.port }}
+        glance_host:
+          name: "Image service"
+          value: {{ compute.image.host }}:{{ compute.image.port }}
+        message_queue_ip:
+          name: "Message queue"
+          value: {{ compute.message_queue.user }}@{{ compute.message_queue.host }}:{{ compute.message_queue.port }}{{ compute.message_queue.virtual_host }}
+        identity_host:
+          name: "Identity host ip"
+          value: {{ compute.identity.user }}@{{ compute.identity.host }}:{{ compute.identity.port }}
+        {%- if compute.vm_swappiness is defined %}
+        vm_swappiness:
+          name: "VM SWAPpiness in sysctl.conf"
+          value: {{ compute.vm_swappiness }}
+        {%- endif %}
+        {%- if compute.ceph is defined %}
+        ceph:
+          name: "Ceph is configured"
+          value: "True"
+        {%- endif %}
+        packages:
+          value: |
+            {%- for pkg in compute.pkgs %}
+            {%- set pkg_version = "dpkg -l "+pkg+" | grep "+pkg+" | awk '{print $3}'" %}
+            * {{ pkg }}: {{ salt['cmd.run'](pkg_version) }}
+            {%- endfor %}
+  {%- endif %}
+  {%- if pillar.nova.controller is defined %}
+  {% from "nova/map.jinja" import controller with context %}
+    controller:
+      name: controller
+      endpoint:
+        nova_api:
+          name: nova-api
+          type: nova-api
+          address: http://{{ controller.bind.private_address }}:8774
+          protocol: http
+        nova_metadata:
+          name: nova-metadata
+          type: nova-metadata
+          address: http://{{ controller.bind.private_address }}:8775
+          protocol: http
+        nova_ec2_api:
+          name: nova-ec2-api
+          type: nova-ec2-api
+          address: http://{{ controller.bind.private_address }}:8773
+          protocol: http
+      param:
+        bind:
+          value: {{ controller.bind.private_address }}
+        version:
+          name: "Version"
+          value: {{ controller.version }}
+        networking:
+          name: "Networking plugin"
+          value: {{ controller.networking }}
+        ram_allocation_ratio:
+          name: "RAM allocation ratio"
+          value: {{ controller.ram_allocation_ratio }}
+        cpu_allocation_ratio:
+          name: "CPU allocation ratio"
+          value: {{ controller.cpu_allocation_ratio }}
+        disk_allocation_ratio:
+          name: "Disk allocation ratio"
+          value: {{ controller.disk_allocation_ratio }}
+        workers:
+          name: "Number of osapi and conductor workers"
+          value: {{ controller.workers }}
+        dhcp_domain:
+          name: "DHCP domain"
+          value: {{ controller.dhcp_domain }}
+        scheduler_default_filters:
+          name: "Scheduler default filters"
+          value: {{ controller.scheduler_default_filters|replace(',', ' ') }}
+        database_host:
+          name: "Database"
+          value: {{ controller.database.user }}@{{ controller.database.host }}:{{ controller.database.port }}/{{ controller.database.name }}
+        network_host:
+          name: "Network service"
+          value: {{ controller.network.host }}:{{ controller.network.port }}
+        glance_host:
+          name: "Glance service"
+          value: {{ controller.glance.host }}:{{ controller.glance.port }}
+        message_queue_ip:
+          name: "Message queue"
+          value: {{ controller.message_queue.user }}@{{ controller.message_queue.host }}:{{ controller.message_queue.port }}{{ controller.message_queue.virtual_host }}
+        identity_host:
+          name: "Identity host ip"
+          value: {{ controller.identity.user }}@{{ controller.identity.host }}:{{ controller.identity.port }}
+        vncproxy_url:
+          name: "VNC proxy URL"
+          value: {{ controller.vncproxy_url }}
+        packages:
+          value: |
+            {%- for pkg in controller.pkgs %}
+            {%- set pkg_version = "dpkg -l "+pkg+" | grep "+pkg+" | awk '{print $3}'" %}
+            * {{ pkg }}: {{ salt['cmd.run'](pkg_version) }}
+            {%- endfor %}
+  {%- endif %}
diff --git a/other-requirements.txt b/other-requirements.txt
new file mode 100644
index 0000000..ba84cc5
--- /dev/null
+++ b/other-requirements.txt
@@ -0,0 +1 @@
+python-yaml
diff --git a/tests/pillar/compute_cluster.sls b/tests/pillar/compute_cluster.sls
new file mode 100644
index 0000000..bbfb87c
--- /dev/null
+++ b/tests/pillar/compute_cluster.sls
@@ -0,0 +1,53 @@
+nova:
+  compute:
+    version: liberty
+    enabled: true
+    virtualization: kvm
+    heal_instance_info_cache_interval: 60
+    vncproxy_url: openstack:6080
+    bind:
+      vnc_address: 127.0.0.1
+      vnc_port: 6080
+      vnc_name: 0.0.0.0
+    database:
+      engine: mysql
+      host: 127.0.0.1
+      port: 3306
+      name: nova
+      user: nova
+      password: password
+    identity:
+      engine: keystone
+      region: RegionOne
+      host: 127.0.0.1
+      port: 35357
+      user: nova
+      password: password
+      tenant: service
+    message_queue:
+      engine: rabbitmq
+      members:
+      - host: 127.0.0.1
+      - host: 127.0.1.1
+      - host: 127.0.2.1
+      user: openstack
+      password: password
+      virtual_host: '/openstack'
+    image:
+      engine: glance
+      host: 127.0.0.1
+      port: 9292
+    network:
+      engine: neutron
+      region: RegionOne
+      host: 127.0.0.1
+      port: 9696
+    cache:
+      engine: memcached
+      members:
+      - host: 127.0.0.1
+        port: 11211
+      - host: 127.0.1.1
+        port: 11211
+      - host: 127.0.2.1
+        port: 11211
diff --git a/tests/pillar/compute_single.sls b/tests/pillar/compute_single.sls
new file mode 100644
index 0000000..fa22905
--- /dev/null
+++ b/tests/pillar/compute_single.sls
@@ -0,0 +1,47 @@
+nova:
+  compute:
+    version: liberty
+    enabled: true
+    virtualization: kvm
+    heal_instance_info_cache_interval: 60
+    vncproxy_url: openstack:6080
+    bind:
+      vnc_address: 127.0.0.1
+      vnc_port: 6080
+      vnc_name: 0.0.0.0
+    database:
+      engine: mysql
+      host: 127.0.0.1
+      port: 3306
+      name: nova
+      user: nova
+      password: password
+    identity:
+      engine: keystone
+      region: RegionOne
+      host: 127.0.0.1
+      port: 35357
+      user: nova
+      password: password
+      tenant: service
+    message_queue:
+      engine: rabbitmq
+      host: 127.0.0.1
+      port: 5672
+      user: openstack
+      password: password
+      virtual_host: '/openstack'
+    image:
+      engine: glance
+      host: 127.0.0.1
+      port: 9292
+    network:
+      engine: neutron
+      region: RegionOne
+      host: 127.0.0.1
+      port: 9696
+    cache:
+      engine: memcached
+      members:
+      - host: 127.0.0.1
+        port: 11211
diff --git a/tests/pillar/control_cluster.sls b/tests/pillar/control_cluster.sls
new file mode 100644
index 0000000..018f222
--- /dev/null
+++ b/tests/pillar/control_cluster.sls
@@ -0,0 +1,54 @@
+nova:
+  controller:
+    enabled: true
+    networking: default
+    version: liberty
+    vncproxy_url: 127.0.0.1
+    security_group: false
+    dhcp_domain: novalocal
+    scheduler_default_filters: "DifferentHostFilter,RetryFilter,AvailabilityZoneFilter,RamFilter,CoreFilter,DiskFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter"
+    cpu_allocation_ratio: 16.0
+    ram_allocation_ratio: 1.5
+    disk_allocation_ratio: 1.0
+    workers: 8
+    bind:
+      private_address: 127.0.0.1
+      public_address: 127.0.0.1
+      public_name: 127.0.0.1
+      novncproxy_port: 6080
+    database:
+      engine: mysql
+      host: 127.0.0.1
+      port: 3306
+      name: nova
+      user: nova
+      password: password
+    identity:
+      engine: keystone
+      region: RegionOne
+      host: 127.0.0.1
+      port: 35357
+      user: nova
+      password: password
+      tenant: service
+    message_queue:
+      engine: rabbitmq
+      members:
+      - host: 127.0.0.1
+      - host: 127.0.1.1
+      - host: 127.0.2.1
+      user: openstack
+      password: password
+      virtual_host: '/openstack'
+      ha_queues: true
+    glance:
+      host: 127.0.0.1
+      port: 9292
+    network:
+      engine: neutron
+      region: RegionOne
+      host: 127.0.0.1
+      port: 9696
+      mtu: 1500
+    metadata:
+      password: metadata
diff --git a/tests/pillar/control_single.sls b/tests/pillar/control_single.sls
new file mode 100644
index 0000000..8a06a7d
--- /dev/null
+++ b/tests/pillar/control_single.sls
@@ -0,0 +1,56 @@
+nova:
+  controller:
+    enabled: true
+    networking: contrail
+    version: liberty
+    security_group: false
+    vncproxy_url: 127.0.0.1
+    dhcp_domain: novalocal
+    scheduler_default_filters: "DifferentHostFilter,RetryFilter,AvailabilityZoneFilter,RamFilter,CoreFilter,DiskFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter"
+    cpu_allocation_ratio: 16.0
+    ram_allocation_ratio: 1.5
+    disk_allocation_ratio: 1.0
+    workers: 8
+    bind:
+      private_address: 127.0.0.1
+      public_address: 127.0.0.1
+      public_name: 127.0.0.1
+      novncproxy_port: 6080
+    database:
+      engine: mysql
+      host: localhost
+      port: 3306
+      name: nova
+      user: nova
+      password: password
+    identity:
+      engine: keystone
+      region: RegionOne
+      host: 127.0.0.1
+      port: 35357
+      user: nova
+      password: password
+      tenant: service
+    message_queue:
+      engine: rabbitmq
+      host: 127.0.0.1
+      port: 5672
+      user: openstack
+      password: password
+      virtual_host: '/openstack'
+    glance:
+      host: 127.0.0.1
+      port: 9292
+    network:
+      engine: neutron
+      region: RegionOne
+      host: 127.0.0.1
+      port: 9696
+      mtu: 1500
+    metadata:
+      password: password
+    cache:
+      engine: memcached
+      members:
+      - host: 127.0.0.1
+        port: 11211
diff --git a/tests/run_tests.sh b/tests/run_tests.sh
new file mode 100755
index 0000000..3f42101
--- /dev/null
+++ b/tests/run_tests.sh
@@ -0,0 +1,162 @@
+#!/usr/bin/env bash
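+#
+# Usage: ./run_tests.sh [clean|prepare|run]
+# With no argument, "prepare" is executed first, followed by "run".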
+
+set -e
+[ -n "$DEBUG" ] && set -x
+
+CURDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+METADATA=${CURDIR}/../metadata.yml
+FORMULA_NAME=$(cat $METADATA | python -c "import sys,yaml; print yaml.load(sys.stdin)['name']")
+
+## Overrideable parameters
+PILLARDIR=${PILLARDIR:-${CURDIR}/pillar}
+BUILDDIR=${BUILDDIR:-${CURDIR}/build}
+VENV_DIR=${VENV_DIR:-${BUILDDIR}/virtualenv}
+DEPSDIR=${BUILDDIR}/deps
+
+SALT_FILE_DIR=${SALT_FILE_DIR:-${BUILDDIR}/file_root}
+SALT_PILLAR_DIR=${SALT_PILLAR_DIR:-${BUILDDIR}/pillar_root}
+SALT_CONFIG_DIR=${SALT_CONFIG_DIR:-${BUILDDIR}/salt}
+SALT_CACHE_DIR=${SALT_CACHE_DIR:-${SALT_CONFIG_DIR}/cache}
+
+SALT_OPTS="${SALT_OPTS} --retcode-passthrough --local -c ${SALT_CONFIG_DIR}"
+
+if [ "x${SALT_VERSION}" != "x" ]; then
+    PIP_SALT_VERSION="==${SALT_VERSION}"
+fi
+
+## Functions
+log_info() {
+    echo "[INFO] $*"
+}
+
+log_err() {
+    echo "[ERROR] $*" >&2
+}
+
+setup_virtualenv() {
+    log_info "Setting up Python virtualenv"
+    virtualenv $VENV_DIR
+    source ${VENV_DIR}/bin/activate
+    pip install salt${PIP_SALT_VERSION}
+}
+
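+# setup_pillar generates the pillar top.sls, mapping each pillar file to a
+# minion id of the same name; for compute_single.sls it emits:
+#
+#   base:
+#     compute_single:
+#       - compute_single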
+setup_pillar() {
+    [ ! -d ${SALT_PILLAR_DIR} ] && mkdir -p ${SALT_PILLAR_DIR}
+    echo "base:" > ${SALT_PILLAR_DIR}/top.sls
+    for pillar in ${PILLARDIR}/*; do
+        state_name=$(basename ${pillar%.sls})
+        echo -e "  ${state_name}:\n    - ${state_name}" >> ${SALT_PILLAR_DIR}/top.sls
+    done
+}
+
+setup_salt() {
+    [ ! -d ${SALT_FILE_DIR} ] && mkdir -p ${SALT_FILE_DIR}
+    [ ! -d ${SALT_CONFIG_DIR} ] && mkdir -p ${SALT_CONFIG_DIR}
+    [ ! -d ${SALT_CACHE_DIR} ] && mkdir -p ${SALT_CACHE_DIR}
+
+    echo "base:" > ${SALT_FILE_DIR}/top.sls
+    for pillar in ${PILLARDIR}/*.sls; do
+        state_name=$(basename ${pillar%.sls})
+        echo -e "  ${state_name}:\n    - ${FORMULA_NAME}" >> ${SALT_FILE_DIR}/top.sls
+    done
+
+    cat << EOF > ${SALT_CONFIG_DIR}/minion
+file_client: local
+cachedir: ${SALT_CACHE_DIR}
+verify_env: False
+
+file_roots:
+  base:
+  - ${SALT_FILE_DIR}
+  - ${CURDIR}/..
+  - /usr/share/salt-formulas/env
+
+pillar_roots:
+  base:
+  - ${SALT_PILLAR_DIR}
+  - ${PILLARDIR}
+EOF
+}
+
+fetch_dependency() {
+    dep_name="$(echo $1|cut -d : -f 1)"
+    dep_source="$(echo $1|cut -d : -f 2-)"
+    dep_root="${DEPSDIR}/$(basename $dep_source .git)"
+    dep_metadata="${dep_root}/metadata.yml"
+
+    [ -d /usr/share/salt-formulas/env/${dep_name} ] && log_info "Dependency $dep_name already present in system-wide salt env" && return 0
+    [ -d $dep_root ] && log_info "Dependency $dep_name already fetched" && return 0
+
+    log_info "Fetching dependency $dep_name"
+    [ ! -d ${DEPSDIR} ] && mkdir -p ${DEPSDIR}
+    git clone $dep_source ${DEPSDIR}/$(basename $dep_source .git)
+    ln -s ${dep_root}/${dep_name} ${SALT_FILE_DIR}/${dep_name}
+
+    METADATA="${dep_metadata}" install_dependencies
+}
+
+install_dependencies() {
+    grep -E "^dependencies:" ${METADATA} >/dev/null || return 0
+    (python - | while read dep; do fetch_dependency "$dep"; done) << EOF
+import sys,yaml
+for dep in yaml.load(open('${METADATA}', 'r'))['dependencies']:
+    print '%s:%s' % (dep["name"], dep["source"])
+EOF
+}
+
+clean() {
+    log_info "Cleaning up ${BUILDDIR}"
+    [ -d ${BUILDDIR} ] && rm -rf ${BUILDDIR} || exit 0
+}
+
+salt_run() {
+    [ -e ${VENV_DIR}/bin/activate ] && source ${VENV_DIR}/bin/activate
+    salt-call ${SALT_OPTS} $*
+}
+
+prepare() {
+    [ ! -d ${BUILDDIR} ] && mkdir -p ${BUILDDIR}
+
+    which salt-call || setup_virtualenv
+    setup_pillar
+    setup_salt
+    install_dependencies
+}
+
+run() {
+    for pillar in ${PILLARDIR}/*.sls; do
+        state_name=$(basename ${pillar%.sls})
+        salt_run --id=${state_name} state.show_sls ${FORMULA_NAME} || (log_err "Execution of ${FORMULA_NAME}.${state_name} failed"; exit 1)
+    done
+}
+
+_atexit() {
+    RETVAL=$?
+    trap true INT TERM EXIT
+
+    if [ $RETVAL -ne 0 ]; then
+        log_err "Execution failed"
+    else
+        log_info "Execution successful"
+    fi
+    return $RETVAL
+}
+
+## Main
+trap _atexit INT TERM EXIT
+
+case $1 in
+    clean)
+        clean
+        ;;
+    prepare)
+        prepare
+        ;;
+    run)
+        run
+        ;;
+    *)
+        prepare
+        run
+        ;;
+esac