Initial commit
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
new file mode 100644
index 0000000..eb4c597
--- /dev/null
+++ b/CHANGELOG.rst
@@ -0,0 +1,6 @@
+nova formula
+============
+
+0.0.1 (2015-08-03)
+
+- Initial formula setup
\ No newline at end of file
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..8e80b12
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,13 @@
+Copyright (c) 2014-2015 tcp cloud a. s.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
\ No newline at end of file
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..25b8042
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,163 @@
+
+==============
+OpenStack Nova
+==============
+
+OpenStack Nova provides a cloud computing fabric controller, supporting a wide variety of virtualization technologies, including KVM, Xen, LXC, VMware, and more. In addition to its native API, it includes compatibility with the commonly encountered Amazon EC2 and S3 APIs.
+
+Sample pillars
+==============
+
+Controller nodes
+----------------
+
+Nova services on the controller node
+
+.. code-block:: yaml
+
+ nova:
+ controller:
+ version: juno
+ enabled: true
+ security_group: true
+ bind:
+ public_address: 10.0.0.122
+ public_name: openstack.domain.com
+ novncproxy_port: 6080
+ database:
+ engine: mysql
+ host: 127.0.0.1
+ port: 3306
+ name: nova
+ user: nova
+ password: pwd
+ identity:
+ engine: keystone
+ host: 127.0.0.1
+ port: 35357
+ user: nova
+ password: pwd
+ tenant: service
+ message_queue:
+ engine: rabbitmq
+ host: 127.0.0.1
+ port: 5672
+ user: openstack
+ password: pwd
+ virtual_host: '/openstack'
+ network:
+ engine: neutron
+ host: 127.0.0.1
+ port: 9696
+ identity:
+ engine: keystone
+ host: 127.0.0.1
+ port: 35357
+ user: neutron
+ password: pwd
+ tenant: service
+ metadata:
+ password: password
+
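+Nova controller with a memcached cache backend (mirrors the bundled
+``metadata/service/control/single.yml``; the address below is an illustrative
+placeholder)
+
+.. code-block:: yaml
+
+    nova:
+      controller:
+        enabled: true
+        ...
+        cache:
+          engine: memcached
+          members:
+          - host: 127.0.0.1
+            port: 11211
+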
+Nova services from a custom package repository
+
+.. code-block:: yaml
+
+ nova:
+ controller:
+ version: juno
+ source:
+ engine: pkg
+ address: http://...
+ ....
+
+Compute nodes
+-------------
+
+Nova compute services on the compute node
+
+.. code-block:: yaml
+
+ nova:
+ compute:
+ version: juno
+ enabled: true
+ virtualization: kvm
+ security_group: true
+ bind:
+ vnc_address: 172.20.0.100
+ vnc_port: 6080
+ vnc_name: openstack.domain.com
+ vnc_protocol: http
+ database:
+ engine: mysql
+ host: 127.0.0.1
+ port: 3306
+ name: nova
+ user: nova
+ password: pwd
+ identity:
+ engine: keystone
+ host: 127.0.0.1
+ port: 35357
+ user: nova
+ password: pwd
+ tenant: service
+ message_queue:
+ engine: rabbitmq
+ host: 127.0.0.1
+ port: 5672
+ user: openstack
+ password: pwd
+ virtual_host: '/openstack'
+ image:
+ engine: glance
+ host: 127.0.0.1
+ port: 9292
+ network:
+ engine: neutron
+ host: 127.0.0.1
+ port: 9696
+ identity:
+ engine: keystone
+ host: 127.0.0.1
+ port: 35357
+ user: neutron
+ password: pwd
+ tenant: service
+ qemu:
+ max_files: 4096
+ max_processes: 4096
+
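+Nova compute with a Ceph client secret for libvirt (the ``ceph`` keys are read by
+``nova/compute.sls`` to register the cinder client key via ``virsh secret-define``;
+the values below are placeholders)
+
+.. code-block:: yaml
+
+    nova:
+      compute:
+        enabled: true
+        ...
+        ceph:
+          secret_uuid: 00000000-1111-2222-3333-444444444444
+          client_cinder_key: <base64 cephx client.cinder key>
+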
+Nova services on a compute node with OpenContrail
+
+.. code-block:: yaml
+
+ nova:
+ compute:
+ enabled: true
+ ...
+ networking: contrail
+
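+Nova compute with an SSH keypair managed for the nova user (the ``user`` keys are
+consumed by ``nova/compute.sls``, e.g. for migrations between compute hosts; the
+key material below is a placeholder)
+
+.. code-block:: yaml
+
+    nova:
+      compute:
+        enabled: true
+        ...
+        user:
+          public_key: ssh-rsa AAAA... nova@compute
+          private_key: |
+            -----BEGIN RSA PRIVATE KEY-----
+            ...
+            -----END RSA PRIVATE KEY-----
+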
+Nova services on a compute node with memcached caching
+
+.. code-block:: yaml
+
+ nova:
+ compute:
+ enabled: true
+ ...
+ cache:
+ engine: memcached
+ members:
+ - host: 127.0.0.1
+ port: 11211
+ - host: 127.0.0.1
+ port: 11211
+
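+Nova compute with custom notification settings (rendered into ``nova.conf`` by the
+compute templates; the driver and topic values below are illustrative)
+
+.. code-block:: yaml
+
+    nova:
+      compute:
+        enabled: true
+        ...
+        notification:
+          driver: messagingv2
+          topics: notifications
+          notify_on:
+            state_change: vm_and_task_state
+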
+Read more
+=========
+
+* http://docs.openstack.org/developer/nova/
+* http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-dev
+* http://bugs.launchpad.net/nova
diff --git a/VERSION b/VERSION
new file mode 100644
index 0000000..3b04cfb
--- /dev/null
+++ b/VERSION
@@ -0,0 +1 @@
+0.2
diff --git a/debian/changelog b/debian/changelog
new file mode 100644
index 0000000..7b415b9
--- /dev/null
+++ b/debian/changelog
@@ -0,0 +1,11 @@
+salt-formula-nova (0.2) trusty; urgency=medium
+
+ * First public release
+
+ -- Filip Pytloun <filip.pytloun@tcpcloud.eu> Tue, 06 Oct 2015 16:38:49 +0200
+
+salt-formula-nova (0.1) trusty; urgency=medium
+
+ * Initial release
+
+ -- Jakub Pavlik <jakub.pavlik@tcpcloud.eu> Thu, 13 Aug 2015 23:23:41 +0200
diff --git a/debian/compat b/debian/compat
new file mode 100644
index 0000000..ec63514
--- /dev/null
+++ b/debian/compat
@@ -0,0 +1 @@
+9
diff --git a/debian/control b/debian/control
new file mode 100644
index 0000000..9ca187d
--- /dev/null
+++ b/debian/control
@@ -0,0 +1,15 @@
+Source: salt-formula-nova
+Maintainer: Jakub Pavlik <jakub.pavlik@tcpcloud.eu>
+Section: admin
+Priority: optional
+Build-Depends: debhelper (>= 9)
+Standards-Version: 3.9.6
+Homepage: http://www.tcpcloud.eu
+Vcs-Browser: https://github.com/tcpcloud/salt-formula-nova
+Vcs-Git: https://github.com/tcpcloud/salt-formula-nova.git
+
+Package: salt-formula-nova
+Architecture: all
+Depends: ${misc:Depends}, salt-master, reclass
+Description: Nova Salt formula
+ Install and configure Nova server and client.
diff --git a/debian/copyright b/debian/copyright
new file mode 100644
index 0000000..dd88dfc
--- /dev/null
+++ b/debian/copyright
@@ -0,0 +1,15 @@
+Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: salt-formula-nova
+Upstream-Contact: Jakub Pavlik <jakub.pavlik@tcpcloud.eu>
+Source: https://github.com/tcpcloud/salt-formula-nova
+
+Files: *
+Copyright: 2014-2015 tcp cloud
+License: Apache-2.0
+ Copyright (C) 2014-2015 tcp cloud
+ .
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ .
+ On a Debian system you can find a copy of this license in
+ /usr/share/common-licenses/Apache-2.0.
diff --git a/debian/docs b/debian/docs
new file mode 100644
index 0000000..d585829
--- /dev/null
+++ b/debian/docs
@@ -0,0 +1,3 @@
+README.rst
+CHANGELOG.rst
+VERSION
diff --git a/debian/install b/debian/install
new file mode 100644
index 0000000..3514967
--- /dev/null
+++ b/debian/install
@@ -0,0 +1,2 @@
+nova/* /usr/share/salt-formulas/env/nova/
+metadata/service/* /usr/share/salt-formulas/reclass/service/nova/
diff --git a/debian/rules b/debian/rules
new file mode 100755
index 0000000..abde6ef
--- /dev/null
+++ b/debian/rules
@@ -0,0 +1,5 @@
+#!/usr/bin/make -f
+
+%:
+ dh $@
+
diff --git a/debian/source/format b/debian/source/format
new file mode 100644
index 0000000..89ae9db
--- /dev/null
+++ b/debian/source/format
@@ -0,0 +1 @@
+3.0 (native)
diff --git a/metadata/service/compute/kvm.yml b/metadata/service/compute/kvm.yml
new file mode 100644
index 0000000..a0a9c7f
--- /dev/null
+++ b/metadata/service/compute/kvm.yml
@@ -0,0 +1,41 @@
+applications:
+- nova
+parameters:
+ nova:
+ compute:
+ enabled: true
+ version: ${_param:nova_version}
+ virtualization: kvm
+ bind:
+ vnc_address: ${linux:network:host:local:address}
+ vnc_port: 6080
+ vnc_name: ${linux:network:host:public:address}
+ database:
+ engine: mysql
+ host: ${linux:network:host:vip:address}
+ port: 3306
+ name: nova
+ user: nova
+ password: ${_secret:mysql_nova_password}
+ identity:
+ engine: keystone
+ host: ${linux:network:host:vip:address}
+ port: 35357
+ user: nova
+ password: ${_secret:keystone_nova_password}
+ tenant: service
+ message_queue:
+ engine: rabbitmq
+ host: ${linux:network:host:vip:address}
+ port: 5672
+ user: openstack
+ password: ${_secret:rabbitmq_openstack_password}
+ virtual_host: '/openstack'
+ image:
+ engine: glance
+ host: ${linux:network:host:vip:address}
+ port: 9292
+ network:
+ engine: neutron
+ host: ${linux:network:host:vip:address}
+ port: 9696
diff --git a/metadata/service/control/cluster.yml b/metadata/service/control/cluster.yml
new file mode 100644
index 0000000..c27ee3c
--- /dev/null
+++ b/metadata/service/control/cluster.yml
@@ -0,0 +1,53 @@
+applications:
+- nova
+parameters:
+ _param:
+ nova_vncproxy_url: http://${_param:single_address}:6080
+ nova_networking: default
+ nova:
+ controller:
+ enabled: true
+ networking: ${_param:nova_networking}
+ version: ${_param:nova_version}
+ vncproxy_url: ${_param:nova_vncproxy_url}
+ security_group: false
+ logging:
+ - engine: syslog
+ facility: local0
+ bind:
+ private_address: ${_param:cluster_local_address}
+ public_address: ${_param:cluster_vip_address}
+ public_name: ${_param:cluster_vip_address}
+ novncproxy_port: 6080
+ database:
+ engine: mysql
+ host: ${_param:cluster_vip_address}
+ port: 3306
+ name: nova
+ user: nova
+ password: ${_param:mysql_nova_password}
+ identity:
+ engine: keystone
+ host: ${_param:cluster_vip_address}
+ port: 35357
+ user: nova
+ password: ${_param:keystone_nova_password}
+ tenant: service
+ message_queue:
+ engine: rabbitmq
+ host: ${_param:cluster_vip_address}
+ port: 5672
+ user: openstack
+ password: ${_param:rabbitmq_openstack_password}
+ virtual_host: '/openstack'
+ ha_queues: true
+ glance:
+ host: ${_param:cluster_vip_address}
+ port: 9292
+ network:
+ engine: neutron
+ host: ${_param:cluster_vip_address}
+ port: 9696
+ mtu: 1500
+ metadata:
+ password: metadataPass
diff --git a/metadata/service/control/single.yml b/metadata/service/control/single.yml
new file mode 100644
index 0000000..e6cbe11
--- /dev/null
+++ b/metadata/service/control/single.yml
@@ -0,0 +1,57 @@
+applications:
+- nova
+parameters:
+ _param:
+ nova_vncproxy_url: http://${_param:single_address}:6080
+ nova_networking: default
+ nova:
+ controller:
+ enabled: true
+ networking: ${_param:nova_networking}
+ version: ${_param:nova_version}
+ security_group: false
+ vncproxy_url: ${_param:nova_vncproxy_url}
+ logging:
+ - engine: syslog
+ facility: local0
+ bind:
+ private_address: ${_param:single_address}
+ public_address: ${_param:single_address}
+ public_name: ${_param:single_address}
+ novncproxy_port: 6080
+ database:
+ engine: mysql
+ host: localhost
+ port: 3306
+ name: nova
+ user: nova
+ password: ${_param:mysql_nova_password}
+ identity:
+ engine: keystone
+ host: ${_param:single_address}
+ port: 35357
+ user: nova
+ password: ${_param:keystone_nova_password}
+ tenant: service
+ message_queue:
+ engine: rabbitmq
+ host: ${_param:single_address}
+ port: 5672
+ user: openstack
+ password: ${_param:rabbitmq_openstack_password}
+ virtual_host: '/openstack'
+ glance:
+ host: ${_param:single_address}
+ port: 9292
+ network:
+ engine: neutron
+ host: ${_param:single_address}
+ port: 9696
+ mtu: 1500
+ metadata:
+ password: metadataPass
+ cache:
+ engine: memcached
+ members:
+ - host: ${_param:single_address}
+ port: 11211
diff --git a/nova/compute.sls b/nova/compute.sls
new file mode 100644
index 0000000..d8dd8a6
--- /dev/null
+++ b/nova/compute.sls
@@ -0,0 +1,161 @@
+{% from "nova/map.jinja" import compute with context %}
+
+{%- if compute.enabled %}
+
+nova_compute_packages:
+ pkg.installed:
+ - names: {{ compute.pkgs }}
+
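+# Pre-create the nova user and group with fixed uid/gid 303 (skipped if the user already exists)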
+{%- if not salt['user.info']('nova') %}
+user_nova:
+ user.present:
+ - name: nova
+ - home: /var/lib/nova
+ {%- if compute.user is defined %}
+ - shell: /bin/bash
+ {%- else %}
+ - shell: /bin/false
+ {%- endif %}
+ - uid: 303
+ - gid: 303
+ - system: True
+ - groups:
+ {%- if salt['group.info']('libvirtd') %}
+ - libvirtd
+ {%- endif %}
+ - nova
+ - require_in:
+ - pkg: nova_compute_packages
+ {%- if compute.user is defined %}
+ - file: /var/lib/nova/.ssh/id_rsa
+ {%- endif %}
+
+group_nova:
+ group.present:
+ - name: nova
+ - gid: 303
+ - system: True
+ - require_in:
+ - user: user_nova
+{%- endif %}
+
+{%- if compute.user is defined %}
+
+nova_auth_keys:
+ ssh_auth.present:
+ - user: nova
+ - names:
+ - {{ compute.user.public_key }}
+
+/var/lib/nova/.ssh/id_rsa:
+ file.managed:
+ - user: nova
+ - contents_pillar: nova:compute:user:private_key
+ - mode: 400
+ - require:
+ - pkg: nova_compute_packages
+
+{%- endif %}
+
+{%- if pillar.nova.controller is not defined %}
+
+{%- if compute.get('networking', 'default') == "contrail" %}
+
+
+/etc/nova/nova.conf:
+ file.managed:
+ - source: salt://nova/files/{{ compute.version }}/nova-compute.conf.contrail.{{ grains.os_family }}
+ - template: jinja
+ - require:
+ - pkg: nova_compute_packages
+
+{%- else %}
+
+/etc/nova/nova.conf:
+ file.managed:
+ - source: salt://nova/files/{{ compute.version }}/nova-compute.conf.{{ grains.os_family }}
+ - template: jinja
+ - require:
+ - pkg: nova_compute_packages
+
+{%- endif %}
+
+nova_compute_services:
+ service.running:
+ - enable: true
+ - names: {{ compute.services }}
+ - watch:
+ - file: /etc/nova/nova.conf
+
+{%- if compute.virtualization == 'kvm' %}
+
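+# Install the Ceph client and register the cinder client key with libvirt as a secret (see files/secret.xml)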
+{% if compute.ceph is defined %}
+
+ceph_package:
+ pkg.installed:
+ - name: ceph-common
+
+/etc/secret.xml:
+ file.managed:
+ - source: salt://nova/files/secret.xml
+ - template: jinja
+
+ceph_virsh_secret_define:
+ cmd.run:
+ - name: "virsh secret-define --file /etc/secret.xml"
+ - unless: "virsh secret-list | grep {{ compute.ceph.secret_uuid }}"
+ - require:
+ - file: /etc/secret.xml
+
+ceph_virsh_secret_set_value:
+ cmd.run:
+ - name: "virsh secret-set-value --secret {{ compute.ceph.secret_uuid }} --base64 {{ compute.ceph.client_cinder_key }} "
+ - unless: "virsh secret-get-value {{ compute.ceph.secret_uuid }} | grep {{ compute.ceph.client_cinder_key }}"
+ - require:
+ - cmd: ceph_virsh_secret_define
+
+{% endif %}
+
+/etc/default/libvirt-bin:
+ file.managed:
+ - source: salt://nova/files/{{ compute.version }}/libvirt-bin
+ - template: jinja
+ - require:
+ - pkg: nova_compute_packages
+
+/etc/libvirt/qemu.conf:
+ file.managed:
+ - source: salt://nova/files/{{ compute.version }}/qemu.conf.{{ grains.os_family }}
+ - template: jinja
+ - require:
+ - pkg: nova_compute_packages
+
+/etc/libvirt/libvirtd.conf:
+ file.managed:
+ - source: salt://nova/files/{{ compute.version }}/libvirtd.conf.{{ grains.os_family }}
+ - template: jinja
+ - require:
+ - pkg: nova_compute_packages
+
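+# Tear down libvirt's "default" network if it is currently active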
+virsh net-undefine default:
+ cmd.run:
+ - name: "virsh net-destroy default"
+ - require:
+ - pkg: nova_compute_packages
+ - onlyif: "virsh net-list | grep default"
+
+libvirt-bin:
+ service.running:
+ - enable: true
+ - require:
+ - pkg: nova_compute_packages
+ - cmd: virsh net-undefine default
+ - watch:
+ - file: /etc/libvirt/libvirtd.conf
+ - file: /etc/default/libvirt-bin
+
+{%- endif %}
+
+{%- endif %}
+
+{%- endif %}
diff --git a/nova/controller.sls b/nova/controller.sls
new file mode 100644
index 0000000..91236a9
--- /dev/null
+++ b/nova/controller.sls
@@ -0,0 +1,81 @@
+{% from "nova/map.jinja" import controller with context %}
+
+{%- if controller.enabled %}
+
+nova_controller_packages:
+ pkg.installed:
+ - names: {{ controller.pkgs }}
+
+{%- if not salt['user.info']('nova') %}
+user_nova:
+ user.present:
+ - name: nova
+ - home: /var/lib/nova
+ - shell: /bin/false
+ - uid: 303
+ - gid: 303
+ - system: True
+ - require_in:
+ - pkg: nova_controller_packages
+
+group_nova:
+ group.present:
+ - name: nova
+ - gid: 303
+ - system: True
+ - require_in:
+ - user: user_nova
+{%- endif %}
+
+{%- if controller.get('networking', 'default') == "contrail" %}
+
+contrail_nova_packages:
+ pkg.installed:
+ - names:
+ - contrail-nova-driver
+ - contrail-nova-networkapi
+
+/etc/nova/nova.conf:
+ file.managed:
+ - source: salt://nova/files/{{ controller.version }}/nova-controller.conf.{{ grains.os_family }}
+ - template: jinja
+ - require:
+ - pkg: nova_controller_packages
+
+{%- else %}
+
+/etc/nova/nova.conf:
+ file.managed:
+ - source: salt://nova/files/{{ controller.version }}/nova-controller.conf.{{ grains.os_family }}
+ - template: jinja
+ - require:
+ - pkg: nova_controller_packages
+
+{%- endif %}
+
+/etc/nova/api-paste.ini:
+ file.managed:
+ - source: salt://nova/files/{{ controller.version }}/api-paste.ini.{{ grains.os_family }}
+ - template: jinja
+ - require:
+ - pkg: nova_controller_packages
+
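+# Apply Nova database schema migrations before the controller services are started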
+nova_controller_syncdb:
+ cmd.run:
+ - name: nova-manage db sync
+ - require:
+ - file: /etc/nova/nova.conf
+
+nova_controller_services:
+ service.running:
+ - enable: true
+ - names: {{ controller.services }}
+ - require:
+ - cmd: nova_controller_syncdb
+ - watch:
+ - file: /etc/nova/nova.conf
+ {%- if controller.version != "icehouse" %}
+ - file: /etc/nova/api-paste.ini
+ {%- endif %}
+
+{%- endif %}
diff --git a/nova/files/collectd.conf b/nova/files/collectd.conf
new file mode 100644
index 0000000..03806b2
--- /dev/null
+++ b/nova/files/collectd.conf
@@ -0,0 +1,12 @@
+
+LoadPlugin libvirt
+
+<Plugin "libvirt">
+ Connection "qemu:///system"
+ RefreshInterval 60
+ # Domain "nova"
+ # BlockDevice "name:device"
+ # InterfaceDevice "name:interface"
+ # IgnoreSelected false
+ HostnameFormat "uuid"
+</Plugin>
diff --git a/nova/files/juno/api-paste.ini.Debian b/nova/files/juno/api-paste.ini.Debian
new file mode 100644
index 0000000..f16e36c
--- /dev/null
+++ b/nova/files/juno/api-paste.ini.Debian
@@ -0,0 +1,127 @@
+############
+# Metadata #
+############
+[composite:metadata]
+use = egg:Paste#urlmap
+/: meta
+
+[pipeline:meta]
+pipeline = ec2faultwrap logrequest metaapp
+
+[app:metaapp]
+paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory
+
+#######
+# EC2 #
+#######
+
+[composite:ec2]
+use = egg:Paste#urlmap
+/services/Cloud: ec2cloud
+
+[composite:ec2cloud]
+use = call:nova.api.auth:pipeline_factory
+noauth = ec2faultwrap logrequest ec2noauth cloudrequest validator ec2executor
+keystone = ec2faultwrap logrequest ec2keystoneauth cloudrequest validator ec2executor
+
+[filter:ec2faultwrap]
+paste.filter_factory = nova.api.ec2:FaultWrapper.factory
+
+[filter:logrequest]
+paste.filter_factory = nova.api.ec2:RequestLogging.factory
+
+[filter:ec2lockout]
+paste.filter_factory = nova.api.ec2:Lockout.factory
+
+[filter:ec2keystoneauth]
+paste.filter_factory = nova.api.ec2:EC2KeystoneAuth.factory
+
+[filter:ec2noauth]
+paste.filter_factory = nova.api.ec2:NoAuth.factory
+
+[filter:cloudrequest]
+controller = nova.api.ec2.cloud.CloudController
+paste.filter_factory = nova.api.ec2:Requestify.factory
+
+[filter:authorizer]
+paste.filter_factory = nova.api.ec2:Authorizer.factory
+
+[filter:validator]
+paste.filter_factory = nova.api.ec2:Validator.factory
+
+[app:ec2executor]
+paste.app_factory = nova.api.ec2:Executor.factory
+
+#############
+# OpenStack #
+#############
+
+[composite:osapi_compute]
+use = call:nova.api.openstack.urlmap:urlmap_factory
+/: oscomputeversions
+/v1.1: openstack_compute_api_v2
+/v2: openstack_compute_api_v2
+/v2.1: openstack_compute_api_v21
+/v3: openstack_compute_api_v3
+
+[composite:openstack_compute_api_v2]
+use = call:nova.api.auth:pipeline_factory
+noauth = compute_req_id faultwrap sizelimit noauth ratelimit osapi_compute_app_v2
+keystone = compute_req_id faultwrap sizelimit authtoken keystonecontext ratelimit osapi_compute_app_v2
+keystone_nolimit = compute_req_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v2
+
+[composite:openstack_compute_api_v21]
+use = call:nova.api.auth:pipeline_factory_v21
+noauth = request_id faultwrap sizelimit noauth osapi_compute_app_v21
+keystone = request_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v21
+
+[composite:openstack_compute_api_v3]
+use = call:nova.api.auth:pipeline_factory_v21
+noauth = request_id faultwrap sizelimit noauth_v3 osapi_compute_app_v3
+keystone = request_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v3
+
+[filter:request_id]
+paste.filter_factory = nova.openstack.common.middleware.request_id:RequestIdMiddleware.factory
+
+[filter:compute_req_id]
+paste.filter_factory = nova.api.compute_req_id:ComputeReqIdMiddleware.factory
+
+[filter:faultwrap]
+paste.filter_factory = nova.api.openstack:FaultWrapper.factory
+
+[filter:noauth]
+paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
+
+[filter:noauth_v3]
+paste.filter_factory = nova.api.openstack.auth:NoAuthMiddlewareV3.factory
+
+[filter:ratelimit]
+paste.filter_factory = nova.api.openstack.compute.limits:RateLimitingMiddleware.factory
+
+[filter:sizelimit]
+paste.filter_factory = nova.api.sizelimit:RequestBodySizeLimiter.factory
+
+[app:osapi_compute_app_v2]
+paste.app_factory = nova.api.openstack.compute:APIRouter.factory
+
+[app:osapi_compute_app_v21]
+paste.app_factory = nova.api.openstack.compute:APIRouterV21.factory
+
+[app:osapi_compute_app_v3]
+paste.app_factory = nova.api.openstack.compute:APIRouterV3.factory
+
+[pipeline:oscomputeversions]
+pipeline = faultwrap oscomputeversionapp
+
+[app:oscomputeversionapp]
+paste.app_factory = nova.api.openstack.compute.versions:Versions.factory
+
+##########
+# Shared #
+##########
+
+[filter:keystonecontext]
+paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
diff --git a/nova/files/juno/libvirt-bin b/nova/files/juno/libvirt-bin
new file mode 100644
index 0000000..1704dc4
--- /dev/null
+++ b/nova/files/juno/libvirt-bin
@@ -0,0 +1,12 @@
+# Defaults for libvirt-bin initscript (/etc/init.d/libvirt-bin)
+# This is a POSIX shell fragment
+
+# Start libvirtd to handle qemu/kvm:
+start_libvirtd="yes"
+
+# options passed to libvirtd, add "-l" to listen on tcp
+libvirtd_opts="-d -l"
+LIBVIRTD_ARGS="--listen"
+
+# pass in location of kerberos keytab
+#export KRB5_KTNAME=/etc/libvirt/libvirt.keytab
\ No newline at end of file
diff --git a/nova/files/juno/libvirtd.conf.Debian b/nova/files/juno/libvirtd.conf.Debian
new file mode 100644
index 0000000..8333dcb
--- /dev/null
+++ b/nova/files/juno/libvirtd.conf.Debian
@@ -0,0 +1,402 @@
+# Master libvirt daemon configuration file
+#
+# For further information consult http://libvirt.org/format.html
+#
+# NOTE: the tests/daemon-conf regression test script requires
+# that each "PARAMETER = VALUE" line in this file have the parameter
+# name just after a leading "#".
+
+#################################################################
+#
+# Network connectivity controls
+#
+
+# Flag listening for secure TLS connections on the public TCP/IP port.
+# NB, must pass the --listen flag to the libvirtd process for this to
+# have any effect.
+#
+# It is necessary to setup a CA and issue server certificates before
+# using this capability.
+#
+# This is enabled by default, uncomment this to disable it
+#listen_tls = 0
+
+
+listen_tls = 0
+listen_tcp = 1
+auth_tcp = "none"
+
+
+# Listen for unencrypted TCP connections on the public TCP/IP port.
+# NB, must pass the --listen flag to the libvirtd process for this to
+# have any effect.
+#
+# Using the TCP socket requires SASL authentication by default. Only
+# SASL mechanisms which support data encryption are allowed. This is
+# DIGEST_MD5 and GSSAPI (Kerberos5)
+#
+# This is disabled by default, uncomment this to enable it.
+#listen_tcp = 1
+
+
+
+# Override the port for accepting secure TLS connections
+# This can be a port number, or service name
+#
+#tls_port = "16514"
+
+# Override the port for accepting insecure TCP connections
+# This can be a port number, or service name
+#
+#tcp_port = "16509"
+
+
+# Override the default configuration which binds to all network
+# interfaces. This can be a numeric IPv4/6 address, or hostname
+#
+#listen_addr = "192.168.0.1"
+
+
+# Flag toggling mDNS advertizement of the libvirt service.
+#
+# Alternatively can disable for all services on a host by
+# stopping the Avahi daemon
+#
+# This is disabled by default, uncomment this to enable it
+#mdns_adv = 1
+
+# Override the default mDNS advertizement name. This must be
+# unique on the immediate broadcast network.
+#
+# The default is "Virtualization Host HOSTNAME", where HOSTNAME
+# is subsituted for the short hostname of the machine (without domain)
+#
+#mdns_name = "Virtualization Host Joe Demo"
+
+
+#################################################################
+#
+# UNIX socket access controls
+#
+
+# Set the UNIX domain socket group ownership. This can be used to
+# allow a 'trusted' set of users access to management capabilities
+# without becoming root.
+#
+# This is restricted to 'root' by default.
+unix_sock_group = "libvirtd"
+
+# Set the UNIX socket permissions for the R/O socket. This is used
+# for monitoring VM status only
+#
+# Default allows any user. If setting group ownership may want to
+# restrict this to:
+#unix_sock_ro_perms = "0777"
+
+# Set the UNIX socket permissions for the R/W socket. This is used
+# for full management of VMs
+#
+# Default allows only root. If PolicyKit is enabled on the socket,
+# the default will change to allow everyone (eg, 0777)
+#
+# If not using PolicyKit and setting group ownership for access
+# control then you may want to relax this to:
+unix_sock_rw_perms = "0770"
+
+# Set the name of the directory in which sockets will be found/created.
+#unix_sock_dir = "/var/run/libvirt"
+
+#################################################################
+#
+# Authentication.
+#
+# - none: do not perform auth checks. If you can connect to the
+# socket you are allowed. This is suitable if there are
+# restrictions on connecting to the socket (eg, UNIX
+# socket permissions), or if there is a lower layer in
+# the network providing auth (eg, TLS/x509 certificates)
+#
+# - sasl: use SASL infrastructure. The actual auth scheme is then
+# controlled from /etc/sasl2/libvirt.conf. For the TCP
+# socket only GSSAPI & DIGEST-MD5 mechanisms will be used.
+# For non-TCP or TLS sockets, any scheme is allowed.
+#
+# - polkit: use PolicyKit to authenticate. This is only suitable
+# for use on the UNIX sockets. The default policy will
+# require a user to supply their own password to gain
+# full read/write access (aka sudo like), while anyone
+# is allowed read/only access.
+#
+# Set an authentication scheme for UNIX read-only sockets
+# By default socket permissions allow anyone to connect
+#
+# To restrict monitoring of domains you may wish to enable
+# an authentication mechanism here
+auth_unix_ro = "none"
+
+# Set an authentication scheme for UNIX read-write sockets
+# By default socket permissions only allow root. If PolicyKit
+# support was compiled into libvirt, the default will be to
+# use 'polkit' auth.
+#
+# If the unix_sock_rw_perms are changed you may wish to enable
+# an authentication mechanism here
+auth_unix_rw = "none"
+
+# Change the authentication scheme for TCP sockets.
+#
+# If you don't enable SASL, then all TCP traffic is cleartext.
+# Don't do this outside of a dev/test scenario. For real world
+# use, always enable SASL and use the GSSAPI or DIGEST-MD5
+# mechanism in /etc/sasl2/libvirt.conf
+#auth_tcp = "sasl"
+#auth_tcp = "none"
+
+# Change the authentication scheme for TLS sockets.
+#
+# TLS sockets already have encryption provided by the TLS
+# layer, and limited authentication is done by certificates
+#
+# It is possible to make use of any SASL authentication
+# mechanism as well, by using 'sasl' for this option
+#auth_tls = "none"
+
+
+
+#################################################################
+#
+# TLS x509 certificate configuration
+#
+
+
+# Override the default server key file path
+#
+#key_file = "/etc/pki/libvirt/private/serverkey.pem"
+
+# Override the default server certificate file path
+#
+#cert_file = "/etc/pki/libvirt/servercert.pem"
+
+# Override the default CA certificate path
+#
+#ca_file = "/etc/pki/CA/cacert.pem"
+
+# Specify a certificate revocation list.
+#
+# Defaults to not using a CRL, uncomment to enable it
+#crl_file = "/etc/pki/CA/crl.pem"
+
+
+
+#################################################################
+#
+# Authorization controls
+#
+
+
+# Flag to disable verification of our own server certificates
+#
+# When libvirtd starts it performs some sanity checks against
+# its own certificates.
+#
+# Default is to always run sanity checks. Uncommenting this
+# will disable sanity checks which is not a good idea
+#tls_no_sanity_certificate = 1
+
+# Flag to disable verification of client certificates
+#
+# Client certificate verification is the primary authentication mechanism.
+# Any client which does not present a certificate signed by the CA
+# will be rejected.
+#
+# Default is to always verify. Uncommenting this will disable
+# verification - make sure an IP whitelist is set
+#tls_no_verify_certificate = 1
+
+
+# A whitelist of allowed x509 Distinguished Names
+# This list may contain wildcards such as
+#
+# "C=GB,ST=London,L=London,O=Red Hat,CN=*"
+#
+# See the POSIX fnmatch function for the format of the wildcards.
+#
+# NB If this is an empty list, no client can connect, so comment out
+# entirely rather than using empty list to disable these checks
+#
+# By default, no DN's are checked
+#tls_allowed_dn_list = ["DN1", "DN2"]
+
+
+# A whitelist of allowed SASL usernames. The format for usernames
+# depends on the SASL authentication mechanism. Kerberos usernames
+# look like username@REALM
+#
+# This list may contain wildcards such as
+#
+# "*@EXAMPLE.COM"
+#
+# See the POSIX fnmatch function for the format of the wildcards.
+#
+# NB If this is an empty list, no client can connect, so comment out
+# entirely rather than using empty list to disable these checks
+#
+# By default, no Username's are checked
+#sasl_allowed_username_list = ["joe@EXAMPLE.COM", "fred@EXAMPLE.COM" ]
+
+
+
+#################################################################
+#
+# Processing controls
+#
+
+# The maximum number of concurrent client connections to allow
+# over all sockets combined.
+#max_clients = 20
+
+
+# The minimum limit sets the number of workers to start up
+# initially. If the number of active clients exceeds this,
+# then more threads are spawned, upto max_workers limit.
+# Typically you'd want max_workers to equal maximum number
+# of clients allowed
+#min_workers = 5
+#max_workers = 20
+
+
+# The number of priority workers. If all workers from above
+# pool will stuck, some calls marked as high priority
+# (notably domainDestroy) can be executed in this pool.
+#prio_workers = 5
+
+# Total global limit on concurrent RPC calls. Should be
+# at least as large as max_workers. Beyond this, RPC requests
+# will be read into memory and queued. This directly impact
+# memory usage, currently each request requires 256 KB of
+# memory. So by default upto 5 MB of memory is used
+#
+# XXX this isn't actually enforced yet, only the per-client
+# limit is used so far
+#max_requests = 20
+
+# Limit on concurrent requests from a single client
+# connection. To avoid one client monopolizing the server
+# this should be a small fraction of the global max_requests
+# and max_workers parameter
+#max_client_requests = 5
+
+#################################################################
+#
+# Logging controls
+#
+
+# Logging level: 4 errors, 3 warnings, 2 information, 1 debug
+# basically 1 will log everything possible
+#log_level = 3
+
+# Logging filters:
+# A filter allows to select a different logging level for a given category
+# of logs
+# The format for a filter is one of:
+# x:name
+# x:+name
+# where name is a string which is matched against source file name,
+# e.g., "remote", "qemu", or "util/json", the optional "+" prefix
+# tells libvirt to log stack trace for each message matching name,
+# and x is the minimal level where matching messages should be logged:
+# 1: DEBUG
+# 2: INFO
+# 3: WARNING
+# 4: ERROR
+#
+# Multiple filter can be defined in a single @filters, they just need to be
+# separated by spaces.
+#
+# e.g. to only get warning or errors from the remote layer and only errors
+# from the event layer:
+#log_filters="3:remote 4:event"
+
+# Logging outputs:
+# An output is one of the places to save logging information
+# The format for an output can be:
+# x:stderr
+# output goes to stderr
+# x:syslog:name
+# use syslog for the output and use the given name as the ident
+# x:file:file_path
+# output to a file, with the given filepath
+# In all case the x prefix is the minimal level, acting as a filter
+# 1: DEBUG
+# 2: INFO
+# 3: WARNING
+# 4: ERROR
+#
+# Multiple output can be defined, they just need to be separated by spaces.
+# e.g. to log all warnings and errors to syslog under the libvirtd ident:
+#log_outputs="3:syslog:libvirtd"
+#
+
+# Log debug buffer size: default 64
+# The daemon keeps an internal debug log buffer which will be dumped in case
+# of crash or upon receiving a SIGUSR2 signal. This setting allows to override
+# the default buffer size in kilobytes.
+# If value is 0 or less the debug log buffer is deactivated
+#log_buffer_size = 64
+
+
+##################################################################
+#
+# Auditing
+#
+# This setting allows usage of the auditing subsystem to be altered:
+#
+# audit_level == 0 -> disable all auditing
+# audit_level == 1 -> enable auditing, only if enabled on host (default)
+# audit_level == 2 -> enable auditing, and exit if disabled on host
+#
+#audit_level = 2
+#
+# If set to 1, then audit messages will also be sent
+# via libvirt logging infrastructure. Defaults to 0
+#
+#audit_logging = 1
+
+###################################################################
+# UUID of the host:
+# Provide the UUID of the host here in case the command
+# 'dmidecode -s system-uuid' does not provide a valid uuid. In case
+# 'dmidecode' does not provide a valid UUID and none is provided here, a
+# temporary UUID will be generated.
+# Keep the format of the example UUID below. UUID must not have all digits
+# be the same.
+
+# NB This default all-zeros UUID will not work. Replace
+# it with the output of the 'uuidgen' command and then
+# uncomment this entry
+#host_uuid = "00000000-0000-0000-0000-000000000000"
+
+###################################################################
+# Keepalive protocol:
+# This allows libvirtd to detect broken client connections or even
+# dead client. A keepalive message is sent to a client after
+# keepalive_interval seconds of inactivity to check if the client is
+# still responding; keepalive_count is a maximum number of keepalive
+# messages that are allowed to be sent to the client without getting
+# any response before the connection is considered broken. In other
+# words, the connection is automatically closed approximately after
+# keepalive_interval * (keepalive_count + 1) seconds since the last
+# message received from the client. If keepalive_interval is set to
+# -1, libvirtd will never send keepalive requests; however clients
+# can still send them and the deamon will send responses. When
+# keepalive_count is set to 0, connections will be automatically
+# closed after keepalive_interval seconds of inactivity without
+# sending any keepalive messages.
+#
+#keepalive_interval = 5
+#keepalive_count = 5
+#
+# If set to 1, libvirtd will refuse to talk to clients that do not
+# support keepalive protocol. Defaults to 0.
+#
+#keepalive_required = 1
\ No newline at end of file
diff --git a/nova/files/juno/nova-compute.conf.Debian b/nova/files/juno/nova-compute.conf.Debian
new file mode 100644
index 0000000..a797084
--- /dev/null
+++ b/nova/files/juno/nova-compute.conf.Debian
@@ -0,0 +1,107 @@
+{%- set nova = pillar.nova.compute %}
+{%- from "nova/map.jinja" import compute with context %}
+[DEFAULT]
+logdir = /var/log/nova
+state_path = /var/lib/nova
+lock_path = /var/lib/nova/tmp
+volumes_dir = /etc/nova/volumes
+dhcpbridge = /usr/bin/nova-dhcpbridge
+dhcpbridge_flagfile = /etc/nova/nova.conf
+force_dhcp_release = True
+injected_network_template = /usr/share/nova/interfaces.template
+libvirt_nonblocking = True
+libvirt_use_virtio_for_bridges=True
+libvirt_inject_partition = -1
+sql_connection = {{ nova.database.engine }}://{{ nova.database.user }}:{{ nova.database.password }}@{{ nova.database.host }}/{{ nova.database.name }}
+compute_driver = libvirt.LibvirtDriver
+libvirt_type=kvm
+rootwrap_config = /etc/nova/rootwrap.conf
+auth_strategy = keystone
+firewall_driver=nova.virt.firewall.NoopFirewallDriver
+enabled_apis = ec2,osapi_compute,metadata
+my_ip= {{ compute.bind.vnc_address }}
+
+logdir=/var/log/nova
+iscsi_helper=tgtadm
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+ec2_private_dns_show_ip=True
+api_paste_config=/etc/nova/api-paste.ini
+volumes_path=/var/lib/nova/volumes
+
+network_api_class = nova.network.neutronv2.api.API
+
+#NoVNC
+vnc_enabled=true
+my_ip={{ compute.bind.vnc_address }}
+
+vncserver_proxyclient_address={{ compute.bind.vnc_address }}
+novncproxy_base_url={{ compute.vncproxy_url }}/vnc_auto.html
+novncproxy_port={{ nova.bind.vnc_port }}
+
+{%- if compute.cache is defined %}
+memcached_servers={%- for member in compute.cache.members %}{{ member.host }}:11211{% if not loop.last %},{% endif %}{%- endfor %}
+{%- endif %}
+
+
+#Neutron
+neutron_admin_username={{ pillar.neutron.switch.identity.user }}
+neutron_admin_password={{ pillar.neutron.switch.identity.password }}
+neutron_admin_tenant_name={{ pillar.neutron.switch.identity.tenant }}
+neutron_admin_auth_url=http://{{ pillar.neutron.switch.identity.host }}:{{ pillar.neutron.switch.identity.port }}/v2.0
+neutron_url=http://{{ compute.network.host }}:{{ compute.network.port }}
+
+linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+security_group_api = neutron
+rpc_backend = nova.rpc.impl_kombu
+
+#RabbitMQ
+rabbit_host = {{ compute.message_queue.host }}
+rabbit_port = {{ compute.message_queue.port }}
+rabbit_hosts={{ compute.message_queue.host }}:{{ compute.message_queue.port }}
+rabbit_userid = {{ compute.message_queue.user }}
+rabbit_password = {{ compute.message_queue.password }}
+rabbit_virtual_host = {{ compute.message_queue.virtual_host }}
+
+glance_host = {{ compute.image.host }}
+
+{%- if compute.image.use_cow is defined %}
+use_cow_images = {{ compute.image.use_cow }}
+{%- endif %}
+
+live_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE
+live_migration_retry_count=30
+
+{% if pillar.ceilometer is defined %}
+
+instance_usage_audit = True
+instance_usage_audit_period = hour
+notify_on_state_change = vm_and_task_state
+notification_driver = messagingv2
+
+{% endif %}
+
+{%- if compute.notification is defined %}
+notification_driver = {{ compute.notification.driver }}
+{%- if compute.notification.topics is defined %}
+notification_topics = {{ compute.notification.topics }}
+{%- endif %}
+
+{%- if compute.notification.notify_on %}
+{%- for key, value in compute.notification.notify_on.iteritems() %}
+notify_on_{{ key }} = {{ value }}
+{%- endfor %}
+{%- endif %}
+
+{%- endif %}
+
+[keystone_authtoken]
+signing_dirname=/tmp/keystone-signing-nova
+admin_tenant_name = {{ compute.identity.tenant }}
+admin_user = {{ compute.identity.user }}
+admin_password = {{ compute.identity.password }}
+auth_host = {{ compute.identity.host }}
+auth_port = {{ compute.identity.port }}
+auth_protocol=http
+auth_uri=http://{{ compute.identity.host }}:5000
+
+
diff --git a/nova/files/juno/nova-compute.conf.contrail.Debian b/nova/files/juno/nova-compute.conf.contrail.Debian
new file mode 100644
index 0000000..1367727
--- /dev/null
+++ b/nova/files/juno/nova-compute.conf.contrail.Debian
@@ -0,0 +1,123 @@
+{%- from "nova/map.jinja" import compute with context %}
+[DEFAULT]
+dhcpbridge_flagfile=/etc/nova/nova.conf
+dhcpbridge=/usr/bin/nova-dhcpbridge
+logdir=/var/log/nova
+state_path=/var/lib/nova
+lock_path=/var/lock/nova
+force_dhcp_release=True
+iscsi_helper=tgtadm
+libvirt_use_virtio_for_bridges=True
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+verbose=True
+ec2_private_dns_show_ip=False
+api_paste_config=/etc/nova/api-paste.ini
+volumes_path=/var/lib/nova/volumes
+enabled_apis=ec2,osapi_compute,metadata
+
+allow_resize_to_same_host=True
+
+live_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE
+live_migration_retry_count=30
+
+#Neutron
+neutron_admin_username={{ compute.network.user }}
+neutron_admin_password={{ compute.network.password }}
+neutron_admin_tenant_name={{ compute.identity.tenant }}
+neutron_admin_auth_url=http://{{ compute.identity.host }}:{{ compute.identity.port }}/v2.0
+neutron_url=http://{{ compute.network.host }}:{{ compute.network.port }}
+#neutron_url = http://10.0.102.35:9696/
+
+
+auth_strategy = keystone
+libvirt_nonblocking = True
+libvirt_inject_partition = -1
+
+#RabbitMQ
+rabbit_host = {{ compute.message_queue.host }}
+rabbit_port = {{ compute.message_queue.port }}
+rabbit_hosts={{ compute.message_queue.host }}:{{ compute.message_queue.port }}
+rabbit_userid = {{ compute.message_queue.user }}
+rabbit_password = {{ compute.message_queue.password }}
+rabbit_virtual_host = {{ compute.message_queue.virtual_host }}
+
+glance_host = {{ compute.image.host }}
+
+{%- if compute.image.use_cow is defined %}
+use_cow_images = {{ compute.image.use_cow }}
+{%- endif %}
+
+neutron_url_timeout = 300
+network_api_class = nova_contrail_vif.contrailvif.ContrailNetworkAPI
+compute_driver = libvirt.LibvirtDriver
+
+#NoVNC
+vnc_enabled=true
+vncserver_enabled = true
+vncserver_listen=0.0.0.0
+vncserver_proxyclient_address={{ compute.bind.vnc_address }}
+novncproxy_base_url={{ compute.vncproxy_url }}/vnc_auto.html
+novncproxy_port={{ compute.bind.vnc_port }}
+
+{%- if compute.cache is defined %}
+memcached_servers={%- for member in compute.cache.members %}{{ member.host }}:11211{% if not loop.last %},{% endif %}{%- endfor %}
+{%- endif %}
+
+security_group_api = neutron
+heal_instance_info_cache_interval = 0
+libvirt_cpu_mode = host-passthrough
+image_cache_manager_interval = 0
+libvirt_vif_driver = nova_contrail_vif.contrailvif.VRouterVIFDriver
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+glance_port = 9292
+glance_num_retries = 10
+
+rabbit_retry_interval = 1
+rabbit_retry_backoff = 2
+
+rpc_cast_timeout = 30
+rpc_conn_pool_size = 300
+rpc_response_timeout = 3600
+rpc_thread_pool_size = 70
+report_interval = 5
+
+block_device_allocate_retries=600
+block_device_allocate_retries_interval=10
+
+resume_guests_state_on_host_boot = True
+service_down_time = 90
+
+{% if pillar.ceilometer is defined %}
+
+instance_usage_audit = True
+instance_usage_audit_period = hour
+notify_on_state_change = vm_and_task_state
+notification_driver = messagingv2
+
+{% endif %}
+
+{%- if compute.notification is defined %}
+notification_driver = {{ compute.notification.driver }}
+
+{%- if compute.notification.topics is defined %}
+notification_topics = {{ compute.notification.topics }}
+{%- endif %}
+
+{%- if compute.notification.notify_on is defined %}
+{%- for key, value in compute.notification.notify_on.iteritems() %}
+notify_on_{{ key }} = {{ value }}
+{%- endfor %}
+{%- endif %}
+
+{%- endif %}
+
+[keystone_authtoken]
+signing_dirname=/tmp/keystone-signing-nova
+admin_tenant_name = {{ compute.identity.tenant }}
+admin_user = {{ compute.identity.user }}
+admin_password = {{ compute.identity.password }}
+auth_host = {{ compute.identity.host }}
+auth_port = {{ compute.identity.port }}
+auth_protocol=http
+auth_uri=http://{{ compute.identity.host }}:5000
diff --git a/nova/files/juno/nova-controller.conf.Debian b/nova/files/juno/nova-controller.conf.Debian
new file mode 100644
index 0000000..d32b380
--- /dev/null
+++ b/nova/files/juno/nova-controller.conf.Debian
@@ -0,0 +1,152 @@
+{%- from "nova/map.jinja" import controller with context %}
+[DEFAULT]
+logdir = /var/log/nova
+verbose = True
+{%- if controller.debug %}
+debug = True
+{%- else %}
+debug = False
+{%- endif %}
+state_path = /var/lib/nova
+lock_path = /var/lib/nova/tmp
+volumes_dir = /etc/nova/volumes
+dhcpbridge = /usr/bin/nova-dhcpbridge
+dhcpbridge_flagfile = /etc/nova/nova.conf
+force_dhcp_release = True
+injected_network_template = /usr/share/nova/interfaces.template
+libvirt_nonblocking = True
+libvirt_use_virtio_for_bridges=True
+libvirt_inject_partition = -1
+vif_plugging_is_fatal = False
+vif_plugging_timeout = 0
+
+allow_resize_to_same_host = True
+
+logdir=/var/log/nova
+iscsi_helper=tgtadm
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+ec2_private_dns_show_ip=True
+api_paste_config=/etc/nova/api-paste.ini
+volumes_path=/var/lib/nova/volumes
+
+sql_connection = {{ controller.database.engine }}://{{ controller.database.user }}:{{ controller.database.password }}@{{ controller.database.host }}/{{ controller.database.name }}
+
+{%- if controller.get('networking', 'default') == "contrail" %}
+libvirt_vif_driver = nova_contrail_vif.contrailvif.VRouterVIFDriver
+network_api_class = nova_contrail_vif.contrailvif.ContrailNetworkAPI
+{%- else %}
+network_api_class = nova.network.neutronv2.api.API
+{%- endif %}
+
+compute_driver = libvirt.LibvirtDriver
+libvirt_type=kvm
+rootwrap_config = /etc/nova/rootwrap.conf
+auth_strategy = keystone
+firewall_driver=nova.virt.firewall.NoopFirewallDriver
+enabled_apis = ec2,osapi_compute,metadata
+
+my_ip={{ controller.bind.private_address }}
+
+
+
+neutron_auth_strategy = keystone
+neutron_admin_auth_url = http://{{ controller.identity.host }}:35357/v2.0
+{% if pillar.neutron is defined %}
+neutron_admin_password={{ pillar.neutron.server.identity.password }}
+neutron_admin_tenant_name={{ pillar.neutron.server.identity.tenant }}
+neutron_admin_username={{ pillar.neutron.server.identity.user }}
+{%- else %}
+neutron_admin_password={{ controller.network.password }}
+neutron_admin_tenant_name={{ controller.network.tenant }}
+neutron_admin_username={{ controller.network.user }}
+{%- endif %}
+neutron_url=http://{{ controller.network.host }}:{{ controller.network.port }}
+
+
+linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+security_group_api = neutron
+
+rpc_backend = nova.rpc.impl_kombu
+start_guests_on_host_boot=true
+
+{%- if controller.cache is defined %}
+memcached_servers={%- for member in controller.cache.members %}{{ member.host }}:11211{% if not loop.last %},{% endif %}{%- endfor %}
+{%- endif %}
+
+#RabbitMQ
+rabbit_host = {{ controller.message_queue.host }}
+rabbit_port = {{ controller.message_queue.port }}
+rabbit_hosts={{ controller.message_queue.host }}:{{ controller.message_queue.port }}
+rabbit_userid = {{ controller.message_queue.user }}
+rabbit_password = {{ controller.message_queue.password }}
+rabbit_virtual_host = {{ controller.message_queue.virtual_host }}
+
+rabbit_retry_interval = 1
+rabbit_retry_backoff = 2
+
+volume_api_class=nova.volume.cinder.API
+ec2_listen={{ controller.bind.private_address }}
+osapi_volume_listen={{ controller.bind.private_address }}
+osapi_compute_listen={{ controller.bind.private_address }}
+metadata_listen={{ controller.bind.private_address }}
+glance_host = {{ controller.glance.host }}
+#metadata_host = {{ controller.glance.host }}
+osapi_compute_workers=8
+
+#NoVNC
+vnc_enabled=true
+{%- if pillar.nova.compute is defined %}
+vncserver_listen={{ controller.bind.private_address }}
+vncserver_proxyclient_address={{ controller.bind.private_address }}
+{%- else %}
+vncserver_listen=0.0.0.0
+{%- endif %}
+novncproxy_base_url={{ controller.vncproxy_url }}/vnc_auto.html
+novncproxy_port={{ controller.bind.get('vncproxy_port', '6080') }}
+
+#metadata_listen_port = 8755
+service_neutron_metadata_proxy = True
+{%- if controller.get('networking', 'default') != "contrail" %}
+neutron_metadata_proxy_shared_secret={{ controller.metadata.password }}
+{%- endif %}
+
+allow_resize_to_same_host=True
+start_guests_on_host_boot=true
+
+rpc_cast_timeout = 30
+rpc_conn_pool_size = 300
+rpc_response_timeout = 3600
+rpc_thread_pool_size = 70
+report_interval = 5
+
+block_device_allocate_retries=600
+block_device_allocate_retries_interval=10
+
+[keystone_authtoken]
+signing_dirname=/tmp/keystone-signing-nova
+signing_dir=/tmp/keystone-signing-nova
+admin_tenant_name = {{ controller.identity.tenant }}
+admin_user = {{ controller.identity.user }}
+admin_password = {{ controller.identity.password }}
+auth_host = {{ controller.identity.host }}
+auth_port = {{ controller.identity.port }}
+auth_protocol=http
+auth_uri=http://{{ controller.identity.host }}:5000
+
+[conductor]
+workers = 8
+
+[database]
+idle_timeout = 180
+min_pool_size = 100
+max_pool_size = 700
+max_overflow = 100
+retry_interval = 5
+max_retries = -1
+db_max_retries = 3
+db_retry_interval = 1
+connection_debug = 10
+pool_timeout = 120
+connection = {{ controller.database.engine }}://{{ controller.database.user }}:{{ controller.database.password }}@{{ controller.database.host }}/{{ controller.database.name }}
\ No newline at end of file
diff --git a/nova/files/juno/qemu.conf.Debian b/nova/files/juno/qemu.conf.Debian
new file mode 100644
index 0000000..5c5722b
--- /dev/null
+++ b/nova/files/juno/qemu.conf.Debian
@@ -0,0 +1,479 @@
+{%- from "nova/map.jinja" import compute with context %}
+# Master configuration file for the QEMU driver.
+# All settings described here are optional - if omitted, sensible
+# defaults are used.
+
+# VNC is configured to listen on 127.0.0.1 by default.
+# To make it listen on all public interfaces, uncomment
+# this next option.
+#
+# NB, strong recommendation to enable TLS + x509 certificate
+# verification when allowing public access
+#
+#vnc_listen = "0.0.0.0"
+
+# Enable this option to have VNC served over an automatically created
+# unix socket. This prevents unprivileged access from users on the
+# host machine, though most VNC clients do not support it.
+#
+# This will only be enabled for VNC configurations that do not have
+# a hardcoded 'listen' or 'socket' value. This setting takes preference
+# over vnc_listen.
+#
+#vnc_auto_unix_socket = 1
+
+# Enable use of TLS encryption on the VNC server. This requires
+# a VNC client which supports the VeNCrypt protocol extension.
+# Examples include vinagre, virt-viewer, virt-manager and vencrypt
+# itself. UltraVNC, RealVNC, TightVNC do not support this
+#
+# It is necessary to setup CA and issue a server certificate
+# before enabling this.
+#
+#vnc_tls = 1
+
+
+# Use of TLS requires that x509 certificates be issued. The
+# default it to keep them in /etc/pki/libvirt-vnc. This directory
+# must contain
+#
+# ca-cert.pem - the CA master certificate
+# server-cert.pem - the server certificate signed with ca-cert.pem
+# server-key.pem - the server private key
+#
+# This option allows the certificate directory to be changed
+#
+#vnc_tls_x509_cert_dir = "/etc/pki/libvirt-vnc"
+
+
+# The default TLS configuration only uses certificates for the server
+# allowing the client to verify the server's identity and establish
+# an encrypted channel.
+#
+# It is possible to use x509 certificates for authentication too, by
+# issuing a x509 certificate to every client who needs to connect.
+#
+# Enabling this option will reject any client who does not have a
+# certificate signed by the CA in /etc/pki/libvirt-vnc/ca-cert.pem
+#
+#vnc_tls_x509_verify = 1
+
+
+# The default VNC password. Only 8 bytes are significant for
+# VNC passwords. This parameter is only used if the per-domain
+# XML config does not already provide a password. To allow
+# access without passwords, leave this commented out. An empty
+# string will still enable passwords, but be rejected by QEMU,
+# effectively preventing any use of VNC. Obviously change this
+# example here before you set this.
+#
+#vnc_password = "XYZ12345"
+
+
+# Enable use of SASL encryption on the VNC server. This requires
+# a VNC client which supports the SASL protocol extension.
+# Examples include vinagre, virt-viewer and virt-manager
+# itself. UltraVNC, RealVNC, TightVNC do not support this
+#
+# It is necessary to configure /etc/sasl2/qemu.conf to choose
+# the desired SASL plugin (eg, GSSPI for Kerberos)
+#
+#vnc_sasl = 1
+
+
+# The default SASL configuration file is located in /etc/sasl2/
+# When running libvirtd unprivileged, it may be desirable to
+# override the configs in this location. Set this parameter to
+# point to the directory, and create a qemu.conf in that location
+#
+#vnc_sasl_dir = "/some/directory/sasl2"
+
+
+# QEMU implements an extension for providing audio over a VNC connection,
+# though if your VNC client does not support it, your only chance for getting
+# sound output is through regular audio backends. By default, libvirt will
+# disable all QEMU sound backends if using VNC, since they can cause
+# permissions issues. Enabling this option will make libvirtd honor the
+# QEMU_AUDIO_DRV environment variable when using VNC.
+#
+#vnc_allow_host_audio = 0
+
+
+
+# SPICE is configured to listen on 127.0.0.1 by default.
+# To make it listen on all public interfaces, uncomment
+# this next option.
+#
+# NB, strong recommendation to enable TLS + x509 certificate
+# verification when allowing public access
+#
+#spice_listen = "0.0.0.0"
+
+
+# Enable use of TLS encryption on the SPICE server.
+#
+# It is necessary to setup CA and issue a server certificate
+# before enabling this.
+#
+#spice_tls = 1
+
+
+# Use of TLS requires that x509 certificates be issued. The
+# default it to keep them in /etc/pki/libvirt-spice. This directory
+# must contain
+#
+# ca-cert.pem - the CA master certificate
+# server-cert.pem - the server certificate signed with ca-cert.pem
+# server-key.pem - the server private key
+#
+# This option allows the certificate directory to be changed.
+#
+#spice_tls_x509_cert_dir = "/etc/pki/libvirt-spice"
+
+
+# The default SPICE password. This parameter is only used if the
+# per-domain XML config does not already provide a password. To
+# allow access without passwords, leave this commented out. An
+# empty string will still enable passwords, but be rejected by
+# QEMU, effectively preventing any use of SPICE. Obviously change
+# this example here before you set this.
+#
+#spice_password = "XYZ12345"
+
+
+# Enable use of SASL encryption on the SPICE server. This requires
+# a SPICE client which supports the SASL protocol extension.
+#
+# It is necessary to configure /etc/sasl2/qemu.conf to choose
+# the desired SASL plugin (eg, GSSPI for Kerberos)
+#
+#spice_sasl = 1
+
+# The default SASL configuration file is located in /etc/sasl2/
+# When running libvirtd unprivileged, it may be desirable to
+# override the configs in this location. Set this parameter to
+# point to the directory, and create a qemu.conf in that location
+#
+#spice_sasl_dir = "/some/directory/sasl2"
+
+
+# By default, if no graphical front end is configured, libvirt will disable
+# QEMU audio output since directly talking to alsa/pulseaudio may not work
+# with various security settings. If you know what you're doing, enable
+# the setting below and libvirt will passthrough the QEMU_AUDIO_DRV
+# environment variable when using nographics.
+#
+#nographics_allow_host_audio = 1
+
+
+# Override the port for creating both VNC and SPICE sessions (min).
+# This defaults to 5900 and increases for consecutive sessions
+# or when ports are occupied, until it hits the maximum.
+#
+# Minimum must be greater than or equal to 5900 as lower number would
+# result into negative vnc display number.
+#
+# Maximum must be less than 65536, because higher numbers do not make
+# sense as a port number.
+#
+#remote_display_port_min = 5900
+#remote_display_port_max = 65535
+
+# VNC WebSocket port policies, same rules apply as with remote display
+# ports. VNC WebSockets use similar display <-> port mappings, with
+# the exception being that ports start from 5700 instead of 5900.
+#
+#remote_websocket_port_min = 5700
+#remote_websocket_port_max = 65535
+
+# The default security driver is SELinux. If SELinux is disabled
+# on the host, then the security driver will automatically disable
+# itself. If you wish to disable QEMU SELinux security driver while
+# leaving SELinux enabled for the host in general, then set this
+# to 'none' instead. It's also possible to use more than one security
+# driver at the same time, for this use a list of names separated by
+# comma and delimited by square brackets. For example:
+#
+# security_driver = [ "selinux", "apparmor" ]
+#
+# Notes: The DAC security driver is always enabled; as a result, the
+# value of security_driver cannot contain "dac". The value "none" is
+# a special value; security_driver can be set to that value in
+# isolation, but it cannot appear in a list of drivers.
+#
+#security_driver = "selinux"
+
+# If set to non-zero, then the default security labeling
+# will make guests confined. If set to zero, then guests
+# will be unconfined by default. Defaults to 1.
+#security_default_confined = 1
+
+# If set to non-zero, then attempts to create unconfined
+# guests will be blocked. Defaults to 0.
+#security_require_confined = 1
+
+# The user for QEMU processes run by the system instance. It can be
+# specified as a user name or as a user id. The qemu driver will try to
+# parse this value first as a name and then, if the name doesn't exist,
+# as a user id.
+#
+# Since a sequence of digits is a valid user name, a leading plus sign
+# can be used to ensure that a user id will not be interpreted as a user
+# name.
+#
+# Some examples of valid values are:
+#
+# user = "qemu" # A user named "qemu"
+# user = "+0" # Super user (uid=0)
+# user = "100" # A user named "100" or a user with uid=100
+#
+#user = "root"
+
+# The group for QEMU processes run by the system instance. It can be
+# specified in a similar way to user.
+#group = "root"
+
+# Whether libvirt should dynamically change file ownership
+# to match the configured user/group above. Defaults to 1.
+# Set to 0 to disable file ownership changes.
+#dynamic_ownership = 1
+
+
+# What cgroup controllers to make use of with QEMU guests
+#
+# - 'cpu' - use for scheduler tunables
+# - 'devices' - use for device whitelisting
+# - 'memory' - use for memory tunables
+# - 'blkio' - use for block devices I/O tunables
+# - 'cpuset' - use for CPUs and memory nodes
+# - 'cpuacct' - use for CPU statistics.
+#
+# NB, even if configured here, they won't be used unless
+# the administrator has mounted cgroups, e.g.:
+#
+# mkdir /dev/cgroup
+# mount -t cgroup -o devices,cpu,memory,blkio,cpuset none /dev/cgroup
+#
+# They can be mounted anywhere, and different controllers
+# can be mounted in different locations. libvirt will detect
+# where they are located.
+#
+#cgroup_controllers = [ "cpu", "devices", "memory", "blkio", "cpuset", "cpuacct" ]
+
+# This is the basic set of devices allowed / required by
+# all virtual machines.
+#
+# As well as this, any configured block backed disks,
+# all sound devices, and all PTY devices are allowed.
+#
+# This will only need setting if newer QEMU suddenly
+# wants some device we don't already know about.
+#
+#cgroup_device_acl = [
+# "/dev/null", "/dev/full", "/dev/zero",
+# "/dev/random", "/dev/urandom",
+# "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
+# "/dev/rtc","/dev/hpet", "/dev/vfio/vfio"
+#]
+
+
+# The default format for Qemu/KVM guest save images is raw; that is, the
+# memory from the domain is dumped out directly to a file. If you have
+# guests with a large amount of memory, however, this can take up quite
+# a bit of space. If you would like to compress the images while they
+# are being saved to disk, you can also set "lzop", "gzip", "bzip2", or "xz"
+# for save_image_format. Note that this means you slow down the process of
+# saving a domain in order to save disk space; the list above is in descending
+# order by performance and ascending order by compression ratio.
+#
+# save_image_format is used when you use 'virsh save' or 'virsh managedsave'
+# at scheduled saving, and it is an error if the specified save_image_format
+# is not valid, or the requested compression program can't be found.
+#
+# dump_image_format is used when you use 'virsh dump' at emergency
+# crashdump, and if the specified dump_image_format is not valid, or
+# the requested compression program can't be found, this falls
+# back to "raw" compression.
+#
+# snapshot_image_format specifies the compression algorithm of the memory save
+# image when an external snapshot of a domain is taken. This does not apply
+# on disk image format. It is an error if the specified format isn't valid,
+# or the requested compression program can't be found.
+#
+#save_image_format = "raw"
+#dump_image_format = "raw"
+#snapshot_image_format = "raw"
+
+# When a domain is configured to be auto-dumped when libvirtd receives a
+# watchdog event from qemu guest, libvirtd will save dump files in directory
+# specified by auto_dump_path. Default value is /var/lib/libvirt/qemu/dump
+#
+#auto_dump_path = "/var/lib/libvirt/qemu/dump"
+
+# When a domain is configured to be auto-dumped, enabling this flag
+# has the same effect as using the VIR_DUMP_BYPASS_CACHE flag with the
+# virDomainCoreDump API. That is, the system will avoid using the
+# file system cache while writing the dump file, but may cause
+# slower operation.
+#
+#auto_dump_bypass_cache = 0
+
+# When a domain is configured to be auto-started, enabling this flag
+# has the same effect as using the VIR_DOMAIN_START_BYPASS_CACHE flag
+# with the virDomainCreateWithFlags API. That is, the system will
+# avoid using the file system cache when restoring any managed state
+# file, but may cause slower operation.
+#
+#auto_start_bypass_cache = 0
+
+# If provided by the host and a hugetlbfs mount point is configured,
+# a guest may request huge page backing. When this mount point is
+# unspecified here, determination of a host mount point in /proc/mounts
+# will be attempted. Specifying an explicit mount overrides detection
+# of the same in /proc/mounts. Setting the mount point to "" will
+# disable guest hugepage backing.
+#
+# NB, within this mount point, guests will create memory backing files
+# in a location of $MOUNTPOINT/libvirt/qemu
+#
+#hugetlbfs_mount = "/dev/hugepages"
+
+
+# Path to the setuid helper for creating tap devices. This executable
+# is used to create <source type='bridge'> interfaces when libvirtd is
+# running unprivileged. libvirt invokes the helper directly, instead
+# of using "-netdev bridge", for security reasons.
+#bridge_helper = "/usr/libexec/qemu-bridge-helper"
+
+
+
+# If clear_emulator_capabilities is enabled, libvirt will drop all
+# privileged capabilities of the QEmu/KVM emulator. This is enabled by
+# default.
+#
+# Warning: Disabling this option means that a compromised guest can
+# exploit the privileges and possibly do damage to the host.
+#
+#clear_emulator_capabilities = 1
+
+
+# If enabled, libvirt will have QEMU set its process name to
+# "qemu:VM_NAME", where VM_NAME is the name of the VM. The QEMU
+# process will appear as "qemu:VM_NAME" in process listings and
+# other system monitoring tools. By default, QEMU does not set
+# its process title, so the complete QEMU command (emulator and
+# its arguments) appear in process listings.
+#
+#set_process_name = 1
+
+
+# If max_processes is set to a positive integer, libvirt will use
+# it to set the maximum number of processes that can be run by the
+# qemu user. This can be used to override the default value set by the
+# host OS. The same applies to max_files, which sets the limit on the
+# maximum number of open files.
+#
+{%- if compute.qemu is defined %}
+
+{%- if compute.qemu.max_processes is defined %}
+max_processes = {{ compute.qemu.max_processes }}
+{%- endif %}
+
+{%- if compute.qemu.max_files is defined %}
+max_files = {{ compute.qemu.max_files }}
+{%- endif %}
+
+{%- endif %}
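+
+# A minimal sketch of what the Jinja block above renders to, assuming
+# the pillar defines nova:compute:qemu:max_processes and max_files as
+# 4096 (hypothetical values, not defaults):
+#
+# max_processes = 4096
+# max_files = 4096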
+
+# mac_filter enables MAC address based filtering on bridge ports.
+# This currently requires ebtables to be installed.
+#
+#mac_filter = 1
+
+
+# By default, PCI devices below a non-ACS switch are not allowed to be assigned
+# to guests. By setting relaxed_acs_check to 1, such devices will be allowed to
+# be assigned to guests.
+#
+#relaxed_acs_check = 1
+
+
+# If allow_disk_format_probing is enabled, libvirt will probe disk
+# images to attempt to identify their format, when not otherwise
+# specified in the XML. This is disabled by default.
+#
+# WARNING: Enabling probing is a security hole in almost all
+# deployments. It is strongly recommended that users update their
+# guest XML <disk> elements to include <driver type='XXXX'/>
+# elements instead of enabling this option.
+#
+#allow_disk_format_probing = 1
+
+
+# To enable 'Sanlock' project based locking of the file
+# content (to prevent two VMs writing to the same
+# disk), uncomment this
+#
+#lock_manager = "sanlock"
+
+
+
+# Set the limit on the maximum number of API calls queued on one
+# domain. Calls over this threshold will fail to acquire the job lock.
+# Setting this to zero turns the feature off.
+# Note that the job lock is per domain.
+#
+#max_queued = 0
+
+###################################################################
+# Keepalive protocol:
+# This allows qemu driver to detect broken connections to remote
+# libvirtd during peer-to-peer migration. A keepalive message is
+# sent to the daemon after keepalive_interval seconds of inactivity
+# to check if the daemon is still responding; keepalive_count is a
+# maximum number of keepalive messages that are allowed to be sent
+# to the daemon without getting any response before the connection
+# is considered broken. In other words, the connection is
+# automatically closed approximately after
+# keepalive_interval * (keepalive_count + 1) seconds since the last
+# message received from the daemon. If keepalive_interval is set to
+# -1, qemu driver will not send keepalive requests during
+# peer-to-peer migration; however, the remote libvirtd can still
+# send them and source libvirtd will send responses. When
+# keepalive_count is set to 0, connections will be automatically
+# closed after keepalive_interval seconds of inactivity without
+# sending any keepalive messages.
+#
+#keepalive_interval = 5
+#keepalive_count = 5
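+#
+# As a worked example of the formula above: with the defaults of
+# keepalive_interval = 5 and keepalive_count = 5, a broken connection
+# is closed after roughly 5 * (5 + 1) = 30 seconds of silence.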
+
+
+
+# Use seccomp syscall whitelisting in QEMU.
+# 1 = on, 0 = off, -1 = use QEMU default
+# Defaults to -1.
+#
+#seccomp_sandbox = 1
+
+
+
+# Override the listen address for all incoming migrations. Defaults to
+# 0.0.0.0, or :: if both host and qemu are capable of IPv6.
+#migration_address = "127.0.0.1"
+
+
+# Override the port range used for incoming migrations.
+#
+# Minimum must be greater than 0, however when QEMU is not running as root,
+# setting the minimum to be lower than 1024 will not work.
+#
+# Maximum must not be greater than 65535.
+#
+#migration_port_min = 49152
+#migration_port_max = 49215
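+
+# The uncommented ACL below overrides the default list shown earlier in
+# this file; compared to that default it adds /dev/net/tun (presumably
+# so QEMU can open tap/vhost-net devices for instance networking) and
+# omits /dev/vfio/vfio.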
+cgroup_device_acl = [
+ "/dev/null", "/dev/full", "/dev/zero",
+ "/dev/random", "/dev/urandom",
+ "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
+ "/dev/rtc", "/dev/hpet","/dev/net/tun",
+]
diff --git a/nova/files/kilo/api-paste.ini.Debian b/nova/files/kilo/api-paste.ini.Debian
new file mode 100644
index 0000000..1a87f0c
--- /dev/null
+++ b/nova/files/kilo/api-paste.ini.Debian
@@ -0,0 +1,134 @@
+############
+# Metadata #
+############
+[composite:metadata]
+use = egg:Paste#urlmap
+/: meta
+
+[pipeline:meta]
+pipeline = ec2faultwrap logrequest metaapp
+
+[app:metaapp]
+paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory
+
+#######
+# EC2 #
+#######
+
+[composite:ec2]
+use = egg:Paste#urlmap
+/: ec2cloud
+
+[composite:ec2cloud]
+use = call:nova.api.auth:pipeline_factory
+noauth = ec2faultwrap logrequest ec2noauth cloudrequest validator ec2executor
+noauth2 = ec2faultwrap logrequest ec2noauth cloudrequest validator ec2executor
+keystone = ec2faultwrap logrequest ec2keystoneauth cloudrequest validator ec2executor
+
+[filter:ec2faultwrap]
+paste.filter_factory = nova.api.ec2:FaultWrapper.factory
+
+[filter:logrequest]
+paste.filter_factory = nova.api.ec2:RequestLogging.factory
+
+[filter:ec2lockout]
+paste.filter_factory = nova.api.ec2:Lockout.factory
+
+[filter:ec2keystoneauth]
+paste.filter_factory = nova.api.ec2:EC2KeystoneAuth.factory
+
+[filter:ec2noauth]
+paste.filter_factory = nova.api.ec2:NoAuth.factory
+
+[filter:cloudrequest]
+controller = nova.api.ec2.cloud.CloudController
+paste.filter_factory = nova.api.ec2:Requestify.factory
+
+[filter:authorizer]
+paste.filter_factory = nova.api.ec2:Authorizer.factory
+
+[filter:validator]
+paste.filter_factory = nova.api.ec2:Validator.factory
+
+[app:ec2executor]
+paste.app_factory = nova.api.ec2:Executor.factory
+
+#############
+# OpenStack #
+#############
+
+[composite:osapi_compute]
+use = call:nova.api.openstack.urlmap:urlmap_factory
+/: oscomputeversions
+/v1.1: openstack_compute_api_v2
+/v2: openstack_compute_api_v2
+/v2.1: openstack_compute_api_v21
+/v3: openstack_compute_api_v3
+
+[composite:openstack_compute_api_v2]
+use = call:nova.api.auth:pipeline_factory
+noauth = compute_req_id faultwrap sizelimit noauth ratelimit osapi_compute_app_v2
+noauth2 = compute_req_id faultwrap sizelimit noauth2 ratelimit osapi_compute_app_v2
+keystone = compute_req_id faultwrap sizelimit authtoken keystonecontext ratelimit osapi_compute_app_v2
+keystone_nolimit = compute_req_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v2
+
+[composite:openstack_compute_api_v21]
+use = call:nova.api.auth:pipeline_factory_v21
+noauth = compute_req_id faultwrap sizelimit noauth osapi_compute_app_v21
+noauth2 = compute_req_id faultwrap sizelimit noauth2 osapi_compute_app_v21
+keystone = compute_req_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v21
+
+[composite:openstack_compute_api_v3]
+use = call:nova.api.auth:pipeline_factory_v21
+noauth = request_id faultwrap sizelimit noauth_v3 osapi_compute_app_v3
+noauth2 = request_id faultwrap sizelimit noauth_v3 osapi_compute_app_v3
+keystone = request_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v3
+
+[filter:request_id]
+paste.filter_factory = oslo.middleware:RequestId.factory
+
+[filter:compute_req_id]
+paste.filter_factory = nova.api.compute_req_id:ComputeReqIdMiddleware.factory
+
+[filter:faultwrap]
+paste.filter_factory = nova.api.openstack:FaultWrapper.factory
+
+[filter:noauth]
+paste.filter_factory = nova.api.openstack.auth:NoAuthMiddlewareOld.factory
+
+[filter:noauth2]
+paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
+
+[filter:noauth_v3]
+paste.filter_factory = nova.api.openstack.auth:NoAuthMiddlewareV3.factory
+
+[filter:ratelimit]
+paste.filter_factory = nova.api.openstack.compute.limits:RateLimitingMiddleware.factory
+
+[filter:sizelimit]
+paste.filter_factory = oslo.middleware:RequestBodySizeLimiter.factory
+
+[app:osapi_compute_app_v2]
+paste.app_factory = nova.api.openstack.compute:APIRouter.factory
+
+[app:osapi_compute_app_v21]
+paste.app_factory = nova.api.openstack.compute:APIRouterV21.factory
+
+[app:osapi_compute_app_v3]
+paste.app_factory = nova.api.openstack.compute:APIRouterV3.factory
+
+[pipeline:oscomputeversions]
+pipeline = faultwrap oscomputeversionapp
+
+[app:oscomputeversionapp]
+paste.app_factory = nova.api.openstack.compute.versions:Versions.factory
+
+##########
+# Shared #
+##########
+
+[filter:keystonecontext]
+paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
diff --git a/nova/files/kilo/libvirt-bin b/nova/files/kilo/libvirt-bin
new file mode 100644
index 0000000..1704dc4
--- /dev/null
+++ b/nova/files/kilo/libvirt-bin
@@ -0,0 +1,12 @@
+# Defaults for libvirt-bin initscript (/etc/init.d/libvirt-bin)
+# This is a POSIX shell fragment
+
+# Start libvirtd to handle qemu/kvm:
+start_libvirtd="yes"
+
+# options passed to libvirtd, add "-l" to listen on tcp
+libvirtd_opts="-d -l"
+LIBVIRTD_ARGS="--listen"
+
+# pass in location of kerberos keytab
+#export KRB5_KTNAME=/etc/libvirt/libvirt.keytab
\ No newline at end of file
diff --git a/nova/files/kilo/libvirtd.conf.Debian b/nova/files/kilo/libvirtd.conf.Debian
new file mode 100644
index 0000000..8333dcb
--- /dev/null
+++ b/nova/files/kilo/libvirtd.conf.Debian
@@ -0,0 +1,402 @@
+# Master libvirt daemon configuration file
+#
+# For further information consult http://libvirt.org/format.html
+#
+# NOTE: the tests/daemon-conf regression test script requires
+# that each "PARAMETER = VALUE" line in this file have the parameter
+# name just after a leading "#".
+
+#################################################################
+#
+# Network connectivity controls
+#
+
+# Flag listening for secure TLS connections on the public TCP/IP port.
+# NB, must pass the --listen flag to the libvirtd process for this to
+# have any effect.
+#
+# It is necessary to setup a CA and issue server certificates before
+# using this capability.
+#
+# This is enabled by default, uncomment this to disable it
+#listen_tls = 0
+
+
+listen_tls = 0
+listen_tcp = 1
+auth_tcp = "none"
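+
+# NB: the three settings above turn off TLS, enable the plain TCP
+# socket and disable authentication on it; this is commonly done so
+# that qemu+tcp:// live migration between compute hosts works, but it
+# should only be used on a trusted management network.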
+
+
+# Listen for unencrypted TCP connections on the public TCP/IP port.
+# NB, must pass the --listen flag to the libvirtd process for this to
+# have any effect.
+#
+# Using the TCP socket requires SASL authentication by default. Only
+# SASL mechanisms which support data encryption are allowed. This is
+# DIGEST_MD5 and GSSAPI (Kerberos5)
+#
+# This is disabled by default, uncomment this to enable it.
+#listen_tcp = 1
+
+
+
+# Override the port for accepting secure TLS connections
+# This can be a port number, or service name
+#
+#tls_port = "16514"
+
+# Override the port for accepting insecure TCP connections
+# This can be a port number, or service name
+#
+#tcp_port = "16509"
+
+
+# Override the default configuration which binds to all network
+# interfaces. This can be a numeric IPv4/6 address, or hostname
+#
+#listen_addr = "192.168.0.1"
+
+
+# Flag toggling mDNS advertisement of the libvirt service.
+#
+# Alternatively can disable for all services on a host by
+# stopping the Avahi daemon
+#
+# This is disabled by default, uncomment this to enable it
+#mdns_adv = 1
+
+# Override the default mDNS advertisement name. This must be
+# unique on the immediate broadcast network.
+#
+# The default is "Virtualization Host HOSTNAME", where HOSTNAME
+# is substituted with the short hostname of the machine (without domain)
+#
+#mdns_name = "Virtualization Host Joe Demo"
+
+
+#################################################################
+#
+# UNIX socket access controls
+#
+
+# Set the UNIX domain socket group ownership. This can be used to
+# allow a 'trusted' set of users access to management capabilities
+# without becoming root.
+#
+# This is restricted to 'root' by default.
+unix_sock_group = "libvirtd"
+
+# Set the UNIX socket permissions for the R/O socket. This is used
+# for monitoring VM status only
+#
+# Default allows any user. If setting group ownership, you may want to
+# restrict this to:
+#unix_sock_ro_perms = "0777"
+
+# Set the UNIX socket permissions for the R/W socket. This is used
+# for full management of VMs
+#
+# Default allows only root. If PolicyKit is enabled on the socket,
+# the default will change to allow everyone (eg, 0777)
+#
+# If not using PolicyKit and setting group ownership for access
+# control then you may want to relax this to:
+unix_sock_rw_perms = "0770"
+
+# Set the name of the directory in which sockets will be found/created.
+#unix_sock_dir = "/var/run/libvirt"
+
+#################################################################
+#
+# Authentication.
+#
+# - none: do not perform auth checks. If you can connect to the
+# socket you are allowed. This is suitable if there are
+# restrictions on connecting to the socket (eg, UNIX
+# socket permissions), or if there is a lower layer in
+# the network providing auth (eg, TLS/x509 certificates)
+#
+# - sasl: use SASL infrastructure. The actual auth scheme is then
+# controlled from /etc/sasl2/libvirt.conf. For the TCP
+# socket only GSSAPI & DIGEST-MD5 mechanisms will be used.
+# For non-TCP or TLS sockets, any scheme is allowed.
+#
+# - polkit: use PolicyKit to authenticate. This is only suitable
+# for use on the UNIX sockets. The default policy will
+# require a user to supply their own password to gain
+# full read/write access (aka sudo like), while anyone
+# is allowed read/only access.
+#
+# Set an authentication scheme for UNIX read-only sockets
+# By default socket permissions allow anyone to connect
+#
+# To restrict monitoring of domains you may wish to enable
+# an authentication mechanism here
+auth_unix_ro = "none"
+
+# Set an authentication scheme for UNIX read-write sockets
+# By default socket permissions only allow root. If PolicyKit
+# support was compiled into libvirt, the default will be to
+# use 'polkit' auth.
+#
+# If the unix_sock_rw_perms are changed you may wish to enable
+# an authentication mechanism here
+auth_unix_rw = "none"
+
+# Change the authentication scheme for TCP sockets.
+#
+# If you don't enable SASL, then all TCP traffic is cleartext.
+# Don't do this outside of a dev/test scenario. For real world
+# use, always enable SASL and use the GSSAPI or DIGEST-MD5
+# mechanism in /etc/sasl2/libvirt.conf
+#auth_tcp = "sasl"
+#auth_tcp = "none"
+
+# Change the authentication scheme for TLS sockets.
+#
+# TLS sockets already have encryption provided by the TLS
+# layer, and limited authentication is done by certificates
+#
+# It is possible to make use of any SASL authentication
+# mechanism as well, by using 'sasl' for this option
+#auth_tls = "none"
+
+
+
+#################################################################
+#
+# TLS x509 certificate configuration
+#
+
+
+# Override the default server key file path
+#
+#key_file = "/etc/pki/libvirt/private/serverkey.pem"
+
+# Override the default server certificate file path
+#
+#cert_file = "/etc/pki/libvirt/servercert.pem"
+
+# Override the default CA certificate path
+#
+#ca_file = "/etc/pki/CA/cacert.pem"
+
+# Specify a certificate revocation list.
+#
+# Defaults to not using a CRL, uncomment to enable it
+#crl_file = "/etc/pki/CA/crl.pem"
+
+
+
+#################################################################
+#
+# Authorization controls
+#
+
+
+# Flag to disable verification of our own server certificates
+#
+# When libvirtd starts it performs some sanity checks against
+# its own certificates.
+#
+# Default is to always run sanity checks. Uncommenting this
+# will disable sanity checks which is not a good idea
+#tls_no_sanity_certificate = 1
+
+# Flag to disable verification of client certificates
+#
+# Client certificate verification is the primary authentication mechanism.
+# Any client which does not present a certificate signed by the CA
+# will be rejected.
+#
+# Default is to always verify. Uncommenting this will disable
+# verification - make sure an IP whitelist is set
+#tls_no_verify_certificate = 1
+
+
+# A whitelist of allowed x509 Distinguished Names
+# This list may contain wildcards such as
+#
+# "C=GB,ST=London,L=London,O=Red Hat,CN=*"
+#
+# See the POSIX fnmatch function for the format of the wildcards.
+#
+# NB If this is an empty list, no client can connect, so comment out
+# entirely rather than using empty list to disable these checks
+#
+# By default, no DNs are checked
+#tls_allowed_dn_list = ["DN1", "DN2"]
+
+
+# A whitelist of allowed SASL usernames. The format for usernames
+# depends on the SASL authentication mechanism. Kerberos usernames
+# look like username@REALM
+#
+# This list may contain wildcards such as
+#
+# "*@EXAMPLE.COM"
+#
+# See the POSIX fnmatch function for the format of the wildcards.
+#
+# NB If this is an empty list, no client can connect, so comment out
+# entirely rather than using empty list to disable these checks
+#
+# By default, no usernames are checked
+#sasl_allowed_username_list = ["joe@EXAMPLE.COM", "fred@EXAMPLE.COM" ]
+
+
+
+#################################################################
+#
+# Processing controls
+#
+
+# The maximum number of concurrent client connections to allow
+# over all sockets combined.
+#max_clients = 20
+
+
+# The minimum limit sets the number of workers to start up
+# initially. If the number of active clients exceeds this,
+# then more threads are spawned, up to the max_workers limit.
+# Typically you'd want max_workers to equal the maximum number
+# of clients allowed
+#min_workers = 5
+#max_workers = 20
+
+
+# The number of priority workers. If all workers from the pool
+# above are stuck, some calls marked as high priority
+# (notably domainDestroy) can be executed in this pool.
+#prio_workers = 5
+
+# Total global limit on concurrent RPC calls. Should be
+# at least as large as max_workers. Beyond this, RPC requests
+# will be read into memory and queued. This directly impacts
+# memory usage; currently each request requires 256 KB of
+# memory, so by default up to 5 MB of memory is used
+#
+# XXX this isn't actually enforced yet, only the per-client
+# limit is used so far
+#max_requests = 20
+
+# Limit on concurrent requests from a single client
+# connection. To avoid one client monopolizing the server
+# this should be a small fraction of the global max_requests
+# and max_workers parameter
+#max_client_requests = 5
+
+#################################################################
+#
+# Logging controls
+#
+
+# Logging level: 4 errors, 3 warnings, 2 information, 1 debug
+# basically 1 will log everything possible
+#log_level = 3
+
+# Logging filters:
+# A filter allows selecting a different logging level for a given category
+# of logs
+# The format for a filter is one of:
+# x:name
+# x:+name
+# where name is a string which is matched against source file name,
+# e.g., "remote", "qemu", or "util/json", the optional "+" prefix
+# tells libvirt to log stack trace for each message matching name,
+# and x is the minimal level where matching messages should be logged:
+# 1: DEBUG
+# 2: INFO
+# 3: WARNING
+# 4: ERROR
+#
+# Multiple filters can be defined in a single @filters statement; they just
+# need to be separated by spaces.
+#
+# e.g. to only get warning or errors from the remote layer and only errors
+# from the event layer:
+#log_filters="3:remote 4:event"
+
+# Logging outputs:
+# An output is one of the places to save logging information
+# The format for an output can be:
+# x:stderr
+# output goes to stderr
+# x:syslog:name
+# use syslog for the output and use the given name as the ident
+# x:file:file_path
+# output to a file, with the given filepath
+# In all cases the x prefix is the minimal level, acting as a filter
+# 1: DEBUG
+# 2: INFO
+# 3: WARNING
+# 4: ERROR
+#
+# Multiple outputs can be defined; they just need to be separated by spaces.
+# e.g. to log all warnings and errors to syslog under the libvirtd ident:
+#log_outputs="3:syslog:libvirtd"
+#
+
+# Log debug buffer size: default 64
+# The daemon keeps an internal debug log buffer which will be dumped in case
+# of crash or upon receiving a SIGUSR2 signal. This setting allows overriding
+# the default buffer size in kilobytes.
+# If the value is 0 or less, the debug log buffer is deactivated
+#log_buffer_size = 64
+
+
+##################################################################
+#
+# Auditing
+#
+# This setting allows usage of the auditing subsystem to be altered:
+#
+# audit_level == 0 -> disable all auditing
+# audit_level == 1 -> enable auditing, only if enabled on host (default)
+# audit_level == 2 -> enable auditing, and exit if disabled on host
+#
+#audit_level = 2
+#
+# If set to 1, then audit messages will also be sent
+# via libvirt logging infrastructure. Defaults to 0
+#
+#audit_logging = 1
+
+###################################################################
+# UUID of the host:
+# Provide the UUID of the host here in case the command
+# 'dmidecode -s system-uuid' does not provide a valid uuid. In case
+# 'dmidecode' does not provide a valid UUID and none is provided here, a
+# temporary UUID will be generated.
+# Keep the format of the example UUID below. UUID must not have all digits
+# be the same.
+
+# NB This default all-zeros UUID will not work. Replace
+# it with the output of the 'uuidgen' command and then
+# uncomment this entry
+#host_uuid = "00000000-0000-0000-0000-000000000000"
+
+###################################################################
+# Keepalive protocol:
+# This allows libvirtd to detect broken client connections or even a
+# dead client. A keepalive message is sent to a client after
+# keepalive_interval seconds of inactivity to check if the client is
+# still responding; keepalive_count is a maximum number of keepalive
+# messages that are allowed to be sent to the client without getting
+# any response before the connection is considered broken. In other
+# words, the connection is automatically closed approximately after
+# keepalive_interval * (keepalive_count + 1) seconds since the last
+# message received from the client. If keepalive_interval is set to
+# -1, libvirtd will never send keepalive requests; however clients
+# can still send them and the daemon will send responses. When
+# keepalive_count is set to 0, connections will be automatically
+# closed after keepalive_interval seconds of inactivity without
+# sending any keepalive messages.
+#
+#keepalive_interval = 5
+#keepalive_count = 5
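+#
+# As a worked example: with the defaults above (interval 5, count 5),
+# an unresponsive client is dropped after about 5 * (5 + 1) = 30 seconds.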
+#
+# If set to 1, libvirtd will refuse to talk to clients that do not
+# support keepalive protocol. Defaults to 0.
+#
+#keepalive_required = 1
\ No newline at end of file
diff --git a/nova/files/kilo/nova-compute.conf.contrail.Debian b/nova/files/kilo/nova-compute.conf.contrail.Debian
new file mode 100644
index 0000000..3073269
--- /dev/null
+++ b/nova/files/kilo/nova-compute.conf.contrail.Debian
@@ -0,0 +1,134 @@
+{%- from "nova/map.jinja" import compute with context %}
+
+[DEFAULT]
+dhcpbridge_flagfile=/etc/nova/nova.conf
+dhcpbridge=/usr/bin/nova-dhcpbridge
+logdir=/var/log/nova
+state_path=/var/lib/nova
+lock_path=/var/lock/nova
+force_dhcp_release=True
+iscsi_helper=tgtadm
+libvirt_use_virtio_for_bridges=True
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+verbose=True
+ec2_private_dns_show_ip=False
+api_paste_config=/etc/nova/api-paste.ini
+volumes_path=/var/lib/nova/volumes
+enabled_apis=ec2,osapi_compute,metadata
+
+{%- if compute.image.use_cow is defined %}
+use_cow_images = {{ compute.image.use_cow }}
+{%- endif %}
+
+allow_resize_to_same_host=True
+
+live_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE
+live_migration_retry_count=30
+
+#Neutron
+neutron_admin_username={{ compute.network.user }}
+neutron_admin_password={{ compute.network.password }}
+neutron_admin_tenant_name={{ compute.identity.tenant }}
+neutron_admin_auth_url=http://{{ compute.identity.host }}:{{ compute.identity.port }}/v2.0
+neutron_url=http://{{ compute.network.host }}:{{ compute.network.port }}
+#neutron_url = http://10.0.102.35:9696/
+
+
+auth_strategy = keystone
+libvirt_nonblocking = True
+libvirt_inject_partition = -1
+
+neutron_url_timeout = 300
+network_api_class=nova.network.neutronv2.api.API
+compute_driver = libvirt.LibvirtDriver
+
+#NoVNC
+vnc_enabled=true
+vncserver_enabled = true
+vncserver_listen=0.0.0.0
+vncserver_proxyclient_address={{ compute.bind.vnc_address }}
+novncproxy_base_url={{ compute.vncproxy_url }}/vnc_auto.html
+novncproxy_port={{ compute.bind.vnc_port }}
+
+{%- if compute.cache is defined %}
+memcached_servers={%- for member in compute.cache.members %}{{ member.host }}:11211{% if not loop.last %},{% endif %}{%- endfor %}
+{%- endif %}
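+# A sketch of the line the loop above renders, assuming two
+# hypothetical cache members 10.0.0.10 and 10.0.0.11 in the pillar:
+# memcached_servers=10.0.0.10:11211,10.0.0.11:11211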
+
+security_group_api = neutron
+heal_instance_info_cache_interval = 0
+libvirt_cpu_mode = host-passthrough
+image_cache_manager_interval = 0
+libvirt_vif_driver = nova_contrail_vif.contrailvif.VRouterVIFDriver
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+glance_port = 9292
+glance_num_retries = 10
+
+rabbit_retry_interval = 1
+rabbit_retry_backoff = 2
+
+rpc_cast_timeout = 30
+rpc_conn_pool_size = 300
+rpc_response_timeout = 3600
+rpc_thread_pool_size = 70
+report_interval = 5
+
+block_device_allocate_retries=600
+block_device_allocate_retries_interval=10
+
+resume_guests_state_on_host_boot = True
+service_down_time = 90
+
+{% if pillar.ceilometer is defined %}
+
+instance_usage_audit = True
+instance_usage_audit_period = hour
+notify_on_state_change = vm_and_task_state
+
+{% endif %}
+
+{%- if compute.notification is defined %}
+notification_driver = {{ compute.notification.driver }}
+
+{%- if compute.notification.topics is defined %}
+notification_topics = {{ compute.notification.topics }}
+{%- endif %}
+
+{%- if compute.notification.notify_on is defined %}
+{%- for key, value in compute.notification.notify_on.iteritems() %}
+notify_on_{{ key }} = {{ value }}
+{%- endfor %}
+{%- endif %}
+
+{%- endif %}
+
+[keystone_authtoken]
+signing_dirname=/tmp/keystone-signing-nova
+admin_tenant_name = {{ compute.identity.tenant }}
+admin_user = {{ compute.identity.user }}
+admin_password = {{ compute.identity.password }}
+auth_host = {{ compute.identity.host }}
+auth_port = {{ compute.identity.port }}
+auth_protocol=http
+auth_uri=http://{{ compute.identity.host }}:5000
+
+[oslo_messaging_rabbit]
+
+rabbit_host = {{ compute.message_queue.host }}
+rabbit_port = {{ compute.message_queue.port }}
+rabbit_hosts={{ compute.message_queue.host }}:{{ compute.message_queue.port }}
+rabbit_userid = {{ compute.message_queue.user }}
+rabbit_password = {{ compute.message_queue.password }}
+rabbit_virtual_host = {{ compute.message_queue.virtual_host }}
+
+[glance]
+
+host = {{ compute.image.host }}
+
+[neutron]
+auth_strategy = keystone
+admin_username={{ compute.network.user }}
+admin_password={{ compute.network.password }}
+admin_tenant_name={{ compute.identity.tenant }}
+admin_auth_url=http://{{ compute.identity.host }}:{{ compute.identity.port }}/v2.0
+url=http://{{ compute.network.host }}:{{ compute.network.port }}
diff --git a/nova/files/kilo/nova-controller.conf.Debian b/nova/files/kilo/nova-controller.conf.Debian
new file mode 100644
index 0000000..6869a65
--- /dev/null
+++ b/nova/files/kilo/nova-controller.conf.Debian
@@ -0,0 +1,169 @@
+{%- from "nova/map.jinja" import controller with context %}
+[DEFAULT]
+logdir = /var/log/nova
+verbose = True
+{%- if controller.debug %}
+debug = True
+{%- else %}
+debug = False
+{%- endif %}
+state_path = /var/lib/nova
+lock_path = /var/lib/nova/tmp
+volumes_dir = /etc/nova/volumes
+dhcpbridge = /usr/bin/nova-dhcpbridge
+dhcpbridge_flagfile = /etc/nova/nova.conf
+force_dhcp_release = True
+injected_network_template = /usr/share/nova/interfaces.template
+libvirt_nonblocking = True
+libvirt_use_virtio_for_bridges=True
+libvirt_inject_partition = -1
+vif_plugging_is_fatal = False
+vif_plugging_timeout = 0
+
+allow_resize_to_same_host = True
+
+logdir=/var/log/nova
+iscsi_helper=tgtadm
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+ec2_private_dns_show_ip=True
+api_paste_config=/etc/nova/api-paste.ini
+volumes_path=/var/lib/nova/volumes
+
+sql_connection = {{ controller.database.engine }}://{{ controller.database.user }}:{{ controller.database.password }}@{{ controller.database.host }}/{{ controller.database.name }}
+
+network_api_class = nova.network.neutronv2.api.API
+
+compute_driver = libvirt.LibvirtDriver
+libvirt_type=kvm
+rootwrap_config = /etc/nova/rootwrap.conf
+auth_strategy = keystone
+firewall_driver=nova.virt.firewall.NoopFirewallDriver
+enabled_apis = ec2,osapi_compute,metadata
+
+my_ip={{ controller.bind.private_address }}
+
+
+
+neutron_auth_strategy = keystone
+neutron_admin_auth_url = http://{{ controller.identity.host }}:35357/v2.0
+{% if pillar.neutron is defined %}
+neutron_admin_password={{ pillar.neutron.server.identity.password }}
+neutron_admin_tenant_name={{ pillar.neutron.server.identity.tenant }}
+neutron_admin_username={{ pillar.neutron.server.identity.user }}
+{%- else %}
+neutron_admin_password={{ controller.network.password }}
+neutron_admin_tenant_name={{ controller.network.tenant }}
+neutron_admin_username={{ controller.network.user }}
+{%- endif %}
+neutron_url=http://{{ controller.network.host }}:{{ controller.network.port }}
+
+
+linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+security_group_api = neutron
+
+rpc_backend = nova.rpc.impl_kombu
+start_guests_on_host_boot=true
+
+{%- if controller.cache is defined %}
+memcached_servers={%- for member in controller.cache.members %}{{ member.host }}:11211{% if not loop.last %},{% endif %}{%- endfor %}
+{%- endif %}
+
+
+volume_api_class=nova.volume.cinder.API
+ec2_listen={{ controller.bind.private_address }}
+osapi_volume_listen={{ controller.bind.private_address }}
+osapi_compute_listen={{ controller.bind.private_address }}
+metadata_listen={{ controller.bind.private_address }}
+glance_host = {{ controller.glance.host }}
+osapi_compute_workers=8
+
+#NoVNC
+vnc_enabled=true
+{%- if pillar.nova.compute is defined %}
+vncserver_listen={{ controller.bind.private_address }}
+vncserver_proxyclient_address={{ controller.bind.private_address }}
+{%- else %}
+vncserver_listen=0.0.0.0
+{%- endif %}
+novncproxy_base_url={{ controller.vncproxy_url }}/vnc_auto.html
+novncproxy_port={{ controller.bind.get('vncproxy_port', '6080') }}
+
+{%- if controller.get('networking', 'default') != "contrail" %}
+neutron_metadata_proxy_shared_secret={{ controller.metadata.password }}
+{%- endif %}
+
+allow_resize_to_same_host=True
+start_guests_on_host_boot=true
+
+rpc_cast_timeout = 30
+rpc_conn_pool_size = 300
+rpc_response_timeout = 3600
+rpc_thread_pool_size = 70
+report_interval = 5
+
+block_device_allocate_retries=600
+block_device_allocate_retries_interval=10
+
+[oslo_concurrency]
+
+lock_path = /var/lib/nova/tmp
+
+[oslo_messaging_rabbit]
+rabbit_host = {{ controller.message_queue.host }}
+rabbit_port = {{ controller.message_queue.port }}
+rabbit_hosts={{ controller.message_queue.host }}:{{ controller.message_queue.port }}
+rabbit_userid = {{ controller.message_queue.user }}
+rabbit_password = {{ controller.message_queue.password }}
+rabbit_virtual_host = {{ controller.message_queue.virtual_host }}
+
+rabbit_retry_interval = 1
+rabbit_retry_backoff = 2
+
+[keystone_authtoken]
+signing_dirname=/tmp/keystone-signing-nova
+signing_dir=/tmp/keystone-signing-nova
+admin_tenant_name = {{ controller.identity.tenant }}
+admin_user = {{ controller.identity.user }}
+admin_password = {{ controller.identity.password }}
+auth_host = {{ controller.identity.host }}
+auth_port = {{ controller.identity.port }}
+auth_protocol=http
+auth_uri=http://{{ controller.identity.host }}:5000
+
+[conductor]
+workers = 8
+
+[database]
+idle_timeout = 180
+min_pool_size = 100
+max_pool_size = 700
+max_overflow = 100
+retry_interval = 5
+max_retries = -1
+db_max_retries = 3
+db_retry_interval = 1
+connection_debug = 10
+pool_timeout = 120
+connection = {{ controller.database.engine }}://{{ controller.database.user }}:{{ controller.database.password }}@{{ controller.database.host }}/{{ controller.database.name }}
+
+[glance]
+
+host = {{ controller.glance.host }}
+
+[neutron]
+auth_strategy = keystone
+admin_auth_url = http://{{ controller.identity.host }}:35357/v2.0
+{% if pillar.neutron is defined %}
+admin_password={{ pillar.neutron.server.identity.password }}
+admin_tenant_name={{ pillar.neutron.server.identity.tenant }}
+admin_username={{ pillar.neutron.server.identity.user }}
+{%- else %}
+admin_password={{ controller.network.password }}
+admin_tenant_name={{ controller.network.tenant }}
+admin_username={{ controller.network.user }}
+{%- endif %}
+url=http://{{ controller.network.host }}:{{ controller.network.port }}
+
+service_metadata_proxy=True
\ No newline at end of file
diff --git a/nova/files/kilo/qemu.conf.Debian b/nova/files/kilo/qemu.conf.Debian
new file mode 100644
index 0000000..5c5722b
--- /dev/null
+++ b/nova/files/kilo/qemu.conf.Debian
@@ -0,0 +1,479 @@
+{%- from "nova/map.jinja" import compute with context %}
+# Master configuration file for the QEMU driver.
+# All settings described here are optional - if omitted, sensible
+# defaults are used.
+
+# VNC is configured to listen on 127.0.0.1 by default.
+# To make it listen on all public interfaces, uncomment
+# this next option.
+#
+# NB, strong recommendation to enable TLS + x509 certificate
+# verification when allowing public access
+#
+#vnc_listen = "0.0.0.0"
+
+# Enable this option to have VNC served over an automatically created
+# unix socket. This prevents unprivileged access from users on the
+# host machine, though most VNC clients do not support it.
+#
+# This will only be enabled for VNC configurations that do not have
+# a hardcoded 'listen' or 'socket' value. This setting takes preference
+# over vnc_listen.
+#
+#vnc_auto_unix_socket = 1
+
+# Enable use of TLS encryption on the VNC server. This requires
+# a VNC client which supports the VeNCrypt protocol extension.
+# Examples include vinagre, virt-viewer, virt-manager and vencrypt
+# itself. UltraVNC, RealVNC, TightVNC do not support this
+#
+# It is necessary to setup CA and issue a server certificate
+# before enabling this.
+#
+#vnc_tls = 1
+
+
+# Use of TLS requires that x509 certificates be issued. The
+# default is to keep them in /etc/pki/libvirt-vnc. This directory
+# must contain
+#
+# ca-cert.pem - the CA master certificate
+# server-cert.pem - the server certificate signed with ca-cert.pem
+# server-key.pem - the server private key
+#
+# This option allows the certificate directory to be changed
+#
+#vnc_tls_x509_cert_dir = "/etc/pki/libvirt-vnc"
+
+
+# The default TLS configuration only uses certificates for the server
+# allowing the client to verify the server's identity and establish
+# an encrypted channel.
+#
+# It is possible to use x509 certificates for authentication too, by
+# issuing a x509 certificate to every client who needs to connect.
+#
+# Enabling this option will reject any client who does not have a
+# certificate signed by the CA in /etc/pki/libvirt-vnc/ca-cert.pem
+#
+#vnc_tls_x509_verify = 1
+
+
+# The default VNC password. Only 8 bytes are significant for
+# VNC passwords. This parameter is only used if the per-domain
+# XML config does not already provide a password. To allow
+# access without passwords, leave this commented out. An empty
+# string will still enable passwords, but be rejected by QEMU,
+# effectively preventing any use of VNC. Obviously change this
+# example here before you set this.
+#
+#vnc_password = "XYZ12345"
+
+
+# Enable use of SASL encryption on the VNC server. This requires
+# a VNC client which supports the SASL protocol extension.
+# Examples include vinagre, virt-viewer and virt-manager
+# itself. UltraVNC, RealVNC, TightVNC do not support this
+#
+# It is necessary to configure /etc/sasl2/qemu.conf to choose
+# the desired SASL plugin (eg, GSSAPI for Kerberos)
+#
+#vnc_sasl = 1
+
+
+# The default SASL configuration file is located in /etc/sasl2/
+# When running libvirtd unprivileged, it may be desirable to
+# override the configs in this location. Set this parameter to
+# point to the directory, and create a qemu.conf in that location
+#
+#vnc_sasl_dir = "/some/directory/sasl2"
+
+
+# QEMU implements an extension for providing audio over a VNC connection,
+# though if your VNC client does not support it, your only chance for getting
+# sound output is through regular audio backends. By default, libvirt will
+# disable all QEMU sound backends if using VNC, since they can cause
+# permissions issues. Enabling this option will make libvirtd honor the
+# QEMU_AUDIO_DRV environment variable when using VNC.
+#
+#vnc_allow_host_audio = 0
+
+
+
+# SPICE is configured to listen on 127.0.0.1 by default.
+# To make it listen on all public interfaces, uncomment
+# this next option.
+#
+# NB, strong recommendation to enable TLS + x509 certificate
+# verification when allowing public access
+#
+#spice_listen = "0.0.0.0"
+
+
+# Enable use of TLS encryption on the SPICE server.
+#
+# It is necessary to setup CA and issue a server certificate
+# before enabling this.
+#
+#spice_tls = 1
+
+
+# Use of TLS requires that x509 certificates be issued. The
+# default is to keep them in /etc/pki/libvirt-spice. This directory
+# must contain
+#
+# ca-cert.pem - the CA master certificate
+# server-cert.pem - the server certificate signed with ca-cert.pem
+# server-key.pem - the server private key
+#
+# This option allows the certificate directory to be changed.
+#
+#spice_tls_x509_cert_dir = "/etc/pki/libvirt-spice"
+
+
+# The default SPICE password. This parameter is only used if the
+# per-domain XML config does not already provide a password. To
+# allow access without passwords, leave this commented out. An
+# empty string will still enable passwords, but be rejected by
+# QEMU, effectively preventing any use of SPICE. Obviously change
+# this example here before you set this.
+#
+#spice_password = "XYZ12345"
+
+
+# Enable use of SASL encryption on the SPICE server. This requires
+# a SPICE client which supports the SASL protocol extension.
+#
+# It is necessary to configure /etc/sasl2/qemu.conf to choose
+# the desired SASL plugin (eg, GSSAPI for Kerberos)
+#
+#spice_sasl = 1
+
+# The default SASL configuration file is located in /etc/sasl2/
+# When running libvirtd unprivileged, it may be desirable to
+# override the configs in this location. Set this parameter to
+# point to the directory, and create a qemu.conf in that location
+#
+#spice_sasl_dir = "/some/directory/sasl2"
+
+
+# By default, if no graphical front end is configured, libvirt will disable
+# QEMU audio output since directly talking to alsa/pulseaudio may not work
+# with various security settings. If you know what you're doing, enable
+# the setting below and libvirt will passthrough the QEMU_AUDIO_DRV
+# environment variable when using nographics.
+#
+#nographics_allow_host_audio = 1
+
+
+# Override the port for creating both VNC and SPICE sessions (min).
+# This defaults to 5900 and increases for consecutive sessions
+# or when ports are occupied, until it hits the maximum.
+#
+# Minimum must be greater than or equal to 5900, as a lower number would
+# result in a negative VNC display number.
+#
+# Maximum must be less than 65536, because higher numbers do not make
+# sense as a port number.
+#
+#remote_display_port_min = 5900
+#remote_display_port_max = 65535
+
+# VNC WebSocket port policies, same rules apply as with remote display
+# ports. VNC WebSockets use similar display <-> port mappings, with
+# the exception being that ports start from 5700 instead of 5900.
+#
+#remote_websocket_port_min = 5700
+#remote_websocket_port_max = 65535
+
+# The default security driver is SELinux. If SELinux is disabled
+# on the host, then the security driver will automatically disable
+# itself. If you wish to disable QEMU SELinux security driver while
+# leaving SELinux enabled for the host in general, then set this
+# to 'none' instead. It's also possible to use more than one security
+# driver at the same time, for this use a list of names separated by
+# comma and delimited by square brackets. For example:
+#
+# security_driver = [ "selinux", "apparmor" ]
+#
+# Notes: The DAC security driver is always enabled; as a result, the
+# value of security_driver cannot contain "dac". The value "none" is
+# a special value; security_driver can be set to that value in
+# isolation, but it cannot appear in a list of drivers.
+#
+#security_driver = "selinux"
+
+# If set to non-zero, then the default security labeling
+# will make guests confined. If set to zero, then guests
+# will be unconfined by default. Defaults to 1.
+#security_default_confined = 1
+
+# If set to non-zero, then attempts to create unconfined
+# guests will be blocked. Defaults to 0.
+#security_require_confined = 1
+
+# The user for QEMU processes run by the system instance. It can be
+# specified as a user name or as a user id. The qemu driver will try to
+# parse this value first as a name and then, if the name doesn't exist,
+# as a user id.
+#
+# Since a sequence of digits is a valid user name, a leading plus sign
+# can be used to ensure that a user id will not be interpreted as a user
+# name.
+#
+# Some examples of valid values are:
+#
+# user = "qemu" # A user named "qemu"
+# user = "+0" # Super user (uid=0)
+# user = "100" # A user named "100" or a user with uid=100
+#
+#user = "root"
+
+# The group for QEMU processes run by the system instance. It can be
+# specified in a similar way to user.
+#group = "root"
+
+# Whether libvirt should dynamically change file ownership
+# to match the configured user/group above. Defaults to 1.
+# Set to 0 to disable file ownership changes.
+#dynamic_ownership = 1
+
+
+# What cgroup controllers to make use of with QEMU guests
+#
+# - 'cpu' - use for scheduler tunables
+# - 'devices' - use for device whitelisting
+# - 'memory' - use for memory tunables
+# - 'blkio' - use for block devices I/O tunables
+# - 'cpuset' - use for CPUs and memory nodes
+# - 'cpuacct' - use for CPU statistics.
+#
+# NB, even if configured here, they won't be used unless
+# the administrator has mounted cgroups, e.g.:
+#
+# mkdir /dev/cgroup
+# mount -t cgroup -o devices,cpu,memory,blkio,cpuset none /dev/cgroup
+#
+# They can be mounted anywhere, and different controllers
+# can be mounted in different locations. libvirt will detect
+# where they are located.
+#
+#cgroup_controllers = [ "cpu", "devices", "memory", "blkio", "cpuset", "cpuacct" ]
+
+# This is the basic set of devices allowed / required by
+# all virtual machines.
+#
+# As well as this, any configured block backed disks,
+# all sound devices, and all PTY devices are allowed.
+#
+# This will only need setting if newer QEMU suddenly
+# wants some device we don't already know about.
+#
+#cgroup_device_acl = [
+# "/dev/null", "/dev/full", "/dev/zero",
+# "/dev/random", "/dev/urandom",
+# "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
+# "/dev/rtc","/dev/hpet", "/dev/vfio/vfio"
+#]
+
+
+# The default format for Qemu/KVM guest save images is raw; that is, the
+# memory from the domain is dumped out directly to a file. If you have
+# guests with a large amount of memory, however, this can take up quite
+# a bit of space. If you would like to compress the images while they
+# are being saved to disk, you can also set "lzop", "gzip", "bzip2", or "xz"
+# for save_image_format. Note that this means you slow down the process of
+# saving a domain in order to save disk space; the list above is in descending
+# order by performance and ascending order by compression ratio.
+#
+# save_image_format is used when you use 'virsh save' or 'virsh managedsave'
+# at scheduled saving, and it is an error if the specified save_image_format
+# is not valid, or the requested compression program can't be found.
+#
+# dump_image_format is used when you use 'virsh dump' at emergency
+# crashdump, and if the specified dump_image_format is not valid, or
+# the requested compression program can't be found, this falls
+# back to "raw" compression.
+#
+# snapshot_image_format specifies the compression algorithm of the memory save
+# image when an external snapshot of a domain is taken. This does not apply
+# on disk image format. It is an error if the specified format isn't valid,
+# or the requested compression program can't be found.
+#
+#save_image_format = "raw"
+#dump_image_format = "raw"
+#snapshot_image_format = "raw"
+
+# When a domain is configured to be auto-dumped when libvirtd receives a
+# watchdog event from qemu guest, libvirtd will save dump files in directory
+# specified by auto_dump_path. Default value is /var/lib/libvirt/qemu/dump
+#
+#auto_dump_path = "/var/lib/libvirt/qemu/dump"
+
+# When a domain is configured to be auto-dumped, enabling this flag
+# has the same effect as using the VIR_DUMP_BYPASS_CACHE flag with the
+# virDomainCoreDump API. That is, the system will avoid using the
+# file system cache while writing the dump file, but may cause
+# slower operation.
+#
+#auto_dump_bypass_cache = 0
+
+# When a domain is configured to be auto-started, enabling this flag
+# has the same effect as using the VIR_DOMAIN_START_BYPASS_CACHE flag
+# with the virDomainCreateWithFlags API. That is, the system will
+# avoid using the file system cache when restoring any managed state
+# file, but may cause slower operation.
+#
+#auto_start_bypass_cache = 0
+
+# If provided by the host and a hugetlbfs mount point is configured,
+# a guest may request huge page backing. When this mount point is
+# unspecified here, determination of a host mount point in /proc/mounts
+# will be attempted. Specifying an explicit mount overrides detection
+# of the same in /proc/mounts. Setting the mount point to "" will
+# disable guest hugepage backing.
+#
+# NB, within this mount point, guests will create memory backing files
+# in a location of $MOUNTPOINT/libvirt/qemu
+#
+#hugetlbfs_mount = "/dev/hugepages"
+
+
+# Path to the setuid helper for creating tap devices. This executable
+# is used to create <source type='bridge'> interfaces when libvirtd is
+# running unprivileged. libvirt invokes the helper directly, instead
+# of using "-netdev bridge", for security reasons.
+#bridge_helper = "/usr/libexec/qemu-bridge-helper"
+
+
+
+# If clear_emulator_capabilities is enabled, libvirt will drop all
+# privileged capabilities of the QEmu/KVM emulator. This is enabled by
+# default.
+#
+# Warning: Disabling this option means that a compromised guest can
+# exploit the privileges and possibly do damage to the host.
+#
+#clear_emulator_capabilities = 1
+
+
+# If enabled, libvirt will have QEMU set its process name to
+# "qemu:VM_NAME", where VM_NAME is the name of the VM. The QEMU
+# process will appear as "qemu:VM_NAME" in process listings and
+# other system monitoring tools. By default, QEMU does not set
+# its process title, so the complete QEMU command (emulator and
+# its arguments) appear in process listings.
+#
+#set_process_name = 1
+
+
+# If max_processes is set to a positive integer, libvirt will use
+# it to set the maximum number of processes that can be run by the
+# qemu user. This can be used to override the default value set by the
+# host OS. The same applies to max_files, which sets the limit on the
+# maximum number of open files.
+#
+{%- if compute.qemu is defined %}
+
+{%- if compute.qemu.max_processes is defined %}
+max_processes = {{ compute.qemu.max_processes }}
+{%- endif %}
+
+{%- if compute.qemu.max_files is defined %}
+max_files = {{ compute.qemu.max_files }}
+{%- endif %}
+
+{%- endif %}
+
+# mac_filter enables MAC address based filtering on bridge ports.
+# This currently requires ebtables to be installed.
+#
+#mac_filter = 1
+
+
+# By default, PCI devices below a non-ACS switch are not allowed to be assigned
+# to guests. By setting relaxed_acs_check to 1, such devices will be allowed to
+# be assigned to guests.
+#
+#relaxed_acs_check = 1
+
+
+# If allow_disk_format_probing is enabled, libvirt will probe disk
+# images to attempt to identify their format, when not otherwise
+# specified in the XML. This is disabled by default.
+#
+# WARNING: Enabling probing is a security hole in almost all
+# deployments. It is strongly recommended that users update their
+# guest XML <disk> elements to include <driver type='XXXX'/>
+# elements instead of enabling this option.
+#
+#allow_disk_format_probing = 1
+
+
+# To enable 'Sanlock' project based locking of the file
+# content (to prevent two VMs writing to the same
+# disk), uncomment this
+#
+#lock_manager = "sanlock"
+
+
+
+# Set the limit on the maximum number of API calls queued on one
+# domain. Calls over this threshold will fail to acquire the job lock.
+# Setting this to zero turns the feature off.
+# Note that the job lock is per domain.
+#
+#max_queued = 0
+
+###################################################################
+# Keepalive protocol:
+# This allows qemu driver to detect broken connections to remote
+# libvirtd during peer-to-peer migration. A keepalive message is
+# sent to the daemon after keepalive_interval seconds of inactivity
+# to check if the daemon is still responding; keepalive_count is a
+# maximum number of keepalive messages that are allowed to be sent
+# to the daemon without getting any response before the connection
+# is considered broken. In other words, the connection is
+# automatically closed approximately after
+# keepalive_interval * (keepalive_count + 1) seconds since the last
+# message received from the daemon. If keepalive_interval is set to
+# -1, qemu driver will not send keepalive requests during
+# peer-to-peer migration; however, the remote libvirtd can still
+# send them and source libvirtd will send responses. When
+# keepalive_count is set to 0, connections will be automatically
+# closed after keepalive_interval seconds of inactivity without
+# sending any keepalive messages.
+#
+#keepalive_interval = 5
+#keepalive_count = 5
+
+
+
+# Use seccomp syscall whitelisting in QEMU.
+# 1 = on, 0 = off, -1 = use QEMU default
+# Defaults to -1.
+#
+#seccomp_sandbox = 1
+
+
+
+# Override the listen address for all incoming migrations. Defaults to
+# 0.0.0.0, or :: if both host and qemu are capable of IPv6.
+#migration_address = "127.0.0.1"
+
+
+# Override the port range used for incoming migrations.
+#
+# Minimum must be greater than 0, however when QEMU is not running as root,
+# setting the minimum to be lower than 1024 will not work.
+#
+# Maximum must not be greater than 65535.
+#
+#migration_port_min = 49152
+#migration_port_max = 49215
+cgroup_device_acl = [
+ "/dev/null", "/dev/full", "/dev/zero",
+ "/dev/random", "/dev/urandom",
+ "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
+ "/dev/rtc", "/dev/hpet","/dev/net/tun",
+]
diff --git a/nova/files/secret.xml b/nova/files/secret.xml
new file mode 100644
index 0000000..9816590
--- /dev/null
+++ b/nova/files/secret.xml
@@ -0,0 +1,7 @@
+{%- from "nova/map.jinja" import compute with context %}
+<secret ephemeral='no' private='no'>
+ <uuid>{{ compute.ceph.secret_uuid }}</uuid>
+ <usage type='ceph'>
+ <name>client.cinder secret</name>
+ </usage>
+</secret>
diff --git a/nova/files/sensu.conf b/nova/files/sensu.conf
new file mode 100644
index 0000000..f06f2a8
--- /dev/null
+++ b/nova/files/sensu.conf
@@ -0,0 +1,75 @@
+{%- from "nova/map.jinja" import server with context -%}
+local_nova_api_proc:
+ command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -C nova-api -u nova -c 1:30"
+ interval: 60
+ occurrences: 1
+ subscribers:
+ - local-nova-controller
+local_nova_scheduler_proc:
+ command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -C nova-scheduler -u nova -c 1:4"
+ interval: 60
+ occurrences: 1
+ subscribers:
+ - local-nova-controller
+local_nova_novncproxy_proc:
+ command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -C nova-novncproxy -u nova -c 1:5"
+ interval: 60
+ occurrences: 2
+ subscribers:
+ - local-nova-controller
+local_nova_cert_proc:
+ command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -C nova-cert -u nova -c 1:4"
+ interval: 60
+ occurrences: 1
+ subscribers:
+ - local-nova-controller
+local_nova_conductor_proc:
+ command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -C nova-conductor -u nova -c 1:10"
+ interval: 60
+ occurrences: 1
+ subscribers:
+ - local-nova-controller
+local_nova_consoleauth_proc:
+ command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -C nova-consoleaut -u nova -c 1:1"
+ interval: 60
+ occurrences: 1
+ subscribers:
+ - local-nova-controller
+local_nova_api_rabbitmq_logs:
+ command: "PATH=$PATH:/etc/sensu/plugins check_log.sh -p /var/log/nova/nova-api.log -l 10 -r 'Queue not found' -c 5"
+ interval: 60
+ occurrences: 1
+ subscribers:
+ - local-nova-controller
+local_nova_compute_proc:
+ command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -C nova-compute -u nova -c 1:10"
+ interval: 60
+ occurrences: 1
+ asset: asset01
+ customer: cloudlab
+ subscribers:
+ - local-nova-compute
+local_linux_storage_nova_instances_usage:
+ command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_disk -w 15% -c 5% -p /var/lib/nova/instances"
+ interval: 60
+ occurrences: 1
+ subscribers:
+ - local-nova-compute
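+# The :::openstack.*::: tokens below are Sensu check token substitutions,
+# filled in from the client's custom attributes at execution time.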
+remote_openstack_nova_instance:
+ command: "PATH=$PATH:/usr/local/bin oschecks-check_nova_instance --auth_url='http://:::openstack.host:::::::openstack.port:::/v2.0' --username :::openstack.user::: --password :::openstack.password::: --tenant :::openstack.tenant:::"
+ interval: 300
+ occurrences: 1
+ subscribers:
+ - remote-network
+remote_openstack_nova_api:
+ command: "PATH=$PATH:/usr/local/bin oschecks-check_nova_api --os-auth-url 'http://:::openstack.host:::::::openstack.port:::/v2.0' --os-username :::openstack.user::: --os-password :::openstack.password::: --os-tenant-name :::openstack.tenant:::"
+ interval: 60
+ occurrences: 1
+ subscribers:
+ - remote-network
+remote_nova_services:
+ command: "PATH=$PATH:/etc/sensu/plugins check_nova_services.sh -u :::openstack.user::: -p :::openstack.password::: -t :::openstack.tenant::: -h 'http://:::openstack.host:::::::openstack.port:::/v2.0'"
+ interval: 60
+ occurrences: 1
+ subscribers:
+ - remote-network
\ No newline at end of file
diff --git a/nova/init.sls b/nova/init.sls
new file mode 100644
index 0000000..a118c7f
--- /dev/null
+++ b/nova/init.sls
@@ -0,0 +1,8 @@
+
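+# Pull in the controller and/or compute sub-states only when the matching
+# section is present in the minion's pillar.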
+include:
+{% if pillar.nova.controller is defined %}
+- nova.controller
+{% endif %}
+{% if pillar.nova.compute is defined %}
+- nova.compute
+{% endif %}
diff --git a/nova/map.jinja b/nova/map.jinja
new file mode 100644
index 0000000..91d227c
--- /dev/null
+++ b/nova/map.jinja
@@ -0,0 +1,34 @@
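+{#- Per-OS-family defaults for the controller and compute roles. grains.filter_by
+    keys on the os_family grain and deep-merges the selected defaults with the
+    nova:controller / nova:compute pillar data, so any value can be overridden
+    from pillar. -#}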
+{% set compute_bind_defaults = {
+ 'vnc_address': '10.0.0.10',
+ 'vnc_port': '6080',
+ 'vnc_name': 'cloud.domain.com',
+ 'vnc_protocol': 'http',
+} %}
+
+{% set controller = salt['grains.filter_by']({
+ 'Debian': {
+ 'pkgs': ['nova-novncproxy', 'novnc', 'nova-api', 'nova-ajax-console-proxy', 'nova-cert', 'nova-conductor', 'nova-consoleauth', 'nova-doc', 'nova-scheduler', 'python-novaclient', 'python-memcache'],
+ 'services': ['nova-api', 'nova-cert', 'nova-consoleauth', 'nova-scheduler', 'nova-conductor', 'nova-novncproxy'],
+ 'debug': false,
+ },
+ 'RedHat': {
+ 'pkgs': ['openstack-nova-novncproxy', 'python-nova', 'openstack-nova-api', 'openstack-nova-console', 'openstack-nova-scheduler', 'python-novaclient', 'openstack-nova-common', 'openstack-nova-conductor', 'openstack-nova-cert'],
+ 'services': ['openstack-nova-api', 'openstack-nova-cert', 'openstack-nova-consoleauth', 'openstack-nova-scheduler', 'openstack-nova-conductor', 'openstack-nova-novncproxy'],
+ 'debug': false,
+ },
+}, merge=salt['pillar.get']('nova:controller')) %}
+
+{% set compute = salt['grains.filter_by']({
+ 'Debian': {
+ 'pkgs': ['nova-compute-kvm', 'python-novaclient', 'pm-utils', 'sysfsutils', 'sg3-utils', 'libvirt-bin', 'python-memcache', 'qemu-kvm','python-guestfs'],
+ 'services': ['nova-compute', 'libvirt-bin'],
+ 'bind': compute_bind_defaults,
+ 'debug': false,
+ },
+ 'RedHat': {
+ 'pkgs': ['openstack-nova-compute', 'python-novaclient', 'python-nova', 'sysfsutils', 'sg3_utils'],
+ 'services': ['messagebus', 'openstack-nova-compute', 'libvirtd'],
+ 'bind': compute_bind_defaults,
+ 'debug': false,
+ },
+}, merge=salt['pillar.get']('nova:compute')) %}