Adding templates and a tiny change for the Queens release

Change-Id: Ie51d0addd389b3ce897413b2c2b3f8a7e43c2e8e
diff --git a/ceilometer/files/queens/ceilometer-agent.conf.Debian b/ceilometer/files/queens/ceilometer-agent.conf.Debian
new file mode 100644
index 0000000..7a10276
--- /dev/null
+++ b/ceilometer/files/queens/ceilometer-agent.conf.Debian
@@ -0,0 +1,481 @@
+{%- from "ceilometer/map.jinja" import agent with context -%}
+
+[DEFAULT]
+
+{%- set _data = agent.message_queue %}
+{%- include "oslo_templates/files/queens/oslo/messaging/_default.conf" %}
+
+{%- set _data = agent.logging %}
+{%- include "oslo_templates/files/queens/oslo/_log.conf" %}
+
+#
+# From ceilometer
+#
+
+# To reduce polling agent load, samples are sent to the notification agent in a
+# batch. To gain higher throughput at the cost of load set this to False.
+# (boolean value)
+#batch_polled_samples = true
+{%- if agent.batch_polled_samples is defined %}
+batch_polled_samples = {{ agent.batch_polled_samples|lower }}
+{%- endif %}
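+# NOTE: a minimal pillar sketch (hypothetical values) that would render the
+# option above:
+#
+#   ceilometer:
+#     agent:
+#       batch_polled_samples: false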
+
+# Inspector to use for inspecting the hypervisor layer. Known inspectors are
+# libvirt, hyperv, vsphere and xenapi. (string value)
+#hypervisor_inspector = libvirt
+
+# Libvirt domain type. (string value)
+# Possible values:
+# kvm - <No description provided>
+# lxc - <No description provided>
+# qemu - <No description provided>
+# uml - <No description provided>
+# xen - <No description provided>
+#libvirt_type = kvm
+
+# Override the default libvirt URI (which is dependent on libvirt_type).
+# (string value)
+#libvirt_uri =
+
+# Swift reseller prefix. Must be on par with reseller_prefix in proxy-
+# server.conf. (string value)
+#reseller_prefix = AUTH_
+
+# Configuration file for pipeline definition. (string value)
+#pipeline_cfg_file = pipeline.yaml
+
+# Configuration file for event pipeline definition. (string value)
+#event_pipeline_cfg_file = event_pipeline.yaml
+
+# Source for samples emitted on this instance. (string value)
+#sample_source = openstack
+
+# List of metadata prefixes reserved for metering use. (list value)
+#reserved_metadata_namespace = metering.
+
+# Limit on length of reserved metadata values. (integer value)
+#reserved_metadata_length = 256
+
+# List of metadata keys reserved for metering use. These keys are in
+# addition to the ones included in the namespace. (list value)
+#reserved_metadata_keys =
+
+# Path to the rootwrap configuration file to use for running commands as root
+# (string value)
+#rootwrap_config = /etc/ceilometer/rootwrap.conf
+
+# Name of this node, which must be valid in an AMQP key. Can be an opaque
+# identifier. For ZeroMQ only, must be a valid host name, FQDN, or IP address.
+# (host address value)
+#host = <your_hostname>
+
+# Timeout seconds for HTTP requests. Set it to None to disable timeout.
+# (integer value)
+#http_timeout = 600
+
+# Maximum number of parallel requests for services to handle at the same time.
+# (integer value)
+# Minimum value: 1
+#max_parallel_requests = 64
+
+
+[compute]
+
+#
+# From ceilometer
+#
+
+# Ceilometer offers many methods to discover the instances running on a
+# compute node:
+# * naive: poll nova to get all instances
+# * workload_partitioning: poll nova to get instances of the compute
+# * libvirt_metadata: get instances from libvirt metadata, but without
+#   instance metadata (recommended for the Gnocchi backend). (string value)
+# Possible values:
+# naive - <No description provided>
+# workload_partitioning - <No description provided>
+# libvirt_metadata - <No description provided>
+#instance_discovery_method = libvirt_metadata
+instance_discovery_method = {{ agent.get('discovery_method', 'libvirt_metadata') }}
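+# NOTE: the line above falls back to 'libvirt_metadata' when the pillar key
+# is absent. A pillar sketch (hypothetical values) selecting Nova-API-based
+# discovery instead:
+#
+#   ceilometer:
+#     agent:
+#       discovery_method: naive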
+
+# New instances will be discovered periodically based on this option (in
+# seconds). By default, the agent discovers instances according to the
+# pipeline polling interval. If this option is greater than 0, the instance
+# list to poll will be updated based on this option's interval. Measurements
+# relating to the instances will match the intervals defined in the pipeline.
+# This option is only used for agent polling of the Nova API, so it will work
+# only when 'instance_discovery_method' is set to 'naive'. (integer value)
+# Minimum value: 0
+#resource_update_interval = 0
+
+# The expiry, in seconds, after which the instance resource cache is fully
+# refreshed. Since an instance may be migrated to another host, legacy
+# instance info in the local cache must be cleaned up by totally refreshing
+# the cache. The minimum should be the value of the resource_update_interval
+# option. This option is only used for agent polling of the Nova API, so it
+# will work only when 'instance_discovery_method' is set to 'naive'. (integer value)
+# Minimum value: 0
+#resource_cache_expiry = 3600
+
+
+[event]
+
+#
+# From ceilometer
+#
+
+# Configuration file for event definitions. (string value)
+#definitions_cfg_file = event_definitions.yaml
+
+# Drop notifications if no event definition matches. (Otherwise, we convert
+# them with just the default traits) (boolean value)
+#drop_unmatched_notifications = false
+
+# Store the raw notification for select priority levels (info and/or error). By
+# default, raw details are not captured. (multi valued)
+#store_raw =
+
+
+[hardware]
+
+#
+# From ceilometer
+#
+
+# URL scheme to use for hardware nodes. (string value)
+#url_scheme = snmp://
+
+# SNMPd user name of all nodes running in the cloud. (string value)
+#readonly_user_name = ro_snmp_user
+
+# SNMPd v3 authentication password of all the nodes running in the cloud.
+# (string value)
+#readonly_user_password = password
+
+# SNMPd v3 authentication algorithm of all the nodes running in the cloud
+# (string value)
+# Possible values:
+# md5 - <No description provided>
+# sha - <No description provided>
+#readonly_user_auth_proto = <None>
+
+# SNMPd v3 encryption algorithm of all the nodes running in the cloud (string
+# value)
+# Possible values:
+# des - <No description provided>
+# aes128 - <No description provided>
+# 3des - <No description provided>
+# aes192 - <No description provided>
+# aes256 - <No description provided>
+#readonly_user_priv_proto = <None>
+
+# SNMPd v3 encryption password of all the nodes running in the cloud. (string
+# value)
+#readonly_user_priv_password = <None>
+
+# Name of the control plane Tripleo network (string value)
+#tripleo_network_name = ctlplane
+
+# Configuration file for defining hardware snmp meters. (string value)
+#meter_definitions_file = snmp.yaml
+
+
+[ipmi]
+
+#
+# From ceilometer
+#
+
+# Number of retries upon Intel Node Manager initialization failure (integer
+# value)
+#node_manager_init_retry = 3
+
+# Tolerance of IPMI/NM polling failures before disabling this pollster.
+# Negative indicates retrying forever. (integer value)
+#polling_retry = 3
+
+
+[meter]
+
+#
+# From ceilometer
+#
+
+# DEPRECATED: Configuration file for defining meter notifications. This option
+# is deprecated; use meter_definitions_dirs to configure meter notification
+# files instead. The meter definitions configuration file will be looked up
+# according to this parameter. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#meter_definitions_cfg_file = <None>
+
+# List of directories in which to find meter notification definition files. (multi valued)
+#meter_definitions_dirs = /etc/ceilometer/meters.d
+#meter_definitions_dirs = /usr/src/git/ceilometer/ceilometer/data/meters.d
+
+
+[notification]
+
+#
+# From ceilometer
+#
+
+# DEPRECATED: Number of queues to parallelize workload across. This value
+# should be larger than the number of active notification agents for optimal
+# results. WARNING: Once set, lowering this value may result in lost data.
+# (integer value)
+# Minimum value: 1
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#pipeline_processing_queues = 10
+
+# Acknowledge message when event persistence fails. (boolean value)
+#ack_on_event_error = true
+
+# DEPRECATED: Enable workload partitioning, allowing multiple notification
+# agents to be run simultaneously. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#workload_partitioning = false
+
+# Messaging URLs to listen for notifications. Example:
+# rabbit://user:pass@host1:port1[,user:pass@hostN:portN]/virtual_host
+# (DEFAULT/transport_url is used if empty). This is useful when you have
+# dedicated messaging nodes for each service, for example, all nova
+# notifications go to rabbit-nova:5672, while all cinder notifications go to
+# rabbit-cinder:5672. (multi valued)
+#messaging_urls =
+
+# Number of notification messages to wait for before publishing them. Batching
+# is advised when transformations are applied in the pipeline. (integer value)
+# Minimum value: 1
+#batch_size = 100
+
+# Number of seconds to wait before publishing samples when batch_size is not
+# reached (None means indefinitely) (integer value)
+#batch_timeout = 5
+
+# Number of workers for the notification service; the default is 1. (integer
+# value)
+# Minimum value: 1
+# Deprecated group/name - [DEFAULT]/notification_workers
+#workers = 1
+
+# Select which pipeline managers to enable in order to generate data. (multi valued)
+#pipelines = meter
+#pipelines = event
+
+# Exchange names to listen to for notifications. (multi valued)
+# Deprecated group/name - [DEFAULT]/http_control_exchanges
+#notification_control_exchanges = nova
+#notification_control_exchanges = glance
+#notification_control_exchanges = neutron
+#notification_control_exchanges = cinder
+#notification_control_exchanges = heat
+#notification_control_exchanges = keystone
+#notification_control_exchanges = sahara
+#notification_control_exchanges = trove
+#notification_control_exchanges = zaqar
+#notification_control_exchanges = swift
+#notification_control_exchanges = ceilometer
+#notification_control_exchanges = magnum
+#notification_control_exchanges = dns
+#notification_control_exchanges = ironic
+#notification_control_exchanges = aodh
+
+
+[polling]
+
+#
+# From ceilometer
+#
+
+# Configuration file for polling definition. (string value)
+#cfg_file = polling.yaml
+
+# Work-load partitioning group prefix. Use only if you want to run multiple
+# polling agents with different config files. For each sub-group of the agent
+# pool with the same partitioning_group_prefix a disjoint subset of pollsters
+# should be loaded. (string value)
+#partitioning_group_prefix = <None>
+
+
+[publisher]
+
+#
+# From ceilometer
+#
+
+# Secret value for signing messages. Set the value empty if signing is not
+# required, to avoid computational overhead. (string value)
+# Deprecated group/name - [DEFAULT]/metering_secret
+# Deprecated group/name - [publisher_rpc]/metering_secret
+# Deprecated group/name - [publisher]/metering_secret
+#telemetry_secret = change this for valid signing
+{%- if agent.secret is defined %}
+telemetry_secret = {{ agent.secret }}
+{%- endif %}
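+# NOTE: a pillar sketch (hypothetical secret) that makes the conditional
+# above render a signing secret:
+#
+#   ceilometer:
+#     agent:
+#       secret: SomeTelemetrySecret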
+
+
+[publisher_notifier]
+
+#
+# From ceilometer
+#
+
+# The topic that ceilometer uses for metering notifications. (string value)
+#metering_topic = metering
+
+# The topic that ceilometer uses for event notifications. (string value)
+#event_topic = event
+
+# The driver that ceilometer uses for metering notifications. (string value)
+# Deprecated group/name - [publisher_notifier]/metering_driver
+#telemetry_driver = messagingv2
+
+
+[rgw_admin_credentials]
+
+#
+# From ceilometer
+#
+
+# Access key for Radosgw Admin. (string value)
+#access_key = <None>
+
+# Secret key for Radosgw Admin. (string value)
+#secret_key = <None>
+
+
+[service_credentials]
+
+#
+# From ceilometer-auth
+#
+{%- set _data = agent.identity %}
+{%- set auth_type = _data.get('auth_type', 'password') %}
+{%- include "oslo_templates/files/queens/keystoneauth/_type_" + auth_type + ".conf" %}
+
+# Type of endpoint in Identity service catalog to use for communication with
+# OpenStack services. (string value)
+# Possible values:
+# public - <No description provided>
+# internal - <No description provided>
+# admin - <No description provided>
+# auth - <No description provided>
+# publicURL - <No description provided>
+# internalURL - <No description provided>
+# adminURL - <No description provided>
+# Deprecated group/name - [service_credentials]/os_endpoint_type
+interface = internal
+
+
+[service_types]
+
+#
+# From ceilometer
+#
+
+# Glance service type. (string value)
+#glance = image
+
+# Neutron service type. (string value)
+#neutron = network
+
+# Neutron load balancer version. (string value)
+# Possible values:
+# v1 - <No description provided>
+# v2 - <No description provided>
+#neutron_lbaas_version = v2
+
+# Nova service type. (string value)
+#nova = compute
+
+# Radosgw service type. (string value)
+#radosgw = <None>
+
+# Swift service type. (string value)
+#swift = object-store
+
+# Cinder service type. (string value)
+# Deprecated group/name - [service_types]/cinderv2
+#cinder = volumev3
+
+
+[vmware]
+
+#
+# From ceilometer
+#
+
+# IP address of the VMware vSphere host. (host address value)
+#host_ip = 127.0.0.1
+
+# Port of the VMware vSphere host. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#host_port = 443
+
+# Username of VMware vSphere. (string value)
+#host_username =
+
+# Password of VMware vSphere. (string value)
+#host_password =
+
+# CA bundle file to use in verifying the vCenter server certificate. (string
+# value)
+#ca_file = <None>
+
+# If true, the vCenter server certificate is not verified. If false, then the
+# default CA truststore is used for verification. This option is ignored if
+# "ca_file" is set. (boolean value)
+#insecure = false
+
+# Number of times a VMware vSphere API may be retried. (integer value)
+#api_retry_count = 10
+
+# Sleep time in seconds for polling an ongoing async task. (floating point
+# value)
+#task_poll_interval = 0.5
+
+# Optional vim service WSDL location, e.g. http://<server>/vimService.wsdl.
+# Optional override of the default location for bug workarounds. (string value)
+#wsdl_location = <None>
+
+
+[xenapi]
+
+#
+# From ceilometer
+#
+
+# URL for connection to XenServer/Xen Cloud Platform. (string value)
+#connection_url = <None>
+
+# Username for connection to XenServer/Xen Cloud Platform. (string value)
+#connection_username = root
+
+# Password for connection to XenServer/Xen Cloud Platform. (string value)
+#connection_password = <None>
+
+[oslo_concurrency]
+{%- if agent.concurrency is defined %}
+{%- set _data = agent.concurrency %}
+{%- include "oslo_templates/files/queens/oslo/_concurrency.conf" %}
+{%- endif %}
+
+[oslo_messaging_notifications]
+{%- set _data = agent.notification %}
+{%- include "oslo_templates/files/queens/oslo/messaging/_notifications.conf" %}
+
+{%- if agent.message_queue is defined %}
+{%- set _data = agent.message_queue %}
+{%- if _data.engine == 'rabbitmq' %}
+    {%- set messaging_engine = 'rabbit' %}
+{%- else %}
+    {%- set messaging_engine = _data.engine %}
+{%- endif %}
+[oslo_messaging_{{ messaging_engine }}]
+{%- include "oslo_templates/files/queens/oslo/messaging/_" + messaging_engine + ".conf" %}
+{%- endif %}
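+# NOTE: a sketch of the mapping above, assuming a hypothetical pillar:
+#
+#   ceilometer:
+#     agent:
+#       message_queue:
+#         engine: rabbitmq
+#
+# renders the section header as [oslo_messaging_rabbit] and pulls in the
+# matching oslo_templates include; any other engine name is used verbatim.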
+
diff --git a/ceilometer/files/queens/ceilometer-server.conf.Debian b/ceilometer/files/queens/ceilometer-server.conf.Debian
new file mode 100644
index 0000000..c86fc81
--- /dev/null
+++ b/ceilometer/files/queens/ceilometer-server.conf.Debian
@@ -0,0 +1,477 @@
+{%- from "ceilometer/map.jinja" import server with context -%}
+
+[DEFAULT]
+
+{%- set _data = server.message_queue %}
+{%- include "oslo_templates/files/queens/oslo/messaging/_default.conf" %}
+
+{%- set _data = server.logging %}
+{%- include "oslo_templates/files/queens/oslo/_log.conf" %}
+
+#
+# From ceilometer
+#
+
+# To reduce polling agent load, samples are sent to the notification agent in a
+# batch. To gain higher throughput at the cost of load set this to False.
+# (boolean value)
+#batch_polled_samples = true
+{%- if server.batch_polled_samples is defined %}
+batch_polled_samples = {{ server.batch_polled_samples|lower }}
+{%- endif %}
+
+# Inspector to use for inspecting the hypervisor layer. Known inspectors are
+# libvirt, hyperv, vsphere and xenapi. (string value)
+#hypervisor_inspector = libvirt
+
+# Libvirt domain type. (string value)
+# Possible values:
+# kvm - <No description provided>
+# lxc - <No description provided>
+# qemu - <No description provided>
+# uml - <No description provided>
+# xen - <No description provided>
+#libvirt_type = kvm
+
+# Override the default libvirt URI (which is dependent on libvirt_type).
+# (string value)
+#libvirt_uri =
+
+# Swift reseller prefix. Must be on par with reseller_prefix in proxy-
+# server.conf. (string value)
+#reseller_prefix = AUTH_
+
+# Configuration file for pipeline definition. (string value)
+#pipeline_cfg_file = pipeline.yaml
+
+# Configuration file for event pipeline definition. (string value)
+#event_pipeline_cfg_file = event_pipeline.yaml
+
+# Source for samples emitted on this instance. (string value)
+#sample_source = openstack
+
+# List of metadata prefixes reserved for metering use. (list value)
+#reserved_metadata_namespace = metering.
+
+# Limit on length of reserved metadata values. (integer value)
+#reserved_metadata_length = 256
+
+# List of metadata keys reserved for metering use. These keys are in
+# addition to the ones included in the namespace. (list value)
+#reserved_metadata_keys =
+
+# Path to the rootwrap configuration file to use for running commands as root
+# (string value)
+#rootwrap_config = /etc/ceilometer/rootwrap.conf
+
+# Name of this node, which must be valid in an AMQP key. Can be an opaque
+# identifier. For ZeroMQ only, must be a valid host name, FQDN, or IP address.
+# (host address value)
+#host = <your_hostname>
+
+# Timeout seconds for HTTP requests. Set it to None to disable timeout.
+# (integer value)
+#http_timeout = 600
+
+# Maximum number of parallel requests for services to handle at the same time.
+# (integer value)
+# Minimum value: 1
+#max_parallel_requests = 64
+
+
+[coordination]
+
+#
+# From ceilometer
+#
+
+# The backend URL to use for distributed coordination. If left empty, per-
+# deployment central agent and per-host compute agent won't do workload
+# partitioning and will only function correctly if a single instance of that
+# service is running. (string value)
+#backend_url = <None>
+{%- if server.coordination_backend is defined %}
+backend_url = {{ server.coordination_backend.url }}
+{%- endif %}
+
+# Number of seconds between checks to see if group membership has changed
+# (floating point value)
+#check_watchers = 10.0
+{%- if server.get('coordination_backend', {}).check_watchers is defined %}
+check_watchers = {{ server.coordination_backend.check_watchers }}
+{%- endif %}
+
+{%- if server.get('coordination_backend', {}).heartbeat is defined %}
+heartbeat = {{ server.coordination_backend.heartbeat }}
+{%- endif %}
+
+{%- if server.get('coordination_backend', {}).retry_backoff is defined %}
+retry_backoff = {{ server.coordination_backend.retry_backoff }}
+{%- endif %}
+
+{%- if server.get('coordination_backend', {}).max_retry_interval is defined %}
+max_retry_interval = {{ server.coordination_backend.max_retry_interval }}
+{%- endif %}
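+# NOTE: a pillar sketch (hypothetical values) wiring the coordination options
+# above to a Redis backend:
+#
+#   ceilometer:
+#     server:
+#       coordination_backend:
+#         url: redis://127.0.0.1:6379
+#         check_watchers: 10.0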
+
+
+[event]
+
+#
+# From ceilometer
+#
+
+# Configuration file for event definitions. (string value)
+#definitions_cfg_file = event_definitions.yaml
+
+# Drop notifications if no event definition matches. (Otherwise, we convert
+# them with just the default traits) (boolean value)
+#drop_unmatched_notifications = false
+
+# Store the raw notification for select priority levels (info and/or error). By
+# default, raw details are not captured. (multi valued)
+#store_raw =
+
+
+[hardware]
+
+#
+# From ceilometer
+#
+
+# URL scheme to use for hardware nodes. (string value)
+#url_scheme = snmp://
+
+# SNMPd user name of all nodes running in the cloud. (string value)
+#readonly_user_name = ro_snmp_user
+
+# SNMPd v3 authentication password of all the nodes running in the cloud.
+# (string value)
+#readonly_user_password = password
+
+# SNMPd v3 authentication algorithm of all the nodes running in the cloud
+# (string value)
+# Possible values:
+# md5 - <No description provided>
+# sha - <No description provided>
+#readonly_user_auth_proto = <None>
+
+# SNMPd v3 encryption algorithm of all the nodes running in the cloud (string
+# value)
+# Possible values:
+# des - <No description provided>
+# aes128 - <No description provided>
+# 3des - <No description provided>
+# aes192 - <No description provided>
+# aes256 - <No description provided>
+#readonly_user_priv_proto = <None>
+
+# SNMPd v3 encryption password of all the nodes running in the cloud. (string
+# value)
+#readonly_user_priv_password = <None>
+
+# Name of the control plane Tripleo network (string value)
+#tripleo_network_name = ctlplane
+
+# Configuration file for defining hardware snmp meters. (string value)
+#meter_definitions_file = snmp.yaml
+
+
+[ipmi]
+
+#
+# From ceilometer
+#
+
+# Number of retries upon Intel Node Manager initialization failure (integer
+# value)
+#node_manager_init_retry = 3
+
+# Tolerance of IPMI/NM polling failures before disabling this pollster.
+# Negative indicates retrying forever. (integer value)
+#polling_retry = 3
+
+
+[meter]
+
+#
+# From ceilometer
+#
+
+# DEPRECATED: Configuration file for defining meter notifications. This option
+# is deprecated; use meter_definitions_dirs to configure meter notification
+# files instead. The meter definitions configuration file will be looked up
+# according to this parameter. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#meter_definitions_cfg_file = <None>
+
+# List of directories in which to find meter notification definition files. (multi valued)
+#meter_definitions_dirs = /etc/ceilometer/meters.d
+#meter_definitions_dirs = /usr/src/git/ceilometer/ceilometer/data/meters.d
+
+
+[notification]
+
+#
+# From ceilometer
+#
+
+# DEPRECATED: Number of queues to parallelize workload across. This value
+# should be larger than the number of active notification agents for optimal
+# results. WARNING: Once set, lowering this value may result in lost data.
+# (integer value)
+# Minimum value: 1
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#pipeline_processing_queues = 10
+
+# Acknowledge message when event persistence fails. (boolean value)
+#ack_on_event_error = true
+
+# DEPRECATED: Enable workload partitioning, allowing multiple notification
+# agents to be run simultaneously. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#workload_partitioning = false
+
+# Messaging URLs to listen for notifications. Example:
+# rabbit://user:pass@host1:port1[,user:pass@hostN:portN]/virtual_host
+# (DEFAULT/transport_url is used if empty). This is useful when you have
+# dedicated messaging nodes for each service, for example, all nova
+# notifications go to rabbit-nova:5672, while all cinder notifications go to
+# rabbit-cinder:5672. (multi valued)
+#messaging_urls =
+
+# Number of notification messages to wait for before publishing them. Batching
+# is advised when transformations are applied in the pipeline. (integer value)
+# Minimum value: 1
+#batch_size = 100
+
+# Number of seconds to wait before publishing samples when batch_size is not
+# reached (None means indefinitely) (integer value)
+#batch_timeout = 5
+
+# Number of workers for the notification service; the default is 1. (integer
+# value)
+# Minimum value: 1
+# Deprecated group/name - [DEFAULT]/notification_workers
+#workers = 1
+
+# Select which pipeline managers to enable in order to generate data. (multi valued)
+#pipelines = meter
+#pipelines = event
+
+# Exchange names to listen to for notifications. (multi valued)
+# Deprecated group/name - [DEFAULT]/http_control_exchanges
+#notification_control_exchanges = nova
+#notification_control_exchanges = glance
+#notification_control_exchanges = neutron
+#notification_control_exchanges = cinder
+#notification_control_exchanges = heat
+#notification_control_exchanges = keystone
+#notification_control_exchanges = sahara
+#notification_control_exchanges = trove
+#notification_control_exchanges = zaqar
+#notification_control_exchanges = swift
+#notification_control_exchanges = ceilometer
+#notification_control_exchanges = magnum
+#notification_control_exchanges = dns
+#notification_control_exchanges = ironic
+#notification_control_exchanges = aodh
+
+
+[polling]
+
+#
+# From ceilometer
+#
+
+# Configuration file for polling definition. (string value)
+#cfg_file = polling.yaml
+
+# Work-load partitioning group prefix. Use only if you want to run multiple
+# polling agents with different config files. For each sub-group of the agent
+# pool with the same partitioning_group_prefix a disjoint subset of pollsters
+# should be loaded. (string value)
+#partitioning_group_prefix = <None>
+
+
+[publisher]
+
+#
+# From ceilometer
+#
+
+# Secret value for signing messages. Set the value empty if signing is not
+# required, to avoid computational overhead. (string value)
+# Deprecated group/name - [DEFAULT]/metering_secret
+# Deprecated group/name - [publisher_rpc]/metering_secret
+# Deprecated group/name - [publisher]/metering_secret
+#telemetry_secret = change this for valid signing
+{%- if server.secret is defined %}
+telemetry_secret = {{ server.secret }}
+{%- endif %}
+
+
+[publisher_notifier]
+
+#
+# From ceilometer
+#
+
+# The topic that ceilometer uses for metering notifications. (string value)
+#metering_topic = metering
+
+# The topic that ceilometer uses for event notifications. (string value)
+#event_topic = event
+
+# The driver that ceilometer uses for metering notifications. (string value)
+# Deprecated group/name - [publisher_notifier]/metering_driver
+#telemetry_driver = messagingv2
+
+
+[rgw_admin_credentials]
+
+#
+# From ceilometer
+#
+
+# Access key for Radosgw Admin. (string value)
+#access_key = <None>
+
+# Secret key for Radosgw Admin. (string value)
+#secret_key = <None>
+
+
+[service_credentials]
+
+#
+# From ceilometer-auth
+#
+{%- set _data = server.identity %}
+{%- set auth_type = _data.get('auth_type', 'password') %}
+{%- include "oslo_templates/files/queens/keystoneauth/_type_" + auth_type + ".conf" %}
+
+# Type of endpoint in Identity service catalog to use for communication with
+# OpenStack services. (string value)
+# Possible values:
+# public - <No description provided>
+# internal - <No description provided>
+# admin - <No description provided>
+# auth - <No description provided>
+# publicURL - <No description provided>
+# internalURL - <No description provided>
+# adminURL - <No description provided>
+# Deprecated group/name - [service_credentials]/os_endpoint_type
+interface = internal
+
+
+[service_types]
+
+#
+# From ceilometer
+#
+
+# Glance service type. (string value)
+#glance = image
+
+# Neutron service type. (string value)
+#neutron = network
+
+# Neutron load balancer version. (string value)
+# Possible values:
+# v1 - <No description provided>
+# v2 - <No description provided>
+#neutron_lbaas_version = v2
+
+# Nova service type. (string value)
+#nova = compute
+
+# Radosgw service type. (string value)
+#radosgw = <None>
+
+# Swift service type. (string value)
+#swift = object-store
+
+# Cinder service type. (string value)
+# Deprecated group/name - [service_types]/cinderv2
+#cinder = volumev3
+
+
+[vmware]
+
+#
+# From ceilometer
+#
+
+# IP address of the VMware vSphere host. (host address value)
+#host_ip = 127.0.0.1
+
+# Port of the VMware vSphere host. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#host_port = 443
+
+# Username of VMware vSphere. (string value)
+#host_username =
+
+# Password of VMware vSphere. (string value)
+#host_password =
+
+# CA bundle file to use in verifying the vCenter server certificate. (string
+# value)
+#ca_file = <None>
+
+# If true, the vCenter server certificate is not verified. If false, then the
+# default CA truststore is used for verification. This option is ignored if
+# "ca_file" is set. (boolean value)
+#insecure = false
+
+# Number of times a VMware vSphere API may be retried. (integer value)
+#api_retry_count = 10
+
+# Sleep time in seconds for polling an ongoing async task. (floating point
+# value)
+#task_poll_interval = 0.5
+
+# Optional vim service WSDL location, e.g. http://<server>/vimService.wsdl.
+# Optional override of the default location for bug workarounds. (string value)
+#wsdl_location = <None>
+
+
+[xenapi]
+
+#
+# From ceilometer
+#
+
+# URL for connection to XenServer/Xen Cloud Platform. (string value)
+#connection_url = <None>
+
+# Username for connection to XenServer/Xen Cloud Platform. (string value)
+#connection_username = root
+
+# Password for connection to XenServer/Xen Cloud Platform. (string value)
+#connection_password = <None>
+
+[oslo_concurrency]
+{%- if server.concurrency is defined %}
+{%- set _data = server.concurrency %}
+{%- include "oslo_templates/files/queens/oslo/_concurrency.conf" %}
+{%- endif %}
+
+[oslo_messaging_notifications]
+{%- set _data = server.notification %}
+{%- include "oslo_templates/files/queens/oslo/messaging/_notifications.conf" %}
+
+{%- if server.message_queue is defined %}
+{%- set _data = server.message_queue %}
+{%- if _data.engine == 'rabbitmq' %}
+    {%- set messaging_engine = 'rabbit' %}
+{%- else %}
+    {%- set messaging_engine = _data.engine %}
+{%- endif %}
+[oslo_messaging_{{ messaging_engine }}]
+{%- include "oslo_templates/files/queens/oslo/messaging/_" + messaging_engine + ".conf" %}
+{%- endif %}
+
diff --git a/ceilometer/files/queens/compute_pipeline.yaml b/ceilometer/files/queens/compute_pipeline.yaml
new file mode 100644
index 0000000..e04dc9d
--- /dev/null
+++ b/ceilometer/files/queens/compute_pipeline.yaml
@@ -0,0 +1,26 @@
+{%- from "ceilometer/map.jinja" import server with context %}
+{%- from "ceilometer/map.jinja" import agent with context %}
+{%- if server.get('enabled', False) %}
+{%- set publisher = server.publisher %}
+{%- else %}
+{%- set publisher = agent.publisher %}
+{%- endif %}
+---
+sources:
+    - name: meter_source
+      interval: 60
+      meters:
+          - "*"
+      sinks:
+          - meter_sink
+sinks:
+    - name: meter_sink
+      transformers:
+      publishers:
+          {%- for publisher_name, params in publisher.items() %}
+          {%- if publisher_name == 'graphite' %}
+          - graphite://{{ params.host }}:{{ params.port }}
+          {%- else %}
+          - notifier://
+          {%- endif %}
+          {%- endfor %}
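+# NOTE: with a hypothetical pillar publisher such as
+#
+#   ceilometer:
+#     agent:
+#       publisher:
+#         graphite:
+#           host: 10.0.0.10
+#           port: 2003
+#
+# the loop above renders "- graphite://10.0.0.10:2003"; any other publisher
+# name falls back to "- notifier://".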
diff --git a/ceilometer/files/queens/event_definitions.yaml b/ceilometer/files/queens/event_definitions.yaml
new file mode 100644
index 0000000..d87e1dc
--- /dev/null
+++ b/ceilometer/files/queens/event_definitions.yaml
@@ -0,0 +1,585 @@
+---
+- event_type: 'compute.instance.*'
+  traits: &instance_traits
+    tenant_id:
+      fields: payload.tenant_id
+    user_id:
+      fields: payload.user_id
+    instance_id:
+      fields: payload.instance_id
+    resource_id:
+      fields: payload.instance_id
+    host:
+      fields: publisher_id.`split(., 1, 1)`
+    service:
+      fields: publisher_id.`split(., 0, -1)`
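+    # NOTE: for a hypothetical publisher_id such as "compute.host1", the
+    # split expressions above yield host "host1" and service "compute".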
+    memory_mb:
+      type: int
+      fields: payload.memory_mb
+    disk_gb:
+      type: int
+      fields: payload.disk_gb
+    root_gb:
+      type: int
+      fields: payload.root_gb
+    ephemeral_gb:
+      type: int
+      fields: payload.ephemeral_gb
+    vcpus:
+      type: int
+      fields: payload.vcpus
+    instance_type_id:
+      type: int
+      fields: payload.instance_type_id
+    instance_type:
+      fields: payload.instance_type
+    state:
+      fields: payload.state
+    os_architecture:
+      fields: payload.image_meta.'org.openstack__1__architecture'
+    os_version:
+      fields: payload.image_meta.'org.openstack__1__os_version'
+    os_distro:
+      fields: payload.image_meta.'org.openstack__1__os_distro'
+    launched_at:
+      type: datetime
+      fields: payload.launched_at
+    deleted_at:
+      type: datetime
+      fields: payload.deleted_at
+- event_type: compute.instance.update
+  traits:
+    <<: *instance_traits
+    old_state:
+      fields: payload.old_state
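+# NOTE: the "<<: *instance_traits" lines use YAML merge keys: each event
+# definition inherits the anchored trait mapping and then adds or overrides
+# individual traits.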
+- event_type: compute.instance.exists
+  traits:
+    <<: *instance_traits
+    audit_period_beginning:
+      type: datetime
+      fields: payload.audit_period_beginning
+    audit_period_ending:
+      type: datetime
+      fields: payload.audit_period_ending
+- event_type: ['volume.exists', 'volume.create.*', 'volume.delete.*', 'volume.resize.*', 'volume.attach.*', 'volume.detach.*', 'volume.update.*', 'snapshot.exists', 'snapshot.create.*', 'snapshot.delete.*', 'snapshot.update.*']
+  traits: &cinder_traits
+    user_id:
+      fields: payload.user_id
+    project_id:
+      fields: payload.tenant_id
+    availability_zone:
+      fields: payload.availability_zone
+    display_name:
+      fields: payload.display_name
+    replication_status:
+      fields: payload.replication_status
+    status:
+      fields: payload.status
+    created_at:
+      fields: payload.created_at
+- event_type: ['volume.exists', 'volume.create.*', 'volume.delete.*', 'volume.resize.*', 'volume.attach.*', 'volume.detach.*', 'volume.update.*']
+  traits:
+    <<: *cinder_traits
+    resource_id:
+      fields: payload.volume_id
+    host:
+      fields: payload.host
+    size:
+      fields: payload.size
+    type:
+      fields: payload.volume_type
+    replication_status:
+      fields: payload.replication_status
+- event_type: ['snapshot.exists', 'snapshot.create.*', 'snapshot.delete.*', 'snapshot.update.*']
+  traits:
+    <<: *cinder_traits
+    resource_id:
+      fields: payload.snapshot_id
+    volume_id:
+      fields: payload.volume_id
+- event_type: ['image_volume_cache.*']
+  traits:
+    image_id:
+      fields: payload.image_id
+    host:
+      fields: payload.host
+- event_type: ['image.create', 'image.update', 'image.upload', 'image.delete']
+  traits: &glance_crud
+    project_id:
+      fields: payload.owner
+    resource_id:
+      fields: payload.id
+    name:
+      fields: payload.name
+    status:
+      fields: payload.status
+    created_at:
+      fields: payload.created_at
+    user_id:
+      fields: payload.owner
+    deleted_at:
+      fields: payload.deleted_at
+    size:
+      fields: payload.size
+- event_type: image.send
+  traits: &glance_send
+    receiver_project:
+      fields: payload.receiver_tenant_id
+    receiver_user:
+      fields: payload.receiver_user_id
+    user_id:
+      fields: payload.owner_id
+    image_id:
+      fields: payload.image_id
+    destination_ip:
+      fields: payload.destination_ip
+    bytes_sent:
+      type: int
+      fields: payload.bytes_sent
+- event_type: orchestration.stack.*
+  traits: &orchestration_crud
+    project_id:
+      fields: payload.tenant_id
+    user_id:
+      fields: ['_context_trustor_user_id', '_context_user_id']
+    resource_id:
+      fields: payload.stack_identity
+- event_type: sahara.cluster.*
+  traits: &sahara_crud
+    project_id:
+      fields: payload.project_id
+    user_id:
+      fields: _context_user_id
+    resource_id:
+      fields: payload.cluster_id
+- event_type: sahara.cluster.health
+  traits: &sahara_health
+    <<: *sahara_crud
+    verification_id:
+      fields: payload.verification_id
+    health_check_status:
+      fields: payload.health_check_status
+    health_check_name:
+      fields: payload.health_check_name
+    health_check_description:
+      fields: payload.health_check_description
+    created_at:
+      type: datetime
+      fields: payload.created_at
+    updated_at:
+      type: datetime
+      fields: payload.updated_at
+- event_type: ['identity.user.*', 'identity.project.*', 'identity.group.*', 'identity.role.*', 'identity.OS-TRUST:trust.*',
+               'identity.region.*', 'identity.service.*', 'identity.endpoint.*', 'identity.policy.*']
+  traits: &identity_crud
+    resource_id:
+      fields: payload.resource_info
+    initiator_id:
+      fields: payload.initiator.id
+    project_id:
+      fields: payload.initiator.project_id
+    domain_id:
+      fields: payload.initiator.domain_id
+- event_type: identity.role_assignment.*
+  traits: &identity_role_assignment
+    role:
+      fields: payload.role
+    group:
+      fields: payload.group
+    domain:
+      fields: payload.domain
+    user:
+      fields: payload.user
+    project:
+      fields: payload.project
+- event_type: identity.authenticate
+  traits: &identity_authenticate
+    typeURI:
+      fields: payload.typeURI
+    id:
+      fields: payload.id
+    action:
+      fields: payload.action
+    eventType:
+      fields: payload.eventType
+    eventTime:
+      fields: payload.eventTime
+    outcome:
+      fields: payload.outcome
+    initiator_typeURI:
+      fields: payload.initiator.typeURI
+    initiator_id:
+      fields: payload.initiator.id
+    initiator_name:
+      fields: payload.initiator.name
+    initiator_host_agent:
+      fields: payload.initiator.host.agent
+    initiator_host_addr:
+      fields: payload.initiator.host.address
+    target_typeURI:
+      fields: payload.target.typeURI
+    target_id:
+      fields: payload.target.id
+    observer_typeURI:
+      fields: payload.observer.typeURI
+    observer_id:
+      fields: payload.observer.id
+- event_type: objectstore.http.request
+  traits: &objectstore_request
+    typeURI:
+      fields: payload.typeURI
+    id:
+      fields: payload.id
+    action:
+      fields: payload.action
+    eventType:
+      fields: payload.eventType
+    eventTime:
+      fields: payload.eventTime
+    outcome:
+      fields: payload.outcome
+    initiator_typeURI:
+      fields: payload.initiator.typeURI
+    initiator_id:
+      fields: payload.initiator.id
+    initiator_project_id:
+      fields: payload.initiator.project_id
+    target_typeURI:
+      fields: payload.target.typeURI
+    target_id:
+      fields: payload.target.id
+    target_action:
+      fields: payload.target.action
+    target_metadata_path:
+      fields: payload.target.metadata.path
+    target_metadata_version:
+      fields: payload.target.metadata.version
+    target_metadata_container:
+      fields: payload.target.metadata.container
+    target_metadata_object:
+      fields: payload.target.metadata.object
+    observer_id:
+      fields: payload.observer.id
+- event_type: ['network.*', 'subnet.*', 'port.*', 'router.*', 'floatingip.*', 'pool.*', 'vip.*', 'member.*', 'health_monitor.*', 'healthmonitor.*', 'listener.*', 'loadbalancer.*', 'firewall.*', 'firewall_policy.*', 'firewall_rule.*', 'vpnservice.*', 'ipsecpolicy.*', 'ikepolicy.*', 'ipsec_site_connection.*']
+  traits: &network_traits
+    user_id:
+      fields: _context_user_id
+    project_id:
+      fields: _context_tenant_id
+- event_type: network.*
+  traits:
+    <<: *network_traits
+    resource_id:
+      fields: ['payload.network.id', 'payload.id']
+- event_type: subnet.*
+  traits:
+    <<: *network_traits
+    resource_id:
+      fields: ['payload.subnet.id', 'payload.id']
+- event_type: port.*
+  traits:
+    <<: *network_traits
+    resource_id:
+      fields: ['payload.port.id', 'payload.id']
+- event_type: router.*
+  traits:
+    <<: *network_traits
+    resource_id:
+      fields: ['payload.router.id', 'payload.id']
+- event_type: floatingip.*
+  traits:
+    <<: *network_traits
+    resource_id:
+      fields: ['payload.floatingip.id', 'payload.id']
+- event_type: pool.*
+  traits:
+    <<: *network_traits
+    resource_id:
+      fields: ['payload.pool.id', 'payload.id']
+- event_type: vip.*
+  traits:
+    <<: *network_traits
+    resource_id:
+      fields: ['payload.vip.id', 'payload.id']
+- event_type: member.*
+  traits:
+    <<: *network_traits
+    resource_id:
+      fields: ['payload.member.id', 'payload.id']
+- event_type: health_monitor.*
+  traits:
+    <<: *network_traits
+    resource_id:
+      fields: ['payload.health_monitor.id', 'payload.id']
+- event_type: healthmonitor.*
+  traits:
+    <<: *network_traits
+    resource_id:
+      fields: ['payload.healthmonitor.id', 'payload.id']
+- event_type: listener.*
+  traits:
+    <<: *network_traits
+    resource_id:
+      fields: ['payload.listener.id', 'payload.id']
+- event_type: loadbalancer.*
+  traits:
+    <<: *network_traits
+    resource_id:
+      fields: ['payload.loadbalancer.id', 'payload.id']
+- event_type: firewall.*
+  traits:
+    <<: *network_traits
+    resource_id:
+      fields: ['payload.firewall.id', 'payload.id']
+- event_type: firewall_policy.*
+  traits:
+    <<: *network_traits
+    resource_id:
+      fields: ['payload.firewall_policy.id', 'payload.id']
+- event_type: firewall_rule.*
+  traits:
+    <<: *network_traits
+    resource_id:
+      fields: ['payload.firewall_rule.id', 'payload.id']
+- event_type: vpnservice.*
+  traits:
+    <<: *network_traits
+    resource_id:
+      fields: ['payload.vpnservice.id', 'payload.id']
+- event_type: ipsecpolicy.*
+  traits:
+    <<: *network_traits
+    resource_id:
+      fields: ['payload.ipsecpolicy.id', 'payload.id']
+- event_type: ikepolicy.*
+  traits:
+    <<: *network_traits
+    resource_id:
+      fields: ['payload.ikepolicy.id', 'payload.id']
+- event_type: ipsec_site_connection.*
+  traits:
+    <<: *network_traits
+    resource_id:
+      fields: ['payload.ipsec_site_connection.id', 'payload.id']
+- event_type: '*http.*'
+  traits: &http_audit
+    project_id:
+      fields: payload.initiator.project_id
+    user_id:
+      fields: payload.initiator.id
+    typeURI:
+      fields: payload.typeURI
+    eventType:
+      fields: payload.eventType
+    action:
+      fields: payload.action
+    outcome:
+      fields: payload.outcome
+    id:
+      fields: payload.id
+    eventTime:
+      fields: payload.eventTime
+    requestPath:
+      fields: payload.requestPath
+    observer_id:
+      fields: payload.observer.id
+    target_id:
+      fields: payload.target.id
+    target_typeURI:
+      fields: payload.target.typeURI
+    target_name:
+      fields: payload.target.name
+    initiator_typeURI:
+      fields: payload.initiator.typeURI
+    initiator_id:
+      fields: payload.initiator.id
+    initiator_name:
+      fields: payload.initiator.name
+    initiator_host_address:
+      fields: payload.initiator.host.address
+- event_type: '*http.response'
+  traits:
+    <<: *http_audit
+    reason_code:
+      fields: payload.reason.reasonCode
+- event_type: ['dns.domain.create', 'dns.domain.update', 'dns.domain.delete']
+  traits: &dns_domain_traits
+    status:
+      fields: payload.status
+    retry:
+      fields: payload.retry
+    description:
+      fields: payload.description
+    expire:
+      fields: payload.expire
+    email:
+      fields: payload.email
+    ttl:
+      fields: payload.ttl
+    action:
+      fields: payload.action
+    name:
+      fields: payload.name
+    resource_id:
+      fields: payload.id
+    created_at:
+      fields: payload.created_at
+    updated_at:
+      fields: payload.updated_at
+    version:
+      fields: payload.version
+    parent_domain_id:
+      fields: parent_domain_id
+    serial:
+      fields: payload.serial
+- event_type: dns.domain.exists
+  traits:
+    <<: *dns_domain_traits
+    audit_period_beginning:
+      type: datetime
+      fields: payload.audit_period_beginning
+    audit_period_ending:
+      type: datetime
+      fields: payload.audit_period_ending
+- event_type: trove.*
+  traits: &trove_base_traits
+    state:
+      fields: payload.state_description
+    instance_type:
+      fields: payload.instance_type
+    user_id:
+      fields: payload.user_id
+    resource_id:
+      fields: payload.instance_id
+    instance_type_id:
+      fields: payload.instance_type_id
+    launched_at:
+      type: datetime
+      fields: payload.launched_at
+    instance_name:
+      fields: payload.instance_name
+    state:
+      fields: payload.state
+    nova_instance_id:
+      fields: payload.nova_instance_id
+    service_id:
+      fields: payload.service_id
+    created_at:
+      type: datetime
+      fields: payload.created_at
+    region:
+      fields: payload.region
+- event_type: ['trove.instance.create', 'trove.instance.modify_volume', 'trove.instance.modify_flavor', 'trove.instance.delete']
+  traits: &trove_common_traits
+    name:
+      fields: payload.name
+    availability_zone:
+      fields: payload.availability_zone
+    instance_size:
+      type: int
+      fields: payload.instance_size
+    volume_size:
+      type: int
+      fields: payload.volume_size
+    nova_volume_id:
+      fields: payload.nova_volume_id
+- event_type: trove.instance.create
+  traits:
+    <<: [*trove_base_traits, *trove_common_traits]
+- event_type: trove.instance.modify_volume
+  traits:
+    <<: [*trove_base_traits, *trove_common_traits]
+    old_volume_size:
+      type: int
+      fields: payload.old_volume_size
+    modify_at:
+      type: datetime
+      fields: payload.modify_at
+- event_type: trove.instance.modify_flavor
+  traits:
+    <<: [*trove_base_traits, *trove_common_traits]
+    old_instance_size:
+      type: int
+      fields: payload.old_instance_size
+    modify_at:
+      type: datetime
+      fields: payload.modify_at
+- event_type: trove.instance.delete
+  traits:
+    <<: [*trove_base_traits, *trove_common_traits]
+    deleted_at:
+      type: datetime
+      fields: payload.deleted_at
+- event_type: trove.instance.exists
+  traits:
+    <<: *trove_base_traits
+    display_name:
+      fields: payload.display_name
+    audit_period_beginning:
+      type: datetime
+      fields: payload.audit_period_beginning
+    audit_period_ending:
+      type: datetime
+      fields: payload.audit_period_ending
+- event_type: profiler.*
+  traits:
+    project:
+      fields: payload.project
+    service:
+      fields: payload.service
+    name:
+      fields: payload.name
+    base_id:
+      fields: payload.base_id
+    trace_id:
+      fields: payload.trace_id
+    parent_id:
+      fields: payload.parent_id
+    timestamp:
+      fields: payload.timestamp
+    host:
+      fields: payload.info.host
+    path:
+      fields: payload.info.request.path
+    query:
+      fields: payload.info.request.query
+    method:
+      fields: payload.info.request.method
+    scheme:
+      fields: payload.info.request.scheme
+    db.statement:
+      fields: payload.info.db.statement
+    db.params:
+      fields: payload.info.db.params
+- event_type: 'magnum.bay.*'
+  traits: &magnum_bay_crud
+    id:
+      fields: payload.id
+    typeURI:
+      fields: payload.typeURI
+    eventType:
+      fields: payload.eventType
+    eventTime:
+      fields: payload.eventTime
+    action:
+      fields: payload.action
+    outcome:
+      fields: payload.outcome
+    initiator_id:
+      fields: payload.initiator.id
+    initiator_typeURI:
+      fields: payload.initiator.typeURI
+    initiator_name:
+      fields: payload.initiator.name
+    initiator_host_agent:
+      fields: payload.initiator.host.agent
+    initiator_host_address:
+      fields: payload.initiator.host.address
+    target_id:
+      fields: payload.target.id
+    target_typeURI:
+      fields: payload.target.typeURI
+    observer_id:
+      fields: payload.observer.id
+    observer_typeURI:
+      fields: payload.observer.typeURI
diff --git a/ceilometer/files/queens/event_pipeline.yaml b/ceilometer/files/queens/event_pipeline.yaml
new file mode 100644
index 0000000..936b52c
--- /dev/null
+++ b/ceilometer/files/queens/event_pipeline.yaml
@@ -0,0 +1,29 @@
+{%- from "ceilometer/map.jinja" import server with context %}
+{%- from "ceilometer/map.jinja" import agent with context %}
+{%- if server.get('enabled', False) %}
+{%- set publisher = server.publisher %}
+{%- else %}
+{%- set publisher = agent.publisher %}
+{%- endif %}
+{%- set publisher_lst = [] %}
+{%- for k, v in publisher.items() %}
+{%- if v.get('enabled', False) and v.get('publish_event', False) %}
+{%- if k == 'default' %}
+{%- do publisher_lst.append( '- direct://' ) %}
+{%- else %}
+{%- do publisher_lst.append( '- ' + v.url ) %}
+{%- endif %}
+{%- endif %}
+{%- endfor %}
+---
+sources:
+    - name: event_source
+      events:
+          - "*"
+      sinks:
+          - event_sink
+sinks:
+    - name: event_sink
+      transformers:
+      publishers:
+          {{ '\n'.join(publisher_lst) }}
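+# NOTE: given a hypothetical pillar publisher such as
+#
+#   ceilometer:
+#     server:
+#       publisher:
+#         default:
+#           enabled: true
+#           publish_event: true
+#
+# the loop above emits "- direct://"; any non-default publisher contributes
+# its configured url instead.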
diff --git a/ceilometer/files/queens/gabbi_pipeline.yaml b/ceilometer/files/queens/gabbi_pipeline.yaml
new file mode 100644
index 0000000..e90516f
--- /dev/null
+++ b/ceilometer/files/queens/gabbi_pipeline.yaml
@@ -0,0 +1,19 @@
+# A limited pipeline for use with the Gabbi spike. It writes directly to
+# the metering database without using an intermediary dispatcher.
+#
+# This is one of several things that will need some extensive
+# tidying to be more right.
+---
+sources:
+    - name: meter_source
+      interval: 1
+      meters:
+          - "*"
+      sinks:
+          - meter_sink
+sinks:
+    - name: meter_sink
+      transformers:
+      publishers:
+          - direct://
diff --git a/ceilometer/files/queens/gnocchi_resources.yaml b/ceilometer/files/queens/gnocchi_resources.yaml
new file mode 100644
index 0000000..a92ce7d
--- /dev/null
+++ b/ceilometer/files/queens/gnocchi_resources.yaml
@@ -0,0 +1,298 @@
+---
+
+resources:
+  - resource_type: identity
+    metrics:
+      - 'identity.authenticate.success'
+      - 'identity.authenticate.pending'
+      - 'identity.authenticate.failure'
+      - 'identity.user.created'
+      - 'identity.user.deleted'
+      - 'identity.user.updated'
+      - 'identity.group.created'
+      - 'identity.group.deleted'
+      - 'identity.group.updated'
+      - 'identity.role.created'
+      - 'identity.role.deleted'
+      - 'identity.role.updated'
+      - 'identity.project.created'
+      - 'identity.project.deleted'
+      - 'identity.project.updated'
+      - 'identity.trust.created'
+      - 'identity.trust.deleted'
+      - 'identity.role_assignment.created'
+      - 'identity.role_assignment.deleted'
+
+  - resource_type: ceph_account
+    metrics:
+      - 'radosgw.objects'
+      - 'radosgw.objects.size'
+      - 'radosgw.objects.containers'
+      - 'radosgw.api.request'
+      - 'radosgw.containers.objects'
+      - 'radosgw.containers.objects.size'
+
+  - resource_type: instance
+    metrics:
+      - 'memory'
+      - 'memory.usage'
+      - 'memory.resident'
+      - 'memory.swap.in'
+      - 'memory.swap.out'
+      - 'memory.bandwidth.total'
+      - 'memory.bandwidth.local'
+      - 'vcpus'
+      - 'cpu'
+      - 'cpu.delta'
+      - 'cpu_util'
+      - 'cpu_l3_cache'
+      - 'disk.root.size'
+      - 'disk.ephemeral.size'
+      - 'disk.read.requests'
+      - 'disk.read.requests.rate'
+      - 'disk.write.requests'
+      - 'disk.write.requests.rate'
+      - 'disk.read.bytes'
+      - 'disk.read.bytes.rate'
+      - 'disk.write.bytes'
+      - 'disk.write.bytes.rate'
+      - 'disk.latency'
+      - 'disk.iops'
+      - 'disk.capacity'
+      - 'disk.allocation'
+      - 'disk.usage'
+      - 'compute.instance.booting.time'
+      - 'perf.cpu.cycles'
+      - 'perf.instructions'
+      - 'perf.cache.references'
+      - 'perf.cache.misses'
+    attributes:
+      host: resource_metadata.(instance_host|host)
+      image_ref: resource_metadata.image_ref
+      display_name: resource_metadata.display_name
+      flavor_id: resource_metadata.(instance_flavor_id|(flavor.id)|flavor_id)
+      flavor_name: resource_metadata.(instance_type|(flavor.name)|flavor_name)
+      server_group: resource_metadata.user_metadata.server_group
+    event_delete: compute.instance.delete.start
+    event_attributes:
+      id: instance_id
+    event_associated_resources:
+      instance_network_interface: '{"=": {"instance_id": "%s"}}'
+      instance_disk: '{"=": {"instance_id": "%s"}}'
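+    # NOTE: attribute paths such as resource_metadata.(instance_host|host)
+    # above try each alternative from left to right and use the first field
+    # present in the sample metadata.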
+
+  - resource_type: instance_network_interface
+    metrics:
+      - 'network.outgoing.packets.rate'
+      - 'network.incoming.packets.rate'
+      - 'network.outgoing.packets'
+      - 'network.incoming.packets'
+      - 'network.outgoing.packets.drop'
+      - 'network.incoming.packets.drop'
+      - 'network.outgoing.packets.error'
+      - 'network.incoming.packets.error'
+      - 'network.outgoing.bytes.rate'
+      - 'network.incoming.bytes.rate'
+      - 'network.outgoing.bytes'
+      - 'network.incoming.bytes'
+    attributes:
+      name: resource_metadata.vnic_name
+      instance_id: resource_metadata.instance_id
+
+  - resource_type: instance_disk
+    metrics:
+      - 'disk.device.read.requests'
+      - 'disk.device.read.requests.rate'
+      - 'disk.device.write.requests'
+      - 'disk.device.write.requests.rate'
+      - 'disk.device.read.bytes'
+      - 'disk.device.read.bytes.rate'
+      - 'disk.device.write.bytes'
+      - 'disk.device.write.bytes.rate'
+      - 'disk.device.latency'
+      - 'disk.device.iops'
+      - 'disk.device.capacity'
+      - 'disk.device.allocation'
+      - 'disk.device.usage'
+    attributes:
+      name: resource_metadata.disk_name
+      instance_id: resource_metadata.instance_id
+
+  - resource_type: image
+    metrics:
+      - 'image.size'
+      - 'image.download'
+      - 'image.serve'
+    attributes:
+      name: resource_metadata.name
+      container_format: resource_metadata.container_format
+      disk_format: resource_metadata.disk_format
+    event_delete: image.delete
+    event_attributes:
+      id: resource_id
+
+  - resource_type: ipmi
+    metrics:
+      - 'hardware.ipmi.node.power'
+      - 'hardware.ipmi.node.temperature'
+      - 'hardware.ipmi.node.inlet_temperature'
+      - 'hardware.ipmi.node.outlet_temperature'
+      - 'hardware.ipmi.node.fan'
+      - 'hardware.ipmi.node.current'
+      - 'hardware.ipmi.node.voltage'
+      - 'hardware.ipmi.node.airflow'
+      - 'hardware.ipmi.node.cups'
+      - 'hardware.ipmi.node.cpu_util'
+      - 'hardware.ipmi.node.mem_util'
+      - 'hardware.ipmi.node.io_util'
+
+  - resource_type: network
+    metrics:
+      - 'bandwidth'
+      - 'ip.floating'
+    event_delete: floatingip.delete.end
+    event_attributes:
+      id: resource_id
+
+  - resource_type: stack
+    metrics:
+      - 'stack.create'
+      - 'stack.update'
+      - 'stack.delete'
+      - 'stack.resume'
+      - 'stack.suspend'
+
+  - resource_type: swift_account
+    metrics:
+      - 'storage.objects.incoming.bytes'
+      - 'storage.objects.outgoing.bytes'
+      - 'storage.api.request'
+      - 'storage.objects.size'
+      - 'storage.objects'
+      - 'storage.objects.containers'
+      - 'storage.containers.objects'
+      - 'storage.containers.objects.size'
+
+  - resource_type: volume
+    metrics:
+      - 'volume'
+      - 'volume.size'
+      - 'snapshot.size'
+      - 'volume.snapshot.size'
+      - 'volume.backup.size'
+    attributes:
+      display_name: resource_metadata.(display_name|name)
+      volume_type: resource_metadata.volume_type
+    event_delete: volume.delete.start
+    event_attributes:
+      id: resource_id
+
+  - resource_type: host
+    metrics:
+      - 'hardware.cpu.load.1min'
+      - 'hardware.cpu.load.5min'
+      - 'hardware.cpu.load.15min'
+      - 'hardware.cpu.util'
+      - 'hardware.memory.total'
+      - 'hardware.memory.used'
+      - 'hardware.memory.swap.total'
+      - 'hardware.memory.swap.avail'
+      - 'hardware.memory.buffer'
+      - 'hardware.memory.cached'
+      - 'hardware.network.ip.outgoing.datagrams'
+      - 'hardware.network.ip.incoming.datagrams'
+      - 'hardware.system_stats.cpu.idle'
+      - 'hardware.system_stats.io.outgoing.blocks'
+      - 'hardware.system_stats.io.incoming.blocks'
+    attributes:
+      host_name: resource_metadata.resource_url
+
+  - resource_type: host_disk
+    metrics:
+      - 'hardware.disk.size.total'
+      - 'hardware.disk.size.used'
+    attributes:
+      host_name: resource_metadata.resource_url
+      device_name: resource_metadata.device
+
+  - resource_type: host_network_interface
+    metrics:
+      - 'hardware.network.incoming.bytes'
+      - 'hardware.network.outgoing.bytes'
+      - 'hardware.network.outgoing.errors'
+    attributes:
+      host_name: resource_metadata.resource_url
+      device_name: resource_metadata.name
+
+  - resource_type: nova_compute
+    metrics:
+      - 'compute.node.cpu.frequency'
+      - 'compute.node.cpu.idle.percent'
+      - 'compute.node.cpu.idle.time'
+      - 'compute.node.cpu.iowait.percent'
+      - 'compute.node.cpu.iowait.time'
+      - 'compute.node.cpu.kernel.percent'
+      - 'compute.node.cpu.kernel.time'
+      - 'compute.node.cpu.percent'
+      - 'compute.node.cpu.user.percent'
+      - 'compute.node.cpu.user.time'
+    attributes:
+      host_name: resource_metadata.host
+
+  - resource_type: manila_share
+    metrics:
+      - 'manila.share.size'
+    attributes:
+      name: resource_metadata.name
+      host: resource_metadata.host
+      status: resource_metadata.status
+      availability_zone: resource_metadata.availability_zone
+      protocol: resource_metadata.protocol
+
+  - resource_type: switch
+    metrics:
+      - 'switch'
+      - 'switch.ports'
+    attributes:
+      controller: resource_metadata.controller
+
+  - resource_type: switch_port
+    metrics:
+      - 'switch.port'
+      - 'switch.port.uptime'
+      - 'switch.port.receive.packets'
+      - 'switch.port.transmit.packets'
+      - 'switch.port.receive.bytes'
+      - 'switch.port.transmit.bytes'
+      - 'switch.port.receive.drops'
+      - 'switch.port.transmit.drops'
+      - 'switch.port.receive.errors'
+      - 'switch.port.transmit.errors'
+      - 'switch.port.receive.frame_error'
+      - 'switch.port.receive.overrun_error'
+      - 'switch.port.receive.crc_error'
+      - 'switch.port.collision.count'
+    attributes:
+      switch: resource_metadata.switch
+      port_number_on_switch: resource_metadata.port_number_on_switch
+      neutron_port_id: resource_metadata.neutron_port_id
+      controller: resource_metadata.controller
+
+  - resource_type: port
+    metrics:
+      - 'port'
+      - 'port.uptime'
+      - 'port.receive.packets'
+      - 'port.transmit.packets'
+      - 'port.receive.bytes'
+      - 'port.transmit.bytes'
+      - 'port.receive.drops'
+      - 'port.receive.errors'
+    attributes:
+      controller: resource_metadata.controller
+
+  - resource_type: switch_table
+    metrics:
+      - 'switch.table.active.entries'
+    attributes:
+      controller: resource_metadata.controller
+      switch: resource_metadata.switch
diff --git a/ceilometer/files/queens/pipeline.yaml b/ceilometer/files/queens/pipeline.yaml
new file mode 100644
index 0000000..77512de
--- /dev/null
+++ b/ceilometer/files/queens/pipeline.yaml
@@ -0,0 +1,137 @@
+{%- from "ceilometer/map.jinja" import server with context %}
+{%- from "ceilometer/map.jinja" import agent with context %}
+{%- if server.get('enabled', False) %}
+{%- set publisher = server.publisher %}
+{%- else %}
+{%- set publisher = agent.publisher %}
+{%- endif %}
+{%- set publisher_lst = [] %}
+{%- for k, v in publisher.items() %}
+{%- if v.get('enabled', False) and v.get('publish_metric', False) %}
+{%- if k == 'default' %}
+{%- do publisher_lst.append( '- notifier://' ) %}
+{%- elif k == 'graphite' %}
+{%- do publisher_lst.append( '- graphite://' + v.host + ':' + v.port|string ) %}
+{%- else %}
+{%- do publisher_lst.append( '- ' + v.url ) %}
+{%- endif %}
+{%- endif %}
+{%- endfor %}
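+{#-
+  Illustrative pillar data for the publisher selection above (the keys
+  shown are examples, not an exhaustive list):
+
+  ceilometer:
+    server:
+      enabled: true
+      publisher:
+        default:
+          enabled: true
+          publish_metric: true
+        graphite:
+          enabled: true
+          publish_metric: true
+          host: 127.0.0.1
+          port: 2003
+
+  with which publisher_lst renders as:
+    ['- notifier://', '- graphite://127.0.0.1:2003']
+-#}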
+---
+sources:
+    - name: meter_source
+      meters:
+          - "*"
+      sinks:
+          - meter_sink
+    - name: cpu_source
+      meters:
+          - "cpu"
+      sinks:
+          - cpu_sink
+          - cpu_delta_sink
+    - name: disk_source
+      meters:
+          - "disk.read.bytes"
+          - "disk.read.requests"
+          - "disk.write.bytes"
+          - "disk.write.requests"
+          - "disk.device.read.bytes"
+          - "disk.device.read.requests"
+          - "disk.device.write.bytes"
+          - "disk.device.write.requests"
+      sinks:
+          - disk_sink
+    - name: network_source
+      meters:
+          - "network.incoming.bytes"
+          - "network.incoming.packets"
+          - "network.outgoing.bytes"
+          - "network.outgoing.packets"
+      sinks:
+          - network_sink
+sinks:
+    - name: meter_sink
+      transformers:
+      publishers:
+          {{ publisher_lst|join('\n          ') }}
+    - name: cpu_sink
+      transformers:
+          - name: "rate_of_change"
+            parameters:
+                target:
+                    name: "cpu_util"
+                    unit: "%"
+                    type: "gauge"
+                    max: 100
+                    scale: "100.0 / (10**9 * (resource_metadata.cpu_number or 1))"
+      publishers:
+          {{ publisher_lst|join('\n          ') }}
+    - name: cpu_delta_sink
+      transformers:
+          - name: "delta"
+            parameters:
+                target:
+                    name: "cpu.delta"
+                growth_only: True
+      publishers:
+          {{ publisher_lst|join('\n          ') }}
+    - name: disk_sink
+      transformers:
+          - name: "rate_of_change"
+            parameters:
+                source:
+                    map_from:
+                        name: "(disk\\.device|disk)\\.(read|write)\\.(bytes|requests)"
+                        unit: "(B|request)"
+                target:
+                    map_to:
+                        name: "\\1.\\2.\\3.rate"
+                        unit: "\\1/s"
+                    type: "gauge"
+      publishers:
+          {{ publisher_lst|join('\n          ') }}
+    - name: network_sink
+      transformers:
+          - name: "rate_of_change"
+            parameters:
+                source:
+                   map_from:
+                       name: "network\\.(incoming|outgoing)\\.(bytes|packets)"
+                       unit: "(B|packet)"
+                target:
+                    map_to:
+                        name: "network.\\1.\\2.rate"
+                        unit: "\\1/s"
+                    type: "gauge"
+      publishers:
+          {{ publisher_lst|join('\n          ') }}
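+{#-
+  Illustrative behaviour of the transformers above:
+  - cpu_sink: the cumulative 'cpu' meter (ns of CPU time) becomes a
+    'cpu_util' gauge in percent; e.g. on a 1-vCPU instance, 6*10**9 ns
+    consumed over a 60 s interval yields cpu_util = 10.0
+  - network_sink: 'network.incoming.bytes' (unit B) is republished as
+    'network.incoming.bytes.rate' (unit B/s), i.e. the delta between two
+    consecutive samples divided by the time between them
+-#}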
diff --git a/ceilometer/files/queens/polling.yaml b/ceilometer/files/queens/polling.yaml
new file mode 100644
index 0000000..48a51c4
--- /dev/null
+++ b/ceilometer/files/queens/polling.yaml
@@ -0,0 +1,26 @@
+{%- from "ceilometer/map.jinja" import agent with context %}
+---
+sources:
+{%- for source_name, source in agent.polling.sources.items() %}
+    - name: {{ source_name }}
+      interval: {{ source.get('interval', 300) }}
+      meters:
+        {%- for meter in source.meters %}
+        - "{{ meter }}"
+        {%- endfor %}
+{%- endfor %}
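+{#-
+  Illustrative pillar data for the loop above:
+
+  ceilometer:
+    agent:
+      polling:
+        sources:
+          default_source:
+            interval: 600
+            meters:
+              - "*"
+
+  which renders one source named 'default_source' polling all meters
+  every 600 seconds.
+-#}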
diff --git a/ceilometer/map.jinja b/ceilometer/map.jinja
index 40f1162..d522bd0 100644
--- a/ceilometer/map.jinja
+++ b/ceilometer/map.jinja
@@ -10,6 +10,9 @@
     'Debian': {
         'pkgs': ['ceilometer-agent-compute'],
         'services': ['ceilometer-agent-compute'],
+        'notification': {
+          'topics': 'notifications'
+        },
         'logging': {
           'log_appender': false,
           'log_handlers': {
@@ -22,6 +25,9 @@
     'RedHat': {
         'pkgs': ['openstack-ceilometer-compute'],
         'services': ['openstack-ceilometer-compute'],
+        'notification': {
+          'topics': 'notifications'
+        },
         'logging': {
           'log_appender': false,
           'log_handlers': {
@@ -68,6 +74,9 @@
 {%- set server = salt['grains.filter_by']({
     'BaseDefaults': default_params,
     'default': {
+        'notification': {
+          'topics': 'notifications'
+        },
         'logging': {
           'log_appender': false,
           'log_handlers': {
diff --git a/ceilometer/server.sls b/ceilometer/server.sls
index 3001d05..8f478ee 100644
--- a/ceilometer/server.sls
+++ b/ceilometer/server.sls
@@ -52,7 +52,7 @@
         values: {{ server }}
     - watch_in:
       - service: ceilometer_server_services
-{%- if server.version not in ['liberty', 'juno', 'kilo', 'mitaka'] %}
+{%- if server.version in ['newton', 'ocata', 'pike'] %}
       - service: ceilometer_apache_restart
 {%- endif %}
 
@@ -62,7 +62,7 @@
     - group: ceilometer
     - watch_in:
       - service: ceilometer_server_services
-{%- if server.version not in ['liberty', 'juno', 'kilo', 'mitaka'] %}
+{%- if server.version in ['newton', 'ocata', 'pike'] %}
       - service: ceilometer_apache_restart
 {%- endif %}
 
@@ -140,7 +140,7 @@
 
 ceilometer_upgrade:
   cmd.run:
-    - name: ceilometer-upgrade --skip-metering-database
+    - name: ceilometer-upgrade
     {%- if grains.get('noservices') %}
     - onlyif: /bin/false
     {%- endif %}
@@ -191,7 +191,7 @@
 {%- endif %}
 
-# for Newton and newer
+# for Newton, Ocata and Pike only (the standalone ceilometer API was dropped in Queens)
-{%- if server.version not in ['liberty', 'juno', 'kilo', 'mitaka'] %}
+{%- if server.version in ['newton', 'ocata', 'pike'] %}
 
 ceilometer_api_apache_config:
   file.managed:
diff --git a/metadata/service/server/publisher/gnocchi.yml b/metadata/service/server/publisher/gnocchi.yml
index 4850143..4f091fd 100644
--- a/metadata/service/server/publisher/gnocchi.yml
+++ b/metadata/service/server/publisher/gnocchi.yml
@@ -8,8 +8,12 @@
       publisher:
         gnocchi:
           enabled: true
-          url: gnocchi://
+          url: gnocchi://?archive_policy=${_param:ceilometer_gnocchi_archive_policy}&filter_project=${_param:ceilometer_gnocchi_filter_project}
           publish_metric: true
+          # archive_policy and filter_project options are deprecated since Queens and are instead carried as query parameters in the publisher url above
           archive_policy: ${_param:ceilometer_gnocchi_archive_policy}
           filter_project: ${_param:ceilometer_gnocchi_filter_project}
           create_resources: ${_param:ceilometer_create_gnocchi_resources}
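+          # for illustration: with ceilometer_gnocchi_archive_policy=low and
+          # ceilometer_gnocchi_filter_project=gnocchi, the url above resolves
+          # to gnocchi://?archive_policy=low&filter_project=gnocchi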