Merge "Take into account actual compute.(ceph|lvm).ephemeral value"
diff --git a/README.rst b/README.rst
index bf4cbe2..c5606e8 100644
--- a/README.rst
+++ b/README.rst
@@ -714,6 +714,26 @@
     compute:
       max_concurrent_live_migrations: 1  # (1 is the default)
 
+Live migration with auto converge
+----------------------------------
+
+Auto converge throttles down the CPU if progress of an on-going live migration is slow.
+https://docs.openstack.org/ocata/config-reference/compute/config-options.html
+
+.. code-block:: yaml
+
+  nova:
+    compute:
+      libvirt:
+        live_migration_permit_auto_converge: False  # (False is the default)
+
+.. code-block:: yaml
+
+  nova:
+    controller:
+      libvirt:
+        live_migration_permit_auto_converge: False  # (False is the default)
+
 Enhanced logging with logging.conf
 ----------------------------------
 
@@ -772,6 +792,95 @@
           <logger_name>:
             level: WARNING
 
+Configure syslog parameters for libvirtd
+----------------------------------------
+
+To configure syslog parameters for libvirtd, use the below pillar structure with values that are supported
+by libvirtd. The supported values are described in the libvirtd documentation.
+
+ nova:
+   compute:
+     libvirt:
+       logging:
+         level: 3
+         filters: '3:remote 4:event'
+         outputs: '3:syslog:libvirtd'
+         buffer_size: 64
+
+#################################################################
+#
+# Logging controls
+#
+
+# Logging level: 4 errors, 3 warnings, 2 information, 1 debug
+# basically 1 will log everything possible
+#log_level = 3
+
+# Logging filters:
+# A filter allows to select a different logging level for a given category
+# of logs
+# The format for a filter is one of:
+#    x:name
+#    x:+name
+#      where name is a string which is matched against source file name,
+#      e.g., "remote", "qemu", or "util/json", the optional "+" prefix
+#      tells libvirt to log stack trace for each message matching name,
+#      and x is the minimal level where matching messages should be logged:
+#    1: DEBUG
+#    2: INFO
+#    3: WARNING
+#    4: ERROR
+#
+# Multiple filter can be defined in a single @filters, they just need to be
+# separated by spaces.
+#
+# e.g. to only get warning or errors from the remote layer and only errors
+# from the event layer:
+#log_filters="3:remote 4:event"
+
+# Logging outputs:
+# An output is one of the places to save logging information
+# The format for an output can be:
+#    x:stderr
+#      output goes to stderr
+#    x:syslog:name
+#      use syslog for the output and use the given name as the ident
+#    x:file:file_path
+#      output to a file, with the given filepath
+# In all case the x prefix is the minimal level, acting as a filter
+#    1: DEBUG
+#    2: INFO
+#    3: WARNING
+#    4: ERROR
+#
+# Multiple output can be defined, they just need to be separated by spaces.
+# e.g. to log all warnings and errors to syslog under the libvirtd ident:
+#log_outputs="3:syslog:libvirtd"
+#
+
+# Log debug buffer size: default 64
+# The daemon keeps an internal debug log buffer which will be dumped in case
+# of crash or upon receiving a SIGUSR2 signal. This setting allows to override
+# the default buffer size in kilobytes.
+# If value is 0 or less the debug log buffer is deactivated
+#log_buffer_size = 64
+
+To configure logging parameters for qemu and virtlogd, the below pillar structure should be used:
+
+ nova:
+   compute:
+     qemu:
+       logging:
+         handler: logd
+     virtlog:
+       enabled: true
+       level: 4
+       filters: '3:remote 3:event'
+       outputs: '4:syslog:virtlogd'
+       max_clients: 512
+       max_size: 2097100
+       max_backups: 2
+
 Inject password to VM
 ---------------------
 
diff --git a/nova/compute.sls b/nova/compute.sls
index 0946d2e..33d30de 100644
--- a/nova/compute.sls
+++ b/nova/compute.sls
@@ -338,6 +338,26 @@
   - require:
     - pkg: nova_compute_packages
 
+{%- if compute.get('virtlog',{}).get('enabled', false) %}
+
+/etc/libvirt/virtlogd.conf:
+  file.managed:
+  - source: salt://nova/files/{{ compute.version }}/virtlogd.conf.{{ grains.os_family }}
+  - template: jinja
+  - require:
+    - pkg: nova_compute_packages
+
+/usr/sbin/virtlogd:
+  service.running:
+  - name: virtlogd
+  - enable: true
+  {%- if grains.get('noservices') %}
+  - onlyif: /bin/false
+  {%- endif %}
+  - watch:
+    - file: /etc/libvirt/virtlogd.conf
+{%- endif %}
+
 virsh net-undefine default:
   cmd.run:
   - name: "virsh net-destroy default"
diff --git a/nova/files/mitaka/libvirtd.conf.Debian b/nova/files/mitaka/libvirtd.conf.Debian
index 4fab737..607cbcb 100644
--- a/nova/files/mitaka/libvirtd.conf.Debian
+++ b/nova/files/mitaka/libvirtd.conf.Debian
@@ -295,7 +295,9 @@
 # Logging level: 4 errors, 3 warnings, 2 information, 1 debug
 # basically 1 will log everything possible
 #log_level = 3
-
+{%- if compute.libvirt.get('logging',{}).level is defined %}
+log_level = {{ compute.libvirt.logging.level }}
+{%- endif %}
 # Logging filters:
 # A filter allows to select a different logging level for a given category
 # of logs
@@ -317,7 +319,9 @@
 # e.g. to only get warning or errors from the remote layer and only errors
 # from the event layer:
 #log_filters="3:remote 4:event"
-
+{%- if compute.libvirt.get('logging',{}).filters is defined %}
+log_filters={{ compute.libvirt.logging.filters|yaml_dquote }}
+{%- endif %}
 # Logging outputs:
 # An output is one of the places to save logging information
 # The format for an output can be:
@@ -336,16 +340,18 @@
 # Multiple output can be defined, they just need to be separated by spaces.
 # e.g. to log all warnings and errors to syslog under the libvirtd ident:
 #log_outputs="3:syslog:libvirtd"
-#
-
+{%- if compute.libvirt.get('logging',{}).outputs is defined %}
+log_outputs={{ compute.libvirt.logging.outputs|yaml_dquote }}
+{%- endif %}
 # Log debug buffer size: default 64
 # The daemon keeps an internal debug log buffer which will be dumped in case
 # of crash or upon receiving a SIGUSR2 signal. This setting allows to override
 # the default buffer size in kilobytes.
 # If value is 0 or less the debug log buffer is deactivated
 #log_buffer_size = 64
-
-
+{%- if compute.libvirt.get('logging',{}).buffer_size is defined %}
+log_buffer_size = {{ compute.libvirt.logging.buffer_size }}
+{%- endif %}
 ##################################################################
 #
 # Auditing
@@ -400,4 +406,4 @@
 # If set to 1, libvirtd will refuse to talk to clients that do not
 # support keepalive protocol.  Defaults to 0.
 #
-#keepalive_required = 1
\ No newline at end of file
+#keepalive_required = 1
diff --git a/nova/files/mitaka/qemu.conf.Debian b/nova/files/mitaka/qemu.conf.Debian
index 5b039c4..a338e7d 100644
--- a/nova/files/mitaka/qemu.conf.Debian
+++ b/nova/files/mitaka/qemu.conf.Debian
@@ -484,3 +484,21 @@
     "/dev/vfio/vfio",
     {% endif %}
 ]
+
+# The backend to use for handling stdout/stderr output from
+# QEMU processes.
+#
+#  'file': QEMU writes directly to a plain file. This is the
+#          historical default, but allows QEMU to inflict a
+#          denial of service attack on the host by exhausting
+#          filesystem space
+#
+#  'logd': QEMU writes to a pipe provided by virtlogd daemon.
+#          This is the current default, providing protection
+#          against denial of service by performing log file
+#          rollover when a size limit is hit.
+#
+#stdio_handler = "logd"
+{%- if compute.get('qemu',{}).get('logging',{}).handler is defined %}
+stdio_handler = "{{ compute.qemu.logging.handler }}"
+{%- endif %}
diff --git a/nova/files/mitaka/virtlogd.conf.Debian b/nova/files/mitaka/virtlogd.conf.Debian
new file mode 100644
index 0000000..ecb5b2e
--- /dev/null
+++ b/nova/files/mitaka/virtlogd.conf.Debian
@@ -0,0 +1,78 @@
+{%- from "nova/map.jinja" import compute with context %}
+# Master virtlogd daemon configuration file
+#
+
+#################################################################
+#
+# Logging controls
+#
+
+# Logging level: 4 errors, 3 warnings, 2 information, 1 debug
+# basically 1 will log everything possible
+{%- if compute.get('virtlog',{}).level is defined %}
+log_level = {{ compute.virtlog.level }}
+{%- endif %}
+# Logging filters:
+# A filter allows to select a different logging level for a given category
+# of logs
+# The format for a filter is one of:
+#    x:name
+#    x:+name
+#      where name is a string which is matched against source file name,
+#      e.g., "remote", "qemu", or "util/json", the optional "+" prefix
+#      tells libvirt to log stack trace for each message matching name,
+#      and x is the minimal level where matching messages should be logged:
+#    1: DEBUG
+#    2: INFO
+#    3: WARNING
+#    4: ERROR
+#
+# Multiple filter can be defined in a single @filters, they just need to be
+# separated by spaces.
+#
+# e.g. to only get warning or errors from the remote layer and only errors
+# from the event layer:
+#log_filters="3:remote 4:event"
+{%- if compute.get('virtlog',{}).filters is defined %}
+log_filters={{ compute.virtlog.get('filters')|yaml_dquote }}
+{%- endif %}
+# Logging outputs:
+# An output is one of the places to save logging information
+# The format for an output can be:
+#    x:stderr
+#      output goes to stderr
+#    x:syslog:name
+#      use syslog for the output and use the given name as the ident
+#    x:file:file_path
+#      output to a file, with the given filepath
+#    x:journald
+#      output to the systemd journal
+# In all case the x prefix is the minimal level, acting as a filter
+#    1: DEBUG
+#    2: INFO
+#    3: WARNING
+#    4: ERROR
+#
+# Multiple output can be defined, they just need to be separated by spaces.
+# e.g. to log all warnings and errors to syslog under the virtlogd ident:
+#log_outputs="3:syslog:virtlogd"
+{%- if compute.get('virtlog',{}).outputs is defined %}
+log_outputs={{ compute.virtlog.get('outputs')|yaml_dquote }}
+{%- endif %}
+# The maximum number of concurrent client connections to allow
+# over all sockets combined.
+#max_clients = 1024
+{%- if compute.get('virtlog',{}).max_clients is defined %}
+max_clients = {{ compute.virtlog.max_clients }}
+{%- endif %}
+# Maximum file size before rolling over. Defaults to 2 MB
+#max_size = 2097152
+{%- if compute.get('virtlog',{}).max_size is defined %}
+max_size = {{ compute.virtlog.max_size }}
+{%- endif %}
+# Maximum number of backup files to keep. Defaults to 3,
+# not including the primary active file
+#max_backups = 3
+{%- if compute.get('virtlog',{}).max_backups is defined %}
+max_backups = {{ compute.virtlog.max_backups }}
+{%- endif %}
diff --git a/nova/files/mitaka/virtlogd.conf.RedHat b/nova/files/mitaka/virtlogd.conf.RedHat
new file mode 120000
index 0000000..d466306
--- /dev/null
+++ b/nova/files/mitaka/virtlogd.conf.RedHat
@@ -0,0 +1 @@
+virtlogd.conf.Debian
\ No newline at end of file
diff --git a/nova/files/newton/libvirtd.conf.Debian b/nova/files/newton/libvirtd.conf.Debian
index 4fab737..607cbcb 100644
--- a/nova/files/newton/libvirtd.conf.Debian
+++ b/nova/files/newton/libvirtd.conf.Debian
@@ -295,7 +295,9 @@
 # Logging level: 4 errors, 3 warnings, 2 information, 1 debug
 # basically 1 will log everything possible
 #log_level = 3
-
+{%- if compute.libvirt.get('logging',{}).level is defined %}
+log_level = {{ compute.libvirt.logging.level }}
+{%- endif %}
 # Logging filters:
 # A filter allows to select a different logging level for a given category
 # of logs
@@ -317,7 +319,9 @@
 # e.g. to only get warning or errors from the remote layer and only errors
 # from the event layer:
 #log_filters="3:remote 4:event"
-
+{%- if compute.libvirt.get('logging',{}).filters is defined %}
+log_filters={{ compute.libvirt.logging.filters|yaml_dquote }}
+{%- endif %}
 # Logging outputs:
 # An output is one of the places to save logging information
 # The format for an output can be:
@@ -336,16 +340,18 @@
 # Multiple output can be defined, they just need to be separated by spaces.
 # e.g. to log all warnings and errors to syslog under the libvirtd ident:
 #log_outputs="3:syslog:libvirtd"
-#
-
+{%- if compute.libvirt.get('logging',{}).outputs is defined %}
+log_outputs={{ compute.libvirt.logging.outputs|yaml_dquote }}
+{%- endif %}
 # Log debug buffer size: default 64
 # The daemon keeps an internal debug log buffer which will be dumped in case
 # of crash or upon receiving a SIGUSR2 signal. This setting allows to override
 # the default buffer size in kilobytes.
 # If value is 0 or less the debug log buffer is deactivated
 #log_buffer_size = 64
-
-
+{%- if compute.libvirt.get('logging',{}).buffer_size is defined %}
+log_buffer_size = {{ compute.libvirt.logging.buffer_size }}
+{%- endif %}
 ##################################################################
 #
 # Auditing
@@ -400,4 +406,4 @@
 # If set to 1, libvirtd will refuse to talk to clients that do not
 # support keepalive protocol.  Defaults to 0.
 #
-#keepalive_required = 1
\ No newline at end of file
+#keepalive_required = 1
diff --git a/nova/files/newton/qemu.conf.Debian b/nova/files/newton/qemu.conf.Debian
index 5b039c4..a338e7d 100644
--- a/nova/files/newton/qemu.conf.Debian
+++ b/nova/files/newton/qemu.conf.Debian
@@ -484,3 +484,21 @@
     "/dev/vfio/vfio",
     {% endif %}
 ]
+
+# The backend to use for handling stdout/stderr output from
+# QEMU processes.
+#
+#  'file': QEMU writes directly to a plain file. This is the
+#          historical default, but allows QEMU to inflict a
+#          denial of service attack on the host by exhausting
+#          filesystem space
+#
+#  'logd': QEMU writes to a pipe provided by virtlogd daemon.
+#          This is the current default, providing protection
+#          against denial of service by performing log file
+#          rollover when a size limit is hit.
+#
+#stdio_handler = "logd"
+{%- if compute.get('qemu',{}).get('logging',{}).handler is defined %}
+stdio_handler = "{{ compute.qemu.logging.handler }}"
+{%- endif %}
diff --git a/nova/files/newton/virtlogd.conf.Debian b/nova/files/newton/virtlogd.conf.Debian
new file mode 100644
index 0000000..ecb5b2e
--- /dev/null
+++ b/nova/files/newton/virtlogd.conf.Debian
@@ -0,0 +1,78 @@
+{%- from "nova/map.jinja" import compute with context %}
+# Master virtlogd daemon configuration file
+#
+
+#################################################################
+#
+# Logging controls
+#
+
+# Logging level: 4 errors, 3 warnings, 2 information, 1 debug
+# basically 1 will log everything possible
+{%- if compute.get('virtlog',{}).level is defined %}
+log_level = {{ compute.virtlog.level }}
+{%- endif %}
+# Logging filters:
+# A filter allows to select a different logging level for a given category
+# of logs
+# The format for a filter is one of:
+#    x:name
+#    x:+name
+#      where name is a string which is matched against source file name,
+#      e.g., "remote", "qemu", or "util/json", the optional "+" prefix
+#      tells libvirt to log stack trace for each message matching name,
+#      and x is the minimal level where matching messages should be logged:
+#    1: DEBUG
+#    2: INFO
+#    3: WARNING
+#    4: ERROR
+#
+# Multiple filter can be defined in a single @filters, they just need to be
+# separated by spaces.
+#
+# e.g. to only get warning or errors from the remote layer and only errors
+# from the event layer:
+#log_filters="3:remote 4:event"
+{%- if compute.get('virtlog',{}).filters is defined %}
+log_filters={{ compute.virtlog.get('filters')|yaml_dquote }}
+{%- endif %}
+# Logging outputs:
+# An output is one of the places to save logging information
+# The format for an output can be:
+#    x:stderr
+#      output goes to stderr
+#    x:syslog:name
+#      use syslog for the output and use the given name as the ident
+#    x:file:file_path
+#      output to a file, with the given filepath
+#    x:journald
+#      output to the systemd journal
+# In all case the x prefix is the minimal level, acting as a filter
+#    1: DEBUG
+#    2: INFO
+#    3: WARNING
+#    4: ERROR
+#
+# Multiple output can be defined, they just need to be separated by spaces.
+# e.g. to log all warnings and errors to syslog under the virtlogd ident:
+#log_outputs="3:syslog:virtlogd"
+{%- if compute.get('virtlog',{}).outputs is defined %}
+log_outputs={{ compute.virtlog.get('outputs')|yaml_dquote }}
+{%- endif %}
+# The maximum number of concurrent client connections to allow
+# over all sockets combined.
+#max_clients = 1024
+{%- if compute.get('virtlog',{}).max_clients is defined %}
+max_clients = {{ compute.virtlog.max_clients }}
+{%- endif %}
+# Maximum file size before rolling over. Defaults to 2 MB
+#max_size = 2097152
+{%- if compute.get('virtlog',{}).max_size is defined %}
+max_size = {{ compute.virtlog.max_size }}
+{%- endif %}
+# Maximum number of backup files to keep. Defaults to 3,
+# not including the primary active file
+#max_backups = 3
+{%- if compute.get('virtlog',{}).max_backups is defined %}
+max_backups = {{ compute.virtlog.max_backups }}
+{%- endif %}
diff --git a/nova/files/newton/virtlogd.conf.RedHat b/nova/files/newton/virtlogd.conf.RedHat
new file mode 120000
index 0000000..d466306
--- /dev/null
+++ b/nova/files/newton/virtlogd.conf.RedHat
@@ -0,0 +1 @@
+virtlogd.conf.Debian
\ No newline at end of file
diff --git a/nova/files/ocata/libvirt.RedHat b/nova/files/ocata/libvirt.RedHat
new file mode 120000
index 0000000..f8f6638
--- /dev/null
+++ b/nova/files/ocata/libvirt.RedHat
@@ -0,0 +1 @@
+libvirt.Debian
\ No newline at end of file
diff --git a/nova/files/ocata/libvirtd.conf.Debian b/nova/files/ocata/libvirtd.conf.Debian
index 4fab737..1278aea 100644
--- a/nova/files/ocata/libvirtd.conf.Debian
+++ b/nova/files/ocata/libvirtd.conf.Debian
@@ -295,7 +295,9 @@
 # Logging level: 4 errors, 3 warnings, 2 information, 1 debug
 # basically 1 will log everything possible
 #log_level = 3
-
+{%- if compute.libvirt.get('logging',{}).level is defined %}
+log_level = {{ compute.libvirt.logging.level }}
+{%- endif %}
 # Logging filters:
 # A filter allows to select a different logging level for a given category
 # of logs
@@ -317,7 +319,9 @@
 # e.g. to only get warning or errors from the remote layer and only errors
 # from the event layer:
 #log_filters="3:remote 4:event"
-
+{%- if compute.libvirt.get('logging',{}).filters is defined %}
+log_filters={{ compute.libvirt.logging.filters|yaml_dquote }}
+{%- endif %}
 # Logging outputs:
 # An output is one of the places to save logging information
 # The format for an output can be:
@@ -336,15 +340,18 @@
 # Multiple output can be defined, they just need to be separated by spaces.
 # e.g. to log all warnings and errors to syslog under the libvirtd ident:
 #log_outputs="3:syslog:libvirtd"
-#
-
+{%- if compute.libvirt.get('logging',{}).outputs is defined %}
+log_outputs={{ compute.libvirt.logging.outputs|yaml_dquote }}
+{%- endif %}
 # Log debug buffer size: default 64
 # The daemon keeps an internal debug log buffer which will be dumped in case
 # of crash or upon receiving a SIGUSR2 signal. This setting allows to override
 # the default buffer size in kilobytes.
 # If value is 0 or less the debug log buffer is deactivated
 #log_buffer_size = 64
-
+{%- if compute.libvirt.get('logging',{}).buffer_size is defined %}
+log_buffer_size = {{ compute.libvirt.logging.buffer_size }}
+{%- endif %}
 
 ##################################################################
 #
@@ -400,4 +407,4 @@
 # If set to 1, libvirtd will refuse to talk to clients that do not
 # support keepalive protocol.  Defaults to 0.
 #
-#keepalive_required = 1
\ No newline at end of file
+#keepalive_required = 1
diff --git a/nova/files/ocata/libvirtd.conf.RedHat b/nova/files/ocata/libvirtd.conf.RedHat
new file mode 120000
index 0000000..2a7b101
--- /dev/null
+++ b/nova/files/ocata/libvirtd.conf.RedHat
@@ -0,0 +1 @@
+libvirtd.conf.Debian
\ No newline at end of file
diff --git a/nova/files/ocata/nova-compute.conf.Debian b/nova/files/ocata/nova-compute.conf.Debian
index 22ee1bc..aafb8d3 100644
--- a/nova/files/ocata/nova-compute.conf.Debian
+++ b/nova/files/ocata/nova-compute.conf.Debian
@@ -6384,6 +6384,9 @@
 #     * live_migration_permit_post_copy
 #  (boolean value)
 #live_migration_permit_auto_converge=false
+{%- if compute.libvirt.live_migration_permit_auto_converge is defined %}
+live_migration_permit_auto_converge={{ compute.libvirt.live_migration_permit_auto_converge|lower }}
+{%- endif %}
 
 #
 # Determine the snapshot image format when sending to the image service.
diff --git a/nova/files/ocata/nova-controller.conf.Debian b/nova/files/ocata/nova-controller.conf.Debian
index e758573..42a7a0e 100644
--- a/nova/files/ocata/nova-controller.conf.Debian
+++ b/nova/files/ocata/nova-controller.conf.Debian
@@ -6345,6 +6345,9 @@
 #     * live_migration_permit_post_copy
 #  (boolean value)
 #live_migration_permit_auto_converge=false
+{%- if controller.get('libvirt', {}).live_migration_permit_auto_converge is defined %}
+live_migration_permit_auto_converge={{ controller.libvirt.live_migration_permit_auto_converge|lower }}
+{%- endif %}
 
 #
 # Determine the snapshot image format when sending to the image service.
diff --git a/nova/files/ocata/qemu.conf.Debian b/nova/files/ocata/qemu.conf.Debian
index 6212dda..cb20491 100644
--- a/nova/files/ocata/qemu.conf.Debian
+++ b/nova/files/ocata/qemu.conf.Debian
@@ -497,3 +497,21 @@
     "/dev/vfio/vfio",
     {% endif %}
 ]
+
+# The backend to use for handling stdout/stderr output from
+# QEMU processes.
+#
+#  'file': QEMU writes directly to a plain file. This is the
+#          historical default, but allows QEMU to inflict a
+#          denial of service attack on the host by exhausting
+#          filesystem space
+#
+#  'logd': QEMU writes to a pipe provided by virtlogd daemon.
+#          This is the current default, providing protection
+#          against denial of service by performing log file
+#          rollover when a size limit is hit.
+#
+#stdio_handler = "logd"
+{%- if compute.get('qemu',{}).get('logging',{}).handler is defined %}
+stdio_handler = "{{ compute.qemu.logging.handler }}"
+{%- endif %}
diff --git a/nova/files/ocata/qemu.conf.RedHat b/nova/files/ocata/qemu.conf.RedHat
new file mode 120000
index 0000000..1d23f19
--- /dev/null
+++ b/nova/files/ocata/qemu.conf.RedHat
@@ -0,0 +1 @@
+qemu.conf.Debian
\ No newline at end of file
diff --git a/nova/files/ocata/virtlogd.conf.Debian b/nova/files/ocata/virtlogd.conf.Debian
new file mode 100644
index 0000000..ecb5b2e
--- /dev/null
+++ b/nova/files/ocata/virtlogd.conf.Debian
@@ -0,0 +1,78 @@
+{%- from "nova/map.jinja" import compute with context %}
+# Master virtlogd daemon configuration file
+#
+
+#################################################################
+#
+# Logging controls
+#
+
+# Logging level: 4 errors, 3 warnings, 2 information, 1 debug
+# basically 1 will log everything possible
+{%- if compute.get('virtlog',{}).level is defined %}
+log_level = {{ compute.virtlog.level }}
+{%- endif %}
+# Logging filters:
+# A filter allows to select a different logging level for a given category
+# of logs
+# The format for a filter is one of:
+#    x:name
+#    x:+name
+#      where name is a string which is matched against source file name,
+#      e.g., "remote", "qemu", or "util/json", the optional "+" prefix
+#      tells libvirt to log stack trace for each message matching name,
+#      and x is the minimal level where matching messages should be logged:
+#    1: DEBUG
+#    2: INFO
+#    3: WARNING
+#    4: ERROR
+#
+# Multiple filter can be defined in a single @filters, they just need to be
+# separated by spaces.
+#
+# e.g. to only get warning or errors from the remote layer and only errors
+# from the event layer:
+#log_filters="3:remote 4:event"
+{%- if compute.get('virtlog',{}).filters is defined %}
+log_filters={{ compute.virtlog.get('filters')|yaml_dquote }}
+{%- endif %}
+# Logging outputs:
+# An output is one of the places to save logging information
+# The format for an output can be:
+#    x:stderr
+#      output goes to stderr
+#    x:syslog:name
+#      use syslog for the output and use the given name as the ident
+#    x:file:file_path
+#      output to a file, with the given filepath
+#    x:journald
+#      output to the systemd journal
+# In all case the x prefix is the minimal level, acting as a filter
+#    1: DEBUG
+#    2: INFO
+#    3: WARNING
+#    4: ERROR
+#
+# Multiple output can be defined, they just need to be separated by spaces.
+# e.g. to log all warnings and errors to syslog under the virtlogd ident:
+#log_outputs="3:syslog:virtlogd"
+{%- if compute.get('virtlog',{}).outputs is defined %}
+log_outputs={{ compute.virtlog.get('outputs')|yaml_dquote }}
+{%- endif %}
+# The maximum number of concurrent client connections to allow
+# over all sockets combined.
+#max_clients = 1024
+{%- if compute.get('virtlog',{}).max_clients is defined %}
+max_clients = {{ compute.virtlog.max_clients }}
+{%- endif %}
+# Maximum file size before rolling over. Defaults to 2 MB
+#max_size = 2097152
+{%- if compute.get('virtlog',{}).max_size is defined %}
+max_size = {{ compute.virtlog.max_size }}
+{%- endif %}
+# Maximum number of backup files to keep. Defaults to 3,
+# not including the primary active file
+#max_backups = 3
+{%- if compute.get('virtlog',{}).max_backups is defined %}
+max_backups = {{ compute.virtlog.max_backups }}
+{%- endif %}
diff --git a/nova/files/ocata/virtlogd.conf.RedHat b/nova/files/ocata/virtlogd.conf.RedHat
new file mode 120000
index 0000000..d466306
--- /dev/null
+++ b/nova/files/ocata/virtlogd.conf.RedHat
@@ -0,0 +1 @@
+virtlogd.conf.Debian
\ No newline at end of file
diff --git a/nova/files/pike/libvirt.RedHat b/nova/files/pike/libvirt.RedHat
new file mode 120000
index 0000000..f8f6638
--- /dev/null
+++ b/nova/files/pike/libvirt.RedHat
@@ -0,0 +1 @@
+libvirt.Debian
\ No newline at end of file
diff --git a/nova/files/pike/libvirtd.conf.Debian b/nova/files/pike/libvirtd.conf.Debian
index 4fab737..6f4afb9 100644
--- a/nova/files/pike/libvirtd.conf.Debian
+++ b/nova/files/pike/libvirtd.conf.Debian
@@ -295,7 +295,9 @@
 # Logging level: 4 errors, 3 warnings, 2 information, 1 debug
 # basically 1 will log everything possible
 #log_level = 3
-
+{%- if compute.libvirt.get('logging',{}).level is defined %}
+log_level = {{ compute.libvirt.logging.level }}
+{%- endif %}
 # Logging filters:
 # A filter allows to select a different logging level for a given category
 # of logs
@@ -317,6 +319,9 @@
 # e.g. to only get warning or errors from the remote layer and only errors
 # from the event layer:
 #log_filters="3:remote 4:event"
+{%- if compute.libvirt.get('logging',{}).filters is defined %}
+log_filters={{ compute.libvirt.logging.filters|yaml_dquote }}
+{%- endif %}
 
 # Logging outputs:
 # An output is one of the places to save logging information
@@ -337,6 +342,9 @@
 # e.g. to log all warnings and errors to syslog under the libvirtd ident:
 #log_outputs="3:syslog:libvirtd"
 #
+{%- if compute.libvirt.get('logging',{}).outputs is defined %}
+log_outputs={{ compute.libvirt.logging.outputs|yaml_dquote }}
+{%- endif %}
 
 # Log debug buffer size: default 64
 # The daemon keeps an internal debug log buffer which will be dumped in case
@@ -344,7 +352,9 @@
 # the default buffer size in kilobytes.
 # If value is 0 or less the debug log buffer is deactivated
 #log_buffer_size = 64
-
+{%- if compute.libvirt.get('logging',{}).buffer_size is defined %}
+log_buffer_size = {{ compute.libvirt.logging.buffer_size }}
+{%- endif %}
 
 ##################################################################
 #
@@ -400,4 +410,4 @@
 # If set to 1, libvirtd will refuse to talk to clients that do not
 # support keepalive protocol.  Defaults to 0.
 #
-#keepalive_required = 1
\ No newline at end of file
+#keepalive_required = 1
diff --git a/nova/files/pike/libvirtd.conf.RedHat b/nova/files/pike/libvirtd.conf.RedHat
new file mode 120000
index 0000000..2a7b101
--- /dev/null
+++ b/nova/files/pike/libvirtd.conf.RedHat
@@ -0,0 +1 @@
+libvirtd.conf.Debian
\ No newline at end of file
diff --git a/nova/files/pike/nova-compute.conf.Debian b/nova/files/pike/nova-compute.conf.Debian
index 39a2133..e604d72 100644
--- a/nova/files/pike/nova-compute.conf.Debian
+++ b/nova/files/pike/nova-compute.conf.Debian
@@ -6415,6 +6415,9 @@
 #     * live_migration_permit_post_copy
 #  (boolean value)
 #live_migration_permit_auto_converge=false
+{%- if compute.libvirt.live_migration_permit_auto_converge is defined %}
+live_migration_permit_auto_converge={{ compute.libvirt.live_migration_permit_auto_converge|lower }}
+{%- endif %}
 
 #
 # Determine the snapshot image format when sending to the image service.
diff --git a/nova/files/pike/nova-controller.conf.Debian b/nova/files/pike/nova-controller.conf.Debian
index b1b18d7..96e1d0f 100644
--- a/nova/files/pike/nova-controller.conf.Debian
+++ b/nova/files/pike/nova-controller.conf.Debian
@@ -6349,6 +6349,9 @@
 #     * live_migration_permit_post_copy
 #  (boolean value)
 #live_migration_permit_auto_converge=false
+{%- if controller.get('libvirt', {}).live_migration_permit_auto_converge is defined %}
+live_migration_permit_auto_converge={{ controller.libvirt.live_migration_permit_auto_converge|lower }}
+{%- endif %}
 
 #
 # Determine the snapshot image format when sending to the image service.
diff --git a/nova/files/pike/qemu.conf.Debian b/nova/files/pike/qemu.conf.Debian
index 6212dda..cb20491 100644
--- a/nova/files/pike/qemu.conf.Debian
+++ b/nova/files/pike/qemu.conf.Debian
@@ -497,3 +497,21 @@
     "/dev/vfio/vfio",
     {% endif %}
 ]
+
+# The backend to use for handling stdout/stderr output from
+# QEMU processes.
+#
+#  'file': QEMU writes directly to a plain file. This is the
+#          historical default, but allows QEMU to inflict a
+#          denial of service attack on the host by exhausting
+#          filesystem space
+#
+#  'logd': QEMU writes to a pipe provided by virtlogd daemon.
+#          This is the current default, providing protection
+#          against denial of service by performing log file
+#          rollover when a size limit is hit.
+#
+#stdio_handler = "logd"
+{%- if compute.get('qemu',{}).get('logging',{}).handler is defined %}
+stdio_handler = "{{ compute.qemu.logging.handler }}"
+{%- endif %}
diff --git a/nova/files/pike/qemu.conf.RedHat b/nova/files/pike/qemu.conf.RedHat
new file mode 120000
index 0000000..1d23f19
--- /dev/null
+++ b/nova/files/pike/qemu.conf.RedHat
@@ -0,0 +1 @@
+qemu.conf.Debian
\ No newline at end of file
diff --git a/nova/files/pike/virtlogd.conf.Debian b/nova/files/pike/virtlogd.conf.Debian
new file mode 100644
index 0000000..ecb5b2e
--- /dev/null
+++ b/nova/files/pike/virtlogd.conf.Debian
@@ -0,0 +1,78 @@
+{%- from "nova/map.jinja" import compute with context %}
+# Master virtlogd daemon configuration file
+#
+
+#################################################################
+#
+# Logging controls
+#
+
+# Logging level: 4 errors, 3 warnings, 2 information, 1 debug
+# basically 1 will log everything possible
+{%- if compute.get('virtlog',{}).level is defined %}
+log_level = {{ compute.virtlog.level }}
+{%- endif %}
+# Logging filters:
+# A filter allows to select a different logging level for a given category
+# of logs
+# The format for a filter is one of:
+#    x:name
+#    x:+name
+#      where name is a string which is matched against source file name,
+#      e.g., "remote", "qemu", or "util/json", the optional "+" prefix
+#      tells libvirt to log stack trace for each message matching name,
+#      and x is the minimal level where matching messages should be logged:
+#    1: DEBUG
+#    2: INFO
+#    3: WARNING
+#    4: ERROR
+#
+# Multiple filters can be defined in a single @filters, they just need to be
+# separated by spaces.
+#
+# e.g. to only get warning or errors from the remote layer and only errors
+# from the event layer:
+#log_filters="3:remote 4:event"
+{%- if compute.get('virtlog',{}).filters is defined %}
+log_filters={{ compute.virtlog.get('filters')|yaml_dquote }}
+{%- endif %}
+# Logging outputs:
+# An output is one of the places to save logging information
+# The format for an output can be:
+#    x:stderr
+#      output goes to stderr
+#    x:syslog:name
+#      use syslog for the output and use the given name as the ident
+#    x:file:file_path
+#      output to a file, with the given filepath
+#    x:journald
+#      output to the systemd journal
+# In all cases the x prefix is the minimal level, acting as a filter
+#    1: DEBUG
+#    2: INFO
+#    3: WARNING
+#    4: ERROR
+#
+# Multiple outputs can be defined, they just need to be separated by spaces.
+# e.g. to log all warnings and errors to syslog under the virtlogd ident:
+#log_outputs="3:syslog:virtlogd"
+{%- if compute.get('virtlog',{}).outputs is defined %}
+log_outputs={{ compute.virtlog.get('outputs')|yaml_dquote }}
+{%- endif %}
+# The maximum number of concurrent client connections to allow
+# over all sockets combined.
+#max_clients = 1024
+{%- if compute.get('virtlog',{}).max_clients is defined %}
+max_clients = {{ compute.virtlog.max_clients }}
+{%- endif %}
+# Maximum file size before rolling over. Defaults to 2 MB
+#max_size = 2097152
+{%- if compute.get('virtlog',{}).max_size is defined %}
+max_size = {{ compute.virtlog.max_size }}
+{%- endif %}
+# Maximum number of backup files to keep. Defaults to 3,
+# not including the primary active file
+#max_backups = 3
+{%- if compute.get('virtlog',{}).max_backups is defined %}
+max_backups = {{ compute.virtlog.max_backups }}
+{%- endif %}
diff --git a/nova/files/pike/virtlogd.conf.RedHat b/nova/files/pike/virtlogd.conf.RedHat
new file mode 120000
index 0000000..d466306
--- /dev/null
+++ b/nova/files/pike/virtlogd.conf.RedHat
@@ -0,0 +1 @@
+virtlogd.conf.Debian
\ No newline at end of file
diff --git a/nova/map.jinja b/nova/map.jinja
index c2334fc..4d06861 100644
--- a/nova/map.jinja
+++ b/nova/map.jinja
@@ -187,5 +187,16 @@
         'services_failed_critical_threshold_percent': 0.6,
         'computes_failed_warning_threshold_percent': 0.25,
         'computes_failed_critical_threshold_percent': 0.5,
+        'allocated_vcpus_minor_threshold': 0.9,
+        'allocated_vcpus_major_threshold': 0.97,
+        'allocated_ram_minor_threshold': 0.9,
+        'allocated_ram_major_threshold': 0.97,
+        'allocated_disk_minor_threshold': 0.9,
+        'allocated_disk_major_threshold': 0.97,
+        'ram_major_threshold': 0.85,
+        'ram_critical_threshold': 0.95,
+        'disk_major_threshold': 0.85,
+        'disk_critical_threshold': 0.95,
+        'endpoint_failed_major_threshold': 0.5,
     },
 }, grain='os_family', merge=salt['pillar.get']('nova:monitoring')) %}
diff --git a/nova/meta/prometheus.yml b/nova/meta/prometheus.yml
index 9029265..f701acd 100644
--- a/nova/meta/prometheus.yml
+++ b/nova/meta/prometheus.yml
@@ -34,6 +34,7 @@
 {%- set major_threshold = monitoring.services_failed_critical_threshold_percent|float %}
 {%- set minor_compute_threshold = monitoring.computes_failed_warning_threshold_percent|float %}
 {%- set major_compute_threshold = monitoring.computes_failed_critical_threshold_percent|float %}
+{%- set major_endpoint_threshold = monitoring.endpoint_failed_major_threshold|float %}
 {% raw %}
     NovaAPIOutage:
       if: >-
@@ -66,6 +67,29 @@
         summary: "Host nova-api endpoint is not accessible"
         description: >-
           The host nova-api endpoint on the {{ $labels.host }} node is not accessible for at least 2 minutes.
+{%- endraw %}
+    NovaAPIServiceDownMajor:
+      if: >-
+        count(http_response_status{name=~"nova-api"} == 0) >= count(http_response_status{name=~"nova-api"}) * {{ major_endpoint_threshold }}
+      for: 2m
+      labels:
+        severity: major
+        service: nova
+      annotations:
+        summary: "{{major_endpoint_threshold * 100}}% of host nova-api endpoints are not accessible"
+        description: >-
+          {% raw %}{{ $value }} host nova-api endpoints are not accessible for at least 2 minutes (at least {% endraw %}{{major_endpoint_threshold * 100}}{% raw %}%).
+    NovaAPIServiceOutage:
+      if: >-
+        count(http_response_status{name=~"nova-api"} == 0) == count(http_response_status{name=~"nova-api"})
+      for: 2m
+      labels:
+        severity: critical
+        service: nova
+      annotations:
+        summary: "Host nova-api outage"
+        description: >-
+          All available host nova-api endpoints are not accessible for at least 2 minutes.
     NovaServiceDown:
       if: >-
         openstack_nova_service_state == 0
@@ -79,7 +103,7 @@
 {%- endraw %}
     NovaServicesDownMinor:
       if: >-
-        count(openstack_nova_service_state{binary!~"nova-compute"} == 0) by (binary) >= on (binary) count(openstack_nova_service_state{binary!~"nova-compute"}) by (binary) * {{minor_threshold}} and count(openstack_nova_service_state{binary!~"nova-compute"} == 0) by (binary) < on (binary) count(openstack_nova_service_state{binary!~"nova-compute"}) by (binary) * {{major_threshold}}
+        count(openstack_nova_service_state{binary!~"nova-compute"} == 0) by (binary) >= on (binary) count(openstack_nova_service_state{binary!~"nova-compute"}) by (binary) * {{minor_threshold}}
       labels:
         severity: minor
         service: nova
@@ -89,7 +113,7 @@
           {{ $value }} {{ $labels.binary }} services are down {%- endraw %}(at least {{minor_threshold * 100}}%).
     NovaComputeServicesDownMinor:
       if: >-
-        count(openstack_nova_service_state{binary="nova-compute"} == 0) >= count(openstack_nova_service_state{binary="nova-compute"}) * {{minor_compute_threshold}} and count(openstack_nova_service_state{binary="nova-compute"} == 0) < count(openstack_nova_service_state{binary="nova-compute"}) * {{major_compute_threshold}}
+        count(openstack_nova_service_state{binary="nova-compute"} == 0) >= count(openstack_nova_service_state{binary="nova-compute"}) * {{minor_compute_threshold}}
       labels:
         severity: minor
         service: nova
@@ -127,99 +151,375 @@
         summary: "{{ $labels.binary }} service outage"
         description: >-
           All {{ $labels.binary }} services are down.
-    NovaTotalFreeVCPUsLow:
+{%- endraw -%}
+
+{%- set cpu_ratio = controller.get('cpu_allocation_ratio', 16.0) %}
+{%- set ram_ratio = controller.get('ram_allocation_ratio', 1.5) %}
+{%- set disk_ratio = controller.get('disk_allocation_ratio', 1.0) -%}
+
+{%- set alloc_cpu_minor_threshold = monitoring.allocated_vcpus_minor_threshold|float %}
+{%- set alloc_cpu_major_threshold = monitoring.allocated_vcpus_major_threshold|float %}
+{%- set alloc_ram_minor_threshold = monitoring.allocated_ram_minor_threshold|float %}
+{%- set alloc_ram_major_threshold = monitoring.allocated_ram_major_threshold|float %}
+{%- set alloc_disk_minor_threshold = monitoring.allocated_disk_minor_threshold|float %}
+{%- set alloc_disk_major_threshold = monitoring.allocated_disk_major_threshold|float -%}
+
+{%- set ram_major_threshold = monitoring.ram_major_threshold|float %}
+{%- set ram_critical_threshold = monitoring.ram_critical_threshold|float %}
+{%- set disk_major_threshold = monitoring.disk_major_threshold|float %}
+{%- set disk_critical_threshold = monitoring.disk_critical_threshold|float -%}
+
+    NovaHypervisorAllocatedVCPUsFullMinor:
       if: >-
-        (100.0 * openstack_nova_total_free_vcpus) / (openstack_nova_total_free_vcpus + openstack_nova_total_used_vcpus) < 10.0
-      for: 1m
+        openstack_nova_used_vcpus >= openstack_nova_vcpus * {{ cpu_ratio }} * {{ alloc_cpu_minor_threshold }}
       labels:
-        severity: warning
+        severity: minor
         service: nova
       annotations:
-        summary: "VCPU low limit for new instances"
-        description: >-
-            VPCU low limit for 1 minutes
-    NovaTotalFreeMemoryLow:
+        summary: "{{ alloc_cpu_minor_threshold * 100 }}% of hypervisor VCPUs were allocated"
+        description: "{% raw %}{{ $value }} VCPUs on the {{ $labels.hostname }}{% endraw %} node were allocated (at least {{ alloc_cpu_minor_threshold * 100 }}%)."
+    NovaHypervisorAllocatedVCPUsFullMajor:
       if: >-
-        (100.0 * openstack_nova_total_free_ram) / (openstack_nova_total_free_ram + openstack_nova_total_used_ram) < 10.0
-      for: 1m
+        openstack_nova_used_vcpus >= openstack_nova_vcpus * {{ cpu_ratio }} * {{ alloc_cpu_major_threshold }}
       labels:
-        severity: warning
+        severity: major
         service: nova
       annotations:
-        summary: "Memory low limit for new instances"
-        description: >-
-            Memory low limit for 1 minutes
-    NovaTotalFreeVCPUsShortage:
+        summary: "{{ alloc_cpu_major_threshold * 100 }}% of hypervisor VCPUs were allocated"
+        description: "{% raw %}{{ $value }} VCPUs on the {{ $labels.hostname }}{% endraw %} node were allocated (at least {{ alloc_cpu_major_threshold * 100 }}%)."
+    NovaHypervisorAllocatedVCPUsFullCritical:
       if: >-
-        (100.0 * openstack_nova_total_free_vcpus) / (openstack_nova_total_free_vcpus + openstack_nova_total_used_vcpus) < 2.0
-      for: 1m
+        openstack_nova_used_vcpus >= openstack_nova_vcpus * {{ cpu_ratio }}
       labels:
         severity: critical
         service: nova
       annotations:
-        summary: "VCPU shortage for new instances"
-        description: >-
-            VPCU shortage for 1 minutes
-    NovaTotalFreeMemoryShortage:
+        summary: "No VCPUs available for allocation"
+        description: "All available VCPUs on the {% raw %}{{ $labels.hostname }}{% endraw %} node were allocated."
+    NovaHypervisorAllocatedMemoryFullMinor:
       if: >-
-        (100.0 * openstack_nova_total_free_ram) / (openstack_nova_total_free_ram + openstack_nova_total_used_ram) < 2.0
-      for: 1m
+        openstack_nova_ram - openstack_nova_free_ram >= openstack_nova_ram * {{ ram_ratio }} * {{ alloc_ram_minor_threshold }}
+      labels:
+        severity: minor
+        service: nova
+      annotations:
+        summary: "{{ alloc_ram_minor_threshold * 100 }}% of hypervisor RAM was allocated"
+        description: "{% raw %}{{ $value }}MB of RAM on the {{ $labels.hostname }}{% endraw %} node was allocated (at least {{ alloc_ram_minor_threshold * 100 }}%)."
+    NovaHypervisorAllocatedMemoryFullMajor:
+      if: >-
+        openstack_nova_ram - openstack_nova_free_ram >= openstack_nova_ram * {{ ram_ratio }} * {{ alloc_ram_major_threshold }}
+      labels:
+        severity: major
+        service: nova
+      annotations:
+        summary: "{{ alloc_ram_major_threshold * 100 }}% of hypervisor RAM was allocated"
+        description: "{% raw %}{{ $value }}MB of RAM on the {{ $labels.hostname }}{% endraw %} node was allocated (at least {{ alloc_ram_major_threshold * 100 }}%)."
+    NovaHypervisorAllocatedMemoryFullCritical:
+      if: >-
+        openstack_nova_ram - openstack_nova_free_ram >= openstack_nova_ram * {{ ram_ratio }}
       labels:
         severity: critical
         service: nova
       annotations:
-        summary: "Memory shortage for new instances"
-        description: >-
-            Memory shortage for 1 minutes
-    NovaAggregatesFreeVCPUsLow:
+        summary: "No RAM available for allocation"
+        description: "All available RAM on the {% raw %}{{ $labels.hostname }}{% endraw %} node was allocated."
+    NovaHypervisorMemoryFullMajor:
       if: >-
-        (100.0 * openstack_nova_aggregate_free_vcpus) / (openstack_nova_aggregate_free_vcpus + openstack_nova_aggregate_used_vcpus) < 10.0
-      for: 1m
+        openstack_nova_used_ram >= openstack_nova_ram * {{ ram_major_threshold }}
       labels:
-        severity: warning
+        severity: major
         service: nova
-        aggregate: "{{ $labels.aggregate }}"
       annotations:
-        summary: "VCPU low limit for new instances on aggregate {{ $labels.aggregate }}"
-        description: >-
-            VPCU low limit for 1 minutes on aggregate {{ $labels.aggregate }}
-    NovaAggregatesFreeMemoryLow:
+        summary: "{{ ram_major_threshold * 100 }}% of hypervisor RAM was used"
+        description: "{% raw %}{{ $value }}MB of RAM on the {{ $labels.hostname }}{% endraw %} node was used (at least {{ ram_major_threshold * 100 }}%)."
+    NovaHypervisorMemoryFullCritical:
       if: >-
-        (100.0 * openstack_nova_aggregate_free_ram) / (openstack_nova_aggregate_free_ram + openstack_nova_aggregate_used_ram) < 10.0
-      for: 1m
-      labels:
-        severity: warning
-        service: nova
-        aggregate: "{{ $labels.aggregate }}"
-      annotations:
-        summary: "Memory low limit for new instances on aggregate {{ $labels.aggregate }}"
-        description: >-
-            Memory low limit for 1 minutes on aggregate {{ $labels.aggregate }}
-    NovaAggregatesFreeVCPUsShortage:
-      if: >-
-        (100.0 * openstack_nova_aggregate_free_vcpus) / (openstack_nova_aggregate_free_vcpus + openstack_nova_aggregate_used_vcpus) < 2.0
-      for: 1m
+        openstack_nova_used_ram >= openstack_nova_ram * {{ ram_critical_threshold }}
       labels:
         severity: critical
         service: nova
-        aggregate: "{{ $labels.aggregate }}"
       annotations:
-        summary: "VCPU shortage for new instances on aggregate {{ $labels.aggregate }}"
-        description: >-
-            VPCU shortage for 1 minutes on aggregate {{ $labels.aggregate }}
-    NovaAggregatesFreeMemoryShortage:
+        summary: "{{ ram_critical_threshold * 100 }}% of hypervisor RAM was used"
+        description: "{% raw %}{{ $value }}MB of RAM on the {{ $labels.hostname }}{% endraw %} node was used (at least {{ ram_critical_threshold * 100 }}%)."
+    NovaHypervisorAllocatedDiskFullMinor:
       if: >-
-        (100.0 * openstack_nova_aggregate_free_ram) / (openstack_nova_aggregate_free_ram + openstack_nova_aggregate_used_ram) < 2.0
-      for: 1m
+        openstack_nova_disk - openstack_nova_free_disk >= openstack_nova_disk * {{ disk_ratio }} * {{ alloc_disk_minor_threshold }}
+      labels:
+        severity: minor
+        service: nova
+      annotations:
+        summary: "{{ alloc_disk_minor_threshold * 100 }}% of hypervisor disk space was allocated"
+        description: "{% raw %}{{ $value }}GB of disk space on the {{ $labels.hostname }}{% endraw %} node was allocated (at least {{ alloc_disk_minor_threshold * 100 }}%)."
+    NovaHypervisorAllocatedDiskFullMajor:
+      if: >-
+        openstack_nova_disk - openstack_nova_free_disk >= openstack_nova_disk * {{ disk_ratio }} * {{ alloc_disk_major_threshold }}
+      labels:
+        severity: major
+        service: nova
+      annotations:
+        summary: "{{ alloc_disk_major_threshold * 100 }}% of hypervisor disk space was allocated"
+        description: "{% raw %}{{ $value }}GB of disk space on the {{ $labels.hostname }}{% endraw %} node was allocated (at least {{ alloc_disk_major_threshold * 100 }}%)."
+    NovaHypervisorAllocatedDiskFullCritical:
+      if: >-
+        openstack_nova_disk - openstack_nova_free_disk >= openstack_nova_disk * {{ disk_ratio }}
       labels:
         severity: critical
         service: nova
-        aggregate: "{{ $labels.aggregate }}"
       annotations:
-        summary: "Memory shortage for new instances on aggregate {{ $labels.aggregate }}"
-        description: >-
-            Memory shortage for 1 minutes on aggregate {{ $labels.aggregate }}
-{%- endraw %}
+        summary: "No disk space available for allocation"
+        description: "All available disk space on the {% raw %}{{ $labels.hostname }}{% endraw %} node was allocated."
+    NovaHypervisorDiskFullMajor:
+      if: >-
+        openstack_nova_used_disk >= openstack_nova_disk * {{ disk_major_threshold }}
+      labels:
+        severity: major
+        service: nova
+      annotations:
+        summary: "{{ disk_major_threshold * 100 }}% of hypervisor disk space was used"
+        description: "{% raw %}{{ $value }}GB of disk space on the {{ $labels.hostname }}{% endraw %} node was used (at least {{ disk_major_threshold * 100 }}%)."
+    NovaHypervisorDiskFullCritical:
+      if: >-
+        openstack_nova_used_disk >= openstack_nova_disk * {{ disk_critical_threshold }}
+      labels:
+        severity: critical
+        service: nova
+      annotations:
+        summary: "{{ disk_critical_threshold * 100 }}% of hypervisor disk space was used"
+        description: "{% raw %}{{ $value }}GB of disk space on the {{ $labels.hostname }}{% endraw %} node was used (at least {{ disk_critical_threshold * 100 }}%)."
+    NovaAggregateAllocatedVCPUsFullMinor:
+      if: >-
+        openstack_nova_aggregate_used_vcpus >= openstack_nova_aggregate_vcpus * {{ cpu_ratio }} * {{ alloc_cpu_minor_threshold }}
+      labels:
+        severity: minor
+        service: nova
+      annotations:
+        summary: "{{ alloc_cpu_minor_threshold * 100 }}% of aggregate VCPUs were allocated"
+        description: "{% raw %}{{ $value }} VCPUs on the {{ $labels.aggregate }}{% endraw %} aggregate were allocated (at least {{ alloc_cpu_minor_threshold * 100 }}%)."
+    NovaAggregateAllocatedVCPUsFullMajor:
+      if: >-
+        openstack_nova_aggregate_used_vcpus >= openstack_nova_aggregate_vcpus * {{ cpu_ratio }} * {{ alloc_cpu_major_threshold }}
+      labels:
+        severity: major
+        service: nova
+      annotations:
+        summary: "{{ alloc_cpu_major_threshold * 100 }}% of aggregate VCPUs were allocated"
+        description: "{% raw %}{{ $value }} VCPUs on the {{ $labels.aggregate }}{% endraw %} aggregate were allocated (at least {{ alloc_cpu_major_threshold * 100 }}%)."
+    NovaAggregateAllocatedVCPUsFullCritical:
+      if: >-
+        openstack_nova_aggregate_used_vcpus >= openstack_nova_aggregate_vcpus * {{ cpu_ratio }}
+      labels:
+        severity: critical
+        service: nova
+      annotations:
+        summary: "No VCPUs available for allocation"
+        description: "All available VCPUs on the {% raw %}{{ $labels.aggregate }}{% endraw %} aggregate were allocated."
+    NovaAggregateAllocatedMemoryFullMinor:
+      if: >-
+        openstack_nova_aggregate_ram - openstack_nova_aggregate_free_ram >= openstack_nova_aggregate_ram * {{ ram_ratio }} * {{ alloc_ram_minor_threshold }}
+      labels:
+        severity: minor
+        service: nova
+      annotations:
+        summary: "{{ alloc_ram_minor_threshold * 100 }}% of aggregate RAM was allocated"
+        description: "{% raw %}{{ $value }}MB of RAM on the {{ $labels.aggregate }}{% endraw %} aggregate was allocated (at least {{ alloc_ram_minor_threshold * 100 }}%)."
+    NovaAggregateAllocatedMemoryFullMajor:
+      if: >-
+        openstack_nova_aggregate_ram - openstack_nova_aggregate_free_ram >= openstack_nova_aggregate_ram * {{ ram_ratio }} * {{ alloc_ram_major_threshold }}
+      labels:
+        severity: major
+        service: nova
+      annotations:
+        summary: "{{ alloc_ram_major_threshold * 100 }}% of aggregate RAM was allocated"
+        description: "{% raw %}{{ $value }}MB of RAM on the {{ $labels.aggregate }}{% endraw %} aggregate was allocated (at least {{ alloc_ram_major_threshold * 100 }}%)."
+    NovaAggregateAllocatedMemoryFullCritical:
+      if: >-
+        openstack_nova_aggregate_ram - openstack_nova_aggregate_free_ram >= openstack_nova_aggregate_ram * {{ ram_ratio }}
+      labels:
+        severity: critical
+        service: nova
+      annotations:
+        summary: "No RAM available for allocation"
+        description: "All available RAM on the {% raw %}{{ $labels.aggregate }}{% endraw %} aggregate was allocated."
+    NovaAggregateMemoryFullMajor:
+      if: >-
+        openstack_nova_aggregate_used_ram >= openstack_nova_aggregate_ram * {{ ram_major_threshold }}
+      labels:
+        severity: major
+        service: nova
+      annotations:
+        summary: "{{ ram_major_threshold * 100 }}% of aggregate RAM was used"
+        description: "{% raw %}{{ $value }}MB of RAM on the {{ $labels.aggregate }}{% endraw %} aggregate was used (at least {{ ram_major_threshold * 100 }}%)."
+    NovaAggregateMemoryFullCritical:
+      if: >-
+        openstack_nova_aggregate_used_ram >= openstack_nova_aggregate_ram * {{ ram_critical_threshold }}
+      labels:
+        severity: critical
+        service: nova
+      annotations:
+        summary: "{{ ram_critical_threshold * 100 }}% of aggregate RAM was used"
+        description: "{% raw %}{{ $value }}MB of RAM on the {{ $labels.aggregate }}{% endraw %} aggregate was used (at least {{ ram_critical_threshold * 100 }}%)."
+    NovaAggregateAllocatedDiskFullMinor:
+      if: >-
+        openstack_nova_aggregate_disk - openstack_nova_aggregate_free_disk >= openstack_nova_aggregate_disk * {{ disk_ratio }} * {{ alloc_disk_minor_threshold }}
+      labels:
+        severity: minor
+        service: nova
+      annotations:
+        summary: "{{ alloc_disk_minor_threshold * 100 }}% of aggregate disk space was allocated"
+        description: "{% raw %}{{ $value }}GB of disk space on the {{ $labels.aggregate }}{% endraw %} aggregate was allocated (at least {{ alloc_disk_minor_threshold * 100 }}%)."
+    NovaAggregateAllocatedDiskFullMajor:
+      if: >-
+        openstack_nova_aggregate_disk - openstack_nova_aggregate_free_disk >= openstack_nova_aggregate_disk * {{ disk_ratio }} * {{ alloc_disk_major_threshold }}
+      labels:
+        severity: major
+        service: nova
+      annotations:
+        summary: "{{ alloc_disk_major_threshold * 100 }}% of aggregate disk space was allocated"
+        description: "{% raw %}{{ $value }}GB of disk space on the {{ $labels.aggregate }}{% endraw %} aggregate was allocated (at least {{ alloc_disk_major_threshold * 100 }}%)."
+    NovaAggregateAllocatedDiskFullCritical:
+      if: >-
+        openstack_nova_aggregate_disk - openstack_nova_aggregate_free_disk >= openstack_nova_aggregate_disk * {{ disk_ratio }}
+      labels:
+        severity: critical
+        service: nova
+      annotations:
+        summary: "No disk space available for allocation"
+        description: "All available disk space on the {% raw %}{{ $labels.aggregate }}{% endraw %} aggregate was allocated."
+    NovaAggregateDiskFullMajor:
+      if: >-
+        openstack_nova_aggregate_used_disk >= openstack_nova_aggregate_disk * {{ disk_major_threshold }}
+      labels:
+        severity: major
+        service: nova
+      annotations:
+        summary: "{{ disk_major_threshold * 100 }}% of aggregate disk space was used"
+        description: "{% raw %}{{ $value }}GB of disk space on the {{ $labels.aggregate }}{% endraw %} aggregate was used (at least {{ disk_major_threshold * 100 }}%)."
+    NovaAggregateDiskFullCritical:
+      if: >-
+        openstack_nova_aggregate_used_disk >= openstack_nova_aggregate_disk * {{ disk_critical_threshold }}
+      labels:
+        severity: critical
+        service: nova
+      annotations:
+        summary: "{{ disk_critical_threshold * 100 }}% of aggregate disk space was used"
+        description: "{% raw %}{{ $value }}GB of disk space on the {{ $labels.aggregate }}{% endraw %} aggregate was used (at least {{ disk_critical_threshold * 100 }}%)."
+    NovaTotalAllocatedVCPUsFullMinor:
+      if: >-
+        openstack_nova_total_used_vcpus >= openstack_nova_total_vcpus * {{ cpu_ratio }} * {{ alloc_cpu_minor_threshold }}
+      labels:
+        severity: minor
+        service: nova
+      annotations:
+        summary: "{{ alloc_cpu_minor_threshold * 100 }}% of cloud VCPUs were allocated"
+        description: "{% raw %}{{ $value }}{% endraw %} VCPUs in the cloud were allocated (at least {{ alloc_cpu_minor_threshold * 100 }}%)."
+    NovaTotalAllocatedVCPUsFullMajor:
+      if: >-
+        openstack_nova_total_used_vcpus >= openstack_nova_total_vcpus * {{ cpu_ratio }} * {{ alloc_cpu_major_threshold }}
+      labels:
+        severity: major
+        service: nova
+      annotations:
+        summary: "{{ alloc_cpu_major_threshold * 100 }}% of cloud VCPUs were allocated"
+        description: "{% raw %}{{ $value }}{% endraw %} VCPUs in the cloud were allocated (at least {{ alloc_cpu_major_threshold * 100 }}%)."
+    NovaTotalAllocatedVCPUsFullCritical:
+      if: >-
+        openstack_nova_total_used_vcpus >= openstack_nova_total_vcpus * {{ cpu_ratio }}
+      labels:
+        severity: critical
+        service: nova
+      annotations:
+        summary: "No VCPUs available for allocation"
+        description: "All available VCPUs in the cloud were allocated."
+    NovaTotalAllocatedMemoryFullMinor:
+      if: >-
+        openstack_nova_total_ram - openstack_nova_total_free_ram >= openstack_nova_total_ram * {{ ram_ratio }} * {{ alloc_ram_minor_threshold }}
+      labels:
+        severity: minor
+        service: nova
+      annotations:
+        summary: "{{ alloc_ram_minor_threshold * 100 }}% of cloud RAM was allocated"
+        description: "{% raw %}{{ $value }}MB{% endraw %} of RAM in the cloud was allocated (at least {{ alloc_ram_minor_threshold * 100 }}%)."
+    NovaTotalAllocatedMemoryFullMajor:
+      if: >-
+        openstack_nova_total_ram - openstack_nova_total_free_ram >= openstack_nova_total_ram * {{ ram_ratio }} * {{ alloc_ram_major_threshold }}
+      labels:
+        severity: major
+        service: nova
+      annotations:
+        summary: "{{ alloc_ram_major_threshold * 100 }}% of cloud RAM was allocated"
+        description: "{% raw %}{{ $value }}MB{% endraw %} of RAM in the cloud was allocated (at least {{ alloc_ram_major_threshold * 100 }}%)."
+    NovaTotalAllocatedMemoryFullCritical:
+      if: >-
+        openstack_nova_total_ram - openstack_nova_total_free_ram >= openstack_nova_total_ram * {{ ram_ratio }}
+      labels:
+        severity: critical
+        service: nova
+      annotations:
+        summary: "No RAM available for allocation"
+        description: "All available RAM in the cloud was allocated."
+    NovaTotalMemoryFullMajor:
+      if: >-
+        openstack_nova_total_used_ram >= openstack_nova_total_ram * {{ ram_major_threshold }}
+      labels:
+        severity: major
+        service: nova
+      annotations:
+        summary: "{{ ram_major_threshold * 100 }}% of cloud RAM was used"
+        description: "{% raw %}{{ $value }}MB{% endraw %} of RAM in the cloud was used (at least {{ ram_major_threshold * 100 }}%)."
+    NovaTotalMemoryFullCritical:
+      if: >-
+        openstack_nova_total_used_ram >= openstack_nova_total_ram * {{ ram_critical_threshold }}
+      labels:
+        severity: critical
+        service: nova
+      annotations:
+        summary: "{{ ram_critical_threshold * 100 }}% of cloud RAM was used"
+        description: "{% raw %}{{ $value }}MB{% endraw %} of RAM in the cloud was used (at least {{ ram_critical_threshold * 100 }}%)."
+    NovaTotalAllocatedDiskFullMinor:
+      if: >-
+        openstack_nova_total_disk - openstack_nova_total_free_disk >= openstack_nova_total_disk * {{ disk_ratio }} * {{ alloc_disk_minor_threshold }}
+      labels:
+        severity: minor
+        service: nova
+      annotations:
+        summary: "{{ alloc_disk_minor_threshold * 100 }}% of cloud disk space was allocated"
+        description: "{% raw %}{{ $value }}GB{% endraw %} of disk space in the cloud was allocated (at least {{ alloc_disk_minor_threshold * 100 }}%)."
+    NovaTotalAllocatedDiskFullMajor:
+      if: >-
+        openstack_nova_total_disk - openstack_nova_total_free_disk >= openstack_nova_total_disk * {{ disk_ratio }} * {{ alloc_disk_major_threshold }}
+      labels:
+        severity: major
+        service: nova
+      annotations:
+        summary: "{{ alloc_disk_major_threshold * 100 }}% of cloud disk space was allocated"
+        description: "{% raw %}{{ $value }}GB{% endraw %} of disk space in the cloud was allocated (at least {{ alloc_disk_major_threshold * 100 }}%)."
+    NovaTotalAllocatedDiskFullCritical:
+      if: >-
+        openstack_nova_total_disk - openstack_nova_total_free_disk >= openstack_nova_total_disk * {{ disk_ratio }}
+      labels:
+        severity: critical
+        service: nova
+      annotations:
+        summary: "No disk space available for allocation"
+        description: "All available disk space in the cloud was allocated."
+    NovaTotalDiskFullMajor:
+      if: >-
+        openstack_nova_total_used_disk >= openstack_nova_total_disk * {{ disk_major_threshold }}
+      labels:
+        severity: major
+        service: nova
+      annotations:
+        summary: "{{ disk_major_threshold * 100 }}% of cloud disk space was used"
+        description: "{% raw %}{{ $value }}GB{% endraw %} of disk space in the cloud was used (at least {{ disk_major_threshold * 100 }}%)."
+    NovaTotalDiskFullCritical:
+      if: >-
+        openstack_nova_total_used_disk >= openstack_nova_total_disk * {{ disk_critical_threshold }}
+      labels:
+        severity: critical
+        service: nova
+      annotations:
+        summary: "{{ disk_critical_threshold * 100 }}% of cloud disk space was used"
+        description: "{% raw %}{{ $value }}GB{% endraw %} of disk space in the cloud was used (at least {{ disk_critical_threshold * 100 }}%)."
 {%- endif %}
     NovaErrorLogsTooHigh:
       {%- set log_threshold = monitoring.error_log_rate.warn|float %}
@@ -231,7 +531,8 @@
         service: nova
       annotations:
         summary: "High number of errors in Nova logs"
-        description: "The rate of errors in Nova logs over the last 5 minutes is too high on the {{ $labels.host }} node (current value={{ $value }}, threshold={%- endraw %}{{ log_threshold }})."
+        description: "The average per-second rate of errors in Nova logs on the {{ $labels.host }} node is {{ $value }} (as measured over the last 5 minutes)."
+{%- endraw %}
 {%- if is_compute and exporters is defined %}
 {%- raw %}
     LibvirtDown: