Merge "Rework nova modules and states"
diff --git a/.kitchen.yml b/.kitchen.yml
index f343e41..d4c9f5e 100644
--- a/.kitchen.yml
+++ b/.kitchen.yml
@@ -46,7 +46,7 @@
 platforms:
   - name: <%=ENV['PLATFORM'] ||  'saltstack-ubuntu-xenial-salt-stable' %>
     driver_config:
-      image: <%=ENV['PLATFORM'] || 'epcim/salt-formulas:saltstack-ubuntu-xenial-salt-stable'%>
+      image: <%=ENV['PLATFORM'] || 'epcim/salt:saltstack-ubuntu-xenial-salt-stable'%>
       platform: ubuntu
 
 suites:
diff --git a/.travis.yml b/.travis.yml
index 50728d0..03c1a7e 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,3 +1,6 @@
+language: python
+python:
+- "2.7.13"
 sudo: required
 services:
   - docker
@@ -17,21 +20,32 @@
   - bundle install
 
 env:
-  - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2016.3 OS_VERSION=mitaka SUITE=compute-cluster
-  - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2017.7 OS_VERSION=mitaka SUITE=compute-cluster
-  - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2016.3 OS_VERSION=newton SUITE=compute-cluster
-  - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2017.7 OS_VERSION=newton SUITE=compute-cluster
-  - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2017.7 OS_VERSION=newton SUITE=control-cluster
-  - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2016.3 OS_VERSION=ocata SUITE=compute-cluster
-  - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2016.3 OS_VERSION=ocata SUITE=control-cluster
-  - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2017.7 OS_VERSION=ocata SUITE=compute-cluster
-  - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2017.7 OS_VERSION=ocata SUITE=control-cluster
-  # - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-bionic-salt-2017.7 OS_VERSION=mitaka SUITE=compute-cluster
-  # - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-bionic-salt-2017.7 OS_VERSION=mitaka SUITE=control-cluster
-  # - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-bionic-salt-2017.7 OS_VERSION=newton SUITE=compute-cluster
-  # - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-bionic-salt-2017.7 OS_VERSION=newton SUITE=control-cluster
-  # - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-bionic-salt-2017.7 OS_VERSION=ocata SUITE=compute-cluster
-  # - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-bionic-salt-2017.7 OS_VERSION=ocata SUITE=control-cluster
+  - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2016.3 OS_VERSION=mitaka SUITE=compute-cluster
+  - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2016.3 OS_VERSION=mitaka SUITE=control-cluster
+  - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2016.3 OS_VERSION=newton SUITE=compute-cluster
+  - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2016.3 OS_VERSION=newton SUITE=control-cluster
+  - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2016.3 OS_VERSION=ocata SUITE=compute-cluster
+  - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2016.3 OS_VERSION=ocata SUITE=control-cluster
+  - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2017.7 OS_VERSION=mitaka SUITE=compute-cluster
+  - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2017.7 OS_VERSION=mitaka SUITE=control-cluster
+  - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2017.7 OS_VERSION=newton SUITE=compute-cluster
+  - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2017.7 OS_VERSION=newton SUITE=control-cluster
+  - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2017.7 OS_VERSION=ocata SUITE=compute-cluster
+  - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2017.7 OS_VERSION=ocata SUITE=control-cluster
+  - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2018.3 OS_VERSION=ocata SUITE=compute-cluster
+  - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2018.3 OS_VERSION=ocata SUITE=control-cluster
+  - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2018.3 OS_VERSION=pike SUITE=compute-cluster
+  - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2018.3 OS_VERSION=pike SUITE=control-cluster
+  # - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2017.7 OS_VERSION=mitaka SUITE=compute-cluster
+  # - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2017.7 OS_VERSION=mitaka SUITE=control-cluster
+  # - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2017.7 OS_VERSION=newton SUITE=compute-cluster
+  # - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2017.7 OS_VERSION=newton SUITE=control-cluster
+  # - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2017.7 OS_VERSION=ocata SUITE=compute-cluster
+  # - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2017.7 OS_VERSION=ocata SUITE=control-cluster
+  # - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2018.3 OS_VERSION=ocata SUITE=compute-cluster
+  # - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2018.3 OS_VERSION=ocata SUITE=control-cluster
+  # - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2018.3 OS_VERSION=pike SUITE=compute-cluster
+  # - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2018.3 OS_VERSION=pike SUITE=control-cluster
 
 before_script:
   - set -o pipefail
diff --git a/README.rst b/README.rst
index 38b1b7b..8ff3b85 100644
--- a/README.rst
+++ b/README.rst
@@ -256,7 +256,6 @@
       compute:
         version: juno
         enabled: true
-        virtualization: kvm
         cross_az_attach: false
         disk_cachemodes: network=writeback,block=none
         availability_zone: availability_zone_01
@@ -313,6 +312,22 @@
           max_processes: 4096
         host: node-12.domain.tld
 
+Compute with the VMware driver. Each VMware cluster requires a separate nova-compute process,
+and each process should have a unique host identifier. Multiple nova-compute processes may run
+on a single host, but it is not recommended to have processes on different hosts manage the
+same VMware cluster. If high availability is required, pacemaker/corosync or keepalived might be used.
+
+.. code-block:: yaml
+
+    nova:
+      compute:
+        compute_driver: vmwareapi.VMwareVCDriver
+        vmware:
+          host_username: vmware
+          host_password: vmware
+          cluster_name: vmware_cluster01
+          host_ip: 1.2.3.4
+
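+For example, a second nova-compute process managing another cluster could be given its own
+host identifier (the values below are hypothetical):
+
+.. code-block:: yaml
+
+    nova:
+      compute:
+        compute_driver: vmwareapi.VMwareVCDriver
+        host: vmware-cluster02-compute
+        vmware:
+          host_username: vmware
+          host_password: vmware
+          cluster_name: vmware_cluster02
+          host_ip: 1.2.3.4
+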
 Group and user to be used for QEMU processes run by the system instance
 
 .. code-block:: yaml
@@ -949,6 +964,77 @@
 You can read more about live migration over TLS here:
     https://wiki.libvirt.org/page/TLSCreateServerCerts
 
+Enable transport + authentication for VNC over TLS
+---------------------------------------------------
+
+Available starting from the Queens release. This covers communication between the noVNC proxy
+service and QEMU.
+
+By default, communication between the nova-novncproxy and qemu services is not secured.
+
+.. code-block:: yaml
+
+    nova:
+      compute:
+        qemu:
+          vnc:
+            tls:
+              enabled: True
+      controller:
+        novncproxy:
+          # This section is responsible for communication between noVNC proxy and client machine
+          tls:
+            enabled: True
+          # This section is responsible for communication between nova-novncproxy and qemu service
+          vencrypt:
+            tls:
+              enabled: True
+
+You can set custom certificates in the pillar:
+
+.. code-block:: yaml
+
+    nova:
+      compute:
+        qemu:
+          vnc:
+            tls:
+              cacert: (certificate content)
+              cert: (certificate content)
+              key: (certificate content)
+      controller:
+        novncproxy:
+          tls:
+            server:
+              cert: (certificate content)
+              key: (certificate content)
+          vencrypt:
+            tls:
+              cacert: (certificate content)
+              cert: (certificate content)
+              key: (certificate content)
+
+
+You can read more about it here:
+    https://docs.openstack.org/nova/queens/admin/remote-console-access.html
+
+Enable communication between noVNC proxy and client machine over TLS
+----------------------------------------------------------------------
+
+By default, communication between the noVNC proxy and the client machine is not secured.
+
+.. code-block:: yaml
+
+    nova:
+      controller:
+        novncproxy:
+          tls:
+            enabled: True
+            server:
+              cert: (certificate content)
+              key: (certificate content)
+
+You can read more about it here:
+    https://docs.openstack.org/mitaka/config-reference/dashboard/configure.html
+
 Documentation and Bugs
 ======================
 
diff --git a/metadata/service/compute/cluster.yml b/metadata/service/compute/cluster.yml
index 34fbcf8..ba85fbd 100644
--- a/metadata/service/compute/cluster.yml
+++ b/metadata/service/compute/cluster.yml
@@ -4,7 +4,6 @@
 - service.nova.support
 parameters:
   _param:
-    nova_compute_virtualization: kvm
     openstack_log_appender: false
     openstack_fluentd_handler_enabled: false
     openstack_ossyslog_handler_enabled: false
@@ -12,7 +11,6 @@
     compute:
       version: ${_param:nova_version}
       enabled: true
-      virtualization: ${_param:nova_compute_virtualization}
       heal_instance_info_cache_interval: 60
       vncproxy_url: http://${_param:cluster_vip_address}:6080
       bind:
diff --git a/metadata/service/compute/container.yml b/metadata/service/compute/container.yml
index 145ebcc..51f771d 100644
--- a/metadata/service/compute/container.yml
+++ b/metadata/service/compute/container.yml
@@ -10,7 +10,6 @@
               compute:
                 version: ${_param:nova_version}
                 enabled: true
-                virtualization: kvm
                 vncproxy_url: http://${_param:nova_service_host}:6080
                 reserved_host_memory_mb: 32768
                 libvirt:
@@ -50,4 +49,4 @@
                   user: neutron
                   password: ${_param:keystone_neutron_password}
                   host: ${_param:neutron_service_host}
-                  port: 9696
\ No newline at end of file
+                  port: 9696
diff --git a/metadata/service/compute/kvm.yml b/metadata/service/compute/kvm.yml
index 96176af..2ede8af 100644
--- a/metadata/service/compute/kvm.yml
+++ b/metadata/service/compute/kvm.yml
@@ -11,7 +11,6 @@
     compute:
       version: ${_param:nova_version}
       enabled: true
-      virtualization: kvm
       heal_instance_info_cache_interval: 60
       vncproxy_url: http://${_param:cluster_vip_address}:6080
       bind:
diff --git a/nova/compute.sls b/nova/compute.sls
index 5a13581..1b8c149 100644
--- a/nova/compute.sls
+++ b/nova/compute.sls
@@ -105,7 +105,7 @@
 {{ service_name }}_logging_conf:
   file.managed:
     - name: /etc/nova/logging/logging-{{ service_name }}.conf
-    - source: salt://nova/files/logging.conf
+    - source: salt://oslo_templates/files/logging/_logging.conf
     - template: jinja
     - user: nova
     - group: nova
@@ -117,7 +117,7 @@
     - makedirs: True
     - defaults:
         service_name: {{ service_name }}
-        values: {{ compute }}
+        _data: {{ compute.logging }}
     - watch_in:
       - service: nova_compute_services
 
@@ -206,6 +206,50 @@
 {%- endif %}
 {%- endif %}
 
+{%- if compute.qemu.vnc.tls.get('enabled', False) %}
+
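+# Manage the CA certificate, certificate and key used for QEMU VNC TLS; if the content is not
+# provided in pillar, only check that the corresponding files already exist on the node.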
+{%- set ca_file=compute.qemu.vnc.tls.get('ca_file') %}
+{%- set key_file=compute.qemu.vnc.tls.get('key_file') %}
+{%- set cert_file=compute.qemu.vnc.tls.get('cert_file') %}
+
+qemu_ca_nova_compute:
+{%- if compute.qemu.vnc.tls.cacert is defined %}
+  file.managed:
+    - name: {{ ca_file }}
+    - contents_pillar: nova:compute:qemu:vnc:tls:cacert
+    - mode: 444
+    - makedirs: true
+{%- else %}
+  file.exists:
+   - name: {{ ca_file }}
+{%- endif %}
+
+qemu_public_cert:
+{%- if compute.qemu.vnc.tls.cert is defined %}
+  file.managed:
+    - name: {{ cert_file }}
+    - contents_pillar: nova:compute:qemu:vnc:tls:cert
+    - mode: 440
+    - makedirs: true
+{%- else %}
+  file.exists:
+   - name: {{ cert_file }}
+{%- endif %}
+
+qemu_private_key:
+{%- if compute.qemu.vnc.tls.key is defined %}
+  file.managed:
+    - name: {{ key_file }}
+    - contents_pillar: nova:compute:qemu:vnc:tls:key
+    - mode: 400
+    - makedirs: true
+{%- else %}
+  file.exists:
+   - name: {{ key_file }}
+{%- endif %}
+
+{%- endif %}
+
 nova_compute_services:
   service.running:
   - enable: true
@@ -253,7 +297,7 @@
 
 {%- endfor %}
 
-{%- if compute.virtualization == 'kvm' %}
+{%- if compute.get('compute_driver', 'libvirt.LibvirtDriver') == 'libvirt.LibvirtDriver' %}
 
 {%- if not salt['user.info']('nova') %}
 # MOS9 libvirt fix to create group
diff --git a/nova/controller.sls b/nova/controller.sls
index 88aa3c0..2ea0510 100644
--- a/nova/controller.sls
+++ b/nova/controller.sls
@@ -64,6 +64,85 @@
       - user: user_nova
 {%- endif %}
 
+# Only for Queens. Communication between noVNC proxy service and QEMU
+{%- if controller.version not in ['mitaka', 'newton', 'ocata', 'pike'] %}
+{%- if controller.novncproxy.vencrypt.tls.get('enabled', False) %}
+
+{%- set ca_file=controller.novncproxy.vencrypt.tls.get('ca_file') %}
+{%- set key_file=controller.novncproxy.vencrypt.tls.get('key_file') %}
+{%- set cert_file=controller.novncproxy.vencrypt.tls.get('cert_file') %}
+
+novncproxy_vencrypt_ca:
+{%- if controller.novncproxy.vencrypt.tls.cacert is defined %}
+  file.managed:
+    - name: {{ ca_file }}
+    - contents_pillar: nova:controller:novncproxy:vencrypt:tls:cacert
+    - mode: 444
+    - makedirs: true
+    - watch_in:
+      - service: nova_controller_services
+{%- else %}
+  file.exists:
+   - name: {{ ca_file }}
+{%- endif %}
+
+novncproxy_vencrypt_public_cert:
+{%- if controller.novncproxy.vencrypt.tls.cert is defined %}
+  file.managed:
+    - name: {{ cert_file }}
+    - contents_pillar: nova:controller:novncproxy:vencrypt:tls:cert
+    - mode: 440
+    - makedirs: true
+{%- else %}
+  file.exists:
+   - name: {{ cert_file }}
+{%- endif %}
+
+novncproxy_vencrypt_private_key:
+{%- if controller.novncproxy.vencrypt.tls.key is defined %}
+  file.managed:
+    - name: {{ key_file }}
+    - contents_pillar: nova:controller:novncproxy:vencrypt:tls:key
+    - mode: 400
+    - makedirs: true
+{%- else %}
+  file.exists:
+   - name: {{ key_file }}
+{%- endif %}
+{%- endif %}
+{%- endif %}
+
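+# Communication between noVNC proxy and client machine: manage the server certificate and key,
+# or just check that the files exist when their content is not provided in pillar.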
+{%- if controller.novncproxy.tls.get('enabled', False) %}
+{%- set key_file=controller.novncproxy.tls.server.get('key_file') %}
+{%- set cert_file=controller.novncproxy.tls.server.get('cert_file') %}
+
+novncproxy_server_public_cert:
+{%- if controller.novncproxy.tls.server.cert is defined %}
+  file.managed:
+    - name: {{ cert_file }}
+    - contents_pillar: nova:controller:novncproxy:tls:server:cert
+    - mode: 440
+    - makedirs: true
+    - watch_in:
+      - service: nova_controller_services
+{%- else %}
+  file.exists:
+   - name: {{ cert_file }}
+{%- endif %}
+
+novncproxy_server_private_key:
+{%- if controller.novncproxy.tls.server.key is defined %}
+  file.managed:
+    - name: {{ key_file }}
+    - contents_pillar: nova:controller:novncproxy:tls:server:key
+    - mode: 400
+    - makedirs: true
+{%- else %}
+  file.exists:
+   - name: {{ key_file }}
+{%- endif %}
+{%- endif %}
+
 {%- if controller.get('networking', 'default') == "contrail" and controller.version == "juno" %}
 
 contrail_nova_packages:
@@ -116,7 +195,7 @@
 nova_general_logging_conf:
   file.managed:
     - name: /etc/nova/logging.conf
-    - source: salt://nova/files/logging.conf
+    - source: salt://oslo_templates/files/logging/_logging.conf
     - template: jinja
     - user: nova
     - group: nova
@@ -127,7 +206,7 @@
 {%- endif %}
     - defaults:
         service_name: nova
-        values: {{ controller }}
+        _data: {{ controller.logging }}
     - watch_in:
       - service: nova_controller_services
 
@@ -146,7 +225,7 @@
 {{ service_name }}_logging_conf:
   file.managed:
     - name: /etc/nova/logging/logging-{{ service_name }}.conf
-    - source: salt://nova/files/logging.conf
+    - source: salt://oslo_templates/files/logging/_logging.conf
     - template: jinja
     - user: nova
     - group: nova
@@ -158,7 +237,7 @@
     - makedirs: True
     - defaults:
         service_name: {{ service_name }}
-        values: {{ controller }}
+        _data: {{ controller.logging }}
     - watch_in:
       - service: nova_controller_services
 {%- if controller.version not in ["juno", "kilo", "liberty", "mitaka", "newton"] %}
diff --git a/nova/files/grafana_dashboards/nova_overview_prometheus.json b/nova/files/grafana_dashboards/nova_overview_prometheus.json
index b3e800e..fbde0bd 100644
--- a/nova/files/grafana_dashboards/nova_overview_prometheus.json
+++ b/nova/files/grafana_dashboards/nova_overview_prometheus.json
@@ -1,3 +1,8 @@
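+{#- Render the Nova Cert panel only when nova_cert_enabled is set; in that case the service
+    panels are narrowed to 6 grid units so four of them fit in a row, otherwise 8 are used. #}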
+{%- if parameters is defined and parameters.get('nova_cert_enabled', False) == True %}
+  {%- set service_width = 6 %}
+{%- else %}
+  {%- set service_width = 8 %}
+{%- endif %}
 {%- raw %}
 {
   "annotations": {
@@ -912,120 +917,8 @@
       "fill": 4,
       "gridPos": {
         "h": 5,
-        "w": 6,
-        "x": 0,
-        "y": 17
-      },
-      "id": 19,
-      "legend": {
-        "alignAsTable": true,
-        "avg": false,
-        "current": false,
-        "hideEmpty": false,
-        "hideZero": false,
-        "max": false,
-        "min": false,
-        "rightSide": true,
-        "show": true,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "percentage": false,
-      "pointradius": 5,
-      "points": false,
-      "renderer": "flot",
-      "repeat": null,
-      "repeatDirection": "h",
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": true,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "max(count(openstack_nova_service_state{binary=\"nova-cert\"} == 1 and openstack_nova_service_status{binary=\"nova-cert\"} == 1) by (instance))",
-          "format": "time_series",
-          "intervalFactor": 2,
-          "legendFormat": "enabled/up",
-          "refId": "A"
-        },
-        {
-          "expr": "max(count(openstack_nova_service_state{binary=\"nova-cert\"} == 1 and openstack_nova_service_status{binary=\"nova-cert\"} == 0) by (instance))",
-          "format": "time_series",
-          "intervalFactor": 2,
-          "legendFormat": "enabled/down",
-          "refId": "B"
-        },
-        {
-          "expr": "max(count(openstack_nova_service_state{binary=\"nova-cert\"} == 0 and openstack_nova_service_status{binary=\"nova-cert\"} == 1) by (instance))",
-          "format": "time_series",
-          "intervalFactor": 2,
-          "legendFormat": "disabled/up",
-          "refId": "C"
-        },
-        {
-          "expr": "max(count(openstack_nova_service_state{binary=\"nova-cert\"} == 0 and openstack_nova_service_status{binary=\"nova-cert\"} == 0) by (instance))",
-          "format": "time_series",
-          "intervalFactor": 2,
-          "legendFormat": "disabled/down",
-          "refId": "D"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeShift": null,
-      "title": "Nova Cert",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "transparent": false,
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": "0",
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": null,
-      "fill": 4,
-      "gridPos": {
-        "h": 5,
-        "w": 6,
-        "x": 6,
+        "w": {% endraw %}{{ service_width }}{% raw %},
+        "x": {% endraw %}{{ service_width * 0 }}{% raw %},
         "y": 17
       },
       "id": 27,
@@ -1134,8 +1027,8 @@
       "fill": 4,
       "gridPos": {
         "h": 5,
-        "w": 6,
-        "x": 12,
+        "w": {% endraw %}{{ service_width }}{% raw %},
+        "x": {% endraw %}{{ service_width * 1 }}{% raw %},
         "y": 17
       },
       "id": 28,
@@ -1244,8 +1137,8 @@
       "fill": 4,
       "gridPos": {
         "h": 5,
-        "w": 6,
-        "x": 18,
+        "w": {% endraw %}{{ service_width }}{% raw %},
+        "x": {% endraw %}{{ service_width * 2 }}{% raw %},
         "y": 17
       },
       "id": 29,
@@ -1345,6 +1238,125 @@
         "alignLevel": null
       }
     },
+{%- endraw %}
+{%- if parameters is defined and parameters.get('nova_cert_enabled', False) == True %}
+{%- set service_width = 6 %}
+{%- raw %}
+    {
+      "aliasColors": {},
+      "bars": false,
+      "dashLength": 10,
+      "dashes": false,
+      "datasource": null,
+      "fill": 4,
+      "gridPos": {
+        "h": 5,
+        "w": {% endraw %}{{ service_width }}{% raw %},
+        "x": {% endraw %}{{ service_width * 3 }}{% raw %},
+        "y": 17
+      },
+      "id": 19,
+      "legend": {
+        "alignAsTable": true,
+        "avg": false,
+        "current": false,
+        "hideEmpty": false,
+        "hideZero": false,
+        "max": false,
+        "min": false,
+        "rightSide": true,
+        "show": true,
+        "total": false,
+        "values": false
+      },
+      "lines": true,
+      "linewidth": 1,
+      "links": [],
+      "nullPointMode": "null",
+      "percentage": false,
+      "pointradius": 5,
+      "points": false,
+      "renderer": "flot",
+      "repeat": null,
+      "repeatDirection": "h",
+      "seriesOverrides": [],
+      "spaceLength": 10,
+      "stack": true,
+      "steppedLine": false,
+      "targets": [
+        {
+          "expr": "max(count(openstack_nova_service_state{binary=\"nova-cert\"} == 1 and openstack_nova_service_status{binary=\"nova-cert\"} == 1) by (instance))",
+          "format": "time_series",
+          "intervalFactor": 2,
+          "legendFormat": "enabled/up",
+          "refId": "A"
+        },
+        {
+          "expr": "max(count(openstack_nova_service_state{binary=\"nova-cert\"} == 1 and openstack_nova_service_status{binary=\"nova-cert\"} == 0) by (instance))",
+          "format": "time_series",
+          "intervalFactor": 2,
+          "legendFormat": "enabled/down",
+          "refId": "B"
+        },
+        {
+          "expr": "max(count(openstack_nova_service_state{binary=\"nova-cert\"} == 0 and openstack_nova_service_status{binary=\"nova-cert\"} == 1) by (instance))",
+          "format": "time_series",
+          "intervalFactor": 2,
+          "legendFormat": "disabled/up",
+          "refId": "C"
+        },
+        {
+          "expr": "max(count(openstack_nova_service_state{binary=\"nova-cert\"} == 0 and openstack_nova_service_status{binary=\"nova-cert\"} == 0) by (instance))",
+          "format": "time_series",
+          "intervalFactor": 2,
+          "legendFormat": "disabled/down",
+          "refId": "D"
+        }
+      ],
+      "thresholds": [],
+      "timeFrom": null,
+      "timeShift": null,
+      "title": "Nova Cert",
+      "tooltip": {
+        "shared": true,
+        "sort": 0,
+        "value_type": "individual"
+      },
+      "transparent": false,
+      "type": "graph",
+      "xaxis": {
+        "buckets": null,
+        "mode": "time",
+        "name": null,
+        "show": true,
+        "values": []
+      },
+      "yaxes": [
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": "0",
+          "show": true
+        },
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        }
+      ],
+      "yaxis": {
+        "align": false,
+        "alignLevel": null
+      }
+    },
+{%- endraw %}
+{%- endif %}
+{%- raw %}
     {
       "aliasColors": {},
       "bars": false,
diff --git a/nova/files/grafana_dashboards/nova_utilization_prometheus.json b/nova/files/grafana_dashboards/nova_utilization_prometheus.json
index 1ecd6bd..957dc6f 100644
--- a/nova/files/grafana_dashboards/nova_utilization_prometheus.json
+++ b/nova/files/grafana_dashboards/nova_utilization_prometheus.json
@@ -632,7 +632,13 @@
       "tableColumn": "",
       "targets": [
         {
+          {%- endraw %}
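+          {#- With Ceph-backed ephemeral storage, presumably every hypervisor reports the same
+              shared pool, so the average is taken instead of the sum to avoid counting it repeatedly. #}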
+          {%- if parameters is defined and parameters.get('nova_compute_ceph_ephemeral', False) == True %}
+          "expr": "max(avg(openstack_nova_used_disk) by (instance)) / max(avg(openstack_nova_disk) by (instance))",
+          {%- else %}
           "expr": "max(sum(openstack_nova_used_disk) by (instance)) / max(sum(openstack_nova_disk) by (instance))",
+          {%- endif %}
+          {%- raw %}
           "format": "time_series",
           "intervalFactor": 2,
           "refId": "A"
@@ -691,7 +697,13 @@
       "targets": [
         {
           "$$hashKey": "object:33000",
+          {%- endraw %}
+          {%- if parameters is defined and parameters.get('nova_compute_ceph_ephemeral', False) == True %}
+          "expr": "max(avg(openstack_nova_disk) by (instance))",
+          {%- else %}
           "expr": "max(sum(openstack_nova_disk) by (instance))",
+          {%- endif %}
+          {%- raw %}
           "format": "time_series",
           "intervalFactor": 2,
           "legendFormat": "total",
@@ -699,7 +711,13 @@
         },
         {
           "$$hashKey": "object:33001",
+          {%- endraw %}
+          {%- if parameters is defined and parameters.get('nova_compute_ceph_ephemeral', False) == True %}
+          "expr": "max(avg(openstack_nova_used_disk) by (instance))",
+          {%- else %}
           "expr": "max(sum(openstack_nova_used_disk) by (instance))",
+          {%- endif %}
+          {%- raw %}
           "format": "time_series",
           "intervalFactor": 2,
           "legendFormat": "used",
@@ -707,7 +725,13 @@
         },
         {
           "$$hashKey": "object:33002",
+          {%- endraw %}
+          {%- if parameters is defined and parameters.get('nova_compute_ceph_ephemeral', False) == True %}
+          "expr": "max(avg(openstack_nova_used_disk) by (instance))",
+          {%- else %}
           "expr": "max(sum(openstack_nova_disk - openstack_nova_disk_available) by (instance))",
+          {%- endif %}
+          {%- raw %}
           "format": "time_series",
           "intervalFactor": 2,
           "legendFormat": "allocated",
@@ -817,7 +841,13 @@
       "targets": [
         {
           "$$hashKey": "object:33092",
+          {%- endraw %}
+          {%- if parameters is defined and parameters.get('nova_compute_ceph_ephemeral', False) == True %}
+          "expr": "max(avg(openstack_nova_used_disk) by (instance)) / max(avg(openstack_nova_disk) by (instance))",
+          {%- else %}
           "expr": "max(sum(openstack_nova_disk - openstack_nova_disk_available) by (instance)) / max(sum(openstack_nova_disk) by (instance))",
+          {%- endif %}
+          {%- raw %}
           "format": "time_series",
           "intervalFactor": 2,
           "refId": "A"
diff --git a/nova/files/grafana_dashboards/openstack_overview_prometheus.json b/nova/files/grafana_dashboards/openstack_overview_prometheus.json
index 8fa1718..ccaae42 100644
--- a/nova/files/grafana_dashboards/openstack_overview_prometheus.json
+++ b/nova/files/grafana_dashboards/openstack_overview_prometheus.json
@@ -257,7 +257,13 @@
       "tableColumn": "",
       "targets": [
         {
+          {%- endraw %}
+          {%- if parameters is defined and parameters.get('nova_compute_ceph_ephemeral', False) == True %}
+          "expr": "max(avg(openstack_nova_used_disk) by (instance)) / max(avg(openstack_nova_disk) by (instance))",
+          {%- else %}
           "expr": "max(sum(openstack_nova_used_disk) by (instance)) / max(sum(openstack_nova_disk) by (instance))",
+          {%- endif %}
+          {%- raw %}
           "format": "time_series",
           "intervalFactor": 2,
           "refId": "A"
diff --git a/nova/files/logging.conf b/nova/files/logging.conf
deleted file mode 100644
index c34eae6..0000000
--- a/nova/files/logging.conf
+++ /dev/null
@@ -1,94 +0,0 @@
-{%- set log_handlers = [] -%}
-{%- for log_handler_name, log_handler_attrs in values.logging.log_handlers.items() %}
-  {%- if log_handler_attrs.get('enabled', False) %}
-    {%- do log_handlers.append(log_handler_name) -%}
-  {%- endif %}
-{%- endfor %}
-
-[loggers]
-keys = root, nova
-
-[handlers]
-keys = {{ log_handlers | join(", ") }}
-
-[formatters]
-keys = context, default{% if values.logging.log_handlers.get('fluentd', {}).get('enabled', False) %}, fluentd{% endif %}
-
-[logger_root]
-level = {{ values.logging.get('loggers', {}).get('root', {}).get('level', 'WARNING') }}
-handlers = {{ log_handlers | join(", ") }}
-
-[logger_nova]
-level = {{ values.logging.get('loggers', {}).get('nova', {}).get('level', 'INFO') }}
-handlers = {{ log_handlers | join(", ") }}
-qualname = nova
-propagate = 0
-
-[logger_amqp]
-level = {{ values.logging.get('loggers', {}).get('amqp', {}).get('level', 'WARNING') }}
-handlers = {{ log_handlers | join(", ") }}
-qualname = amqp
-
-[logger_amqplib]
-level = {{ values.logging.get('loggers', {}).get('amqplib', {}).get('level', 'WARNING') }}
-handlers = {{ log_handlers | join(", ") }}
-qualname = amqplib
-
-[logger_sqlalchemy]
-level = {{ values.logging.get('loggers', {}).get('sqlalchemy', {}).get('level', 'WARNING') }}
-handlers = {{ log_handlers | join(", ") }}
-qualname = sqlalchemy
-# "level = INFO" logs SQL queries.
-# "level = DEBUG" logs SQL queries and results.
-# "level = WARNING" logs neither.  (Recommended for production systems.)
-
-[logger_boto]
-level = {{ values.logging.get('loggers', {}).get('boto', {}).get('level', 'WARNING') }}
-handlers = {{ log_handlers | join(", ") }}
-qualname = boto
-
-# NOTE(mikal): suds is used by the vmware driver, removing this will
-# cause many extraneous log lines for their tempest runs. Refer to
-# https://review.openstack.org/#/c/219225/ for details.
-[logger_suds]
-level = {{ values.logging.get('loggers', {}).get('suds', {}).get('level', 'INFO') }}
-handlers = {{ log_handlers | join(", ") }}
-qualname = suds
-
-[logger_eventletwsgi]
-level = {{ values.logging.get('loggers', {}).get('eventletwsgi', {}).get('level', 'WARNING') }}
-handlers = {{ log_handlers | join(", ") }}
-qualname = eventlet.wsgi.server
-
-{% if values.logging.log_handlers.get('fluentd').get('enabled', False) -%}
-[handler_fluentd]
-class = fluent.handler.FluentHandler
-args = ('openstack.{{ service_name | replace("-", ".", 1) }}', 'localhost', 24224)
-formatter = fluentd
-{%- endif %}
-
-{% if values.logging.log_handlers.watchedfile.enabled -%}
-[handler_watchedfile]
-class = handlers.WatchedFileHandler
-args = ('/var/log/nova/{{ service_name }}.log',)
-formatter = context
-{%- endif %}
-
-{% if values.logging.log_handlers.get('ossyslog', {}).get('enabled', False) -%}
-{%- set ossyslog_args = values.logging.log_handlers.ossyslog.get('args', {}) -%}
-[handler_ossyslog]
-class = oslo_log.handlers.OSSysLogHandler
-args = ( handlers.SysLogHandler.{{ ossyslog_args.get('facility', 'LOG_USER') }}, )
-formatter = context
-{%- endif %}
-
-[formatter_context]
-class = oslo_log.formatters.ContextFormatter
-
-[formatter_default]
-format = %(message)s
-
-{% if values.logging.log_handlers.get('fluentd').get('enabled', False) -%}
-[formatter_fluentd]
-class = oslo_log.formatters.FluentFormatter
-{%- endif %}
diff --git a/nova/files/pike/compute/_vmware.conf b/nova/files/pike/compute/_vmware.conf
new file mode 100644
index 0000000..2ecf3c4
--- /dev/null
+++ b/nova/files/pike/compute/_vmware.conf
@@ -0,0 +1,310 @@
+
+[vmware]
+#
+# Related options:
+# Following options must be set in order to launch VMware-based
+# virtual machines.
+#
+# * compute_driver: Must use vmwareapi.VMwareVCDriver.
+# * vmware.host_username
+# * vmware.host_password
+# * vmware.cluster_name
+
+#
+# From nova.conf
+#
+
+#
+# This option specifies the physical ethernet adapter name for VLAN
+# networking.
+#
+# Set the vlan_interface configuration option to match the ESX host
+# interface that handles VLAN-tagged VM traffic.
+#
+# Possible values:
+#
+# * Any valid string representing VLAN interface name
+#  (string value)
+#vlan_interface = vmnic0
+{%- if _data.vlan_interface is defined %}
+vlan_interface = {{ _data.vlan_interface }}
+{%- endif %}
+
+#
+# This option should be configured only when using the NSX-MH Neutron
+# plugin. This is the name of the integration bridge on the ESXi server
+# or host. This should not be set for any other Neutron plugin. Hence
+# the default value is not set.
+#
+# Possible values:
+#
+# * Any valid string representing the name of the integration bridge
+#  (string value)
+#integration_bridge = <None>
+{%- if _data.integration_bridge is defined %}
+integration_bridge = {{ _data.integration_bridge }}
+{%- endif %}
+
+#
+# Set this value if affected by an increased network latency causing
+# repeated characters when typing in a remote console.
+#  (integer value)
+# Minimum value: 0
+#console_delay_seconds = <None>
+{%- if _data.console_delay_seconds is defined %}
+console_delay_seconds = {{ _data.console_delay_seconds }}
+{%- endif %}
+
+#
+# Identifies the remote system where the serial port traffic will
+# be sent.
+#
+# This option adds a virtual serial port which sends console output to
+# a configurable service URI. At the service URI address there will be
+# virtual serial port concentrator that will collect console logs.
+# If this is not set, no serial ports will be added to the created VMs.
+#
+# Possible values:
+#
+# * Any valid URI
+#  (string value)
+#serial_port_service_uri = <None>
+{%- if _data.serial_port_service_uri is defined %}
+serial_port_service_uri = {{ _data.serial_port_service_uri }}
+{%- endif %}
+
+#
+# Identifies a proxy service that provides network access to the
+# serial_port_service_uri.
+#
+# Possible values:
+#
+# * Any valid URI (The scheme is 'telnet' or 'telnets'.)
+#
+# Related options:
+# This option is ignored if serial_port_service_uri is not specified.
+# * serial_port_service_uri
+#  (uri value)
+#serial_port_proxy_uri = <None>
+{%- if _data.serial_port_proxy_uri is defined %}
+serial_port_proxy_uri = {{ _data.serial_port_proxy_uri }}
+{%- endif %}
+
+#
+# Hostname or IP address for connection to VMware vCenter host. (unknown value)
+#host_ip = <None>
+host_ip = {{ _data.host_ip }}
+
+# Port for connection to VMware vCenter host. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#host_port = 443
+{%- if _data.host_port is defined %}
+host_port = {{ _data.host_port }}
+{%- endif %}
+
+# Username for connection to VMware vCenter host. (string value)
+#host_username = <None>
+host_username = {{ _data.host_username }}
+
+# Password for connection to VMware vCenter host. (string value)
+#host_password = <None>
+host_password = {{ _data.host_password }}
+
+#
+# Specifies the CA bundle file to be used in verifying the vCenter
+# server certificate.
+#  (string value)
+#ca_file = <None>
+{%- if _data.cacert_file is defined %}
+ca_file = {{ _data.cacert_file }}
+{%- endif %}
+
+#
+# If true, the vCenter server certificate is not verified. If false,
+# then the default CA truststore is used for verification.
+#
+# Related options:
+# * ca_file: This option is ignored if "ca_file" is set.
+#  (boolean value)
+#insecure = false
+{%- if _data.insecure is defined %}
+insecure = {{ _data.insecure }}
+{%- endif %}
+
+# Name of a VMware Cluster ComputeResource. (string value)
+#cluster_name = <None>
+cluster_name = {{ _data.cluster_name }}
+
+#
+# Regular expression pattern to match the name of datastore.
+#
+# The datastore_regex setting specifies the datastores to use with
+# Compute. For example, datastore_regex="nas.*" selects all the data
+# stores that have a name starting with "nas".
+#
+# NOTE: If no regex is given, it just picks the datastore with the
+# most freespace.
+#
+# Possible values:
+#
+# * Any matching regular expression to a datastore must be given
+#  (string value)
+#datastore_regex = <None>
+{%- if _data.datastore_regex is defined %}
+datastore_regex = {{ _data.datastore_regex }}
+{%- endif %}
+
+#
+# Time interval in seconds to poll remote tasks invoked on
+# VMware VC server.
+#  (floating point value)
+#task_poll_interval = 0.5
+{%- if _data.task_poll_interval is defined %}
+task_poll_interval = {{ _data.task_poll_interval }}
+{%- endif %}
+
+
+#
+# Number of times VMware vCenter server API must be retried on connection
+# failures, e.g. socket error, etc.
+#  (integer value)
+# Minimum value: 0
+#api_retry_count = 10
+{%- if _data.api_retry_count is defined %}
+api_retry_count = {{ _data.api_retry_count }}
+{%- endif %}
+
+#
+# This option specifies VNC starting port.
+#
+# Every VM created by ESX host has an option of enabling VNC client
+# for remote connection. Above option 'vnc_port' helps you to set
+# default starting port for the VNC client.
+#
+# Possible values:
+#
+# * Any valid port number within 5900 -(5900 + vnc_port_total)
+#
+# Related options:
+# Below options should be set to enable VNC client.
+# * vnc.enabled = True
+# * vnc_port_total
+#  (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#vnc_port = 5900
+{%- if _data.vnc_port is defined %}
+vnc_port = {{ _data.vnc_port }}
+{%- endif %}
+
+#
+# Total number of VNC ports.
+#  (integer value)
+# Minimum value: 0
+#vnc_port_total = 10000
+{%- if _data.vnc_port_total is defined %}
+vnc_port_total = {{ _data.vnc_port_total }}
+{%- endif %}
+
+#
+# This option enables/disables the use of linked clone.
+#
+# The ESX hypervisor requires a copy of the VMDK file in order to boot
+# up a virtual machine. The compute driver must download the VMDK via
+# HTTP from the OpenStack Image service to a datastore that is visible
+# to the hypervisor and cache it. Subsequent virtual machines that need
+# the VMDK use the cached version and don't have to copy the file again
+# from the OpenStack Image service.
+#
+# If set to false, even with a cached VMDK, there is still a copy
+# operation from the cache location to the hypervisor file directory
+# in the shared datastore. If set to true, the above copy operation
+# is avoided as it creates copy of the virtual machine that shares
+# virtual disks with its parent VM.
+#  (boolean value)
+#use_linked_clone = true
+{%- if _data.use_linked_clone is defined %}
+use_linked_clone = {{ _data.use_linked_clone }}
+{%- endif %}
+
+#
+# This option enables or disables storage policy based placement
+# of instances.
+#
+# Related options:
+#
+# * pbm_default_policy
+#  (boolean value)
+#pbm_enabled = false
+{%- if _data.pbm_enabled is defined %}
+pbm_enabled = {{ _data.pbm_enabled }}
+{%- endif %}
+
+#
+# This option specifies the PBM service WSDL file location URL.
+#
+# Setting this will disable storage policy based placement
+# of instances.
+#
+# Possible values:
+#
+# * Any valid file path
+#   e.g file:///opt/SDK/spbm/wsdl/pbmService.wsdl
+#  (string value)
+#pbm_wsdl_location = <None>
+{%- if _data.pbm_wsdl_location is defined %}
+pbm_wsdl_location = {{ _data.pbm_wsdl_location }}
+{%- endif %}
+
+#
+# This option specifies the default policy to be used.
+#
+# If pbm_enabled is set and there is no defined storage policy for the
+# specific request, then this policy will be used.
+#
+# Possible values:
+#
+# * Any valid storage policy such as VSAN default storage policy
+#
+# Related options:
+#
+# * pbm_enabled
+#  (string value)
+#pbm_default_policy = <None>
+{%- if _data.pbm_default_policy is defined %}
+pbm_default_policy = {{ _data.pbm_default_policy }}
+{%- endif %}
+
+#
+# This option specifies the limit on the maximum number of objects to
+# return in a single result.
+#
+# A positive value will cause the operation to suspend the retrieval
+# when the count of objects reaches the specified limit. The server may
+# still limit the count to something less than the configured value.
+# Any remaining objects may be retrieved with additional requests.
+#  (integer value)
+# Minimum value: 0
+#maximum_objects = 100
+{%- if _data.maximum_objects is defined %}
+maximum_objects = {{ _data.maximum_objects }}
+{%- endif %}
+
+#
+# This option adds a prefix to the folder where cached images are stored
+#
+# This is not the full path - just a folder prefix. This should only be
+# used when a datastore cache is shared between compute nodes.
+#
+# Note: This should only be used when the compute nodes are running on same
+# host or they have a shared file system.
+#
+# Possible values:
+#
+# * Any string representing the cache prefix to the folder
+#  (string value)
+#cache_prefix = <None>
+{%- if _data.cache_prefix is defined %}
+cache_prefix = {{ _data.cache_prefix }}
+{%- endif %}
diff --git a/nova/files/pike/nova-compute.conf.Debian b/nova/files/pike/nova-compute.conf.Debian
index 4c51e6f..d6589ec 100644
--- a/nova/files/pike/nova-compute.conf.Debian
+++ b/nova/files/pike/nova-compute.conf.Debian
@@ -1,4 +1,5 @@
-{%- from "nova/map.jinja" import compute with context %}
+{%- from "nova/map.jinja" import compute,compute_driver_mapping with context %}
+
 [DEFAULT]
 
 #
@@ -9696,274 +9697,11 @@
 # Tenant Name (string value)
 #tenant_name=<None>
 
-
-[vmware]
-#
-# Related options:
-# Following options must be set in order to launch VMware-based
-# virtual machines.
-#
-# * compute_driver: Must use vmwareapi.VMwareVCDriver.
-# * vmware.host_username
-# * vmware.host_password
-# * vmware.cluster_name
-
-#
-# From nova.conf
-#
-
-#
-# This option specifies the physical ethernet adapter name for VLAN
-# networking.
-#
-# Set the vlan_interface configuration option to match the ESX host
-# interface that handles VLAN-tagged VM traffic.
-#
-# Possible values:
-#
-# * Any valid string representing VLAN interface name
-#  (string value)
-#vlan_interface=vmnic0
-
-#
-# This option should be configured only when using the NSX-MH Neutron
-# plugin. This is the name of the integration bridge on the ESXi server
-# or host. This should not be set for any other Neutron plugin. Hence
-# the default value is not set.
-#
-# Possible values:
-#
-# * Any valid string representing the name of the integration bridge
-#  (string value)
-#integration_bridge=<None>
-
-#
-# Set this value if affected by an increased network latency causing
-# repeated characters when typing in a remote console.
-#  (integer value)
-# Minimum value: 0
-#console_delay_seconds=<None>
-
-#
-# Identifies the remote system where the serial port traffic will
-# be sent.
-#
-# This option adds a virtual serial port which sends console output to
-# a configurable service URI. At the service URI address there will be
-# virtual serial port concentrator that will collect console logs.
-# If this is not set, no serial ports will be added to the created VMs.
-#
-# Possible values:
-#
-# * Any valid URI
-#  (string value)
-#serial_port_service_uri=<None>
-
-#
-# Identifies a proxy service that provides network access to the
-# serial_port_service_uri.
-#
-# Possible values:
-#
-# * Any valid URI
-#
-# Related options:
-# This option is ignored if serial_port_service_uri is not specified.
-# * serial_port_service_uri
-#  (string value)
-#serial_port_proxy_uri=<None>
-
-#
-# Hostname or IP address for connection to VMware vCenter host. (string value)
-#host_ip=<None>
-
-# Port for connection to VMware vCenter host. (port value)
-# Minimum value: 0
-# Maximum value: 65535
-#host_port=443
-
-# Username for connection to VMware vCenter host. (string value)
-#host_username=<None>
-
-# Password for connection to VMware vCenter host. (string value)
-#host_password=<None>
-
-#
-# Specifies the CA bundle file to be used in verifying the vCenter
-# server certificate.
-#  (string value)
-#ca_file=<None>
-
-#
-# If true, the vCenter server certificate is not verified. If false,
-# then the default CA truststore is used for verification.
-#
-# Related options:
-# * ca_file: This option is ignored if "ca_file" is set.
-#  (boolean value)
-#insecure=false
-
-# Name of a VMware Cluster ComputeResource. (string value)
-#cluster_name=<None>
-
-#
-# Regular expression pattern to match the name of datastore.
-#
-# The datastore_regex setting specifies the datastores to use with
-# Compute. For example, datastore_regex="nas.*" selects all the data
-# stores that have a name starting with "nas".
-#
-# NOTE: If no regex is given, it just picks the datastore with the
-# most freespace.
-#
-# Possible values:
-#
-# * Any matching regular expression to a datastore must be given
-#  (string value)
-#datastore_regex=<None>
-
-#
-# Time interval in seconds to poll remote tasks invoked on
-# VMware VC server.
-#  (floating point value)
-#task_poll_interval=0.5
-
-#
-# Number of times VMware vCenter server API must be retried on connection
-# failures, e.g. socket error, etc.
-#  (integer value)
-# Minimum value: 0
-#api_retry_count=10
-
-#
-# This option specifies VNC starting port.
-#
-# Every VM created by ESX host has an option of enabling VNC client
-# for remote connection. Above option 'vnc_port' helps you to set
-# default starting port for the VNC client.
-#
-# Possible values:
-#
-# * Any valid port number within 5900 -(5900 + vnc_port_total)
-#
-# Related options:
-# Below options should be set to enable VNC client.
-# * vnc.enabled = True
-# * vnc_port_total
-#  (port value)
-# Minimum value: 0
-# Maximum value: 65535
-#vnc_port=5900
-
-#
-# Total number of VNC ports.
-#  (integer value)
-# Minimum value: 0
-#vnc_port_total=10000
-
-#
-# This option enables/disables the use of linked clone.
-#
-# The ESX hypervisor requires a copy of the VMDK file in order to boot
-# up a virtual machine. The compute driver must download the VMDK via
-# HTTP from the OpenStack Image service to a datastore that is visible
-# to the hypervisor and cache it. Subsequent virtual machines that need
-# the VMDK use the cached version and don't have to copy the file again
-# from the OpenStack Image service.
-#
-# If set to false, even with a cached VMDK, there is still a copy
-# operation from the cache location to the hypervisor file directory
-# in the shared datastore. If set to true, the above copy operation
-# is avoided as it creates copy of the virtual machine that shares
-# virtual disks with its parent VM.
-#  (boolean value)
-#use_linked_clone=true
-
-# DEPRECATED:
-# This option specifies VIM Service WSDL Location
-#
-# If vSphere API versions 5.1 and later is being used, this section can
-# be ignored. If version is less than 5.1, WSDL files must be hosted
-# locally and their location must be specified in the above section.
-#
-# Optional over-ride to default location for bug work-arounds.
-#
-# Possible values:
-#
-# * http://<server>/vimService.wsdl
-# * file:///opt/stack/vmware/SDK/wsdl/vim25/vimService.wsdl
-#  (string value)
-# This option is deprecated for removal since 15.0.0.
-# Its value may be silently ignored in the future.
-# Reason: Only vCenter versions earlier than 5.1 require this option and the
-# current minimum version is 5.1.
-#wsdl_location=<None>
-
-#
-# This option enables or disables storage policy based placement
-# of instances.
-#
-# Related options:
-#
-# * pbm_default_policy
-#  (boolean value)
-#pbm_enabled=false
-
-#
-# This option specifies the PBM service WSDL file location URL.
-#
-# Setting this will disable storage policy based placement
-# of instances.
-#
-# Possible values:
-#
-# * Any valid file path
-#   e.g file:///opt/SDK/spbm/wsdl/pbmService.wsdl
-#  (string value)
-#pbm_wsdl_location=<None>
-
-#
-# This option specifies the default policy to be used.
-#
-# If pbm_enabled is set and there is no defined storage policy for the
-# specific request, then this policy will be used.
-#
-# Possible values:
-#
-# * Any valid storage policy such as VSAN default storage policy
-#
-# Related options:
-#
-# * pbm_enabled
-#  (string value)
-#pbm_default_policy=<None>
-
-#
-# This option specifies the limit on the maximum number of objects to
-# return in a single result.
-#
-# A positive value will cause the operation to suspend the retrieval
-# when the count of objects reaches the specified limit. The server may
-# still limit the count to something less than the configured value.
-# Any remaining objects may be retrieved with additional requests.
-#  (integer value)
-# Minimum value: 0
-#maximum_objects=100
-
-#
-# This option adds a prefix to the folder where cached images are stored
-#
-# This is not the full path - just a folder prefix. This should only be
-# used when a datastore cache is shared between compute nodes.
-#
-# Note: This should only be used when the compute nodes are running on same
-# host or they have a shared file system.
-#
-# Possible values:
-#
-# * Any string representing the cache prefix to the folder
-#  (string value)
-#cache_prefix=<None>
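+{#- compute_driver_mapping is imported from nova/map.jinja and is assumed to map a compute
+    driver class to the pillar key and config template holding its settings, for example
+    'vmwareapi.VMwareVCDriver' -> 'vmware' (rendered via _vmware.conf). #}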
+{%- set compute_driver = compute.get('compute_driver', 'libvirt.LibvirtDriver') %}
+{%- if compute_driver in compute_driver_mapping.keys() %}
+{%- set _data = compute.get(compute_driver_mapping[compute_driver]) %}
+{%- include "nova/files/pike/compute/_" + compute_driver_mapping[compute_driver] + ".conf" %}
+{%- endif %}
 
 
 [vnc]
diff --git a/nova/files/pike/nova-controller.conf.Debian b/nova/files/pike/nova-controller.conf.Debian
index 96e1d0f..f6979f4 100644
--- a/nova/files/pike/nova-controller.conf.Debian
+++ b/nova/files/pike/nova-controller.conf.Debian
@@ -1,4 +1,4 @@
-{%- from "nova/map.jinja" import controller with context %}
+{%- from "nova/map.jinja" import controller,compute_driver_mapping with context %}
 [DEFAULT]
 
 #
@@ -2525,6 +2525,11 @@
 
 # Disallow non-encrypted connections. (boolean value)
 #ssl_only=false
+{%- if controller.novncproxy.tls.get('enabled', False) %}
+ssl_only=True
+cert={{controller.novncproxy.tls.server.cert_file|yaml_squote}}
+key={{controller.novncproxy.tls.server.key_file|yaml_squote}}
+{%- endif %}
 
 # Set to True if source host is addressed with IPv6. (boolean value)
 #source_is_ipv6=false
@@ -2789,7 +2794,9 @@
 # example, logging_context_format_string). (string value)
 # Note: This option can be changed without restarting.
 # Deprecated group/name - [DEFAULT]/log_config
-#log_config_append=<None>
+{%- if controller.logging.log_appender %}
+log_config_append=/etc/nova/logging.conf
+{%- endif %}
 
 # Defines the format string for %%(asctime)s in log records. Default:
 # %(default)s . This option is ignored if log_config_append is set. (string
@@ -9625,275 +9632,11 @@
 # Tenant Name (string value)
 #tenant_name=<None>
 
-
-[vmware]
-#
-# Related options:
-# Following options must be set in order to launch VMware-based
-# virtual machines.
-#
-# * compute_driver: Must use vmwareapi.VMwareVCDriver.
-# * vmware.host_username
-# * vmware.host_password
-# * vmware.cluster_name
-
-#
-# From nova.conf
-#
-
-#
-# This option specifies the physical ethernet adapter name for VLAN
-# networking.
-#
-# Set the vlan_interface configuration option to match the ESX host
-# interface that handles VLAN-tagged VM traffic.
-#
-# Possible values:
-#
-# * Any valid string representing VLAN interface name
-#  (string value)
-#vlan_interface=vmnic0
-
-#
-# This option should be configured only when using the NSX-MH Neutron
-# plugin. This is the name of the integration bridge on the ESXi server
-# or host. This should not be set for any other Neutron plugin. Hence
-# the default value is not set.
-#
-# Possible values:
-#
-# * Any valid string representing the name of the integration bridge
-#  (string value)
-#integration_bridge=<None>
-
-#
-# Set this value if affected by an increased network latency causing
-# repeated characters when typing in a remote console.
-#  (integer value)
-# Minimum value: 0
-#console_delay_seconds=<None>
-
-#
-# Identifies the remote system where the serial port traffic will
-# be sent.
-#
-# This option adds a virtual serial port which sends console output to
-# a configurable service URI. At the service URI address there will be
-# virtual serial port concentrator that will collect console logs.
-# If this is not set, no serial ports will be added to the created VMs.
-#
-# Possible values:
-#
-# * Any valid URI
-#  (string value)
-#serial_port_service_uri=<None>
-
-#
-# Identifies a proxy service that provides network access to the
-# serial_port_service_uri.
-#
-# Possible values:
-#
-# * Any valid URI
-#
-# Related options:
-# This option is ignored if serial_port_service_uri is not specified.
-# * serial_port_service_uri
-#  (string value)
-#serial_port_proxy_uri=<None>
-
-#
-# Hostname or IP address for connection to VMware vCenter host. (string value)
-#host_ip=<None>
-
-# Port for connection to VMware vCenter host. (port value)
-# Minimum value: 0
-# Maximum value: 65535
-#host_port=443
-
-# Username for connection to VMware vCenter host. (string value)
-#host_username=<None>
-
-# Password for connection to VMware vCenter host. (string value)
-#host_password=<None>
-
-#
-# Specifies the CA bundle file to be used in verifying the vCenter
-# server certificate.
-#  (string value)
-#ca_file=<None>
-
-#
-# If true, the vCenter server certificate is not verified. If false,
-# then the default CA truststore is used for verification.
-#
-# Related options:
-# * ca_file: This option is ignored if "ca_file" is set.
-#  (boolean value)
-#insecure=false
-
-# Name of a VMware Cluster ComputeResource. (string value)
-#cluster_name=<None>
-
-#
-# Regular expression pattern to match the name of datastore.
-#
-# The datastore_regex setting specifies the datastores to use with
-# Compute. For example, datastore_regex="nas.*" selects all the data
-# stores that have a name starting with "nas".
-#
-# NOTE: If no regex is given, it just picks the datastore with the
-# most freespace.
-#
-# Possible values:
-#
-# * Any matching regular expression to a datastore must be given
-#  (string value)
-#datastore_regex=<None>
-
-#
-# Time interval in seconds to poll remote tasks invoked on
-# VMware VC server.
-#  (floating point value)
-#task_poll_interval=0.5
-
-#
-# Number of times VMware vCenter server API must be retried on connection
-# failures, e.g. socket error, etc.
-#  (integer value)
-# Minimum value: 0
-#api_retry_count=10
-
-#
-# This option specifies VNC starting port.
-#
-# Every VM created by ESX host has an option of enabling VNC client
-# for remote connection. Above option 'vnc_port' helps you to set
-# default starting port for the VNC client.
-#
-# Possible values:
-#
-# * Any valid port number within 5900 -(5900 + vnc_port_total)
-#
-# Related options:
-# Below options should be set to enable VNC client.
-# * vnc.enabled = True
-# * vnc_port_total
-#  (port value)
-# Minimum value: 0
-# Maximum value: 65535
-#vnc_port=5900
-
-#
-# Total number of VNC ports.
-#  (integer value)
-# Minimum value: 0
-#vnc_port_total=10000
-
-#
-# This option enables/disables the use of linked clone.
-#
-# The ESX hypervisor requires a copy of the VMDK file in order to boot
-# up a virtual machine. The compute driver must download the VMDK via
-# HTTP from the OpenStack Image service to a datastore that is visible
-# to the hypervisor and cache it. Subsequent virtual machines that need
-# the VMDK use the cached version and don't have to copy the file again
-# from the OpenStack Image service.
-#
-# If set to false, even with a cached VMDK, there is still a copy
-# operation from the cache location to the hypervisor file directory
-# in the shared datastore. If set to true, the above copy operation
-# is avoided as it creates copy of the virtual machine that shares
-# virtual disks with its parent VM.
-#  (boolean value)
-#use_linked_clone=true
-
-# DEPRECATED:
-# This option specifies VIM Service WSDL Location
-#
-# If vSphere API versions 5.1 and later is being used, this section can
-# be ignored. If version is less than 5.1, WSDL files must be hosted
-# locally and their location must be specified in the above section.
-#
-# Optional over-ride to default location for bug work-arounds.
-#
-# Possible values:
-#
-# * http://<server>/vimService.wsdl
-# * file:///opt/stack/vmware/SDK/wsdl/vim25/vimService.wsdl
-#  (string value)
-# This option is deprecated for removal since 15.0.0.
-# Its value may be silently ignored in the future.
-# Reason: Only vCenter versions earlier than 5.1 require this option and the
-# current minimum version is 5.1.
-#wsdl_location=<None>
-
-#
-# This option enables or disables storage policy based placement
-# of instances.
-#
-# Related options:
-#
-# * pbm_default_policy
-#  (boolean value)
-#pbm_enabled=false
-
-#
-# This option specifies the PBM service WSDL file location URL.
-#
-# Setting this will disable storage policy based placement
-# of instances.
-#
-# Possible values:
-#
-# * Any valid file path
-#   e.g file:///opt/SDK/spbm/wsdl/pbmService.wsdl
-#  (string value)
-#pbm_wsdl_location=<None>
-
-#
-# This option specifies the default policy to be used.
-#
-# If pbm_enabled is set and there is no defined storage policy for the
-# specific request, then this policy will be used.
-#
-# Possible values:
-#
-# * Any valid storage policy such as VSAN default storage policy
-#
-# Related options:
-#
-# * pbm_enabled
-#  (string value)
-#pbm_default_policy=<None>
-
-#
-# This option specifies the limit on the maximum number of objects to
-# return in a single result.
-#
-# A positive value will cause the operation to suspend the retrieval
-# when the count of objects reaches the specified limit. The server may
-# still limit the count to something less than the configured value.
-# Any remaining objects may be retrieved with additional requests.
-#  (integer value)
-# Minimum value: 0
-#maximum_objects=100
-
-#
-# This option adds a prefix to the folder where cached images are stored
-#
-# This is not the full path - just a folder prefix. This should only be
-# used when a datastore cache is shared between compute nodes.
-#
-# Note: This should only be used when the compute nodes are running on same
-# host or they have a shared file system.
-#
-# Possible values:
-#
-# * Any string representing the cache prefix to the folder
-#  (string value)
-#cache_prefix=<None>
-
+{%- set compute_driver = controller.get('compute_driver', 'libvirt.LibvirtDriver') %}
+{%- if compute_driver in compute_driver_mapping.keys() %}
+{%- set _data = controller.get(compute_driver_mapping[compute_driver]) %}
+{%- include "nova/files/pike/compute/_" + compute_driver_mapping[compute_driver] + ".conf" %}
+{%- endif %}
 
 [vnc]
 #
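The pike controller template now emits the driver-specific section only when the pillar selects a mapped driver; with the default libvirt.LibvirtDriver the include is skipped and no [vmware] section is rendered. A minimal controller pillar sketch that pulls in the VMware block (values are hypothetical; the key names come from the template and compute_driver_mapping in map.jinja):

    nova:
      controller:
        compute_driver: vmwareapi.VMwareVCDriver
        vmware:
          host_ip: 192.0.2.10            # hypothetical vCenter address
          host_username: vmware
          host_password: vmware
          cluster_name: vmware_cluster01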
diff --git a/nova/files/queens/compute/_vmware.conf b/nova/files/queens/compute/_vmware.conf
new file mode 100644
index 0000000..034053b
--- /dev/null
+++ b/nova/files/queens/compute/_vmware.conf
@@ -0,0 +1,332 @@
+
+[vmware]
+#
+# Related options:
+# Following options must be set in order to launch VMware-based
+# virtual machines.
+#
+# * compute_driver: Must use vmwareapi.VMwareVCDriver.
+# * vmware.host_username
+# * vmware.host_password
+# * vmware.cluster_name
+
+#
+# From nova.conf
+#
+
+#
+# This option specifies the physical ethernet adapter name for VLAN
+# networking.
+#
+# Set the vlan_interface configuration option to match the ESX host
+# interface that handles VLAN-tagged VM traffic.
+#
+# Possible values:
+#
+# * Any valid string representing VLAN interface name
+#  (string value)
+#vlan_interface = vmnic0
+{%- if _data.vlan_interface is defined %}
+vlan_interface = {{ _data.vlan_interface }}
+{%- endif %}
+
+#
+# This option should be configured only when using the NSX-MH Neutron
+# plugin. This is the name of the integration bridge on the ESXi server
+# or host. This should not be set for any other Neutron plugin. Hence
+# the default value is not set.
+#
+# Possible values:
+#
+# * Any valid string representing the name of the integration bridge
+#  (string value)
+#integration_bridge = <None>
+{%- if _data.integration_bridge is defined %}
+integration_bridge = {{ _data.integration_bridge }}
+{%- endif %}
+
+#
+# Set this value if affected by an increased network latency causing
+# repeated characters when typing in a remote console.
+#  (integer value)
+# Minimum value: 0
+#console_delay_seconds = <None>
+{%- if _data.console_delay_seconds is defined %}
+console_delay_seconds = {{ _data.console_delay_seconds }}
+{%- endif %}
+
+#
+# Identifies the remote system where the serial port traffic will
+# be sent.
+#
+# This option adds a virtual serial port which sends console output to
+# a configurable service URI. At the service URI address there will be
+# virtual serial port concentrator that will collect console logs.
+# If this is not set, no serial ports will be added to the created VMs.
+#
+# Possible values:
+#
+# * Any valid URI
+#  (string value)
+#serial_port_service_uri = <None>
+{%- if _data.serial_port_service_uri is defined %}
+serial_port_service_uri = {{ _data.serial_port_service_uri }}
+{%- endif %}
+
+#
+# Identifies a proxy service that provides network access to the
+# serial_port_service_uri.
+#
+# Possible values:
+#
+# * Any valid URI (The scheme is 'telnet' or 'telnets'.)
+#
+# Related options:
+# This option is ignored if serial_port_service_uri is not specified.
+# * serial_port_service_uri
+#  (uri value)
+#serial_port_proxy_uri = <None>
+{%- if _data.serial_port_proxy_uri is defined %}
+serial_port_proxy_uri = {{ _data.serial_port_proxy_uri }}
+{%- endif %}
+
+# Specifies the directory where the Virtual Serial Port Concentrator is
+# storing console log files. It should match the 'serial_log_dir' config
+# value of VSPC.
+#  (string value)
+#serial_log_dir = /opt/vmware/vspc
+{%- if _data.serial_log_dir is defined %}
+serial_log_dir = {{ _data.serial_log_dir }}
+{%- endif %}
+
+
+#
+# Hostname or IP address for connection to VMware vCenter host. (unknown value)
+#host_ip = <None>
+host_ip = {{ _data.host_ip }}
+
+# Port for connection to VMware vCenter host. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#host_port = 443
+{%- if _data.host_port is defined %}
+host_port = {{ _data.host_port }}
+{%- endif %}
+
+# Username for connection to VMware vCenter host. (string value)
+#host_username = <None>
+host_username = {{ _data.host_username }}
+
+# Password for connection to VMware vCenter host. (string value)
+#host_password = <None>
+host_password = {{ _data.host_password }}
+
+#
+# Specifies the CA bundle file to be used in verifying the vCenter
+# server certificate.
+#  (string value)
+#ca_file = <None>
+{%- if _data.cacert_file is defined %}
+ca_file = {{ _data.cacert_file }}
+{%- endif %}
+
+#
+# If true, the vCenter server certificate is not verified. If false,
+# then the default CA truststore is used for verification.
+#
+# Related options:
+# * ca_file: If "ca_file" is set, this option is ignored.
+#  (boolean value)
+#insecure = false
+{%- if _data.insecure is defined %}
+insecure = {{ _data.insecure }}
+{%- endif %}
+
+# Name of a VMware Cluster ComputeResource. (string value)
+#cluster_name = <None>
+cluster_name = {{ _data.cluster_name }}
+
+#
+# Regular expression pattern to match the name of datastore.
+#
+# The datastore_regex setting specifies the datastores to use with
+# Compute. For example, datastore_regex="nas.*" selects all the data
+# stores that have a name starting with "nas".
+#
+# NOTE: If no regex is given, it just picks the datastore with the
+# most free space.
+#
+# Possible values:
+#
+# * Any matching regular expression to a datastore must be given
+#  (string value)
+#datastore_regex = <None>
+{%- if _data.datastore_regex is defined %}
+datastore_regex = {{ _data.datastore_regex }}
+{%- endif %}
+
+#
+# Time interval in seconds to poll remote tasks invoked on
+# VMware VC server.
+#  (floating point value)
+#task_poll_interval = 0.5
+{%- if _data.task_poll_interval is defined %}
+task_poll_interval = {{ _data.task_poll_interval }}
+{%- endif %}
+
+
+#
+# Number of times VMware vCenter server API must be retried on connection
+# failures, e.g. socket error, etc.
+#  (integer value)
+# Minimum value: 0
+#api_retry_count = 10
+{%- if _data.api_retry_count is defined %}
+api_retry_count = {{ _data.api_retry_count }}
+{%- endif %}
+
+#
+# This option specifies VNC starting port.
+#
+# Every VM created by ESX host has an option of enabling VNC client
+# for remote connection. Above option 'vnc_port' helps you to set
+# default starting port for the VNC client.
+#
+# Possible values:
+#
+# * Any valid port number within 5900 - (5900 + vnc_port_total)
+#
+# Related options:
+# Below options should be set to enable VNC client.
+# * vnc.enabled = True
+# * vnc_port_total
+#  (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#vnc_port = 5900
+{%- if _data.vnc_port is defined %}
+vnc_port = {{ _data.vnc_port }}
+{%- endif %}
+
+#
+# Total number of VNC ports.
+#  (integer value)
+# Minimum value: 0
+#vnc_port_total = 10000
+{%- if _data.vnc_port_total is defined %}
+vnc_port_total = {{ _data.vnc_port_total }}
+{%- endif %}
+
+#
+# This option enables/disables the use of linked clone.
+#
+# The ESX hypervisor requires a copy of the VMDK file in order to boot
+# up a virtual machine. The compute driver must download the VMDK via
+# HTTP from the OpenStack Image service to a datastore that is visible
+# to the hypervisor and cache it. Subsequent virtual machines that need
+# the VMDK use the cached version and don't have to copy the file again
+# from the OpenStack Image service.
+#
+# If set to false, even with a cached VMDK, there is still a copy
+# operation from the cache location to the hypervisor file directory
+# in the shared datastore. If set to true, the above copy operation
+# is avoided as it creates a copy of the virtual machine that shares
+# virtual disks with its parent VM.
+#  (boolean value)
+#use_linked_clone = true
+{%- if _data.use_linked_clone is defined %}
+use_linked_clone = {{ _data.use_linked_clone }}
+{%- endif %}
+
+# This option sets the http connection pool size
+#
+# The connection pool size is the maximum number of connections from nova to
+# vSphere.  It should only be increased if there are warnings indicating that
+# the connection pool is full, otherwise, the default should suffice.
+#  (integer value)
+# Minimum value: 10
+#connection_pool_size = 10
+{%- if _data.connection_pool_size is defined %}
+connection_pool_size = {{ _data.connection_pool_size }}
+{%- endif %}
+
+#
+# This option enables or disables storage policy based placement
+# of instances.
+#
+# Related options:
+#
+# * pbm_default_policy
+#  (boolean value)
+#pbm_enabled = false
+{%- if _data.pbm_enabled is defined %}
+pbm_enabled = {{ _data.pbm_enabled }}
+{%- endif %}
+
+#
+# This option specifies the PBM service WSDL file location URL.
+#
+# Setting this will disable storage policy based placement
+# of instances.
+#
+# Possible values:
+#
+# * Any valid file path
+#   e.g file:///opt/SDK/spbm/wsdl/pbmService.wsdl
+#  (string value)
+#pbm_wsdl_location = <None>
+{%- if _data.pbm_wsdl_location is defined %}
+pbm_wsdl_location = {{ _data.pbm_wsdl_location }}
+{%- endif %}
+
+#
+# This option specifies the default policy to be used.
+#
+# If pbm_enabled is set and there is no defined storage policy for the
+# specific request, then this policy will be used.
+#
+# Possible values:
+#
+# * Any valid storage policy such as VSAN default storage policy
+#
+# Related options:
+#
+# * pbm_enabled
+#  (string value)
+#pbm_default_policy = <None>
+{%- if _data.pbm_default_policy is defined %}
+pbm_default_policy = {{ _data.pbm_default_policy }}
+{%- endif %}
+
+#
+# This option specifies the limit on the maximum number of objects to
+# return in a single result.
+#
+# A positive value will cause the operation to suspend the retrieval
+# when the count of objects reaches the specified limit. The server may
+# still limit the count to something less than the configured value.
+# Any remaining objects may be retrieved with additional requests.
+#  (integer value)
+# Minimum value: 0
+#maximum_objects = 100
+{%- if _data.maximum_objects is defined %}
+maximum_objects = {{ _data.maximum_objects }}
+{%- endif %}
+
+#
+# This option adds a prefix to the folder where cached images are stored
+#
+# This is not the full path - just a folder prefix. This should only be
+# used when a datastore cache is shared between compute nodes.
+#
+# Note: This should only be used when the compute nodes are running on same
+# host or they have a shared file system.
+#
+# Possible values:
+#
+# * Any string representing the cache prefix to the folder
+#  (string value)
+#cache_prefix = <None>
+{%- if _data.cache_prefix is defined %}
+cache_prefix = {{ _data.cache_prefix }}
+{%- endif %}
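In the template above, host_ip, host_username, host_password and cluster_name are rendered unconditionally, so they must be present under the vmware key; every other option is emitted only when the matching pillar key is defined. A compute-side sketch with one optional key added (hypothetical values; key names taken from the template and tests/pillar/compute_cluster_vmware.sls):

    nova:
      compute:
        compute_driver: vmwareapi.VMwareVCDriver
        vmware:
          host_ip: 192.0.2.10              # hypothetical
          host_username: vmware
          host_password: vmware
          cluster_name: vmware_cluster01
          datastore_regex: nas.*           # optional, rendered only if set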
diff --git a/nova/files/queens/nova-compute.conf.Debian b/nova/files/queens/nova-compute.conf.Debian
index 599c04d..d471a26 100644
--- a/nova/files/queens/nova-compute.conf.Debian
+++ b/nova/files/queens/nova-compute.conf.Debian
@@ -1,4 +1,4 @@
-{%- from "nova/map.jinja" import compute with context %}
+{%- from "nova/map.jinja" import compute,compute_driver_mapping with context %}
 [DEFAULT]
 
 #
@@ -9629,286 +9629,11 @@
 # Tenant Name (string value)
 #tenant_name = <None>
 
-
-[vmware]
-#
-# Related options:
-# Following options must be set in order to launch VMware-based
-# virtual machines.
-#
-# * compute_driver: Must use vmwareapi.VMwareVCDriver.
-# * vmware.host_username
-# * vmware.host_password
-# * vmware.cluster_name
-
-#
-# From nova.conf
-#
-
-#
-# This option specifies the physical ethernet adapter name for VLAN
-# networking.
-#
-# Set the vlan_interface configuration option to match the ESX host
-# interface that handles VLAN-tagged VM traffic.
-#
-# Possible values:
-#
-# * Any valid string representing VLAN interface name
-#  (string value)
-#vlan_interface = vmnic0
-
-#
-# This option should be configured only when using the NSX-MH Neutron
-# plugin. This is the name of the integration bridge on the ESXi
-# server
-# or host. This should not be set for any other Neutron plugin. Hence
-# the default value is not set.
-#
-# Possible values:
-#
-# * Any valid string representing the name of the integration bridge
-#  (string value)
-#integration_bridge = <None>
-
-#
-# Set this value if affected by an increased network latency causing
-# repeated characters when typing in a remote console.
-#  (integer value)
-# Minimum value: 0
-#console_delay_seconds = <None>
-
-#
-# Identifies the remote system where the serial port traffic will
-# be sent.
-#
-# This option adds a virtual serial port which sends console output to
-# a configurable service URI. At the service URI address there will be
-# virtual serial port concentrator that will collect console logs.
-# If this is not set, no serial ports will be added to the created
-# VMs.
-#
-# Possible values:
-#
-# * Any valid URI
-#  (string value)
-#serial_port_service_uri = <None>
-
-#
-# Identifies a proxy service that provides network access to the
-# serial_port_service_uri.
-#
-# Possible values:
-#
-# * Any valid URI (The scheme is 'telnet' or 'telnets'.)
-#
-# Related options:
-# This option is ignored if serial_port_service_uri is not specified.
-# * serial_port_service_uri
-#  (uri value)
-#serial_port_proxy_uri = <None>
-
-#
-# Specifies the directory where the Virtual Serial Port Concentrator
-# is
-# storing console log files. It should match the 'serial_log_dir'
-# config
-# value of VSPC.
-#  (string value)
-#serial_log_dir = /opt/vmware/vspc
-
-#
-# Hostname or IP address for connection to VMware vCenter host.
-# (unknown value)
-#host_ip = <None>
-
-# Port for connection to VMware vCenter host. (port value)
-# Minimum value: 0
-# Maximum value: 65535
-#host_port = 443
-
-# Username for connection to VMware vCenter host. (string value)
-#host_username = <None>
-
-# Password for connection to VMware vCenter host. (string value)
-#host_password = <None>
-
-#
-# Specifies the CA bundle file to be used in verifying the vCenter
-# server certificate.
-#  (string value)
-#ca_file = <None>
-
-#
-# If true, the vCenter server certificate is not verified. If false,
-# then the default CA truststore is used for verification.
-#
-# Related options:
-# * ca_file: This option is ignored if "ca_file" is set.
-#  (boolean value)
-#insecure = false
-
-# Name of a VMware Cluster ComputeResource. (string value)
-#cluster_name = <None>
-
-#
-# Regular expression pattern to match the name of datastore.
-#
-# The datastore_regex setting specifies the datastores to use with
-# Compute. For example, datastore_regex="nas.*" selects all the data
-# stores that have a name starting with "nas".
-#
-# NOTE: If no regex is given, it just picks the datastore with the
-# most freespace.
-#
-# Possible values:
-#
-# * Any matching regular expression to a datastore must be given
-#  (string value)
-#datastore_regex = <None>
-
-#
-# Time interval in seconds to poll remote tasks invoked on
-# VMware VC server.
-#  (floating point value)
-#task_poll_interval = 0.5
-
-#
-# Number of times VMware vCenter server API must be retried on
-# connection
-# failures, e.g. socket error, etc.
-#  (integer value)
-# Minimum value: 0
-#api_retry_count = 10
-
-#
-# This option specifies VNC starting port.
-#
-# Every VM created by ESX host has an option of enabling VNC client
-# for remote connection. Above option 'vnc_port' helps you to set
-# default starting port for the VNC client.
-#
-# Possible values:
-#
-# * Any valid port number within 5900 -(5900 + vnc_port_total)
-#
-# Related options:
-# Below options should be set to enable VNC client.
-# * vnc.enabled = True
-# * vnc_port_total
-#  (port value)
-# Minimum value: 0
-# Maximum value: 65535
-#vnc_port = 5900
-
-#
-# Total number of VNC ports.
-#  (integer value)
-# Minimum value: 0
-#vnc_port_total = 10000
-
-#
-# This option enables/disables the use of linked clone.
-#
-# The ESX hypervisor requires a copy of the VMDK file in order to boot
-# up a virtual machine. The compute driver must download the VMDK via
-# HTTP from the OpenStack Image service to a datastore that is visible
-# to the hypervisor and cache it. Subsequent virtual machines that
-# need
-# the VMDK use the cached version and don't have to copy the file
-# again
-# from the OpenStack Image service.
-#
-# If set to false, even with a cached VMDK, there is still a copy
-# operation from the cache location to the hypervisor file directory
-# in the shared datastore. If set to true, the above copy operation
-# is avoided as it creates copy of the virtual machine that shares
-# virtual disks with its parent VM.
-#  (boolean value)
-#use_linked_clone = true
-
-#
-# This option sets the http connection pool size
-#
-# The connection pool size is the maximum number of connections from
-# nova to
-# vSphere.  It should only be increased if there are warnings
-# indicating that
-# the connection pool is full, otherwise, the default should suffice.
-#  (integer value)
-# Minimum value: 10
-#connection_pool_size = 10
-
-#
-# This option enables or disables storage policy based placement
-# of instances.
-#
-# Related options:
-#
-# * pbm_default_policy
-#  (boolean value)
-#pbm_enabled = false
-
-#
-# This option specifies the PBM service WSDL file location URL.
-#
-# Setting this will disable storage policy based placement
-# of instances.
-#
-# Possible values:
-#
-# * Any valid file path
-#   e.g file:///opt/SDK/spbm/wsdl/pbmService.wsdl
-#  (string value)
-#pbm_wsdl_location = <None>
-
-#
-# This option specifies the default policy to be used.
-#
-# If pbm_enabled is set and there is no defined storage policy for the
-# specific request, then this policy will be used.
-#
-# Possible values:
-#
-# * Any valid storage policy such as VSAN default storage policy
-#
-# Related options:
-#
-# * pbm_enabled
-#  (string value)
-#pbm_default_policy = <None>
-
-#
-# This option specifies the limit on the maximum number of objects to
-# return in a single result.
-#
-# A positive value will cause the operation to suspend the retrieval
-# when the count of objects reaches the specified limit. The server
-# may
-# still limit the count to something less than the configured value.
-# Any remaining objects may be retrieved with additional requests.
-#  (integer value)
-# Minimum value: 0
-#maximum_objects = 100
-
-#
-# This option adds a prefix to the folder where cached images are
-# stored
-#
-# This is not the full path - just a folder prefix. This should only
-# be
-# used when a datastore cache is shared between compute nodes.
-#
-# Note: This should only be used when the compute nodes are running on
-# same
-# host or they have a shared file system.
-#
-# Possible values:
-#
-# * Any string representing the cache prefix to the folder
-#  (string value)
-#cache_prefix = <None>
-
+{%- set compute_driver = compute.get('compute_driver', 'libvirt.LibvirtDriver') %}
+{%- if compute_driver in compute_driver_mapping.keys() %}
+{%- set _data = compute.get(compute_driver_mapping[compute_driver]) %}
+{%- include "nova/files/queens/compute/_" + compute_driver_mapping[compute_driver] + ".conf" %}
+{%- endif %}
 
 [vnc]
 #
diff --git a/nova/files/queens/nova-controller.conf.Debian b/nova/files/queens/nova-controller.conf.Debian
index b4d3ace..c413d87 100644
--- a/nova/files/queens/nova-controller.conf.Debian
+++ b/nova/files/queens/nova-controller.conf.Debian
@@ -1,4 +1,4 @@
-{%- from "nova/map.jinja" import controller with context %}
+{%- from "nova/map.jinja" import controller,compute_driver_mapping with context %}
 [DEFAULT]
 
 #
@@ -3028,6 +3028,11 @@
 
 # Disallow non-encrypted connections. (boolean value)
 #ssl_only = false
+{%- if controller.novncproxy.tls.get('enabled', False) %}
+ssl_only=True
+cert={{controller.novncproxy.tls.server.cert_file|yaml_squote}}
+key={{controller.novncproxy.tls.server.key_file|yaml_squote}}
+{%- endif %}
 
 # Set to True if source host is addressed with IPv6. (boolean value)
 #source_is_ipv6 = false
@@ -4444,6 +4449,7 @@
 {%- endif %}
 
 {%- set _data = controller.get('cinder', controller.get('identity', {})) %}
+{%- if 'cacert_file' not in _data.keys() %}{% do _data.update({'cacert_file': controller.cacert_file}) %}{% endif %}
 {%- set auth_type = _data.get('auth_type', 'password') %}
 {%- include "oslo_templates/files/queens/keystoneauth/_type_" + auth_type + ".conf" %}
 
@@ -9537,286 +9543,11 @@
 # Tenant Name (string value)
 #tenant_name = <None>
 
-
-[vmware]
-#
-# Related options:
-# Following options must be set in order to launch VMware-based
-# virtual machines.
-#
-# * compute_driver: Must use vmwareapi.VMwareVCDriver.
-# * vmware.host_username
-# * vmware.host_password
-# * vmware.cluster_name
-
-#
-# From nova.conf
-#
-
-#
-# This option specifies the physical ethernet adapter name for VLAN
-# networking.
-#
-# Set the vlan_interface configuration option to match the ESX host
-# interface that handles VLAN-tagged VM traffic.
-#
-# Possible values:
-#
-# * Any valid string representing VLAN interface name
-#  (string value)
-#vlan_interface = vmnic0
-
-#
-# This option should be configured only when using the NSX-MH Neutron
-# plugin. This is the name of the integration bridge on the ESXi
-# server
-# or host. This should not be set for any other Neutron plugin. Hence
-# the default value is not set.
-#
-# Possible values:
-#
-# * Any valid string representing the name of the integration bridge
-#  (string value)
-#integration_bridge = <None>
-
-#
-# Set this value if affected by an increased network latency causing
-# repeated characters when typing in a remote console.
-#  (integer value)
-# Minimum value: 0
-#console_delay_seconds = <None>
-
-#
-# Identifies the remote system where the serial port traffic will
-# be sent.
-#
-# This option adds a virtual serial port which sends console output to
-# a configurable service URI. At the service URI address there will be
-# virtual serial port concentrator that will collect console logs.
-# If this is not set, no serial ports will be added to the created
-# VMs.
-#
-# Possible values:
-#
-# * Any valid URI
-#  (string value)
-#serial_port_service_uri = <None>
-
-#
-# Identifies a proxy service that provides network access to the
-# serial_port_service_uri.
-#
-# Possible values:
-#
-# * Any valid URI (The scheme is 'telnet' or 'telnets'.)
-#
-# Related options:
-# This option is ignored if serial_port_service_uri is not specified.
-# * serial_port_service_uri
-#  (uri value)
-#serial_port_proxy_uri = <None>
-
-#
-# Specifies the directory where the Virtual Serial Port Concentrator
-# is
-# storing console log files. It should match the 'serial_log_dir'
-# config
-# value of VSPC.
-#  (string value)
-#serial_log_dir = /opt/vmware/vspc
-
-#
-# Hostname or IP address for connection to VMware vCenter host.
-# (unknown value)
-#host_ip = <None>
-
-# Port for connection to VMware vCenter host. (port value)
-# Minimum value: 0
-# Maximum value: 65535
-#host_port = 443
-
-# Username for connection to VMware vCenter host. (string value)
-#host_username = <None>
-
-# Password for connection to VMware vCenter host. (string value)
-#host_password = <None>
-
-#
-# Specifies the CA bundle file to be used in verifying the vCenter
-# server certificate.
-#  (string value)
-#ca_file = <None>
-
-#
-# If true, the vCenter server certificate is not verified. If false,
-# then the default CA truststore is used for verification.
-#
-# Related options:
-# * ca_file: This option is ignored if "ca_file" is set.
-#  (boolean value)
-#insecure = false
-
-# Name of a VMware Cluster ComputeResource. (string value)
-#cluster_name = <None>
-
-#
-# Regular expression pattern to match the name of datastore.
-#
-# The datastore_regex setting specifies the datastores to use with
-# Compute. For example, datastore_regex="nas.*" selects all the data
-# stores that have a name starting with "nas".
-#
-# NOTE: If no regex is given, it just picks the datastore with the
-# most freespace.
-#
-# Possible values:
-#
-# * Any matching regular expression to a datastore must be given
-#  (string value)
-#datastore_regex = <None>
-
-#
-# Time interval in seconds to poll remote tasks invoked on
-# VMware VC server.
-#  (floating point value)
-#task_poll_interval = 0.5
-
-#
-# Number of times VMware vCenter server API must be retried on
-# connection
-# failures, e.g. socket error, etc.
-#  (integer value)
-# Minimum value: 0
-#api_retry_count = 10
-
-#
-# This option specifies VNC starting port.
-#
-# Every VM created by ESX host has an option of enabling VNC client
-# for remote connection. Above option 'vnc_port' helps you to set
-# default starting port for the VNC client.
-#
-# Possible values:
-#
-# * Any valid port number within 5900 -(5900 + vnc_port_total)
-#
-# Related options:
-# Below options should be set to enable VNC client.
-# * vnc.enabled = True
-# * vnc_port_total
-#  (port value)
-# Minimum value: 0
-# Maximum value: 65535
-#vnc_port = 5900
-
-#
-# Total number of VNC ports.
-#  (integer value)
-# Minimum value: 0
-#vnc_port_total = 10000
-
-#
-# This option enables/disables the use of linked clone.
-#
-# The ESX hypervisor requires a copy of the VMDK file in order to boot
-# up a virtual machine. The compute driver must download the VMDK via
-# HTTP from the OpenStack Image service to a datastore that is visible
-# to the hypervisor and cache it. Subsequent virtual machines that
-# need
-# the VMDK use the cached version and don't have to copy the file
-# again
-# from the OpenStack Image service.
-#
-# If set to false, even with a cached VMDK, there is still a copy
-# operation from the cache location to the hypervisor file directory
-# in the shared datastore. If set to true, the above copy operation
-# is avoided as it creates copy of the virtual machine that shares
-# virtual disks with its parent VM.
-#  (boolean value)
-#use_linked_clone = true
-
-#
-# This option sets the http connection pool size
-#
-# The connection pool size is the maximum number of connections from
-# nova to
-# vSphere.  It should only be increased if there are warnings
-# indicating that
-# the connection pool is full, otherwise, the default should suffice.
-#  (integer value)
-# Minimum value: 10
-#connection_pool_size = 10
-
-#
-# This option enables or disables storage policy based placement
-# of instances.
-#
-# Related options:
-#
-# * pbm_default_policy
-#  (boolean value)
-#pbm_enabled = false
-
-#
-# This option specifies the PBM service WSDL file location URL.
-#
-# Setting this will disable storage policy based placement
-# of instances.
-#
-# Possible values:
-#
-# * Any valid file path
-#   e.g file:///opt/SDK/spbm/wsdl/pbmService.wsdl
-#  (string value)
-#pbm_wsdl_location = <None>
-
-#
-# This option specifies the default policy to be used.
-#
-# If pbm_enabled is set and there is no defined storage policy for the
-# specific request, then this policy will be used.
-#
-# Possible values:
-#
-# * Any valid storage policy such as VSAN default storage policy
-#
-# Related options:
-#
-# * pbm_enabled
-#  (string value)
-#pbm_default_policy = <None>
-
-#
-# This option specifies the limit on the maximum number of objects to
-# return in a single result.
-#
-# A positive value will cause the operation to suspend the retrieval
-# when the count of objects reaches the specified limit. The server
-# may
-# still limit the count to something less than the configured value.
-# Any remaining objects may be retrieved with additional requests.
-#  (integer value)
-# Minimum value: 0
-#maximum_objects = 100
-
-#
-# This option adds a prefix to the folder where cached images are
-# stored
-#
-# This is not the full path - just a folder prefix. This should only
-# be
-# used when a datastore cache is shared between compute nodes.
-#
-# Note: This should only be used when the compute nodes are running on
-# same
-# host or they have a shared file system.
-#
-# Possible values:
-#
-# * Any string representing the cache prefix to the folder
-#  (string value)
-#cache_prefix = <None>
-
+{%- set compute_driver = controller.get('compute_driver', 'libvirt.LibvirtDriver') %}
+{%- if compute_driver in compute_driver_mapping.keys() %}
+{%- set _data = controller.get(compute_driver_mapping[compute_driver]) %}
+{%- include "nova/files/queens/compute/_" + compute_driver_mapping[compute_driver] + ".conf" %}
+{%- endif %}
 
 [vnc]
 #
@@ -9847,6 +9578,12 @@
 {%- else %}
 vncserver_listen={{ controller.bind.get('novncproxy_address', '0.0.0.0') }}
 {%- endif %}
+{%- if controller.novncproxy.vencrypt.tls.get('enabled', False) %}
+auth_schemes=vencrypt
+vencrypt_client_key={{controller.novncproxy.vencrypt.tls.key_file|yaml_squote}}
+vencrypt_client_cert={{controller.novncproxy.vencrypt.tls.cert_file|yaml_squote}}
+vencrypt_ca_certs={{controller.novncproxy.vencrypt.tls.ca_file|yaml_squote}}
+{%- endif %}
 
 #
 # Keymap for VNC.
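Both TLS additions above are driven purely by the controller pillar; the file paths default to the /etc/pki/nova-novncproxy locations declared in map.jinja further down. A sketch of a pillar that enables both, assuming the certificates are already deployed at the default paths:

    nova:
      controller:
        novncproxy:
          tls:
            enabled: true
            server:
              cert_file: /etc/pki/nova-novncproxy/server-cert.pem
              key_file: /etc/pki/nova-novncproxy/server-key.pem
          vencrypt:
            tls:
              enabled: true
              cert_file: /etc/pki/nova-novncproxy/client-cert.pem
              key_file: /etc/pki/nova-novncproxy/client-key.pem
              ca_file: /etc/pki/nova-novncproxy/ca-cert.pem

With tls.enabled the proxy sets ssl_only=True and refuses non-encrypted connections; with vencrypt.tls.enabled it additionally advertises auth_schemes=vencrypt and uses the client key, certificate and CA bundle for the VeNCrypt-secured leg towards the compute-side VNC server.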
diff --git a/nova/files/queens/qemu.conf.Debian b/nova/files/queens/qemu.conf.Debian
index cb20491..3960007 100644
--- a/nova/files/queens/qemu.conf.Debian
+++ b/nova/files/queens/qemu.conf.Debian
@@ -31,7 +31,11 @@
 # before enabling this.
 #
 #vnc_tls = 1
-
+{%- if compute.qemu.vnc.tls.get('enabled', False) %}
+vnc_tls = 1
+vnc_tls_x509_verify = 1
+vnc_tls_x509_cert_dir = {{compute.qemu.vnc.tls.cert_dir|yaml_squote}}
+{%- endif %}
 
 # Use of TLS requires that x509 certificates be issued. The
 # default it to keep them in /etc/pki/libvirt-vnc. This directory
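The compute side follows the same pattern: qemu.conf turns on vnc_tls only when the pillar enables it, and the template consumes just the enabled flag and cert_dir; the key/cert/CA paths added to map.jinja below are carried in the pillar model but are not referenced by this file. A minimal sketch:

    nova:
      compute:
        qemu:
          vnc:
            tls:
              enabled: true
              cert_dir: /etc/pki/libvirt-vnc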
diff --git a/nova/map.jinja b/nova/map.jinja
index f5b55ec..599a75a 100644
--- a/nova/map.jinja
+++ b/nova/map.jinja
@@ -5,6 +5,11 @@
     })}
 %}
 
+# TODO(vsaienko) add settings for more drivers once they are templatized
+{% set compute_driver_mapping = {
+    'vmwareapi.VMwareVCDriver': 'vmware',
+} %}
+
 {% set compute_bind_defaults = {
     'vnc_address': '10.0.0.10',
     'vnc_port': '6080',
@@ -42,6 +47,7 @@
           'enabled': false
         },
         'logging': {
+          'app_name': 'nova',
           'log_appender': false,
           'log_handlers': {
             'watchedfile': {
@@ -49,6 +55,23 @@
             }
           }
         },
+        'novncproxy': {
+          'tls': {
+            'enabled': false,
+            'server': {
+              'key_file': '/etc/pki/nova-novncproxy/server-key.pem',
+              'cert_file': '/etc/pki/nova-novncproxy/server-cert.pem',
+            }
+          },
+          'vencrypt': {
+            'tls': {
+              'enabled': false,
+              'key_file': '/etc/pki/nova-novncproxy/client-key.pem',
+              'cert_file': '/etc/pki/nova-novncproxy/client-cert.pem',
+              'ca_file': '/etc/pki/nova-novncproxy/ca-cert.pem',
+            }
+          }
+        },
     },
     'RedHat': {
         'pkgs': pkgs_list,
@@ -60,6 +83,7 @@
           'enabled': false
         },
         'logging': {
+          'app_name': 'nova',
           'log_appender': false,
           'log_handlers': {
             'watchedfile': {
@@ -67,6 +91,23 @@
             }
           }
         },
+        'novncproxy': {
+          'tls': {
+            'enabled': false,
+            'server': {
+              'key_file': '/etc/pki/nova-novncproxy/server-key.pem',
+              'cert_file': '/etc/pki/nova-novncproxy/server-cert.pem',
+            }
+          },
+          'vencrypt': {
+            'tls': {
+              'enabled': false,
+              'key_file': '/etc/pki/nova-novncproxy/client-key.pem',
+              'cert_file': '/etc/pki/nova-novncproxy/client-cert.pem',
+              'ca_file': '/etc/pki/nova-novncproxy/ca-cert.pem',
+            }
+          }
+        },
     },
 }, merge=pillar.nova.get('controller', {}), base='BaseDefaults') %}
 
@@ -124,6 +165,14 @@
   libvirt_service: libvirt-bin
   bind: compute_bind_defaults
   debug: false
+  qemu:
+    vnc:
+      tls:
+        enabled: False
+        key_file: '/etc/pki/libvirt-vnc/server-key.pem'
+        cert_file: '/etc/pki/libvirt-vnc/server-cert.pem'
+        ca_file: '/etc/pki/libvirt-vnc/ca-cert.pem'
+        cert_dir: '/etc/pki/libvirt-vnc'
   libvirt:
     inject_partition: '-2'
     inject_password: False
@@ -148,6 +197,7 @@
     zmq_linger: 30
     rpc_response_timeout: 3600
   logging:
+    app_name: 'nova'
     log_appender: false
     log_handlers:
       watchedfile:
@@ -168,6 +218,14 @@
   libvirt_service: libvirtd
   bind: compute_bind_defaults
   debug: false
+  qemu:
+    vnc:
+      tls:
+        enabled: False
+        key_file: '/etc/pki/libvirt-vnc/server-key.pem'
+        cert_file: '/etc/pki/libvirt-vnc/server-cert.pem'
+        ca_file: '/etc/pki/libvirt-vnc/ca-cert.pem'
+        cert_dir: '/etc/pki/libvirt-vnc'
   libvirt:
     inject_partition: '-2'
     inject_password: False
@@ -189,6 +247,7 @@
     zmq_linger: 30
     rpc_response_timeout: 3600
   logging:
+    app_name: 'nova'
     log_appender: false
     log_handlers:
       watchedfile:
diff --git a/nova/meta/fluentd.yml b/nova/meta/fluentd.yml
index 43d7a28..48cf241 100644
--- a/nova/meta/fluentd.yml
+++ b/nova/meta/fluentd.yml
@@ -43,8 +43,9 @@
                 value: INFO
               - name: programname
                 value: nova-placement-wsgi
+                # Apache logs response time in microseconds
               - name: http_response_time
-                value: ${ record['http_response_time'].to_i/100000.to_f }
+                value: ${ record['http_response_time'].to_i/10**6.to_f }
         match:
           send_to_default:
             tag: openstack.nova
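For the placement WSGI logs, Apache reports the response time in microseconds, so dividing by 10**6 converts it to seconds: a record of 250000 becomes 0.25 s, where the previous /100000 divisor would have produced 2.5.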
diff --git a/nova/meta/grafana.yml b/nova/meta/grafana.yml
index b5b1693..0fc3ac3 100644
--- a/nova/meta/grafana.yml
+++ b/nova/meta/grafana.yml
@@ -1,8 +1,11 @@
-{%- from "nova/map.jinja" import compute with context %}
+{%- from "nova/map.jinja" import controller, compute with context %}
 parameters:
   {%- if compute is defined and compute.get('enabled', False) and compute.get('ceph', {}).get('ephemeral', False) %}
   nova_compute_ceph_ephemeral: True
   {%- endif %}
+  {%- if controller is defined and controller.get('version',{}) in ["juno", "kilo", "liberty", "mitaka"] %}
+  nova_cert_enabled: True
+  {%- endif %}
 dashboard:
 {%- if pillar.get('fluentd', {}).get('agent', {}).get('enabled', False) %}
   nova_overview_prometheus:
diff --git a/nova/meta/prometheus.yml b/nova/meta/prometheus.yml
index a638877..001a9ea 100644
--- a/nova/meta/prometheus.yml
+++ b/nova/meta/prometheus.yml
@@ -151,159 +151,157 @@
         summary: "{{ $labels.binary }} service outage"
         description: >-
           All {{ $labels.binary }} services are down.
-{%- endraw -%}
-
+{%- endraw %}
 {%- set cpu_minor_threshold = monitoring.cpu_minor_threshold|float %}
 {%- set cpu_major_threshold = monitoring.cpu_major_threshold|float %}
 {%- set ram_major_threshold = monitoring.ram_major_threshold|float %}
 {%- set ram_critical_threshold = monitoring.ram_critical_threshold|float %}
 {%- set disk_major_threshold = monitoring.disk_major_threshold|float %}
-{%- set disk_critical_threshold = monitoring.disk_critical_threshold|float -%}
-
+{%- set disk_critical_threshold = monitoring.disk_critical_threshold|float %}
     NovaHypervisorVCPUsFullMinor:
       if: >-
-        label_replace(system_load15, "hostname", "$1", "host", "(.*)") >= on (hostname) openstack_nova_vcpus * {{ cpu_minor_threshold }}
+        label_replace(system_load15, "hostname", "$1", "host", "(.*)") > on (hostname) openstack_nova_vcpus * {{ cpu_minor_threshold }}
       labels:
         severity: minor
         service: nova
       annotations:
         summary: "{{ cpu_minor_threshold * 100 }}% of hypervisor VCPUs are used"
-        description: "{% raw %}{{ $value }} VCPUs on the {{ $labels.hostname }} node (>= {% endraw %} {{ cpu_minor_threshold * 100 }}%) are used."
+        description: "{% raw %}{{ $value }} VCPUs on the {{ $labels.hostname }} node (> {% endraw %} {{ cpu_minor_threshold * 100 }}%) are used."
     NovaHypervisorVCPUsFullMajor:
       if: >-
-        label_replace(system_load15, "hostname", "$1", "host", "(.*)") >= on (hostname) openstack_nova_vcpus * {{ cpu_major_threshold }}
+        label_replace(system_load15, "hostname", "$1", "host", "(.*)") > on (hostname) openstack_nova_vcpus * {{ cpu_major_threshold }}
       labels:
         severity: major
         service: nova
       annotations:
         summary: "{{ cpu_major_threshold * 100 }}% of hypervisor VCPUs are used"
-        description: "{% raw %}{{ $value }} VCPUs on the {{ $labels.hostname }} node (>= {% endraw %} {{ cpu_major_threshold * 100 }}%) are used."
+        description: "{% raw %}{{ $value }} VCPUs on the {{ $labels.hostname }} node (> {% endraw %} {{ cpu_major_threshold * 100 }}%) are used."
     NovaHypervisorMemoryFullMajor:
       if: >-
-        openstack_nova_used_ram >= openstack_nova_ram * {{ ram_major_threshold }}
+        openstack_nova_used_ram > openstack_nova_ram * {{ ram_major_threshold }}
       labels:
         severity: major
         service: nova
       annotations:
         summary: "{{ ram_major_threshold * 100 }}% of hypervisor RAM is used"
-        description: "{% raw %}{{ $value }}MB of RAM on the {{ $labels.hostname }} node (>= {% endraw %} {{ ram_major_threshold * 100 }}%) is used."
+        description: "{% raw %}{{ $value }}MB of RAM on the {{ $labels.hostname }} node (> {% endraw %} {{ ram_major_threshold * 100 }}%) is used."
     NovaHypervisorMemoryFullCritical:
       if: >-
-        openstack_nova_used_ram >= openstack_nova_ram * {{ ram_critical_threshold }}
+        openstack_nova_used_ram > openstack_nova_ram * {{ ram_critical_threshold }}
       labels:
         severity: critical
         service: nova
       annotations:
         summary: "{{ ram_critical_threshold * 100 }}% of hypervisor RAM is used"
-        description: "{% raw %}{{ $value }}MB of RAM on the {{ $labels.hostname }} node (>= {% endraw %} {{ ram_critical_threshold * 100 }}%) is used."
+        description: "{% raw %}{{ $value }}MB of RAM on the {{ $labels.hostname }} node (> {% endraw %} {{ ram_critical_threshold * 100 }}%) is used."
     NovaHypervisorDiskFullMajor:
       if: >-
-        openstack_nova_used_disk >= openstack_nova_disk * {{ disk_major_threshold }}
+        openstack_nova_used_disk > openstack_nova_disk * {{ disk_major_threshold }}
       labels:
         severity: major
         service: nova
       annotations:
         summary: "{{ disk_major_threshold * 100 }}% of hypervisor disk space is used"
-        description: "{% raw %}{{ $value }}GB of disk space on the {{ $labels.hostname }} node (>= {% endraw %} {{ disk_major_threshold * 100 }}%) is used."
+        description: "{% raw %}{{ $value }}GB of disk space on the {{ $labels.hostname }} node (> {% endraw %} {{ disk_major_threshold * 100 }}%) is used."
     NovaHypervisorDiskFullCritical:
       if: >-
-        openstack_nova_used_disk >= openstack_nova_disk * {{ disk_critical_threshold }}
+        openstack_nova_used_disk > openstack_nova_disk * {{ disk_critical_threshold }}
       labels:
         severity: critical
         service: nova
       annotations:
         summary: "{{ disk_critical_threshold * 100 }}% of hypervisor disk space is used"
-        description: "{% raw %}{{ $value }}GB of disk space on the {{ $labels.hostname }} node (>= {% endraw %} {{ disk_critical_threshold * 100 }}%) is used."
+        description: "{% raw %}{{ $value }}GB of disk space on the {{ $labels.hostname }} node (> {% endraw %} {{ disk_critical_threshold * 100 }}%) is used."
     NovaAggregateMemoryFullMajor:
       if: >-
-        openstack_nova_aggregate_used_ram >= openstack_nova_aggregate_ram * {{ ram_major_threshold }}
+        openstack_nova_aggregate_used_ram > openstack_nova_aggregate_ram * {{ ram_major_threshold }}
       labels:
         severity: major
         service: nova
       annotations:
         summary: "{{ ram_major_threshold * 100 }}% of aggregate RAM is used"
-        description: "{% raw %}{{ $value }}MB of RAM on the {{ $labels.aggregate }}{% endraw %} aggregate is used (at least {{ ram_major_threshold * 100 }}%)."
+        description: "{% raw %}{{ $value }}MB of RAM on the {{ $labels.aggregate }} aggregate (> {% endraw %} {{ ram_major_threshold * 100 }}%) is used."
     NovaAggregateMemoryFullCritical:
       if: >-
-        openstack_nova_aggregate_used_ram >= openstack_nova_aggregate_ram * {{ ram_critical_threshold }}
+        openstack_nova_aggregate_used_ram > openstack_nova_aggregate_ram * {{ ram_critical_threshold }}
       labels:
         severity: critical
         service: nova
       annotations:
         summary: "{{ ram_critical_threshold * 100 }}% of aggregate RAM is used"
-        description: "{% raw %}{{ $value }}MB of RAM on the {{ $labels.aggregate }} aggregate (>= {% endraw %} {{ ram_critical_threshold * 100 }}%) is used."
+        description: "{% raw %}{{ $value }}MB of RAM on the {{ $labels.aggregate }} aggregate (> {% endraw %} {{ ram_critical_threshold * 100 }}%) is used."
     NovaAggregateDiskFullMajor:
       if: >-
-        openstack_nova_aggregate_used_disk >= openstack_nova_aggregate_disk * {{ disk_major_threshold }}
+        openstack_nova_aggregate_used_disk > openstack_nova_aggregate_disk * {{ disk_major_threshold }}
       labels:
         severity: major
         service: nova
       annotations:
         summary: "{{ disk_major_threshold * 100 }}% of aggregate disk space is used"
-        description: "{% raw %}{{ $value }}GB of disk space on the {{ $labels.aggregate }} aggregate (>= {% endraw %} {{ disk_major_threshold * 100 }}%) is used."
+        description: "{% raw %}{{ $value }}GB of disk space on the {{ $labels.aggregate }} aggregate (> {% endraw %} {{ disk_major_threshold * 100 }}%) is used."
     NovaAggregateDiskFullCritical:
       if: >-
-        openstack_nova_aggregate_used_disk >= openstack_nova_aggregate_disk * {{ disk_critical_threshold }}
+        openstack_nova_aggregate_used_disk > openstack_nova_aggregate_disk * {{ disk_critical_threshold }}
       labels:
         severity: critical
         service: nova
       annotations:
         summary: "{{ disk_critical_threshold * 100 }}% of aggregate disk space is used"
-        description: "{% raw %}{{ $value }}GB of disk space on the {{ $labels.aggregate }} aggregate (>= {% endraw %} {{ disk_critical_threshold * 100 }}%) is used."
+        description: "{% raw %}{{ $value }}GB of disk space on the {{ $labels.aggregate }} aggregate (> {% endraw %} {{ disk_critical_threshold * 100 }}%) is used."
     NovaTotalVCPUsFullMinor:
       if: >-
-        sum(label_replace(system_load15, "hostname", "$1", "host", "(.*)") and on (hostname) openstack_nova_vcpus) >= max(sum(openstack_nova_vcpus) by (instance)) * {{ cpu_minor_threshold }}
+        sum(label_replace(system_load15, "hostname", "$1", "host", "(.*)") and on (hostname) openstack_nova_vcpus) > max(sum(openstack_nova_vcpus) by (instance)) * {{ cpu_minor_threshold }}
       labels:
         severity: minor
         service: nova
       annotations:
         summary: "{{ cpu_minor_threshold * 100 }}% of cloud VCPUs are used"
-        description: "{% raw %}{{ $value }} VCPUs in the cloud (>= {% endraw %} {{ cpu_minor_threshold * 100 }}%) are used."
+        description: "{% raw %}{{ $value }} VCPUs in the cloud (> {% endraw %} {{ cpu_minor_threshold * 100 }}%) are used."
     NovaTotalVCPUsFullMajor:
       if: >-
-        sum(label_replace(system_load15, "hostname", "$1", "host", "(.*)") and on (hostname) openstack_nova_vcpus) >= max(sum(openstack_nova_vcpus) by (instance)) * {{ cpu_major_threshold }}
+        sum(label_replace(system_load15, "hostname", "$1", "host", "(.*)") and on (hostname) openstack_nova_vcpus) > max(sum(openstack_nova_vcpus) by (instance)) * {{ cpu_major_threshold }}
       labels:
         severity: major
         service: nova
       annotations:
         summary: "{{ cpu_major_threshold * 100 }}% of cloud VCPUs are used"
-        description: "{% raw %}{{ $value }} VCPUs in the cloud (>= {% endraw %} {{ cpu_major_threshold * 100 }}%) are used."
+        description: "{% raw %}{{ $value }} VCPUs in the cloud (> {% endraw %} {{ cpu_major_threshold * 100 }}%) are used."
     NovaTotalMemoryFullMajor:
       if: >-
-        openstack_nova_total_used_ram >= openstack_nova_total_ram * {{ ram_major_threshold }}
+        openstack_nova_total_used_ram > openstack_nova_total_ram * {{ ram_major_threshold }}
       labels:
         severity: major
         service: nova
       annotations:
         summary: "{{ ram_major_threshold * 100 }}% of cloud RAM is used"
-        description: "{% raw %}{{ $value }}MB of RAM in the cloud (>= {% endraw %} {{ ram_major_threshold * 100 }}%) is used."
+        description: "{% raw %}{{ $value }}MB of RAM in the cloud (> {% endraw %} {{ ram_major_threshold * 100 }}%) is used."
     NovaTotalMemoryFullCritical:
       if: >-
-        openstack_nova_total_used_ram >= openstack_nova_total_ram * {{ ram_critical_threshold }}
+        openstack_nova_total_used_ram > openstack_nova_total_ram * {{ ram_critical_threshold }}
       labels:
         severity: critical
         service: nova
       annotations:
         summary: "{{ ram_critical_threshold * 100 }}% of cloud RAM is used"
-        description: "{% raw %}{{ $value }}MB of RAM in the cloud (>= {% endraw %} {{ ram_critical_threshold * 100 }}%) is used."
+        description: "{% raw %}{{ $value }}MB of RAM in the cloud (> {% endraw %} {{ ram_critical_threshold * 100 }}%) is used."
     NovaTotalDiskFullMajor:
       if: >-
-        openstack_nova_total_used_disk >= openstack_nova_total_disk * {{ disk_major_threshold }}
+        openstack_nova_total_used_disk > openstack_nova_total_disk * {{ disk_major_threshold }}
       labels:
         severity: major
         service: nova
       annotations:
         summary: "{{ disk_major_threshold * 100 }}% of cloud disk space is used"
-        description: "{% raw %}{{ $value }}GB of disk space in the cloud (>= {% endraw %} {{ disk_major_threshold * 100 }}%) is used."
+        description: "{% raw %}{{ $value }}GB of disk space in the cloud (> {% endraw %} {{ disk_major_threshold * 100 }}%) is used."
     NovaTotalDiskFullCritical:
       if: >-
-        openstack_nova_total_used_disk >= openstack_nova_total_disk * {{ disk_critical_threshold }}
+        openstack_nova_total_used_disk > openstack_nova_total_disk * {{ disk_critical_threshold }}
       labels:
         severity: critical
         service: nova
       annotations:
         summary: "{{ disk_critical_threshold * 100 }}% of cloud disk space is used"
-        description: "{% raw %}{{ $value }}GB of disk space in the cloud (>= {% endraw %} {{ disk_critical_threshold * 100 }}%) is used."
+        description: "{% raw %}{{ $value }}GB of disk space in the cloud (> {% endraw %} {{ disk_critical_threshold * 100 }}%) is used."
 {%- endif %}
     NovaErrorLogsTooHigh:
       {%- set log_threshold = monitoring.error_log_rate.warn|float %}
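The capacity alerts now use strict comparisons, so they fire only once usage exceeds the threshold rather than when it merely reaches it; for example, with a hypothetical ram_major_threshold of 0.75 and 512000 MB of hypervisor RAM, NovaHypervisorMemoryFullMajor triggers above 384000 MB used, not at exactly 384000 MB.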
diff --git a/nova/meta/sphinx.yml b/nova/meta/sphinx.yml
index edf5eae..92e4231 100644
--- a/nova/meta/sphinx.yml
+++ b/nova/meta/sphinx.yml
@@ -10,9 +10,6 @@
         version:
           name: "Version"
           value: {{ compute.version }}
-        virtualization:
-          name: "Virtualization type"
-          value: {{ compute.virtualization }}
         {%- if compute.vncproxy_url is defined %}
         vncproxy_url:
           name: "VNC proxy URL"
diff --git a/tests/pillar/compute_cluster.sls b/tests/pillar/compute_cluster.sls
index 4bbcfba..c057c09 100644
--- a/tests/pillar/compute_cluster.sls
+++ b/tests/pillar/compute_cluster.sls
@@ -6,7 +6,6 @@
     hugepages:
       mount_points:
       - path: /mnt/hugepages_1GB
-    virtualization: kvm
     disk_cachemodes: network=writeback,block=none
     heal_instance_info_cache_interval: 60
     vncproxy_url: openstack:6080
diff --git a/tests/pillar/compute_cluster_vmware.sls b/tests/pillar/compute_cluster_vmware.sls
new file mode 100644
index 0000000..ceaf142
--- /dev/null
+++ b/tests/pillar/compute_cluster_vmware.sls
@@ -0,0 +1,78 @@
+nova:
+  compute:
+    version: pike
+    enabled: true
+    bind:
+      vnc_address: 127.0.0.1
+      vnc_port: 6080
+      vnc_name: 0.0.0.0
+    database:
+      engine: mysql
+      host: 127.0.0.1
+      port: 3306
+      name: nova
+      user: nova
+      password: password
+    identity:
+      engine: keystone
+      region: RegionOne
+      host: 127.0.0.1
+      port: 35357
+      user: nova
+      password: password
+      tenant: service
+    logging:
+      log_appender: false
+      log_handlers:
+        watchedfile:
+          enabled: true
+        fluentd:
+          enabled: false
+        ossyslog:
+          enabled: false
+    message_queue:
+      engine: rabbitmq
+      members:
+      - host: 127.0.0.1
+      - host: 127.0.1.1
+      - host: 127.0.2.1
+      user: openstack
+      password: password
+      virtual_host: '/openstack'
+    image:
+      engine: glance
+      host: 127.0.0.1
+      port: 9292
+    network:
+      engine: neutron
+      region: RegionOne
+      host: 127.0.0.1
+      port: 9696
+      extension_sync_interval: 600
+      user: nova
+      password: password
+      tenant: service
+    metadata:
+      password: metadata
+    cache:
+      engine: memcached
+      members:
+      - host: 127.0.0.1
+        port: 11211
+      - host: 127.0.1.1
+        port: 11211
+      - host: 127.0.2.1
+        port: 11211
+    compute_driver: vmwareapi.VMwareVCDriver
+    vmware:
+      host_username: vmware
+      host_password: vmware
+      cluster_name: vmware_cluster01
+    upgrade_levels:
+      compute: liberty
+    libvirt_service_group: libvirtd
+    lvm:
+      ephemeral: yes
+      images_volume_group: nova_vg
+      volume_clear: zero
+      volume_clear_size: 0
diff --git a/tests/pillar/compute_cluster_vmware_queens.sls b/tests/pillar/compute_cluster_vmware_queens.sls
new file mode 100644
index 0000000..1d6b0cf
--- /dev/null
+++ b/tests/pillar/compute_cluster_vmware_queens.sls
@@ -0,0 +1,78 @@
+nova:
+  compute:
+    version: queens
+    enabled: true
+    bind:
+      vnc_address: 127.0.0.1
+      vnc_port: 6080
+      vnc_name: 0.0.0.0
+    database:
+      engine: mysql
+      host: 127.0.0.1
+      port: 3306
+      name: nova
+      user: nova
+      password: password
+    identity:
+      engine: keystone
+      region: RegionOne
+      host: 127.0.0.1
+      port: 35357
+      user: nova
+      password: password
+      tenant: service
+    logging:
+      log_appender: false
+      log_handlers:
+        watchedfile:
+          enabled: true
+        fluentd:
+          enabled: false
+        ossyslog:
+          enabled: false
+    message_queue:
+      engine: rabbitmq
+      members:
+      - host: 127.0.0.1
+      - host: 127.0.1.1
+      - host: 127.0.2.1
+      user: openstack
+      password: password
+      virtual_host: '/openstack'
+    image:
+      engine: glance
+      host: 127.0.0.1
+      port: 9292
+    network:
+      engine: neutron
+      region: RegionOne
+      host: 127.0.0.1
+      port: 9696
+      extension_sync_interval: 600
+      user: nova
+      password: password
+      tenant: service
+    metadata:
+      password: metadata
+    cache:
+      engine: memcached
+      members:
+      - host: 127.0.0.1
+        port: 11211
+      - host: 127.0.1.1
+        port: 11211
+      - host: 127.0.2.1
+        port: 11211
+    compute_driver: vmwareapi.VMwareVCDriver
+    vmware:
+      host_username: vmware
+      host_password: vmware
+      cluster_name: vmware_cluster01
+    upgrade_levels:
+      compute: liberty
+    libvirt_service_group: libvirtd
+    lvm:
+      ephemeral: yes
+      images_volume_group: nova_vg
+      volume_clear: zero
+      volume_clear_size: 0
diff --git a/tests/pillar/compute_single.sls b/tests/pillar/compute_single.sls
index 211a60e..8d752de 100644
--- a/tests/pillar/compute_single.sls
+++ b/tests/pillar/compute_single.sls
@@ -2,7 +2,6 @@
   compute:
     version: newton
     enabled: true
-    virtualization: kvm
     heal_instance_info_cache_interval: 60
     vncproxy_url: openstack:6080
     vnc_keymap: en-gb
diff --git a/tests/pillar/compute_single_config_drive_options.sls b/tests/pillar/compute_single_config_drive_options.sls
index a3c03da..f33faca 100644
--- a/tests/pillar/compute_single_config_drive_options.sls
+++ b/tests/pillar/compute_single_config_drive_options.sls
@@ -2,7 +2,6 @@
   compute:
     version: mitaka
     enabled: true
-    virtualization: kvm
     heal_instance_info_cache_interval: 60
     vncproxy_url: openstack:6080
     vnc_keymap: en-gb
diff --git a/tests/pillar/repo_mcp_openstack_pike.sls b/tests/pillar/repo_mcp_openstack_pike.sls
new file mode 100644
index 0000000..789b907
--- /dev/null
+++ b/tests/pillar/repo_mcp_openstack_pike.sls
@@ -0,0 +1,12 @@
+linux:
+  system:
+    enabled: true
+    repo:
+      mirantis_openstack_repo:
+        source: "deb http://mirror.fuel-infra.org/mcp-repos/pike/{{ grains.get('oscodename') }} pike main"
+        architectures: amd64
+        key_url: "http://mirror.fuel-infra.org/mcp-repos/pike/{{ grains.get('oscodename') }}/archive-mcppike.key"
+        pin:
+        - pin: 'release a=pike'
+          priority: 1050
+          package: '*'
\ No newline at end of file