Merge "Setting up upgrade_levels"
diff --git a/README.rst b/README.rst
index 42ba8db..98806c0 100644
--- a/README.rst
+++ b/README.rst
@@ -30,6 +30,7 @@
         workers: 8
         report_interval: 60
         dhcp_domain: novalocal
+        consoleauth_token_ttl: 600
         bind:
           public_address: 10.0.0.122
           public_name: openstack.domain.com
@@ -356,7 +357,7 @@
         ...
         networking: contrail
 
-Nova services on compute node with memcached caching:
+Nova services on compute node with memcached caching and security strategy:
 
 .. code-block:: yaml
 
@@ -371,6 +372,10 @@
             port: 11211
           - host: 127.0.0.1
             port: 11211
+          security:
+            enabled: true
+            strategy: ENCRYPT
+            secret_key: secret
 
 Client-side RabbitMQ HA setup:
 
@@ -986,6 +991,32 @@
             key: (certificate content)
             cert: (certificate content)
 
+Controlling access with ``tls_allowed_dn_list``.
+This option enables an access control list of client certificate
+Distinguished Names (DNs) that are allowed to connect to the TLS port on
+this server. By default, DNs are not checked. The list may contain
+wildcards such as "C=GB,ST=London,L=London,O=Libvirt Project,CN=*"; see
+the POSIX fnmatch function for the format of the wildcards.
+Note that if this list is empty, no client can connect.
+Note also that GnuTLS returns DNs without spaces after the commas between
+fields (and this is what the check is run against), whereas the
+openssl x509 tool shows spaces.
+
+.. code-block:: yaml
+
+  nova:
+    compute:
+      libvirt:
+        tls:
+          tls_allowed_dn_list:
+            host1:
+              enabled: true
+              value: 'C=foo,CN=cmp1'
+            host2:
+              enabled: true
+              value: 'C=foo,CN=cmp2'
+
+
 You can read more about live migration over TLS here:
 https://wiki.libvirt.org/page/TLSCreateServerCerts
 
diff --git a/metadata/service/compute/cluster.yml b/metadata/service/compute/cluster.yml
index ba85fbd..9e6e485 100644
--- a/metadata/service/compute/cluster.yml
+++ b/metadata/service/compute/cluster.yml
@@ -7,6 +7,7 @@
     openstack_log_appender: false
     openstack_fluentd_handler_enabled: false
     openstack_ossyslog_handler_enabled: false
+    nova_rpc_response_timeout: 30
   nova:
     compute:
       version: ${_param:nova_version}
@@ -44,6 +45,7 @@
       message_queue:
         engine: rabbitmq
         port: 5672
+        rpc_response_timeout: ${_param:nova_rpc_response_timeout}
         user: openstack
         password: ${_param:rabbitmq_openstack_password}
         virtual_host: '/openstack'
diff --git a/metadata/service/compute/ironic.yml b/metadata/service/compute/ironic.yml
index 5428d44..e81933b 100644
--- a/metadata/service/compute/ironic.yml
+++ b/metadata/service/compute/ironic.yml
@@ -7,6 +7,7 @@
     openstack_log_appender: false
     openstack_fluentd_handler_enabled: false
     openstack_ossyslog_handler_enabled: false
+    nova_rpc_response_timeout: 30
   nova:
     compute:
       version: ${_param:nova_version}
@@ -44,6 +45,7 @@
         engine: rabbitmq
         host: ${_param:cluster_vip_address}
         port: 5672
+        rpc_response_timeout: ${_param:nova_rpc_response_timeout}
         user: openstack
         password: ${_param:rabbitmq_openstack_password}
         virtual_host: '/openstack'
diff --git a/metadata/service/compute/kvm.yml b/metadata/service/compute/kvm.yml
index 2ede8af..0a1925b 100644
--- a/metadata/service/compute/kvm.yml
+++ b/metadata/service/compute/kvm.yml
@@ -7,6 +7,7 @@
     openstack_log_appender: false
     openstack_fluentd_handler_enabled: false
     openstack_ossyslog_handler_enabled: false
+    nova_rpc_response_timeout: 30
   nova:
     compute:
       version: ${_param:nova_version}
@@ -45,6 +46,7 @@
         engine: rabbitmq
         host: ${_param:cluster_vip_address}
         port: 5672
+        rpc_response_timeout: ${_param:nova_rpc_response_timeout}
         user: openstack
         password: ${_param:rabbitmq_openstack_password}
         virtual_host: '/openstack'
diff --git a/metadata/service/control/cluster.yml b/metadata/service/control/cluster.yml
index 03daebc..2e17d93 100644
--- a/metadata/service/control/cluster.yml
+++ b/metadata/service/control/cluster.yml
@@ -9,6 +9,7 @@
     openstack_log_appender: false
     openstack_fluentd_handler_enabled: false
     openstack_ossyslog_handler_enabled: false
+    nova_rpc_response_timeout: 30
   nova:
     controller:
       enabled: true
@@ -55,6 +56,7 @@
         engine: rabbitmq
         host: ${_param:cluster_vip_address}
         port: 5672
+        rpc_response_timeout: ${_param:nova_rpc_response_timeout}
         user: openstack
         password: ${_param:rabbitmq_openstack_password}
         virtual_host: '/openstack'
diff --git a/metadata/service/control/single.yml b/metadata/service/control/single.yml
index 9cdbacb..c525974 100644
--- a/metadata/service/control/single.yml
+++ b/metadata/service/control/single.yml
@@ -9,6 +9,7 @@
     openstack_log_appender: false
     openstack_fluentd_handler_enabled: false
     openstack_ossyslog_handler_enabled: false
+    nova_rpc_response_timeout: 30
   nova:
     controller:
       enabled: true
@@ -55,6 +56,7 @@
         engine: rabbitmq
         host: ${_param:single_address}
         port: 5672
+        rpc_response_timeout: ${_param:nova_rpc_response_timeout}
         user: openstack
         password: ${_param:rabbitmq_openstack_password}
         virtual_host: '/openstack'
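
The five service metadata files above introduce ``nova_rpc_response_timeout``
with a default of 30 seconds and feed it into the ``message_queue`` section.
A deployment that needs a longer timeout can override the parameter at
cluster level; a minimal reclass-style sketch (the value 60 is only an
illustration):

.. code-block:: yaml

  parameters:
    _param:
      nova_rpc_response_timeout: 60
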
diff --git a/nova/_ssl/mysql.sls b/nova/_ssl/mysql.sls
index 3d715e9..cc21c91 100644
--- a/nova/_ssl/mysql.sls
+++ b/nova/_ssl/mysql.sls
@@ -15,8 +15,8 @@
   file.managed:
     - name: {{ ca_file }}
     - contents_pillar: nova:controller:database:x509:cacert
-    - mode: 444
-    - user: nova
+    - mode: 644
+    - user: root
     - group: nova
     - makedirs: true
   {%- else %}
@@ -29,8 +29,8 @@
   file.managed:
     - name: {{ cert_file }}
     - contents_pillar: nova:controller:database:x509:cert
-    - mode: 440
-    - user: nova
+    - mode: 640
+    - user: root
     - group: nova
     - makedirs: true
   {%- else %}
@@ -43,8 +43,8 @@
   file.managed:
     - name: {{ key_file }}
     - contents_pillar: nova:controller:database:x509:key
-    - mode: 400
-    - user: nova
+    - mode: 640
+    - user: root
     - group: nova
     - makedirs: true
   {%- else %}
@@ -58,7 +58,7 @@
       - {{ ca_file }}
       - {{ cert_file }}
       - {{ key_file }}
-    - user: nova
+    - user: root
     - group: nova
 
   {% elif controller.database.get('ssl',{}).get('enabled',False) %}
@@ -67,11 +67,19 @@
   file.managed:
     - name: {{ controller.database.ssl.cacert_file }}
     - contents_pillar: nova:controller:database:ssl:cacert
-    - mode: 0444
+    - mode: 644
     - makedirs: true
+    - user: root
+    - group: nova
   {%- else %}
   file.exists:
     - name: {{ controller.database.ssl.get('cacert_file', controller.cacert_file) }}
   {%- endif %}
 
+mysql_nova_ssl_set_user_and_group:
+  file.managed:
+    - name: {{ controller.database.ssl.get('cacert_file', controller.cacert_file) }}
+    - user: root
+    - group: nova
+
 {%- endif %}
diff --git a/nova/_ssl/rabbitmq.sls b/nova/_ssl/rabbitmq.sls
index 0dc7c6e..6abb6f4 100644
--- a/nova/_ssl/rabbitmq.sls
+++ b/nova/_ssl/rabbitmq.sls
@@ -25,8 +25,8 @@
   file.managed:
     - name: {{ ca_file }}
     - contents_pillar: nova:{{ role }}:message_queue:x509:cacert
-    - mode: 444
-    - user: nova
+    - mode: 644
+    - user: root
     - group: nova
     - makedirs: true
   {%- else %}
@@ -39,8 +39,8 @@
   file.managed:
     - name: {{ cert_file }}
     - contents_pillar: nova:{{ role }}:message_queue:x509:cert
-    - mode: 440
-    - user: nova
+    - mode: 640
+    - user: root
     - group: nova
     - makedirs: true
   {%- else %}
@@ -53,8 +53,8 @@
   file.managed:
     - name: {{ key_file }}
     - contents_pillar: nova:{{ role }}:message_queue:x509:key
-    - mode: 400
-    - user: nova
+    - mode: 640
+    - user: root
     - group: nova
     - makedirs: true
   {%- else %}
@@ -68,7 +68,7 @@
       - {{ ca_file }}
       - {{ cert_file }}
       - {{ key_file }}
-    - user: nova
+    - user: root
     - group: nova
 
   {% elif nova_msg.get('ssl',{}).get('enabled',False) %}
@@ -77,11 +77,16 @@
   file.managed:
     - name: {{ nova_msg.ssl.cacert_file }}
     - contents_pillar: nova:{{ role }}:message_queue:ssl:cacert
-    - mode: 0444
+    - mode: 644
     - makedirs: true
   {%- else %}
   file.exists:
     - name: {{ nova_msg.ssl.get('cacert_file', nova_cacert) }}
   {%- endif %}
 
+rabbitmq_nova_{{ role }}_ssl_set_user_and_group:
+  file.managed:
+    - name: {{ nova_msg.ssl.get('cacert_file', nova_cacert) }}
+    - user: root
+    - group: nova
 {%- endif %}
diff --git a/nova/client/init.sls b/nova/client/init.sls
new file mode 100644
index 0000000..e058d2d
--- /dev/null
+++ b/nova/client/init.sls
@@ -0,0 +1,2 @@
+include:
+- nova.client.resources
diff --git a/nova/client.sls b/nova/client/resources/init.sls
similarity index 93%
rename from nova/client.sls
rename to nova/client/resources/init.sls
index 87ae2cb..6dae528 100644
--- a/nova/client.sls
+++ b/nova/client/resources/init.sls
@@ -1,11 +1,14 @@
 {%- from "nova/map.jinja" import client with context %}
 {%- if client.enabled %}
 
+include:
+- nova.client.resources.v21
+
 nova_client_packages:
   pkg.installed:
   - names: {{ client.pkgs }}
 
-{%- for identity_name, identity in client.server.iteritems() %}
+{%- for identity_name, identity in client.get('server', {}).iteritems() %}
 
 {%- if identity.flavor is defined %}
 
@@ -75,3 +78,4 @@
 {%- endfor %}
 
 {%- endif %}
+
diff --git a/nova/client/resources/v21.sls b/nova/client/resources/v21.sls
new file mode 100644
index 0000000..448a94c
--- /dev/null
+++ b/nova/client/resources/v21.sls
@@ -0,0 +1,33 @@
+{%- from "nova/map.jinja" import client with context %}
+{%- if client.enabled %}
+
+{%- for identity_name, identity in client.get('resources', {}).get('v21', {}).iteritems() %}
+
+  {%- if identity.flavor is defined %}
+  {%- for flavor_name, flavor in identity.flavor.iteritems() %}
+
+novav21_openstack_flavor_{{ flavor_name }}:
+  novav21.flavor_present:
+    - name: {{ flavor_name }}
+    - cloud_name: {{ identity_name }}
+    {%- if flavor.flavor_id is defined %}
+    - flavor_id: {{ flavor.flavor_id }}
+    {%- endif %}
+    {%- if flavor.ram is defined %}
+    - ram: {{ flavor.ram }}
+    {%- endif %}
+    {%- if flavor.disk is defined %}
+    - disk: {{ flavor.disk }}
+    {%- endif %}
+    {%- if flavor.vcpus is defined %}
+    - vcpus: {{ flavor.vcpus }}
+    {%- endif %}
+    {%- if flavor.extra_specs is defined %}
+    - extra_specs: {{ flavor.extra_specs }}
+    {%- endif %}
+
+  {%- endfor %}
+  {%- endif %}
+
+{%- endfor %}
+{%- endif %}
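
The new ``nova.client.resources.v21`` state iterates over per-cloud flavor
definitions from pillar. An illustrative pillar that would drive it (the
cloud name ``admin_identity`` and the flavor values are hypothetical) looks
roughly like this:

.. code-block:: yaml

  nova:
    client:
      enabled: true
      resources:
        v21:
          admin_identity:
            flavor:
              m1.small:
                flavor_id: 2
                ram: 2048
                disk: 20
                vcpus: 1
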
diff --git a/nova/compute.sls b/nova/compute.sls
index bc699cc..b7a2131 100644
--- a/nova/compute.sls
+++ b/nova/compute.sls
@@ -141,19 +141,23 @@
 {% endif %}
 
 {%- if compute.libvirt.get('tls',{}).get('enabled',False)  %}
-{%- set ca_file=compute.libvirt.tls.get('ca_file') %}
-{%- set key_file=compute.libvirt.tls.get('key_file') %}
-{%- set cert_file=compute.libvirt.tls.get('cert_file') %}
-{%- set client_key_file=compute.libvirt.tls.client.get('key_file') %}
-{%- set client_cert_file=compute.libvirt.tls.client.get('cert_file') %}
+{%- set ca_file=compute.libvirt.tls.ca_file %}
+{%- set key_file=compute.libvirt.tls.key_file %}
+{%- set cert_file=compute.libvirt.tls.cert_file %}
+{%- set client_key_file=compute.libvirt.tls.client.key_file %}
+{%- set client_cert_file=compute.libvirt.tls.client.cert_file %}
 
 libvirt_ca_nova_compute:
 {%- if compute.libvirt.tls.cacert is defined %}
   file.managed:
     - name: {{ ca_file }}
     - contents_pillar: nova:compute:libvirt:tls:cacert
-    - mode: 444
+    - mode: 644
+    - user: root
+    - group: nova
     - makedirs: true
+    - require:
+      - user: user_nova_bash
 {%- else %}
   file.exists:
    - name: {{ ca_file }}
@@ -164,8 +168,12 @@
   file.managed:
     - name: {{ cert_file }}
     - contents_pillar: nova:compute:libvirt:tls:cert
-    - mode: 440
+    - mode: 640
+    - user: root
+    - group: nova
     - makedirs: true
+    - require:
+      - user: user_nova_bash
 {%- else %}
   file.exists:
    - name: {{ cert_file }}
@@ -176,8 +184,12 @@
   file.managed:
     - name: {{ key_file }}
     - contents_pillar: nova:compute:libvirt:tls:key
-    - mode: 400
+    - mode: 640
+    - user: root
+    - group: nova
     - makedirs: true
+    - require:
+      - user: user_nova_bash
 {%- else %}
   file.exists:
    - name: {{ key_file }}
@@ -188,8 +200,12 @@
   file.managed:
     - name: {{ client_cert_file }}
     - contents_pillar: nova:compute:libvirt:tls:client:cert
-    - mode: 440
+    - mode: 640
+    - user: root
+    - group: nova
     - makedirs: true
+    - require:
+      - user: user_nova_bash
 {%- else %}
   file.exists:
    - name: {{ client_cert_file }}
@@ -200,27 +216,49 @@
   file.managed:
     - name: {{ client_key_file }}
     - contents_pillar: nova:compute:libvirt:tls:client:key
-    - mode: 400
+    - mode: 640
+    - user: root
+    - group: nova
     - makedirs: true
+    - require:
+      - user: user_nova_bash
 {%- else %}
   file.exists:
    - name: {{ client_key_file }}
 {%- endif %}
+
+libvirt_tls_set_user_and_group:
+  file.managed:
+    - names:
+      - {{ ca_file }}
+      - {{ cert_file }}
+      - {{ key_file }}
+      - {{ client_key_file }}
+      - {{ client_cert_file }}
+    - user: root
+    - group: nova
+    - require:
+      - user: user_nova_bash
+
 {%- endif %}
 
 {%- if compute.qemu.vnc.tls.get('enabled', False) %}
 
-{%- set ca_file=compute.qemu.vnc.tls.get('ca_file') %}
-{%- set key_file=compute.qemu.vnc.tls.get('key_file') %}
-{%- set cert_file=compute.qemu.vnc.tls.get('cert_file') %}
+{%- set ca_file=compute.qemu.vnc.tls.ca_file %}
+{%- set key_file=compute.qemu.vnc.tls.key_file %}
+{%- set cert_file=compute.qemu.vnc.tls.cert_file %}
 
 qemu_ca_nova_compute:
 {%- if compute.qemu.vnc.tls.cacert is defined %}
   file.managed:
     - name: {{ ca_file }}
     - contents_pillar: nova:compute:qemu:vnc:tls:cacert
-    - mode: 444
+    - mode: 644
+    - user: root
+    - group: nova
     - makedirs: true
+    - require:
+      - user: user_libvirt-qemu
 {%- else %}
   file.exists:
    - name: {{ ca_file }}
@@ -231,8 +269,12 @@
   file.managed:
     - name: {{ cert_file }}
     - contents_pillar: nova:compute:qemu:vnc:tls:cert
-    - mode: 440
+    - mode: 640
+    - user: root
+    - group: nova
     - makedirs: true
+    - require:
+      - user: user_libvirt-qemu
 {%- else %}
   file.exists:
    - name: {{ cert_file }}
@@ -243,13 +285,28 @@
   file.managed:
     - name: {{ key_file }}
     - contents_pillar: nova:compute:qemu:vnc:tls:key
-    - mode: 400
+    - mode: 640
+    - user: root
+    - group: nova
     - makedirs: true
+    - require:
+      - user: user_libvirt-qemu
 {%- else %}
   file.exists:
    - name: {{ key_file }}
 {%- endif %}
 
+qemu_tls_set_user_and_group:
+  file.managed:
+    - names:
+      - {{ ca_file }}
+      - {{ cert_file }}
+      - {{ key_file }}
+    - user: root
+    - group: nova
+    - require:
+      - user: user_libvirt-qemu
+
 {%- endif %}
 
 nova_compute_services:
diff --git a/nova/controller.sls b/nova/controller.sls
index f1819fb..b67926e 100644
--- a/nova/controller.sls
+++ b/nova/controller.sls
@@ -75,8 +75,10 @@
   file.managed:
     - name: {{ ca_file }}
     - contents_pillar: nova:controller:novncproxy:vencrypt:tls:cacert
-    - mode: 444
+    - mode: 644
     - makedirs: true
+    - user: root
+    - group: nova
     - watch_in:
       - service: nova_controller_services
 {%- else %}
@@ -89,7 +91,9 @@
   file.managed:
     - name: {{ cert_file }}
     - contents_pillar: nova:controller:novncproxy:vencrypt:tls:cert
-    - mode: 440
+    - mode: 640
+    - user: root
+    - group: nova
     - makedirs: true
 {%- else %}
   file.exists:
@@ -101,12 +105,24 @@
   file.managed:
     - name: {{ key_file }}
     - contents_pillar: nova:controller:novncproxy:vencrypt:tls:key
-    - mode: 400
+    - mode: 640
+    - user: root
+    - group: nova
     - makedirs: true
 {%- else %}
   file.exists:
    - name: {{ key_file }}
 {%- endif %}
+
+novncproxy_vencrypt_set_user_and_group:
+  file.managed:
+    - names:
+      - {{ ca_file }}
+      - {{ cert_file }}
+      - {{ key_file }}
+    - user: root
+    - group: nova
+
 {%- endif %}
 {%- endif %}
 
@@ -119,8 +135,10 @@
   file.managed:
     - name: {{ cert_file }}
     - contents_pillar: nova:controller:novncproxy:tls:server:cert
-    - mode: 440
+    - mode: 644
     - makedirs: true
+    - user: root
+    - group: nova
     - watch_in:
       - service: nova_controller_services
 {%- else %}
@@ -133,12 +151,23 @@
   file.managed:
     - name: {{ key_file }}
     - contents_pillar: nova:controller:novncproxy:tls:server:key
-    - mode: 400
+    - mode: 640
+    - user: root
+    - group: nova
     - makedirs: true
 {%- else %}
   file.exists:
    - name: {{ key_file }}
 {%- endif %}
+
+novncproxy_server_set_user_and_group:
+  file.managed:
+    - names:
+      - {{ cert_file }}
+      - {{ key_file }}
+    - user: root
+    - group: nova
+
 {%- endif %}
 
 {%- if controller.get('networking', 'default') == "contrail" and controller.version == "juno" %}
diff --git a/nova/files/pike/libvirtd.conf.Debian b/nova/files/pike/libvirtd.conf.Debian
index 0f6b341..fdbcf9e 100644
--- a/nova/files/pike/libvirtd.conf.Debian
+++ b/nova/files/pike/libvirtd.conf.Debian
@@ -19,20 +19,32 @@
 # It is necessary to setup a CA and issue server certificates before
 # using this capability.
 #
-# This is enabled by default, uncomment this to disable it
-#listen_tls = 0
 
 {%- if compute.libvirt.tls.get('enabled', False) %}
-listen_tcp = 0
-listen_tls = 1
-key_file = {{compute.libvirt.tls.key_file|yaml_squote}}
-cert_file = {{compute.libvirt.tls.cert_file|yaml_squote}}
-ca_file = {{compute.libvirt.tls.ca_file|yaml_squote}}
-{% else %}
-listen_tls = 0
-listen_tcp = 1
-auth_tcp = "none"
-{% endif %}
+{%- set listen_tls = 1 %}
+{%- set listen_tcp = 0 %}
+{%- set key_file = compute.libvirt.tls.key_file %}
+{%- set cert_file = compute.libvirt.tls.cert_file %}
+{%- set ca_file = compute.libvirt.tls.ca_file %}
+{%- set unix_sock_ro_perms = "0777" %}
+{%- set unix_sock_rw_perms = "0770" %}
+{%- if compute.libvirt.tls.allowed_dn_list is defined %}
+  {% set tls_allowed_dn_list = [] %}
+  {%- for _,item in compute.libvirt.tls.allowed_dn_list.iteritems() %}
+    {%- if item.enabled %}
+      {%- do tls_allowed_dn_list.append(item.value) %}
+    {%- endif %}
+  {%- endfor %}
+{%- endif %}
+{%- else %}
+{%- set listen_tls = 0 %}
+{%- set listen_tcp = 1 %}
+{%- set unix_sock_ro_perms = "0777" %}
+{%- set unix_sock_rw_perms = "0770" %}
+{%- endif %}
+
+# Listen for secure TLS connections on the public TCP/IP port.
+listen_tls = {{ listen_tls }}
 
 # Listen for unencrypted TCP connections on the public TCP/IP port.
 # NB, must pass the --listen flag to the libvirtd process for this to
@@ -46,6 +58,7 @@
 #listen_tcp = 1
 
 
+listen_tcp = {{ listen_tcp }}
 
 # Override the port for accepting secure TLS connections
 # This can be a port number, or service name
@@ -91,7 +104,7 @@
 # without becoming root.
 #
 # This is restricted to 'root' by default.
-unix_sock_group = "{{ compute.get('libvirt_service_group', 'libvirtd') }}"
+unix_sock_group = {{ compute.get('libvirt_service_group', 'libvirtd')|yaml_dquote }}
 
 # Set the UNIX socket permissions for the R/O socket. This is used
 # for monitoring VM status only
@@ -99,6 +112,7 @@
 # Default allows any user. If setting group ownership may want to
 # restrict this to:
 #unix_sock_ro_perms = "0777"
+unix_sock_ro_perms = {{ unix_sock_ro_perms|yaml_dquote }}
 
 # Set the UNIX socket permissions for the R/W socket. This is used
 # for full management of VMs
@@ -108,7 +122,7 @@
 #
 # If not using PolicyKit and setting group ownership for access
 # control then you may want to relax this to:
-unix_sock_rw_perms = "0770"
+unix_sock_rw_perms = {{ unix_sock_rw_perms|yaml_dquote }}
 
 # Set the name of the directory in which sockets will be found/created.
 #unix_sock_dir = "/var/run/libvirt"
@@ -158,6 +172,7 @@
 # mechanism in /etc/sasl2/libvirt.conf
 #auth_tcp = "sasl"
 #auth_tcp = "none"
+auth_tcp = {{ compute.libvirt.auth_tcp|yaml_dquote }}
 
 # Change the authentication scheme for TLS sockets.
 #
@@ -179,14 +194,23 @@
 # Override the default server key file path
 #
 #key_file = "/etc/pki/libvirt/private/serverkey.pem"
+{%- if key_file is defined %}
+key_file = {{ key_file|yaml_squote }}
+{%- endif %}
 
 # Override the default server certificate file path
 #
 #cert_file = "/etc/pki/libvirt/servercert.pem"
+{%- if cert_file is defined %}
+cert_file = {{ cert_file|yaml_squote }}
+{%- endif %}
 
 # Override the default CA certificate path
 #
 #ca_file = "/etc/pki/CA/cacert.pem"
+{%- if ca_file is defined %}
+ca_file = {{ ca_file|yaml_squote }}
+{%- endif %}
 
 # Specify a certificate revocation list.
 #
@@ -234,6 +258,9 @@
 # By default, no DN's are checked
 #tls_allowed_dn_list = ["DN1", "DN2"]
 
+{%- if tls_allowed_dn_list is defined %}
+tls_allowed_dn_list = {{ tls_allowed_dn_list }}
+{%- endif %}
 
 # A whitelist of allowed SASL usernames. The format for usernames
 # depends on the SASL authentication mechanism. Kerberos usernames
diff --git a/nova/files/pike/nova-compute.conf.Debian b/nova/files/pike/nova-compute.conf.Debian
index 75fc533..7dba92c 100644
--- a/nova/files/pike/nova-compute.conf.Debian
+++ b/nova/files/pike/nova-compute.conf.Debian
@@ -5742,6 +5742,14 @@
 {%- endif %}
 {%- if compute.cache is defined %}
 memcached_servers={%- for member in compute.cache.members %}{{ member.host }}:11211{% if not loop.last %},{% endif %}{%- endfor %}
+  {%- if compute.cache.get('security', {}).get('enabled', False) %}
+memcache_security_strategy = {{ compute.cache.security.get('strategy', 'ENCRYPT') }}
+    {%- if compute.cache.security.secret_key is not defined or not compute.cache.security.secret_key %}
+    {%- do salt.test.exception('compute.cache.security.secret_key is not defined: Please add secret_key') %}
+    {%- else %}
+memcache_secret_key = {{ compute.cache.security.secret_key }}
+    {%- endif %}
+  {%- endif %}
 {%- endif %}
 # Complete "public" Identity API endpoint. This endpoint should not be an
 # "admin" endpoint, as it should be accessible by all end users. Unauthenticated
@@ -5998,14 +6006,6 @@
 {%- endif %}
 {%- endif %}
 
-{%- if compute.libvirt.tls.get('enabled', False) %}
-live_migration_scheme="tls"
-{%- else %}
-{%- if compute.get('libvirt', {}).uri is defined %}
-connection_uri={{ compute.libvirt.uri }}
-{%- endif %}
-{%- endif %}
-
 # The ID of the image to boot from to rescue data from a corrupted instance.
 #
 # If the rescue REST API operation doesn't provide an ID of an image to
@@ -6107,6 +6107,9 @@
 # * ``virt_type``: Influences what is used as default value here.
 #  (string value)
 #connection_uri =
+{%- if compute.get('libvirt', {}).uri is defined %}
+connection_uri={{ compute.libvirt.uri }}
+{%- endif %}
 
 #
 # Allow the injection of an admin password for instance only at ``create`` and
@@ -6273,6 +6276,9 @@
 #  (string value)
 #live_migration_scheme=<None>
 
+{%- if compute.libvirt.tls.get('enabled', False) %}
+live_migration_scheme="tls"
+{%- endif %}
 #
 # Enable tunnelled migration.
 #
diff --git a/nova/files/pike/nova-controller.conf.Debian b/nova/files/pike/nova-controller.conf.Debian
index 1d21f4e..de8325b 100644
--- a/nova/files/pike/nova-controller.conf.Debian
+++ b/nova/files/pike/nova-controller.conf.Debian
@@ -4314,7 +4314,9 @@
 # Minimum value: 0
 # Deprecated group/name - [DEFAULT]/console_token_ttl
 #token_ttl=600
-
+{% if controller.consoleauth_token_ttl is defined %}
+token_ttl = {{ controller.consoleauth_token_ttl }}
+{% endif %}
 
 [cors]
 
@@ -5747,6 +5749,14 @@
 {%- endif %}
 {%- if controller.cache is defined %}
 memcached_servers={%- for member in controller.cache.members %}{{ member.host }}:11211{% if not loop.last %},{% endif %}{%- endfor %}
+  {%- if controller.cache.get('security', {}).get('enabled', False) %}
+memcache_security_strategy = {{ controller.cache.security.get('strategy', 'ENCRYPT') }}
+    {%- if controller.cache.security.secret_key is not defined or not controller.cache.security.secret_key %}
+    {%- do salt.test.exception('controller.cache.security.secret_key is not defined: Please add secret_key') %}
+    {%- else %}
+memcache_secret_key = {{ controller.cache.security.secret_key }}
+    {%- endif %}
+  {%- endif %}
 {%- endif %}
 # Complete "public" Identity API endpoint. This endpoint should not be an
 # "admin" endpoint, as it should be accessible by all end users. Unauthenticated
diff --git a/nova/files/queens/libvirtd.conf.Debian b/nova/files/queens/libvirtd.conf.Debian
index 0f6b341..fdbcf9e 100644
--- a/nova/files/queens/libvirtd.conf.Debian
+++ b/nova/files/queens/libvirtd.conf.Debian
@@ -19,20 +19,32 @@
 # It is necessary to setup a CA and issue server certificates before
 # using this capability.
 #
-# This is enabled by default, uncomment this to disable it
-#listen_tls = 0
 
 {%- if compute.libvirt.tls.get('enabled', False) %}
-listen_tcp = 0
-listen_tls = 1
-key_file = {{compute.libvirt.tls.key_file|yaml_squote}}
-cert_file = {{compute.libvirt.tls.cert_file|yaml_squote}}
-ca_file = {{compute.libvirt.tls.ca_file|yaml_squote}}
-{% else %}
-listen_tls = 0
-listen_tcp = 1
-auth_tcp = "none"
-{% endif %}
+{%- set listen_tls = 1 %}
+{%- set listen_tcp = 0 %}
+{%- set key_file = compute.libvirt.tls.key_file %}
+{%- set cert_file = compute.libvirt.tls.cert_file %}
+{%- set ca_file = compute.libvirt.tls.ca_file %}
+{%- set unix_sock_ro_perms = "0777" %}
+{%- set unix_sock_rw_perms = "0770" %}
+{%- if compute.libvirt.tls.allowed_dn_list is defined %}
+  {% set tls_allowed_dn_list = [] %}
+  {%- for _,item in compute.libvirt.tls.allowed_dn_list.iteritems() %}
+    {%- if item.enabled %}
+      {%- do tls_allowed_dn_list.append(item.value) %}
+    {%- endif %}
+  {%- endfor %}
+{%- endif %}
+{%- else %}
+{%- set listen_tls = 0 %}
+{%- set listen_tcp = 1 %}
+{%- set unix_sock_ro_perms = "0777" %}
+{%- set unix_sock_rw_perms = "0770" %}
+{%- endif %}
+
+# Listen for secure TLS connections on the public TCP/IP port.
+listen_tls = {{ listen_tls }}
 
 # Listen for unencrypted TCP connections on the public TCP/IP port.
 # NB, must pass the --listen flag to the libvirtd process for this to
@@ -46,6 +58,7 @@
 #listen_tcp = 1
 
 
+listen_tcp = {{ listen_tcp }}
 
 # Override the port for accepting secure TLS connections
 # This can be a port number, or service name
@@ -91,7 +104,7 @@
 # without becoming root.
 #
 # This is restricted to 'root' by default.
-unix_sock_group = "{{ compute.get('libvirt_service_group', 'libvirtd') }}"
+unix_sock_group = {{ compute.get('libvirt_service_group', 'libvirtd')|yaml_dquote }}
 
 # Set the UNIX socket permissions for the R/O socket. This is used
 # for monitoring VM status only
@@ -99,6 +112,7 @@
 # Default allows any user. If setting group ownership may want to
 # restrict this to:
 #unix_sock_ro_perms = "0777"
+unix_sock_ro_perms = {{ unix_sock_ro_perms|yaml_dquote }}
 
 # Set the UNIX socket permissions for the R/W socket. This is used
 # for full management of VMs
@@ -108,7 +122,7 @@
 #
 # If not using PolicyKit and setting group ownership for access
 # control then you may want to relax this to:
-unix_sock_rw_perms = "0770"
+unix_sock_rw_perms = {{ unix_sock_rw_perms|yaml_dquote }}
 
 # Set the name of the directory in which sockets will be found/created.
 #unix_sock_dir = "/var/run/libvirt"
@@ -158,6 +172,7 @@
 # mechanism in /etc/sasl2/libvirt.conf
 #auth_tcp = "sasl"
 #auth_tcp = "none"
+auth_tcp = {{ compute.libvirt.auth_tcp|yaml_dquote }}
 
 # Change the authentication scheme for TLS sockets.
 #
@@ -179,14 +194,23 @@
 # Override the default server key file path
 #
 #key_file = "/etc/pki/libvirt/private/serverkey.pem"
+{%- if key_file is defined %}
+key_file = {{ key_file|yaml_squote }}
+{%- endif %}
 
 # Override the default server certificate file path
 #
 #cert_file = "/etc/pki/libvirt/servercert.pem"
+{%- if cert_file is defined %}
+cert_file = {{ cert_file|yaml_squote }}
+{%- endif %}
 
 # Override the default CA certificate path
 #
 #ca_file = "/etc/pki/CA/cacert.pem"
+{%- if ca_file is defined %}
+ca_file = {{ ca_file|yaml_squote }}
+{%- endif %}
 
 # Specify a certificate revocation list.
 #
@@ -234,6 +258,9 @@
 # By default, no DN's are checked
 #tls_allowed_dn_list = ["DN1", "DN2"]
 
+{%- if tls_allowed_dn_list is defined %}
+tls_allowed_dn_list = {{ tls_allowed_dn_list }}
+{%- endif %}
 
 # A whitelist of allowed SASL usernames. The format for usernames
 # depends on the SASL authentication mechanism. Kerberos usernames
diff --git a/nova/files/queens/nova-compute.conf.Debian b/nova/files/queens/nova-compute.conf.Debian
index da35e8b..a18003b 100644
--- a/nova/files/queens/nova-compute.conf.Debian
+++ b/nova/files/queens/nova-compute.conf.Debian
@@ -4459,6 +4459,7 @@
 
 {%- set _data = compute.get('cinder', compute.get('identity', {})) %}
 {%- set auth_type = _data.get('auth_type', 'password') %}
+{%- if 'cacert_file' not in _data.keys() %}{% do _data.update({'cacert_file': compute.cacert_file}) %}{% endif %}
 {%- include "oslo_templates/files/queens/keystoneauth/_type_" + auth_type + ".conf" %}
 
 
@@ -4513,6 +4514,60 @@
 # Minimum value: 1
 #resource_provider_association_refresh = 300
 
+#
+# Determine if the source compute host should wait for a ``network-vif-plugged``
+# event from the (neutron) networking service before starting the
+# actual transfer of the guest to the destination compute
+# host.
+#
+# If you set this option the same on all of your compute hosts, which you should
+# do if you use the same networking backend universally, you do not have to
+# worry about this.
+#
+# Before starting the transfer of the guest, some setup occurs on the
+# destination compute host, including plugging virtual interfaces.
+# Depending on the
+# networking backend **on the destination host**, a ``network-vif-plugged``
+# event may be triggered and then received on the source compute host and the
+# source compute can wait for that event to ensure networking is set up on the
+# destination host before starting the guest transfer in the hypervisor.
+#
+# By default, this is False for two reasons:
+#
+# 1. Backward compatibility: deployments should test this out and ensure
+#    it works for them before enabling it.
+#
+# 2. The compute service cannot reliably determine which types of virtual
+#    interfaces (``port.binding:vif_type``) will send ``network-vif-plugged``
+#    events without an accompanying port ``binding:host_id`` change.
+#    Open vSwitch and linuxbridge should be OK, but OpenDaylight is at least
+#    one known backend that will not currently work in this case, see bug
+#    https://launchpad.net/bugs/1755890 for more details.
+#
+# Possible values:
+#
+# * True: wait for ``network-vif-plugged`` events before starting guest transfer
+# * False: do not wait for ``network-vif-plugged`` events before starting guest
+#   transfer (this is how things have always worked before this option
+#   was introduced)
+#
+# Related options:
+#
+# * [DEFAULT]/vif_plugging_is_fatal: if ``live_migration_wait_for_vif_plug`` is
+#   True and ``vif_plugging_timeout`` is greater than 0, and a timeout is
+#   reached, the live migration process will fail with an error but the guest
+#   transfer will not have started to the destination host
+# * [DEFAULT]/vif_plugging_timeout: if ``live_migration_wait_for_vif_plug`` is
+#   True, this controls the amount of time to wait before timing out and either
+#   failing if ``vif_plugging_is_fatal`` is True, or simply continuing with the
+#   live migration
+#  (boolean value)
+#live_migration_wait_for_vif_plug = false
+{%- if pillar.get('neutron', {}).get('compute', {}).get('backend', {}).get('mechanism', {}).get('ovs', {}).get('driver', '') == 'openvswitch' %}
+live_migration_wait_for_vif_plug = true
+{%- endif %}
+
 
 [conductor]
 #
@@ -6259,14 +6314,6 @@
 {%- endif %}
 {%- endif %}
 
-{%- if compute.libvirt.tls.get('enabled', False) %}
-live_migration_scheme="tls"
-{%- else %}
-{%- if compute.get('libvirt', {}).uri is defined %}
-connection_uri={{ compute.libvirt.uri }}
-{%- endif %}
-{%- endif %}
-
 #
 # The ID of the image to boot from to rescue data from a corrupted
 # instance.
@@ -6396,6 +6443,9 @@
 # * ``virt_type``: Influences what is used as default value here.
 #  (string value)
 #connection_uri =
+{%- if compute.get('libvirt', {}).uri is defined %}
+connection_uri={{ compute.libvirt.uri }}
+{%- endif %}
 
 #
 # Algorithm used to hash the injected password.
@@ -6647,6 +6697,9 @@
 #  (string value)
 #live_migration_scheme = <None>
 
+{%- if compute.libvirt.tls.get('enabled', False) %}
+live_migration_scheme="tls"
+{%- endif %}
 #
 # Enable tunnelled migration.
 #
@@ -10813,6 +10866,7 @@
     {%- set messaging_engine = _data.engine %}
 {%- endif %}
 [oslo_messaging_{{ messaging_engine }}]
+{%- if _data.ssl is defined and 'cacert_file' not in _data.get('ssl', {}).keys() %}{% do _data['ssl'].update({'cacert_file': compute.cacert_file}) %}{% endif %}
 {%- include "oslo_templates/files/queens/oslo/messaging/_" + messaging_engine + ".conf" %}
 {%- endif %}
 
diff --git a/nova/files/queens/nova-controller.conf.Debian b/nova/files/queens/nova-controller.conf.Debian
index b456d31..66705ba 100644
--- a/nova/files/queens/nova-controller.conf.Debian
+++ b/nova/files/queens/nova-controller.conf.Debian
@@ -4533,6 +4533,9 @@
 # Minimum value: 0
 # Deprecated group/name - [DEFAULT]/console_token_ttl
 #token_ttl = 600
+{% if controller.consoleauth_token_ttl is defined %}
+token_ttl = {{ controller.consoleauth_token_ttl }}
+{% endif %}
 
 [cors]
 {%- if controller.cors is defined %}
@@ -10726,6 +10729,7 @@
     {%- set messaging_engine = _data.engine %}
 {%- endif %}
 [oslo_messaging_{{ messaging_engine }}]
+{%- if _data.ssl is defined and 'cacert_file' not in _data.get('ssl', {}).keys() %}{% do _data['ssl'].update({'cacert_file': controller.cacert_file}) %}{% endif %}
 {%- include "oslo_templates/files/queens/oslo/messaging/_" + messaging_engine + ".conf" %}
 {%- endif %}
 
diff --git a/nova/map.jinja b/nova/map.jinja
index 1883d7b..370f517 100644
--- a/nova/map.jinja
+++ b/nova/map.jinja
@@ -205,6 +205,7 @@
   libvirt:
     inject_partition: '-2'
     inject_password: False
+    auth_tcp: "none"
     tls:
       enabled: False
       key_file: '/etc/pki/libvirt/private/serverkey.pem'
@@ -257,6 +258,7 @@
   libvirt:
     inject_partition: '-2'
     inject_password: False
+    auth_tcp: "none"
     tls:
       enabled: False
       key_file: '/etc/pki/libvirt/private/serverkey.pem'
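
map.jinja now defaults ``auth_tcp`` to "none" for both compute profiles, and
the Pike/Queens libvirtd.conf templates render the value unconditionally. It
can be overridden per node through pillar; for example (illustrative only;
"sasl" additionally requires a SASL mechanism to be configured in
/etc/sasl2/libvirt.conf):

.. code-block:: yaml

  nova:
    compute:
      libvirt:
        auth_tcp: sasl
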
diff --git a/nova/upgrade/verify/_service.sls b/nova/upgrade/verify/_service.sls
index 97d016b..73c279f 100644
--- a/nova/upgrade/verify/_service.sls
+++ b/nova/upgrade/verify/_service.sls
@@ -4,18 +4,16 @@
   test.show_notification:
     - text: "Running nova.upgrade.verify.service"
 
-{%- if compute.get('enabled') %}
+{%- if compute.get('enabled') or controller.get('enabled') %}
 {% set host_id = salt['network.get_hostname']() %}
-{% endif %}
 
 wait_for_service:
   module.run:
     - name: novav21.services_wait
     - cloud_name: admin_identity
     - admin_up_only: False
-{%- if host_id is defined %}
     - host: {{ host_id }}
-{%- endif %}
-    - retries: 20
+    - retries: 30
     - timeout: 10
 
+{% endif %}
diff --git a/tests/pillar/compute_cluster.sls b/tests/pillar/compute_cluster.sls
index c057c09..c3a55e2 100644
--- a/tests/pillar/compute_cluster.sls
+++ b/tests/pillar/compute_cluster.sls
@@ -74,6 +74,10 @@
         port: 11211
       - host: 127.0.2.1
         port: 11211
+      security:
+        enabled: true
+        strategy: ENCRYPT
+        secret_key: secret
     libvirt:
       hw_disk_discard: unmap
       live_migration_tunnelled: False
diff --git a/tests/pillar/compute_cluster_vmware.sls b/tests/pillar/compute_cluster_vmware.sls
index ceaf142..8cf5646 100644
--- a/tests/pillar/compute_cluster_vmware.sls
+++ b/tests/pillar/compute_cluster_vmware.sls
@@ -63,6 +63,10 @@
         port: 11211
       - host: 127.0.2.1
         port: 11211
+      security:
+        enabled: true
+        strategy: ENCRYPT
+        secret_key: secret
     compute_driver: vmwareapi.VMwareVCDriver
     vmware:
       host_username: vmware
diff --git a/tests/pillar/compute_cluster_vmware_queens.sls b/tests/pillar/compute_cluster_vmware_queens.sls
index 1d6b0cf..d508fc1 100644
--- a/tests/pillar/compute_cluster_vmware_queens.sls
+++ b/tests/pillar/compute_cluster_vmware_queens.sls
@@ -63,6 +63,10 @@
         port: 11211
       - host: 127.0.2.1
         port: 11211
+      security:
+        enabled: true
+        strategy: ENCRYPT
+        secret_key: secret
     compute_driver: vmwareapi.VMwareVCDriver
     vmware:
       host_username: vmware
diff --git a/tests/pillar/compute_single.sls b/tests/pillar/compute_single.sls
index 8d752de..b000da7 100644
--- a/tests/pillar/compute_single.sls
+++ b/tests/pillar/compute_single.sls
@@ -60,6 +60,10 @@
       members:
       - host: 127.0.0.1
         port: 11211
+      security:
+        enabled: true
+        strategy: ENCRYPT
+        secret_key: secret
     qemu:
       user: nova
       group: cinder
diff --git a/tests/pillar/compute_single_config_drive_options.sls b/tests/pillar/compute_single_config_drive_options.sls
index 6351252..78cf088 100644
--- a/tests/pillar/compute_single_config_drive_options.sls
+++ b/tests/pillar/compute_single_config_drive_options.sls
@@ -58,6 +58,10 @@
       members:
       - host: 127.0.0.1
         port: 11211
+      security:
+        enabled: true
+        strategy: ENCRYPT
+        secret_key: secret
     config_drive:
       cdrom: True
       format: iso9660
diff --git a/tests/pillar/control_cluster.sls b/tests/pillar/control_cluster.sls
index f11a947..24fc414 100644
--- a/tests/pillar/control_cluster.sls
+++ b/tests/pillar/control_cluster.sls
@@ -11,6 +11,7 @@
     cpu_allocation_ratio: 16.0
     ram_allocation_ratio: 1.5
     disk_allocation_ratio: 1.0
+    consoleauth_token_ttl: 600
     workers: 8
     bind:
       private_address: 127.0.0.1
diff --git a/tests/pillar/control_single.sls b/tests/pillar/control_single.sls
index eb91fd9..338d63b 100644
--- a/tests/pillar/control_single.sls
+++ b/tests/pillar/control_single.sls
@@ -66,6 +66,10 @@
       members:
       - host: 127.0.0.1
         port: 11211
+      security:
+        enabled: true
+        strategy: ENCRYPT
+        secret_key: secret
     policy:
       'context_is_admin': 'role:admin or role:administrator'
       'compute:create': 'rule:admin_or_owner'