Merge "Fix for pike nova placement interface pick."
diff --git a/.kitchen.yml b/.kitchen.yml
index 24684ec..2c3b809 100644
--- a/.kitchen.yml
+++ b/.kitchen.yml
@@ -49,8 +49,7 @@
sudo: true
docker_images:
- - &xenial-20177 <%=ENV['IMAGE_XENIAL_20177'] || 'docker-dev-local.docker.mirantis.net/epcim/salt/saltstack-ubuntu-xenial-salt-2017.7/salt:2018_11_19'%>
- - &xenial-stable <%=ENV['IMAGE_XENIAL_STABLE'] || 'docker-dev-local.docker.mirantis.net/epcim/salt/saltstack-ubuntu-xenial-salt-stable/salt:2018_11_19'%>
+ - &xenial-20177 <%=ENV['IMAGE_XENIAL_20177'] || 'docker-dev-local.docker.mirantis.net/mirantis/drivetrain/salt-formulas-ci/salt-formulas-ci-xenial-2017.7:latest'%>
platforms:
- name: xenial-2017.7
@@ -58,13 +57,8 @@
image: *xenial-20177
platform: ubuntu
- - name: xenial-stable
- driver_config:
- image: *xenial-stable
- platform: ubuntu
-
suites:
-<% for os_version in ['mitaka','newton','ocata','pike', 'queens', 'rocky'] %>
+<% for os_version in ['pike', 'queens', 'rocky'] %>
- name: compute_cluster_<%=os_version%>
provisioner:
pillars-from-files:
@@ -87,7 +81,7 @@
controller:
version: <%=os_version%>
<% end %>
-<% for os_version in ['queens', ] %>
+<% for os_version in ['queens'] %>
- name: compute_single_ssl_<%=os_version%>
driver:
devices:
diff --git a/README.rst b/README.rst
index bd4d1b5..9039cd0 100644
--- a/README.rst
+++ b/README.rst
@@ -1427,3 +1427,20 @@
| | operations, verify do not have dead network |
| | agents/compute services) |
+-------------------------------+------------------------------------------------------+
+
+
+Don't manage services scheduling during upgrade
+------------------------------------------------
+For some special cases, services scheduling can be left unmanaged, i.e. neither disabled
+nor enabled, before and after the upgrade procedure.
+
+If ``manage_service_maintenance: true`` or not present (the default behavior), services are
+disabled before the upgrade and enabled after it.
+If ``manage_service_maintenance: false``, scheduling of the upgraded services is neither
+disabled before nor enabled after the upgrade.
+
+.. code-block:: yaml
+
+ nova:
+ upgrade:
+ manage_service_maintenance: false
diff --git a/metadata/service/compute/cluster.yml b/metadata/service/compute/cluster.yml
index 8934022..2f081d5 100644
--- a/metadata/service/compute/cluster.yml
+++ b/metadata/service/compute/cluster.yml
@@ -62,6 +62,7 @@
region: RegionOne
host: ${_param:opencontrail_control_address}
port: 9696
+ retry: 3
cache:
engine: memcached
# NOTE (dukov) Do not change this unless you 100% sure what you are doing
diff --git a/metadata/service/compute/ironic.yml b/metadata/service/compute/ironic.yml
index e81933b..e606370 100644
--- a/metadata/service/compute/ironic.yml
+++ b/metadata/service/compute/ironic.yml
@@ -58,6 +58,7 @@
region: RegionOne
host: ${_param:cluster_vip_address}
port: 9696
+ retry: 3
ironic:
region: RegionOne
host: ${_param:ironic_service_host}
diff --git a/metadata/service/compute/kvm.yml b/metadata/service/compute/kvm.yml
index 4174b56..0352323 100644
--- a/metadata/service/compute/kvm.yml
+++ b/metadata/service/compute/kvm.yml
@@ -59,6 +59,7 @@
region: RegionOne
host: ${_param:cluster_vip_address}
port: 9696
+ retry: 3
cache:
engine: memcached
# NOTE (dukov) Do not change this unless you 100% sure what you are doing
diff --git a/metadata/service/control/cluster.yml b/metadata/service/control/cluster.yml
index fa271ff..bb4f432 100644
--- a/metadata/service/control/cluster.yml
+++ b/metadata/service/control/cluster.yml
@@ -69,6 +69,7 @@
host: ${_param:cluster_vip_address}
port: 9696
mtu: 1500
+ retry: 3
metadata:
password: metadataPass
cache:
diff --git a/metadata/service/control/single.yml b/metadata/service/control/single.yml
index 5030462..5178b35 100644
--- a/metadata/service/control/single.yml
+++ b/metadata/service/control/single.yml
@@ -70,6 +70,7 @@
port: 9696
mtu: 1500
tenant: service
+ retry: 3
metadata:
password: ${_param:metadata_password}
cache:
diff --git a/nova/compute.sls b/nova/compute.sls
index fb622c2..67d19c8 100644
--- a/nova/compute.sls
+++ b/nova/compute.sls
@@ -58,8 +58,9 @@
- groups:
- nova
-/var/lib/nova:
+nova_var_lib_nova:
file.directory:
+ - name: /var/lib/nova
- user: nova
- group: nova
- dir_mode: 0750
@@ -624,5 +625,25 @@
{%- endif %}
+{%- if compute.get('qemu', {}).defaults is defined %}
+
+/etc/default/qemu-kvm:
+ file.managed:
+ - source: salt://nova/files/etc-qemu-kvm
+ - context:
+ params: {{ compute.qemu.defaults }}
+ - template: jinja
+ - require:
+ - pkg: nova_compute_packages
+
+qemu_kvm_service:
+ service.running:
+ - name: qemu-kvm
+ - enable: true
+ - watch:
+ - file: /etc/default/qemu-kvm
+
+{%- endif %}
+
{%- endif %}
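For reference, a minimal pillar sketch that would trigger the new ``/etc/default/qemu-kvm`` handling above; the keys mirror what ``nova/files/etc-qemu-kvm`` reads from ``compute.qemu.defaults``, while the values themselves are only illustrative:

.. code-block:: yaml

    nova:
      compute:
        qemu:
          defaults:
            ksm_enabled: 1
            sleep_millisecs: 200
            vhost_net_enabled: 1
            kvm_hugepages: 0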
diff --git a/nova/controller.sls b/nova/controller.sls
index 013f40d..bf758bb 100644
--- a/nova/controller.sls
+++ b/nova/controller.sls
@@ -281,10 +281,10 @@
{% endfor %}
{% endif %}
-{% if controller.get('policy', {}) and controller.version not in ['liberty', 'mitaka', 'newton'] %}
+{%- if controller.version not in ['liberty', 'mitaka', 'newton'] %}
{# nova no longer ships with a default policy.json #}
-
-/etc/nova/policy.json:
+{#- Since the Queens release `policy.json` is changed to `policy.yaml`, but the default option in `oslo_policy` is still `policy.json` #}
+/etc/nova/{{ controller.get('oslo_policy', {}).get('policy_file', 'policy.json') }}:
file.managed:
- contents: '{}'
- replace: False
@@ -292,33 +292,32 @@
- group: nova
- require:
- pkg: nova_controller_packages
-
-{% endif %}
+{%- endif %}
{%- for name, rule in controller.get('policy', {}).iteritems() %}
{%- if rule != None %}
nova_keystone_rule_{{ name }}_present:
keystone_policy.rule_present:
- - path: /etc/nova/policy.json
+ - path: /etc/nova/{{ controller.get('oslo_policy', {}).get('policy_file', 'policy.json') }}
- name: {{ name }}
- rule: {{ rule }}
- require:
- pkg: nova_controller_packages
{% if controller.version not in ['liberty', 'mitaka', 'newton'] %}
- - file: /etc/nova/policy.json
+ - file: /etc/nova/{{ controller.get('oslo_policy', {}).get('policy_file', 'policy.json') }}
{% endif%}
{%- else %}
nova_keystone_rule_{{ name }}_absent:
keystone_policy.rule_absent:
- - path: /etc/nova/policy.json
+ - path: /etc/nova/{{ controller.get('oslo_policy', {}).get('policy_file', 'policy.json') }}
- name: {{ name }}
- require:
- pkg: nova_controller_packages
{% if controller.version not in ['liberty', 'mitaka', 'newton'] %}
- - file: /etc/nova/policy.json
+ - file: /etc/nova/{{ controller.get('oslo_policy', {}).get('policy_file', 'policy.json') }}
{% endif%}
{%- endif %}
diff --git a/nova/files/etc-qemu-kvm b/nova/files/etc-qemu-kvm
new file mode 100644
index 0000000..7ba26ec
--- /dev/null
+++ b/nova/files/etc-qemu-kvm
@@ -0,0 +1,18 @@
+# Set to 1 to enable KSM, 0 to disable KSM, and AUTO to use default settings.
+# After changing this setting, restart the qemu-kvm service.
+KSM_ENABLED={{ params.get('ksm_enabled', 'AUTO') }}
+SLEEP_MILLISECS={{ params.get('sleep_millisecs', 200) }}
+
+# To load the vhost_net module, which in some cases can speed up
+# network performance, set VHOST_NET_ENABLED to 1.
+#VHOST_NET_ENABLED=0
+{%- if params.vhost_net_enabled is defined %}
+VHOST_NET_ENABLED={{ params.vhost_net_enabled }}
+{%- endif %}
+
+# Set this to 1 if you want hugepages to be available to kvm under
+# /run/hugepages/kvm
+#KVM_HUGEPAGES=0
+{%- if params.kvm_hugepages is defined %}
+KVM_HUGEPAGES={{ params.kvm_hugepages }}
+{%- endif %}
diff --git a/nova/files/grafana_dashboards/nova_utilization_prometheus.json b/nova/files/grafana_dashboards/nova_utilization_prometheus.json
index 2fbf7fd..43060e6 100644
--- a/nova/files/grafana_dashboards/nova_utilization_prometheus.json
+++ b/nova/files/grafana_dashboards/nova_utilization_prometheus.json
@@ -970,21 +970,8 @@
{%- raw %}
"format": "time_series",
"intervalFactor": 2,
- "legendFormat": "used",
- "refId": "B"
- },
- {
-{%- endraw %}
-{%- if parameters is defined and parameters.get('nova_compute_ceph_ephemeral', False) == True %}
- "expr": "max(avg((openstack_nova_disk - openstack_nova_disk_available) and on (hostname) (openstack_nova_service_status == 1 and openstack_nova_service_state == 1)) by (instance))",
-{%- else %}
- "expr": "max(sum((openstack_nova_disk - openstack_nova_disk_available) and on (hostname) (openstack_nova_service_status == 1 and openstack_nova_service_state == 1)) by (instance))",
-{%- endif %}
-{%- raw %}
- "format": "time_series",
- "intervalFactor": 2,
"legendFormat": "allocated",
- "refId": "C"
+ "refId": "B"
}
],
"thresholds": [],
@@ -1037,9 +1024,10 @@
"#d44a3a"
],
"datasource": null,
+ "decimals": 2,
"format": "percentunit",
"gauge": {
- "maxValue": 1,
+ "maxValue": 2,
"minValue": 0,
"show": true,
"thresholdLabels": false,
@@ -1047,11 +1035,11 @@
},
"gridPos": {
"h": 5,
- "w": 3,
+ "w": 6,
"x": 18,
"y": 11
},
- "id": 47,
+ "id": 55,
"interval": null,
"links": [],
"mappingType": 1,
@@ -1100,94 +1088,7 @@
"refId": "A"
}
],
- "thresholds": "0.85,0.95",
- "title": "Disk Usage %",
- "type": "singlestat",
- "valueFontSize": "80%",
- "valueMaps": [
- {
- "op": "=",
- "text": "N/A",
- "value": "null"
- }
- ],
- "valueName": "current"
- },
- {
- "cacheTimeout": null,
- "colorBackground": false,
- "colorValue": false,
- "colors": [
- "#299c46",
- "rgba(237, 129, 40, 0.89)",
- "#d44a3a"
- ],
- "datasource": null,
- "decimals": 2,
- "format": "percentunit",
- "gauge": {
- "maxValue": 6,
- "minValue": 0,
- "show": true,
- "thresholdLabels": false,
- "thresholdMarkers": true
- },
- "gridPos": {
- "h": 5,
- "w": 3,
- "x": 21,
- "y": 11
- },
- "id": 55,
- "interval": null,
- "links": [],
- "mappingType": 1,
- "mappingTypes": [
- {
- "name": "value to text",
- "value": 1
- },
- {
- "name": "range to text",
- "value": 2
- }
- ],
- "maxDataPoints": 100,
- "nullPointMode": "connected",
- "nullText": null,
- "postfix": "",
- "postfixFontSize": "50%",
- "prefix": "",
- "prefixFontSize": "50%",
- "rangeMaps": [
- {
- "from": "null",
- "text": "N/A",
- "to": "null"
- }
- ],
- "sparkline": {
- "fillColor": "rgba(31, 118, 189, 0.18)",
- "full": false,
- "lineColor": "rgb(31, 120, 193)",
- "show": false
- },
- "tableColumn": "",
- "targets": [
- {
-{%- endraw %}
-{%- if parameters is defined and parameters.get('nova_compute_ceph_ephemeral', False) == True %}
- "expr": "max(avg((openstack_nova_disk - openstack_nova_disk_available) and on (hostname) (openstack_nova_service_status == 1 and openstack_nova_service_state == 1)) by (instance)) / max(avg(openstack_nova_disk and on (hostname) (openstack_nova_service_status == 1 and openstack_nova_service_state == 1)) by (instance))",
-{%- else %}
- "expr": "max(sum((openstack_nova_disk - openstack_nova_disk_available) and on (hostname) (openstack_nova_service_status == 1 and openstack_nova_service_state == 1)) by (instance)) / max(sum(openstack_nova_disk and on (hostname) (openstack_nova_service_status == 1 and openstack_nova_service_state == 1)) by (instance))",
-{%- endif %}
-{%- raw %}
- "format": "time_series",
- "intervalFactor": 2,
- "refId": "A"
- }
- ],
- "thresholds": "2,4",
+ "thresholds": "1,1.5",
"title": "Disk Allocated %",
"type": "singlestat",
"valueFontSize": "80%",
@@ -1778,94 +1679,6 @@
"valueName": "current"
},
{
- "cacheTimeout": null,
- "colorBackground": false,
- "colorValue": false,
- "colors": [
- "#299c46",
- "rgba(237, 129, 40, 0.89)",
- "#d44a3a"
- ],
- "datasource": null,
- "format": "percentunit",
- "gauge": {
- "maxValue": 1,
- "minValue": 0,
- "show": true,
- "thresholdLabels": false,
- "thresholdMarkers": true
- },
- "gridPos": {
- "h": 5,
- "w": 5,
- "x": 0,
- "y": 12
- },
- "id": 26,
- "interval": null,
- "links": [],
- "mappingType": 1,
- "mappingTypes": [
- {
- "name": "value to text",
- "value": 1
- },
- {
- "name": "range to text",
- "value": 2
- }
- ],
- "maxDataPoints": 100,
- "nullPointMode": "connected",
- "nullText": null,
- "postfix": "",
- "postfixFontSize": "50%",
- "prefix": "",
- "prefixFontSize": "50%",
- "rangeMaps": [
- {
- "from": "null",
- "text": "N/A",
- "to": "null"
- }
- ],
- "scopedVars": {
- "aggregate": {
- "isNone": true,
- "selected": true,
- "text": "None",
- "value": ""
- }
- },
- "sparkline": {
- "fillColor": "rgba(31, 118, 189, 0.18)",
- "full": false,
- "lineColor": "rgb(31, 120, 193)",
- "show": false
- },
- "tableColumn": "",
- "targets": [
- {
- "expr": "max(openstack_nova_aggregate_used_disk{aggregate=\"$aggregate\"} / openstack_nova_aggregate_disk{aggregate=\"$aggregate\"})",
- "format": "time_series",
- "intervalFactor": 2,
- "refId": "A"
- }
- ],
- "thresholds": "0.85,0.95",
- "title": "Disk Usage",
- "type": "singlestat",
- "valueFontSize": "80%",
- "valueMaps": [
- {
- "op": "=",
- "text": "N/A",
- "value": "null"
- }
- ],
- "valueName": "current"
- },
- {
"aliasColors": {},
"bars": false,
"dashLength": 10,
@@ -1874,8 +1687,8 @@
"fill": 1,
"gridPos": {
"h": 5,
- "w": 14,
- "x": 5,
+ "w": 19,
+ "x": 0,
"y": 12
},
"id": 25,
@@ -1922,15 +1735,8 @@
"expr": "max(openstack_nova_aggregate_used_disk{aggregate=\"$aggregate\"})",
"format": "time_series",
"intervalFactor": 2,
- "legendFormat": "used",
- "refId": "B"
- },
- {
- "expr": "max(openstack_nova_aggregate_disk{aggregate=\"$aggregate\"} - openstack_nova_aggregate_disk_available{aggregate=\"$aggregate\"})",
- "format": "time_series",
- "intervalFactor": 2,
"legendFormat": "allocated",
- "refId": "C"
+ "refId": "B"
}
],
"thresholds": [],
@@ -2043,13 +1849,13 @@
"tableColumn": "",
"targets": [
{
- "expr": "max((openstack_nova_aggregate_disk{aggregate=\"$aggregate\"} - openstack_nova_aggregate_disk_available{aggregate=\"$aggregate\"}) / openstack_nova_aggregate_disk{aggregate=\"$aggregate\"})",
+ "expr": "max(openstack_nova_aggregate_used_disk{aggregate=\"$aggregate\"} / openstack_nova_aggregate_disk{aggregate=\"$aggregate\"})",
"format": "time_series",
"intervalFactor": 2,
"refId": "A"
}
],
- "thresholds": "0.9,1",
+ "thresholds": "1,1.5",
"title": "Current Disk Allocation Ratio",
"type": "singlestat",
"valueFontSize": "80%",
@@ -2653,93 +2459,6 @@
"valueName": "current"
},
{
- "cacheTimeout": null,
- "colorBackground": false,
- "colorValue": false,
- "colors": [
- "#299c46",
- "rgba(237, 129, 40, 0.89)",
- "#d44a3a"
- ],
- "datasource": null,
- "format": "percentunit",
- "gauge": {
- "maxValue": 1,
- "minValue": 0,
- "show": true,
- "thresholdLabels": false,
- "thresholdMarkers": true
- },
- "gridPos": {
- "h": 5,
- "w": 5,
- "x": 0,
- "y": 13
- },
- "id": 34,
- "interval": null,
- "links": [],
- "mappingType": 1,
- "mappingTypes": [
- {
- "name": "value to text",
- "value": 1
- },
- {
- "name": "range to text",
- "value": 2
- }
- ],
- "maxDataPoints": 100,
- "nullPointMode": "connected",
- "nullText": null,
- "postfix": "",
- "postfixFontSize": "50%",
- "prefix": "",
- "prefixFontSize": "50%",
- "rangeMaps": [
- {
- "from": "null",
- "text": "N/A",
- "to": "null"
- }
- ],
- "scopedVars": {
- "host": {
- "selected": true,
- "text": "cmp1",
- "value": "cmp1"
- }
- },
- "sparkline": {
- "fillColor": "rgba(31, 118, 189, 0.18)",
- "full": false,
- "lineColor": "rgb(31, 120, 193)",
- "show": false
- },
- "tableColumn": "",
- "targets": [
- {
- "expr": "max(openstack_nova_used_disk{hostname=\"$host\"} / openstack_nova_disk{hostname=\"$host\"})",
- "format": "time_series",
- "intervalFactor": 2,
- "refId": "A"
- }
- ],
- "thresholds": "0.85,0.95",
- "title": "Disk Usage",
- "type": "singlestat",
- "valueFontSize": "80%",
- "valueMaps": [
- {
- "op": "=",
- "text": "N/A",
- "value": "null"
- }
- ],
- "valueName": "current"
- },
- {
"aliasColors": {},
"bars": false,
"dashLength": 10,
@@ -2748,8 +2467,8 @@
"fill": 1,
"gridPos": {
"h": 5,
- "w": 14,
- "x": 5,
+ "w": 19,
+ "x": 0,
"y": 13
},
"id": 35,
@@ -2795,15 +2514,8 @@
"expr": "max(openstack_nova_used_disk{hostname=\"$host\"})",
"format": "time_series",
"intervalFactor": 2,
- "legendFormat": "used",
- "refId": "B"
- },
- {
- "expr": "max(openstack_nova_disk{hostname=\"$host\"} - openstack_nova_disk_available{hostname=\"$host\"})",
- "format": "time_series",
- "intervalFactor": 2,
"legendFormat": "allocated",
- "refId": "C"
+ "refId": "B"
}
],
"thresholds": [],
@@ -2915,13 +2627,13 @@
"tableColumn": "",
"targets": [
{
- "expr": "max((openstack_nova_disk{hostname=\"$host\"} - openstack_nova_disk_available{hostname=\"$host\"}) / openstack_nova_disk{hostname=\"$host\"})",
+ "expr": "max(openstack_nova_used_disk{hostname=\"$host\"} / openstack_nova_disk{hostname=\"$host\"})",
"format": "time_series",
"intervalFactor": 2,
"refId": "A"
}
],
- "thresholds": "0.9,1",
+ "thresholds": "1,1.5",
"title": "Current Disk Allocation Ratio",
"type": "singlestat",
"valueFontSize": "80%",
@@ -3041,6 +2753,6 @@
"timezone": "",
"title": "Nova - Utilization",
"uid": null,
- "version": 2
+ "version": 3
}
{%- endraw %}
diff --git a/nova/files/pike/nova-compute.conf.Debian b/nova/files/pike/nova-compute.conf.Debian
index 270510b..11bbaa7 100644
--- a/nova/files/pike/nova-compute.conf.Debian
+++ b/nova/files/pike/nova-compute.conf.Debian
@@ -5953,12 +5953,18 @@
# backwards compatibility reasons this currently only affects the allow_expired
# check. (list value)
#service_token_roles=service
+{%- if compute.service_token_roles is defined %}
+service_token_roles = {{ compute.service_token_roles }}
+{%- endif %}
# For backwards compatibility reasons we must let valid service tokens pass that
# don't pass the service_token_roles check as valid. Setting this true will
# become the default in a future release and should be enabled if possible.
# (boolean value)
#service_token_roles_required=false
+{%- if compute.service_token_roles_required is defined %}
+service_token_roles_required = {{ compute.service_token_roles_required }}
+{%- endif %}
# Prefix to prepend at the beginning of the path. Deprecated, use identity_uri.
# (string value)
@@ -6054,6 +6060,9 @@
{%- if compute.lvm.volume_clear_size is defined %}
volume_clear_size={{ compute.lvm.volume_clear_size }}
{%- endif %}
+{%- if compute.lvm.volume_clear_ionice_level is defined %}
+volume_clear_ionice_level={{ compute.lvm.volume_clear_ionice_level }}
+{%- endif %}
{%- endif %}
# The ID of the image to boot from to rescue data from a corrupted instance.
@@ -6724,6 +6733,31 @@
#volume_clear_size=0
#
+# What I/O schedule class and priority level should be used when clearing
+# a volume. Only takes effect if ``volume_clear`` option is set to ``zero`` or
+# ``shred``. For more info about classes and priorities, check ``man ionice``.
+#
+# Possible values:
+#
+# * idle - use the Idle scheduling class. This option impacts system performance
+# the least with a downside of increased time for volume clearance
+# * from 0 to 7 - use the Best-effort scheduling class and set the priority level
+# to the specified number
+#
+# If not set - do not set I/O scheduling class explicitly. Usually, it's the most
+# aggressive option in terms of system performance impact.
+#
+# Related options:
+#
+# * images_type - must be set to ``lvm``
+# * volume_clear - must be set and the value must be different than ``none``
+# for this option to have any impact
+# (string value)
+# Possible values:
+# 'idle', '0', '1', '2', '3', '4', '5', '6', '7'
+#volume_clear_ionice_level=None
+
+#
# Enable snapshot compression for ``qcow2`` images.
#
# Note: you can set ``snapshot_image_format`` to ``qcow2`` to force all
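As an illustration only, a pillar snippet for the new ``volume_clear_ionice_level`` rendering above; per the option description it only takes effect with ``images_type=lvm`` and a non-``none`` ``volume_clear``, and ``idle`` is just one of the documented values:

.. code-block:: yaml

    nova:
      compute:
        lvm:
          volume_clear_ionice_level: idle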
diff --git a/nova/files/pike/nova-controller.conf.Debian b/nova/files/pike/nova-controller.conf.Debian
index 79df316..94a4009 100644
--- a/nova/files/pike/nova-controller.conf.Debian
+++ b/nova/files/pike/nova-controller.conf.Debian
@@ -5984,12 +5984,18 @@
# backwards compatibility reasons this currently only affects the allow_expired
# check. (list value)
#service_token_roles=service
+{%- if controller.service_token_roles is defined %}
+service_token_roles = {{ controller.service_token_roles }}
+{%- endif %}
# For backwards compatibility reasons we must let valid service tokens pass that
# don't pass the service_token_roles check as valid. Setting this true will
# become the default in a future release and should be enabled if possible.
# (boolean value)
#service_token_roles_required=false
+{%- if controller.service_token_roles_required is defined %}
+service_token_roles_required = {{ controller.service_token_roles_required }}
+{%- endif %}
# Prefix to prepend at the beginning of the path. Deprecated, use identity_uri.
# (string value)
@@ -6702,6 +6708,30 @@
#volume_clear_size=0
#
+# What I/O schedule class and priority level should be used when clearing
+# a volume. Only takes effect if ``volume_clear`` option is set to ``zero`` or
+# ``shred``. For more info about classes and priorities, check ``man ionice``.
+#
+# Possible values:
+#
+# * idle - use the Idle scheduling class. This option impacts system performance
+# the least with a downside of increased time for volume clearance
+# * from 0 to 7 - use the Best-effort scheduling class and set the priority level
+# to the specified number
+#
+# If not set - do not set I/O scheduling class explicitly. Usually, it's the most
+# aggressive option in terms of system performance impact.
+#
+# Related options:
+#
+# * images_type - must be set to ``lvm``
+# * volume_clear - must be set and the value must be different than ``none``
+# for this option to have any impact
+# (string value)
+# Possible values:
+# 'idle', '0', '1', '2', '3', '4', '5', '6', '7'
+#volume_clear_ionice_level=None
+#
# Enable snapshot compression for ``qcow2`` images.
#
# Note: you can set ``snapshot_image_format`` to ``qcow2`` to force all
diff --git a/nova/files/queens/nova-compute.conf.Debian b/nova/files/queens/nova-compute.conf.Debian
index 6f59898..d108924 100644
--- a/nova/files/queens/nova-compute.conf.Debian
+++ b/nova/files/queens/nova-compute.conf.Debian
@@ -4685,6 +4685,9 @@
# enabled_vgpu_types = GRID K100,Intel GVT-g,MxGPU.2,nvidia-11
# (list value)
#enabled_vgpu_types =
+{%- if compute.get('devices', {}).enabled_vgpu_types is defined %}
+enabled_vgpu_types = {{ compute.devices.enabled_vgpu_types }}
+{% endif %}
[ephemeral_storage_encryption]
@@ -7265,19 +7268,19 @@
# What I/O schedule class and priority level should be used when clearing
# a volume. Only takes effect if ``volume_clear`` option is set to ``zero`` or
# ``shred``. For more info about classes and priorities, check ``man ionice``.
-#
+#
# Possible values:
-#
+#
# * idle - use the Idle scheduling class. This option impacts system performance
# the least with a downside of increased time for volume clearance
# * from 0 to 7 - use the Best-effort scheduling class and set the priority level
# to the specified number
-#
+#
# If not set - do not set I/O scheduling class explicitly. Usually, it's the most
# aggressive option in terms of system performance impact.
-#
+#
# Related options:
-#
+#
# * images_type - must be set to ``lvm``
# * volume_clear - must be set and the value must be different than ``none``
# for this option to have any impact
@@ -7819,6 +7822,15 @@
extension_sync_interval={{ compute.network.get('extension_sync_interval', '600') }}
#
+# Integer value representing the number of retry attempts
+# if a connection error happens.
+# (integer value)
+#retry = 0
+{%- if compute.network.retry is defined %}
+retry = {{ compute.network.retry }}
+{%- endif %}
+
+#
# When set to True, this option indicates that Neutron will be used to
# proxy
# metadata requests and resolve instance ids. Otherwise, the instance
@@ -10861,8 +10873,8 @@
{%- endif %}
[oslo_policy]
-{%- if compute.policy is defined %}
-{%- set _data = compute.policy %}
+{%- if compute.oslo_policy is defined %}
+{%- set _data = compute.oslo_policy %}
{%- include "oslo_templates/files/queens/oslo/_policy.conf" %}
{%- endif %}
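An illustrative pillar sketch for the compute-side options wired in above (``devices.enabled_vgpu_types``, ``network.retry``, ``oslo_policy``); the vGPU type is taken from the sample in the config comment and all values are examples, not recommendations:

.. code-block:: yaml

    nova:
      compute:
        devices:
          enabled_vgpu_types: nvidia-11
        network:
          retry: 3
        oslo_policy:
          policy_file: policy.yaml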
diff --git a/nova/files/queens/nova-controller.conf.Debian b/nova/files/queens/nova-controller.conf.Debian
index 3100f7b..ddbd53e 100644
--- a/nova/files/queens/nova-controller.conf.Debian
+++ b/nova/files/queens/nova-controller.conf.Debian
@@ -7687,6 +7687,15 @@
#extension_sync_interval = 600
#
+# Integer value representing the number of retry attempts
+# if a connection error happens.
+# (integer value)
+#retry = 0
+{%- if controller.network.retry is defined %}
+retry = {{ controller.network.retry }}
+{%- endif %}
+
+#
# When set to True, this option indicates that Neutron will be used to
# proxy
# metadata requests and resolve instance ids. Otherwise, the instance
@@ -10605,8 +10614,8 @@
{%- endif %}
[oslo_policy]
-{%- if controller.policy is defined %}
-{%- set _data = controller.policy %}
+{%- if controller.oslo_policy is defined %}
+{%- set _data = controller.oslo_policy %}
{%- include "oslo_templates/files/queens/oslo/_policy.conf" %}
{%- endif %}
diff --git a/nova/files/rocky/nova-compute.conf.Debian b/nova/files/rocky/nova-compute.conf.Debian
index 6c52ab2..9f6bd00 100644
--- a/nova/files/rocky/nova-compute.conf.Debian
+++ b/nova/files/rocky/nova-compute.conf.Debian
@@ -4091,6 +4091,9 @@
# enabled_vgpu_types = GRID K100,Intel GVT-g,MxGPU.2,nvidia-11
# (list value)
#enabled_vgpu_types =
+{%- if compute.get('devices', {}).enabled_vgpu_types is defined %}
+enabled_vgpu_types = {{ compute.devices.enabled_vgpu_types }}
+{% endif %}
[ephemeral_storage_encryption]
diff --git a/nova/map.jinja b/nova/map.jinja
index 012aa74..9591f74 100644
--- a/nova/map.jinja
+++ b/nova/map.jinja
@@ -47,6 +47,9 @@
'debug': false,
'notification': false,
'cors': {},
+ 'oslo_policy': {
+ 'policy_file': 'policy.json'
+ },
'audit': {
'enabled': false
},
@@ -83,6 +86,9 @@
'debug': false,
'notification': false,
'cors': {},
+ 'oslo_policy': {
+ 'policy_file': 'policy.json'
+ },
'audit': {
'enabled': false
},
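The ``map.jinja`` defaults above can be overridden from pillar; a sketch (illustrative) switching the controller to the ``policy.yaml`` name mentioned in ``nova/controller.sls``:

.. code-block:: yaml

    nova:
      controller:
        oslo_policy:
          policy_file: policy.yaml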
diff --git a/nova/meta/logrotate.yml b/nova/meta/logrotate.yml
new file mode 100644
index 0000000..27012bd
--- /dev/null
+++ b/nova/meta/logrotate.yml
@@ -0,0 +1,78 @@
+## Default: Daily rotation with 28 rotations kept
+{%- from "nova/map.jinja" import controller, compute with context %}
+
+{%- if controller.get('enabled', False) %}
+ {%- set log_interval = controller.get('logrotate', {}).get('interval', 'daily') %}
+ {%- set log_rotation = controller.get('logrotate', {}).get('rotate', 28) %}
+{%- elif compute.get('enabled', False) %}
+ {%- set log_interval = compute.get('logrotate', {}).get('interval', 'daily') %}
+ {%- set log_rotation = compute.get('logrotate', {}).get('rotate', 28) %}
+{%- endif %}
+
+{%- if controller.get('enabled', False) or compute.get('enabled', False) %}
+job:
+ nova-common:
+ - files:
+ - /var/log/nova/*.log
+ options:
+ - {{ log_interval }}
+ - rotate: {{ log_rotation }}
+ - missingok
+ - compress
+ - delaycompress
+ - copytruncate
+ - notifempty
+{%- endif %}
+{%- if compute.get('enabled', False) and compute.get('compute_driver', 'libvirt.LibvirtDriver') == 'libvirt.LibvirtDriver' %}
+ libvirtd:
+ - files:
+ - /var/log/libvirt/libvirtd.log
+ options:
+ - {{ compute.get('libvirt', {}).get('logrotate', {}).get('interval', 'weekly') }}
+ - rotate: {{ compute.get('libvirt', {}).get('logrotate', {}).get('rotate', 4) }}
+ - missingok
+ - compress
+ - delaycompress
+ - copytruncate
+ - minsize: 100k
+ libvirtd.libxl:
+ - files:
+ - /var/log/libvirt/libxl/*.log
+ options:
+ - {{ compute.get('libvirt', {}).get('logrotate', {}).get('interval', 'weekly') }}
+ - rotate: {{ compute.get('libvirt', {}).get('logrotate', {}).get('rotate', 4) }}
+ - missingok
+ - compress
+ - delaycompress
+ - copytruncate
+ libvirtd.lxc:
+ - files:
+ - /var/log/libvirt/lxc/*.log
+ options:
+ - {{ compute.get('libvirt', {}).get('logrotate', {}).get('interval', 'weekly') }}
+ - rotate: {{ compute.get('libvirt', {}).get('logrotate', {}).get('rotate', 4) }}
+ - missingok
+ - compress
+ - delaycompress
+ - copytruncate
+ libvirtd.qemu:
+ - files:
+ - /var/log/libvirt/qemu/*.log
+ options:
+ - {{ compute.get('libvirt', {}).get('logrotate', {}).get('interval', 'weekly') }}
+ - rotate: {{ compute.get('libvirt', {}).get('logrotate', {}).get('rotate', 4) }}
+ - missingok
+ - compress
+ - delaycompress
+ - copytruncate
+ libvirtd.uml:
+ - files:
+ - /var/log/libvirt/uml/*.log
+ options:
+ - {{ compute.get('libvirt', {}).get('logrotate', {}).get('interval', 'weekly') }}
+ - rotate: {{ compute.get('libvirt', {}).get('logrotate', {}).get('rotate', 4) }}
+ - missingok
+ - compress
+ - delaycompress
+ - copytruncate
+{%- endif %}
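A pillar sketch (example values only) overriding the logrotate defaults consumed by the new ``nova/meta/logrotate.yml`` above, both for the nova logs and for the libvirt-related jobs:

.. code-block:: yaml

    nova:
      compute:
        logrotate:
          interval: weekly
          rotate: 7
        libvirt:
          logrotate:
            interval: daily
            rotate: 10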
diff --git a/nova/upgrade/upgrade/post.sls b/nova/upgrade/upgrade/post.sls
index e532c1d..7d4bceb 100644
--- a/nova/upgrade/upgrade/post.sls
+++ b/nova/upgrade/upgrade/post.sls
@@ -1,4 +1,4 @@
-{%- from "nova/map.jinja" import controller, compute, compute_driver_mapping with context %}
+{%- from "nova/map.jinja" import controller, compute, compute_driver_mapping, upgrade with context %}
nova_upgrade_post:
test.show_notification:
@@ -7,6 +7,7 @@
{%- if compute.get('enabled') %}
{% set host_id = salt['network.get_hostname']() %}
+ {%- if upgrade.get('manage_service_maintenance', true) %}
novav21_service_enabled:
novav21.service_enabled:
@@ -14,4 +15,5 @@
- cloud_name: admin_identity
- name: {{ host_id }}
+ {% endif %}
{% endif %}
diff --git a/nova/upgrade/upgrade/pre.sls b/nova/upgrade/upgrade/pre.sls
index e9b48ea..cc2b79a 100644
--- a/nova/upgrade/upgrade/pre.sls
+++ b/nova/upgrade/upgrade/pre.sls
@@ -1,4 +1,4 @@
-{%- from "nova/map.jinja" import controller, compute, compute_driver_mapping with context %}
+{%- from "nova/map.jinja" import controller, compute, compute_driver_mapping, upgrade with context %}
nova_upgrade_pre:
test.show_notification:
@@ -7,6 +7,7 @@
{%- if compute.get('enabled') %}
{% set host_id = salt['network.get_hostname']() %}
+ {%- if upgrade.get('manage_service_maintenance', true) %}
novav21_service_disabled:
novav21.service_disabled:
@@ -15,4 +16,5 @@
- cloud_name: admin_identity
- name: {{ host_id }}
+ {% endif %}
{% endif %}