Merge "Change ssh key for vdrok"
diff --git a/defaults/docker_images.yml b/defaults/docker_images.yml
index b83412a..63fc3b0 100644
--- a/defaults/docker_images.yml
+++ b/defaults/docker_images.yml
@@ -3,7 +3,7 @@
# CI/CD
docker_image_compose: "${_param:mcp_docker_registry}/mirantis/external/docker/compose:1.17.1"
# 2.6.2 version, from 12/18/2018, differs from the latest 2.6.2 upstream - update next cycle
- docker_image_registry: "${_param:mcp_docker_registry}/mirantis/external/registry:${_param:mcp_version}"
+ docker_image_registry: "${_param:mcp_docker_registry}/mirantis/external/registry:2019.2.6"
docker_image_visualizer: "${_param:mcp_docker_registry}/mirantis/external/visualizer:${_param:mcp_version}"
docker_image_openldap: "${_param:mcp_docker_registry}/mirantis/cicd/openldap:2019.2.11"
docker_image_postgresql: "${_param:mcp_docker_registry}/mirantis/external/library/postgres:9.6.10"
@@ -55,9 +55,9 @@
docker_image_cvp_xrally: "${_param:mcp_docker_registry}/mirantis/external/xrally/xrally-openstack:0.11.2"
# aptly
docker_image_aptly:
- base: "${_param:mcp_docker_registry}/mirantis/cicd/aptly:${_param:mcp_version}"
- public: "${_param:mcp_docker_registry}/mirantis/cicd/aptly-public:${_param:mcp_version}"
- publisher: "${_param:mcp_docker_registry}/mirantis/cicd/aptly-publisher:${_param:mcp_version}"
+ base: "${_param:mcp_docker_registry}/mirantis/cicd/aptly:2019.2.9"
+ public: "${_param:mcp_docker_registry}/mirantis/cicd/aptly-public:2019.2.9"
+ publisher: "${_param:mcp_docker_registry}/mirantis/cicd/aptly-publisher:2019.2.9"
# List of images to be placed into the offline image during the separate image build process
# WARNING: registry|target_registry and names must be exactly the same as in the list above!
@@ -68,7 +68,7 @@
# CI/CD
- registry: ${_param:mcp_docker_registry}/mirantis/external
target_registry: ${_param:default_local_mirrror_content:docker_client_registry_target_registry}/mirantis/external
- name: registry:${_param:mcp_version}
+ name: registry:2019.2.6
- registry: ${_param:mcp_docker_registry}/mirantis/external
target_registry: ${_param:default_local_mirrror_content:docker_client_registry_target_registry}/mirantis/external
name: visualizer:${_param:mcp_version}
@@ -87,13 +87,13 @@
- registry: ${_param:mcp_docker_registry}/mirantis/cicd
target_registry: ${_param:default_local_mirrror_content:docker_client_registry_target_registry}/mirantis/cicd
- name: aptly:${_param:mcp_version}
+ name: aptly:2019.2.9
- registry: ${_param:mcp_docker_registry}/mirantis/cicd
target_registry: ${_param:default_local_mirrror_content:docker_client_registry_target_registry}/mirantis/cicd
- name: aptly-public:${_param:mcp_version}
+ name: aptly-public:2019.2.9
- registry: ${_param:mcp_docker_registry}/mirantis/cicd
target_registry: ${_param:default_local_mirrror_content:docker_client_registry_target_registry}/mirantis/cicd
- name: aptly-publisher:${_param:mcp_version}
+ name: aptly-publisher:2019.2.9
- registry: ${_param:mcp_docker_registry}/mirantis/cicd
target_registry: ${_param:default_local_mirrror_content:docker_client_registry_target_registry}/mirantis/cicd
diff --git a/docker/swarm/stack/monitoring/alerta.yml b/docker/swarm/stack/monitoring/alerta.yml
index ac16a2b..bec608c 100644
--- a/docker/swarm/stack/monitoring/alerta.yml
+++ b/docker/swarm/stack/monitoring/alerta.yml
@@ -27,8 +27,10 @@
- ${prometheus:alerta:config_dir}/alerta.conf:/web/config.js
- ${prometheus:alerta:config_dir}/alertad.conf:/app/alertad.conf
environment:
+ ADMIN_KEY: ${_param:alerta_admin_key}
ADMIN_USERS: ${_param:alerta_admin_username}
ADMIN_PASSWORD_FILE: "/run/secrets/alerta"
+ AUTH_REQUIRED: "True"
MONGO_URI: ${_param:alerta_mongodb_uri}
PLUGINS: ""
secrets:
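
Note: ADMIN_KEY plus AUTH_REQUIRED: "True" switches the Alerta API to authenticated mode, which is why the Alertmanager webhook URL further down gains an api-key query parameter. A minimal sketch of an authenticated call (Python; host, port, and key value are assumptions; the key must match whatever ${_param:alerta_admin_key} renders to):

    import requests

    ALERTA_API = "http://alerta.example.local:8080/api"  # assumed endpoint
    API_KEY = "change-me"                                # assumed key value

    # Alerta accepts the key either as "?api-key=..." (the webhook form below)
    # or as an Authorization header, used here.
    resp = requests.get(
        f"{ALERTA_API}/alerts",
        headers={"Authorization": f"Key {API_KEY}"},
        timeout=10,
    )
    resp.raise_for_status()
    print(resp.json().get("total"))
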
diff --git a/fluentd/label/notifications/input_rabbitmq.yml b/fluentd/label/notifications/input_rabbitmq.yml
index 3d7edef..06fca7c 100644
--- a/fluentd/label/notifications/input_rabbitmq.yml
+++ b/fluentd/label/notifications/input_rabbitmq.yml
@@ -85,7 +85,7 @@
enable_ruby: true
record:
- name: notification_type
- value: '${fluentd:dollar}{ record["payload"]["eventType"] && record["payload"]["eventTime"] ? "audit" : "notification" }'
+ value: '${fluentd:dollar}{ record["payload"].key?("eventType") && record["payload"].key?("eventTime") ? "audit" : "notification" }'
match:
rewrite_message_tag:
tag: raw_notifications
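
Note: the rewritten ternary classifies on key presence (.key?) instead of value truthiness, so CADF audit records whose eventType/eventTime fields are present but null are no longer mistagged. A Python analog of the Ruby expression (field names taken from the record above):

    payload = {"eventType": "activity", "eventTime": None}  # fields present, one null

    old_type = "audit" if payload.get("eventType") and payload.get("eventTime") else "notification"
    new_type = "audit" if "eventType" in payload and "eventTime" in payload else "notification"

    print(old_type)  # notification: the null eventTime is falsy, so the old check misfires
    print(new_type)  # audit: both keys exist, which is what marks an audit event
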
diff --git a/galera/server/cluster.yml b/galera/server/cluster.yml
index a4b3f0a..2dd5962 100644
--- a/galera/server/cluster.yml
+++ b/galera/server/cluster.yml
@@ -3,3 +3,4 @@
- system.haproxy.proxy.listen.openstack.galera
- system.keepalived.cluster.instance.galera_vip
- system.galera.upgrade
+- system.galera.server.clustercheck
diff --git a/glance/client/image/octavia.yml b/glance/client/image/octavia.yml
index 2a00375..c09af34 100644
--- a/glance/client/image/octavia.yml
+++ b/glance/client/image/octavia.yml
@@ -25,3 +25,7 @@
glanceng.get_image_owner_id:
- ${_param:amphora_image_name}
- 'admin_identity'
+ octavia_amphora_image:
+ mine_function: glancev2.image_get_details
+ name: ${_param:amphora_image_name}
+ cloud_name: 'admin_identity'
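
Note: the new octavia_amphora_image entry publishes the output of glancev2.image_get_details for the amphora image to the Salt mine, so other minions can read the image details without querying Glance themselves. A minimal sketch of reading it back (Python, via Salt's local caller; the target pattern is an assumption):

    import salt.client

    # mine.get(<target>, <mine function name>) returns {minion_id: data}
    caller = salt.client.Caller()
    mine_data = caller.cmd("mine.get", "ctl01*", "octavia_amphora_image")
    for minion, details in mine_data.items():
        print(minion, details)
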
diff --git a/haproxy/proxy/listen/openstack/galera/init.yml b/haproxy/proxy/listen/openstack/galera/init.yml
index 1dd1a1c..9f210d4 100644
--- a/haproxy/proxy/listen/openstack/galera/init.yml
+++ b/haproxy/proxy/listen/openstack/galera/init.yml
@@ -1,6 +1,6 @@
parameters:
_param:
- haproxy_params_check: 'check'
+ haproxy_params_check: 'check port 9200'
haproxy:
proxy:
listen:
@@ -15,12 +15,16 @@
- name: ${_param:cluster_node01_hostname}
host: ${_param:cluster_node01_address}
port: 3306
- params: ${_param:haproxy_params_check} inter 20s fastinter 2s downinter 2s rise 3 fall 3
+ params: ${_param:haproxy_params_check} inter 20s fastinter 2s downinter 2s rise 3 fall 3 on-marked-down shutdown-sessions
- name: ${_param:cluster_node02_hostname}
host: ${_param:cluster_node02_address}
port: 3306
- params: backup ${_param:haproxy_params_check} inter 20s fastinter 2s downinter 2s rise 3 fall 3
+ params: backup ${_param:haproxy_params_check} inter 20s fastinter 2s downinter 2s rise 3 fall 3 on-marked-down shutdown-sessions
- name: ${_param:cluster_node03_hostname}
host: ${_param:cluster_node03_address}
port: 3306
- params: backup ${_param:haproxy_params_check} inter 20s fastinter 2s downinter 2s rise 3 fall 3
+ params: backup ${_param:haproxy_params_check} inter 20s fastinter 2s downinter 2s rise 3 fall 3 on-marked-down shutdown-sessions
+ health-check:
+ mysql:
+ enabled: False
+
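
Note: 'check port 9200' moves the HAProxy health probe off the MySQL port and onto the clustercheck HTTP endpoint (provided by the system.galera.server.clustercheck class added above); the built-in mysql health-check is disabled because the HTTP check supersedes it, and on-marked-down shutdown-sessions tears down established client sessions as soon as a backend is marked down instead of leaving them pinned to a desynced node. A sketch of what the probe sees (Python; the hostname is an assumption, and 200-when-synced/503-otherwise is the conventional clustercheck contract):

    import http.client

    conn = http.client.HTTPConnection("ctl01.example.local", 9200, timeout=5)
    conn.request("GET", "/")
    status = conn.getresponse().status
    print("synced" if status == 200 else "not available for queries")
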
diff --git a/horizon/server/iptables.yml b/horizon/server/iptables.yml
index 4836feb..e25fad8 100644
--- a/horizon/server/iptables.yml
+++ b/horizon/server/iptables.yml
@@ -36,7 +36,7 @@
action: REJECT
501:
# 501-503 disable private networks
- rule: --dst 10.0.0.0/16
+ rule: --dst 10.0.0.0/8
action: REJECT
502:
rule: --dst 172.16.0.0/12
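
Note: the old rule only matched 10.0.0.0/16, while the RFC 1918 private block is 10.0.0.0/8, so most 10.x addresses slipped past the REJECT. A quick containment check (Python):

    import ipaddress

    addr = ipaddress.ip_address("10.1.2.3")
    print(addr in ipaddress.ip_network("10.0.0.0/16"))  # False: old rule missed it
    print(addr in ipaddress.ip_network("10.0.0.0/8"))   # True: new rule rejects it
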
diff --git a/jenkins/client/approved_scripts.yml b/jenkins/client/approved_scripts.yml
index 7054e55..a386737 100644
--- a/jenkins/client/approved_scripts.yml
+++ b/jenkins/client/approved_scripts.yml
@@ -172,3 +172,4 @@
- staticMethod org.codehaus.groovy.runtime.EncodingGroovyMethods encodeBase64 byte[]
- staticMethod org.codehaus.groovy.runtime.ScriptBytecodeAdapter bitwiseNegate java.lang.Object
- staticMethod org.codehaus.groovy.runtime.DefaultGroovyMethods max java.util.Collection
+ - staticMethod org.codehaus.groovy.runtime.DefaultGroovyMethods removeElement java.util.Collection java.lang.Object
diff --git a/jenkins/client/job/ceph/add-node.yml b/jenkins/client/job/ceph/add-node.yml
index 00e50c5..ab16dd9 100644
--- a/jenkins/client/job/ceph/add-node.yml
+++ b/jenkins/client/job/ceph/add-node.yml
@@ -16,7 +16,6 @@
credentials: ${_param:jenkins_gerrit_credentials}
script: ceph-add-node.groovy
param:
- # general parameters
SALT_MASTER_URL:
type: string
description: URL of Salt master
@@ -27,9 +26,17 @@
default: salt
HOST:
type: string
- description: OSD HOST that will be added to Ceph cluster (rgw04*)
- default: 'rgw04*'
- HOST_TYPE:
+ description: OSD HOST that will be added to Ceph cluster (e.g. rgw04*)
+ default: ''
+ CLUSTER_FLAGS:
type: string
- description: Type of Ceph node to be added. Valid values are mon/osd/rgw
- default: 'rgw'
+ description: Flags to be applied before and after the pipeline (comma-separated list)
+ default: ''
+ OSD_ONLY:
+ type: boolean
+ default: 'false'
+ description: Add only missing OSDs while keeping HOST itself intact
+ USE_UPMAP:
+ type: boolean
+ default: 'false'
+ description: Use upmap balancer to smooth data migrations
diff --git a/jenkins/client/job/ceph/add-osd-upmap.yml b/jenkins/client/job/ceph/add-osd-upmap.yml
index a96a303..1c1ccb1 100644
--- a/jenkins/client/job/ceph/add-osd-upmap.yml
+++ b/jenkins/client/job/ceph/add-osd-upmap.yml
@@ -28,7 +28,7 @@
default: salt
HOST:
type: string
- description: OSD HOST that will be added to Ceph cluster (rgw04*)
+ description: OSD HOST that will be added to Ceph cluster
default: 'osd*'
CLUSTER_FLAGS:
type: string
diff --git a/jenkins/client/job/ceph/remove-node.yml b/jenkins/client/job/ceph/remove-node.yml
index 422056c..1f5a4ec 100644
--- a/jenkins/client/job/ceph/remove-node.yml
+++ b/jenkins/client/job/ceph/remove-node.yml
@@ -27,26 +27,27 @@
default: salt
HOST:
type: string
- description: OSD HOST that will be removed from Ceph cluster (rgw04*)
- default: 'rgw04*'
- HOST_TYPE:
+ description: HOST to remove from the cluster
+ OSD:
type: string
- description: Type of Ceph node to be removed. Valid values are mon/osd/rgw
- default: 'rgw'
- ADMIN_HOST:
+ description: Remove only the listed OSDs at HOST (comma-separated list or *). Leave empty if you are going to remove a whole OSD node. Only relevant for OSD nodes
+ default: ''
+ CLUSTER_FLAGS:
type: string
- description: Host with admin keyring and access to cluster management
- default: 'cmn01*'
- GENERATE_CRUSHMAP:
- type: boolean
- default: 'false'
- description: Only if removing OSD host. Set to true if crush map file should be updated. Enforce has to happen manually unless it is specifically set to be enforced in pillar.
+ description: Flags to be applied before and after the pipeline (comma-separated list)
WAIT_FOR_HEALTHY:
type: boolean
default: 'true'
description: Wait for healthy during pipeline
- CLEANDISK:
+ CLEAN_ORPHANS:
type: boolean
default: 'false'
- description: Clean data/block partitions
-
+ description: Clean orphaned Ceph partitions which are no longer part of the cluster. Only relevant for OSD hosts and requires Luminous ceph-disk
+ FAST_WIPE:
+ type: boolean
+ default: 'true'
+ description: Wipe only the partition table of each removed disk; otherwise the whole disk will be wiped. Only relevant for OSD hosts
+ GENERATE_CRUSHMAP:
+ type: boolean
+ default: 'false'
+ description: Generate new crush map
diff --git a/jenkins/client/job/ceph/remove-osd.yml b/jenkins/client/job/ceph/remove-osd.yml
index d3a00b1..624d73d 100644
--- a/jenkins/client/job/ceph/remove-osd.yml
+++ b/jenkins/client/job/ceph/remove-osd.yml
@@ -32,10 +32,6 @@
type: string
description: These OSDs at HOST will be removed (comma-separated list)
default: '*'
- ADMIN_HOST:
- type: string
- description: Host with admin keyring and access to cluster management
- default: 'cmn01*'
CLUSTER_FLAGS:
type: string
description: Flags to be applied before and after the pipeline (comma-separated list)
@@ -43,10 +39,10 @@
type: boolean
default: 'true'
description: Wait for healthy during pipeline
- CLEANDISK:
+ FAST_WIPE:
type: boolean
- default: 'false'
- description: Clean data/block partitions
+ default: 'true'
+ description: Wipe only the partition table of each removed disk; otherwise the whole disk will be wiped. Only relevant for OSD hosts
CLEAN_ORPHANS:
type: boolean
default: 'false'
diff --git a/jenkins/client/job/ceph/upgrade.yml b/jenkins/client/job/ceph/upgrade.yml
index 9d352d7..ff8f666 100644
--- a/jenkins/client/job/ceph/upgrade.yml
+++ b/jenkins/client/job/ceph/upgrade.yml
@@ -34,6 +34,10 @@
type: boolean
default: 'true'
description: Wait for healthy during pipeline
+ ASK_CONFIRMATION:
+ type: boolean
+ description: Ask for manual confirmation
+ default: 'true'
CLUSTER_FLAGS:
type: string
default: 'noout'
diff --git a/jenkins/client/job/deploy/backupninja_backup.yml b/jenkins/client/job/deploy/backupninja_backup.yml
index 1089cfa..5473c1f 100644
--- a/jenkins/client/job/deploy/backupninja_backup.yml
+++ b/jenkins/client/job/deploy/backupninja_backup.yml
@@ -32,6 +32,9 @@
BACKUP_DOGTAG:
type: boolean
default: 'true'
+ BACKUP_KEYSTONE_CREDENTIAL_KEYS:
+ type: boolean
+ default: 'true'
trigger:
timer:
enabled: false
diff --git a/jenkins/client/job/deploy/backupninja_restore.yml b/jenkins/client/job/deploy/backupninja_restore.yml
index 76a594e..14c28ac 100644
--- a/jenkins/client/job/deploy/backupninja_restore.yml
+++ b/jenkins/client/job/deploy/backupninja_restore.yml
@@ -30,3 +30,6 @@
RESTORE_DOGTAG:
type: boolean
default: 'true'
+ RESTORE_KEYSTONE_CREDENTIAL_KEYS:
+ type: boolean
+ default: 'true'
diff --git a/jenkins/client/job/deploy/cleanup.yml b/jenkins/client/job/deploy/cleanup.yml
new file mode 100644
index 0000000..1d0a2b6
--- /dev/null
+++ b/jenkins/client/job/deploy/cleanup.yml
@@ -0,0 +1,25 @@
+parameters:
+ jenkins:
+ client:
+ job:
+ openstack_database_cleanup:
+ type: workflow-scm
+ name: openstack-database-cleanup
+ display_name: "Deploy - Openstack Database Cleanup"
+ discard:
+ build:
+ keep_num: 50
+ concurrent: true
+ scm:
+ type: git
+ url: "${_param:jenkins_gerrit_url}/mk/mk-pipelines"
+ branch: "${_param:jenkins_pipelines_branch}"
+ credentials: ${_param:jenkins_gerrit_credentials}
+ script: openstack-database-cleanup.groovy
+ param:
+ SALT_MASTER_CREDENTIALS:
+ type: string
+ default: "salt"
+ SALT_MASTER_URL:
+ type: string
+ default: "${_param:jenkins_salt_api_url}"
diff --git a/jenkins/client/job/deploy/update/init.yml b/jenkins/client/job/deploy/update/init.yml
index afa0aa6..710fd5f 100644
--- a/jenkins/client/job/deploy/update/init.yml
+++ b/jenkins/client/job/deploy/update/init.yml
@@ -27,3 +27,4 @@
- system.jenkins.client.job.deploy.update.update_glusterfs_servers
- system.jenkins.client.job.deploy.update.update_glusterfs_clients
- system.jenkins.client.job.deploy.update.update_glusterfs_cluster_op_version
+ - system.jenkins.client.job.deploy.cleanup
diff --git a/keystone/server/fernet_rotation/cluster.yml b/keystone/server/fernet_rotation/cluster.yml
index cf7b328..4db82cc 100644
--- a/keystone/server/fernet_rotation/cluster.yml
+++ b/keystone/server/fernet_rotation/cluster.yml
@@ -1,9 +1,17 @@
+classes:
+- system.backupninja.client.single
+- system.openssh.client.root
parameters:
_param:
fernet_rotation_driver: 'rsync'
credential_rotation_driver: 'rsync'
+ openstack_control_node01_hostname: ctl01
keystone:
server:
+ initial_data:
+ home_dir: /srv/volumes/backup/backupninja
+ host: ${_param:openstack_control_node01_hostname}.${_param:cluster_domain}
+ source: ${_param:infra_kvm_node03_address}
tokens:
fernet_sync_nodes_list:
sync_node01:
@@ -44,3 +52,10 @@
user: keystone
hour: 0
minute: 0
+ backupninja:
+ client:
+ target:
+ home_dir: /srv/volumes/backup/backupninja
+ engine: rsync
+ engine_opts: "-av --delete --recursive --safe-links"
+ host: ${_param:infra_kvm_node03_address}
diff --git a/keystone/server/fernet_rotation/single.yml b/keystone/server/fernet_rotation/single.yml
index 7514086..4bd09e9 100644
--- a/keystone/server/fernet_rotation/single.yml
+++ b/keystone/server/fernet_rotation/single.yml
@@ -1,9 +1,17 @@
+classes:
+- system.backupninja.client.single
+- system.openssh.client.root
parameters:
_param:
+ openstack_control_node01_hostname: ctl01
fernet_rotation_driver: 'rsync'
credential_rotation_driver: 'rsync'
keystone:
server:
+ initial_data:
+ home_dir: /srv/volumes/backup/backupninja
+ host: ${_param:openstack_control_node01_hostname}.${_param:cluster_domain}
+ source: ${_param:infra_kvm_node03_address}
tokens:
fernet_rotation_driver: ${_param:fernet_rotation_driver}
credential:
@@ -30,3 +38,10 @@
user: keystone
hour: 0
minute: 0
+ backupninja:
+ client:
+ target:
+ home_dir: /srv/volumes/backup/backupninja
+ engine: rsync
+ engine_opts: "-av --delete --recursive --safe-links"
+ host: ${_param:infra_kvm_node03_address}
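
Note: both fernet_rotation variants now seed initial key data from the backup host on first deploy (initial_data) and register the node as a backupninja rsync client. Under those pillar values the transfer backupninja performs should be roughly equivalent to the following (a sketch only; engine_opts come from the pillar above, while the source path and the rendered destination address are assumptions):

    import subprocess

    # -av --delete --recursive --safe-links: engine_opts from the pillar above
    subprocess.run(
        ["rsync", "-av", "--delete", "--recursive", "--safe-links",
         "/etc/keystone/fernet-keys",                           # assumed source dir
         "root@10.0.0.243:/srv/volumes/backup/backupninja/"],   # assumed render of infra_kvm_node03_address
        check=True,
    )
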
diff --git a/openssh/server/team/members/pvaylov.yml b/openssh/server/team/members/pvaylov.yml
new file mode 100644
index 0000000..7d90860
--- /dev/null
+++ b/openssh/server/team/members/pvaylov.yml
@@ -0,0 +1,19 @@
+parameters:
+ linux:
+ system:
+ user:
+ pvaylov:
+ email: pvaylov@mirantis.com
+ enabled: true
+ full_name: Pavel Vaylov
+ home: /home/pvaylov
+ name: pvaylov
+ sudo: ${_param:linux_system_user_sudo}
+ openssh:
+ server:
+ user:
+ pvaylov:
+ enabled: true
+ public_keys:
+ - key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDHwOflKJpgQkjO90DxUWz8tdWWY9xMl4lauzPopOV+dnhJ7eyo78gGBdTi3xprguOTncktcZAss3GDapTblLbg7xoofy8qke180+LJVGFSJLACWg74e1UiuxQRv66MNjafCIy337D6RoxNwUSaf0DQGs5ainmxri/PFlnEHAYkri4mF16pKFuw5cFkAlDsLH17ok5S0Rk34zZVywVT4VsPttKbS7vzyejyL0sh8GyBfEXIRlrn6I6GjTWpI5R4rYPbbm2XfOFfQ6aYRzBReJLm8FcTHjXjYlm+LDXaAzQbaiV2W+dmCMnawPD3jZ7uQd9Ob9lYyD43V/wS7BAreIxv
+ user: ${linux:system:user:pvaylov}
diff --git a/openssh/server/team/members/srogov.yml b/openssh/server/team/members/srogov.yml
new file mode 100644
index 0000000..ae6b4a5
--- /dev/null
+++ b/openssh/server/team/members/srogov.yml
@@ -0,0 +1,19 @@
+parameters:
+ linux:
+ system:
+ user:
+ srogov:
+ email: srogov@mirantis.com
+ enabled: true
+ full_name: Stepan Rogov
+ home: /home/srogov
+ name: srogov
+ sudo: ${_param:linux_system_user_sudo}
+ openssh:
+ server:
+ user:
+ srogov:
+ enabled: true
+ public_keys:
+ - key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCjlSWDL7hIgLbklKBVXIyNKxi3U4cbUSxPDFgUAf1Fp0mz/6o5jUuJjGMVtAYvQFBQbi8Hn0wX2bdubyFnD21M+9jQL05fqQGsG/JPq+u/XYj+k+WVLyrjTWMoL4xs9vAwjAJfDaWZ4BDwfWCAVe86OzjuFo2LvDl08YJpLTwwCqpUR9htvZGxR9UvJr7JV2xNqD9xUl3TK0n9Fo0aWbWiszImRO6KLRASKw8x0GgNjzP5qGI02x34tW9nL1yTjRxMweVyyh91OWdGLTp1ghixb2ZRJWPQ99UaWKv0pGCBbyjjmQCmODffSstIcFpGmqqZ8Y6aP6ESOYPXJccAvc0/ chlorum@chlorum
+ user: ${linux:system:user:srogov}
diff --git a/openssh/server/team/members/srudyka.yml b/openssh/server/team/members/srudyka.yml
new file mode 100644
index 0000000..0321ea4
--- /dev/null
+++ b/openssh/server/team/members/srudyka.yml
@@ -0,0 +1,19 @@
+parameters:
+ linux:
+ system:
+ user:
+ srudyka:
+ email: srudyka@mirantis.com
+ enabled: true
+ full_name: Sergii Rudyka
+ home: /home/srudyka
+ name: srudyka
+ sudo: ${_param:linux_system_user_sudo}
+ openssh:
+ server:
+ user:
+ srudyka:
+ enabled: true
+ public_keys:
+ - key: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGps6KXDEuWoTRAMkCPQI/sDaLcPwnq0fDgKSTjqBd1N
+ user: ${linux:system:user:srudyka}
diff --git a/openssh/server/team/services.yml b/openssh/server/team/services.yml
index ef0546e..4c9fbf3 100644
--- a/openssh/server/team/services.yml
+++ b/openssh/server/team/services.yml
@@ -22,10 +22,13 @@
- system.openssh.server.team.members.oaleksieiev
- system.openssh.server.team.members.pbasov
- system.openssh.server.team.members.pruzicka
+- system.openssh.server.team.members.pvaylov
- system.openssh.server.team.members.sburns
+- system.openssh.server.team.members.srogov
- system.openssh.server.team.members.vkuspits
- system.openssh.server.team.members.yisakov
- system.openssh.server.team.members.qwu
+- system.openssh.server.team.members.srudyka
parameters:
_param:
linux_system_user_sudo: true
diff --git a/prometheus/alerta/init.yml b/prometheus/alerta/init.yml
index 6e16b63..2e82f44 100644
--- a/prometheus/alerta/init.yml
+++ b/prometheus/alerta/init.yml
@@ -1,5 +1,5 @@
classes:
- - service.prometheus.alerta
+- service.prometheus.alerta
parameters:
prometheus:
alerta:
diff --git a/prometheus/alertmanager/notification/alerta.yml b/prometheus/alertmanager/notification/alerta.yml
index 816aaf6..729aa26 100644
--- a/prometheus/alertmanager/notification/alerta.yml
+++ b/prometheus/alertmanager/notification/alerta.yml
@@ -16,5 +16,5 @@
enabled: true
webhook_configs:
alerta:
- url: "http://${_param:alertmanager_notification_alerta_host}:${_param:alertmanager_notification_alerta_port}/api/webhooks/prometheus"
+ url: "http://${_param:alertmanager_notification_alerta_host}:${_param:alertmanager_notification_alerta_port}/api/webhooks/prometheus?api-key=${_param:alerta_admin_key}"
send_resolved: true
diff --git a/prometheus/gainsight/query/openstack.yml b/prometheus/gainsight/query/openstack.yml
index e89483b..3615ad4 100644
--- a/prometheus/gainsight/query/openstack.yml
+++ b/prometheus/gainsight/query/openstack.yml
@@ -2,15 +2,15 @@
prometheus:
gainsight:
queries:
- vcpu_used: "'vCPU Used','avg(sum(avg_over_time(openstack_nova_used_vcpus[24h])) by (instance))'"
- vcpu_free: "'vCPU Free','avg(sum(avg_over_time(openstack_nova_free_vcpus[24h])) by (instance))'"
- vstorage_used: "'vStorage Used','avg(sum(avg_over_time(openstack_nova_used_disk[24h])) by (instance))'"
- vstorage_free: "'vStorage Free','avg(sum(avg_over_time(openstack_nova_free_disk[24h])) by (instance))'"
- vram_used: "'vRAM Used','avg(sum(avg_over_time(openstack_nova_used_ram[24h])) by (instance))'"
- vram_free: "'vRAM Free','avg(sum(avg_over_time(openstack_nova_free_ram[24h])) by (instance))'"
- instances: "'Instances','avg(sum(avg_over_time(openstack_nova_instances{state=\"active\"}[24h])) by (instance))'"
- compute_nodes: "'Compute Nodes','avg(sum(openstack_nova_services{binary=~\"nova.compute\"}) by (instance))'"
- tenants: "'Tenants','avg(sum(avg_over_time(openstack_keystone_tenants_total[24h])) by (instance))'"
+ vcpu_used: "'vCPU Used','max(sum by (instance) (avg_over_time(openstack_nova_used_vcpus[24h]) and on (hostname) (openstack_nova_service_status == 1 and openstack_nova_service_state == 1)))'"
+ vcpu_free: "'vCPU Free','max(sum by (instance) (avg_over_time(openstack_nova_free_vcpus[24h]) and on (hostname) (openstack_nova_service_status == 1 and openstack_nova_service_state == 1)))'"
+ vstorage_used: "'vStorage Used','max(sum by (instance) (avg_over_time(openstack_nova_used_disk[24h]) and on (hostname) (openstack_nova_service_status == 1 and openstack_nova_service_state == 1)))'"
+ vstorage_free: "'vStorage Free','max(sum by (instance) (avg_over_time(openstack_nova_free_disk[24h]) and on (hostname) (openstack_nova_service_status == 1 and openstack_nova_service_state == 1)))'"
+ vram_used: "'vRAM Used','max(sum by (instance) (avg_over_time(openstack_nova_used_ram[24h]) and on (hostname) (openstack_nova_service_status == 1 and openstack_nova_service_state == 1)))'"
+ vram_free: "'vRAM Free','max(sum by (instance) (avg_over_time(openstack_nova_free_ram[24h]) and on (hostname) (openstack_nova_service_status == 1 and openstack_nova_service_state == 1)))'"
+ instances: "'Instances','ceil(max(avg_over_time(openstack_nova_instances{state=\"active\"}[24h])))'"
+ compute_nodes: "'Compute Nodes','max(sum by (instance) (openstack_nova_services{binary=~\"nova.compute\"}))'"
+ tenants: "'Tenants','ceil(max(avg_over_time(openstack_keystone_tenants_total[24h])))'"
cinder_api: "'Cinder API','avg_over_time(service_name:openstack_api_check_status:avg5m:for5m:ceil:avg5m:floor{service_name=\"cinderv2\"}[24h]) * 100'"
nova_api: "'Nova API','avg_over_time(service_name:openstack_api_check_status:avg5m:for5m:ceil:avg5m:floor{service_name=\"nova\"}[24h]) * 100'"
keystone_api: "'Keystone API','avg_over_time(service_name:openstack_api_check_status:avg5m:for5m:ceil:avg5m:floor{service_name=\"keystone\"}[24h]) * 100'"