SL: Add Prometheus support for KQueen metrics

- kqueen target added
- etcd target and alert ruleset added
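
For reference, the static targets below are expected to render into a
scrape config roughly like this (a sketch only: job names and layout
follow the formula's static-target template, and
${_param:haproxy_bind_address} is shown as 127.0.0.1 for illustration):

  scrape_configs:
    - job_name: 'etcd'
      static_configs:
        - targets: ['127.0.0.1:4001']
    - job_name: 'kqueen'
      static_configs:
        - targets: ['127.0.0.1:5000']
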
Related-US: https://mirantis.jira.com/browse/PROD-18552
Related-Topic: https://gerrit.mcp.mirantis.net/#/q/topic:kqueen-sl+(status:open+OR+status:merged)
Change-Id: I4e588950c9014da076f160f5d2c5d0b081e2373e
diff --git a/prometheus/server/alert/etcd.yml b/prometheus/server/alert/etcd.yml
new file mode 100644
index 0000000..9c456ab
--- /dev/null
+++ b/prometheus/server/alert/etcd.yml
@@ -0,0 +1,142 @@
+parameters:
+  prometheus:
+    server:
+      alert:
+          InsufficientMembers:
+            if: >-
+              count(up{job="etcd"} == 0) > (count(up{job="etcd"}) / 2 - 1)
+            for: 3m
+            labels:
+              severity: critical
+            annotations:
+              description: 'If one more etcd member goes down, the cluster will be unavailable'
+              summary: 'etcd cluster insufficient members'
+          NoLeader:
+            if: >-
+              etcd_server_has_leader{job="etcd"} == 0
+            for: 1m
+            labels:
+              severity: critical
+            annotations:
+              description: 'etcd member {{ $labels.instance }} has no leader'
+              summary: 'etcd member has no leader'
+          HighNumberOfLeaderChanges:
+            if: >-
+              increase(etcd_server_leader_changes_seen_total{job="etcd"}[1h]) > 3
+            labels:
+              severity: warning
+            annotations:
+              description: 'etcd instance {{ $labels.instance }} has seen {{ $value }} leader changes within the last hour'
+              summary: 'a high number of leader changes within the etcd cluster are happening'
+          HighNumberOfFailedGRPCRequestsWarning:
+            if: >-
+              100 * (sum(rate(grpc_server_handled_total{grpc_code!="OK",job="etcd"}[5m])) BY (grpc_service, grpc_method) / sum(rate(grpc_server_handled_total{job="etcd"}[5m])) BY (grpc_service, grpc_method)) > 1
+            for: 10m
+            labels:
+              severity: warning
+            annotations:
+              description: '{{ $value }}% of requests for {{ $labels.grpc_method }} failed on etcd instance {{ $labels.instance }}'
+              summary: 'a high number of gRPC requests are failing'
+          HighNumberOfFailedGRPCRequestsCritical:
+            if: >-
+              100 * (sum(rate(grpc_server_handled_total{grpc_code!="OK",job="etcd"}[5m])) BY (grpc_service, grpc_method) / sum(rate(grpc_server_handled_total{job="etcd"}[5m])) BY (grpc_service, grpc_method)) > 5
+            for: 5m
+            labels:
+              severity: critical
+            annotations:
+              description: '{{ $value }}% of requests for {{ $labels.grpc_method }} failed on etcd instance {{ $labels.instance }}'
+              summary: 'a high number of gRPC requests are failing'
+          GRPCRequestsSlow:
+            if: >-
+              histogram_quantile(0.99, sum(rate(grpc_server_handling_seconds_bucket{job="etcd",grpc_type="unary"}[5m])) by (grpc_service, grpc_method, le)) > 0.15
+            for: 10m
+            labels:
+              severity: critical
+            annotations:
+              description: 'on etcd instance {{ $labels.instance }} gRPC requests to {{ $labels.grpc_method }} are slow'
+              summary: 'slow gRPC requests'
+          HighNumberOfFailedHTTPRequestsWarning:
+            if: >-
+              100 * (sum(rate(etcd_http_failed_total{job="etcd"}[5m])) BY (method) / sum(rate(etcd_http_received_total{job="etcd"}[5m])) BY (method)) > 1
+            for: 10m
+            labels:
+              severity: warning
+            annotations:
+              description: '{{ $value }}% of requests for {{ $labels.method }} failed on etcd instance {{ $labels.instance }}'
+              summary: 'a high number of HTTP requests are failing'
+          HighNumberOfFailedHTTPRequestsCritical:
+            if: >-
+              100 * (sum(rate(etcd_http_failed_total{job="etcd"}[5m])) BY (method) / sum(rate(etcd_http_received_total{job="etcd"}[5m])) BY (method)) > 5
+            for: 5m
+            labels:
+              severity: critical
+            annotations:
+              description: '{{ $value }}% of requests for {{ $labels.method }} failed on etcd instance {{ $labels.instance }}'
+              summary: 'a high number of HTTP requests are failing'
+          HTTPRequestsSlow:
+            if: >-
+              histogram_quantile(0.99, rate(etcd_http_successful_duration_seconds_bucket[5m])) > 0.15
+            for: 10m
+            labels:
+              severity: warning
+            annotations:
+              description: 'on etcd instance {{ $labels.instance }} HTTP requests to {{ $labels.method }} are slow'
+              summary: 'slow HTTP requests'
+          FdUtilization:
+            record: instance:fd_utilization
+            if: >-
+              process_open_fds / process_max_fds
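+          # The instance:fd_utilization series recorded above drives the FdExhaustionClose alerts below.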
+          FdExhaustionCloseWarning:
+            if: >-
+              predict_linear(instance:fd_utilization[1h], 3600 * 4) > 1
+            for: 10m
+            labels:
+              severity: warning
+            annotations:
+              description: '{{ $labels.job }} instance {{ $labels.instance }} will exhaust its file descriptors within the next 4 hours'
+              summary: 'file descriptors soon exhausted'
+          FdExhaustionCloseCritical:
+            if: >-
+              predict_linear(instance:fd_utilization[10m], 3600) > 1
+            for: 10m
+            labels:
+              severity: critical
+            annotations:
+              description: '{{ $labels.job }} instance {{ $labels.instance }} will exhaust its file descriptors within the next hour'
+              summary: 'file descriptors soon exhausted'
+          EtcdMemberCommunicationSlow:
+            if: >-
+              histogram_quantile(0.99, rate(etcd_network_member_round_trip_time_seconds_bucket[5m])) > 0.15
+            for: 10m
+            labels:
+              severity: warning
+            annotations:
+              description: 'etcd instance {{ $labels.instance }} member communication with {{ $labels.To }} is slow'
+              summary: 'etcd member communication is slow'
+          HighNumberOfFailedProposals:
+            if: >-
+              increase(etcd_server_proposals_failed_total{job="etcd"}[1h]) > 5
+            labels:
+              severity: warning
+            annotations:
+              description: 'etcd instance {{ $labels.instance }} has seen {{ $value }} proposal failures within the last hour'
+              summary: 'a high number of proposals within the etcd cluster are failing'
+          HighFsyncDurations:
+            if: >-
+              histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[5m])) > 0.5
+            for: 10m
+            labels:
+              severity: warning
+            annotations:
+              description: 'etcd instance {{ $labels.instance }} fsync durations are high'
+              summary: 'high fsync durations'
+          HighCommitDurations:
+            if: >-
+              histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket[5m])) > 0.25
+            for: 10m
+            labels:
+              severity: warning
+            annotations:
+              description: 'etcd instance {{ $labels.instance }} commit durations are high'
+              summary: 'high commit durations'
diff --git a/prometheus/server/target/static/etcd.yml b/prometheus/server/target/static/etcd.yml
new file mode 100644
index 0000000..61488b5
--- /dev/null
+++ b/prometheus/server/target/static/etcd.yml
@@ -0,0 +1,14 @@
+parameters:
+  _param:
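+    # Assumes the etcd client/metrics API is reachable via the HAProxy bind address on port 4001.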
+    prometheus_etcd_host: ${_param:haproxy_bind_address}
+    prometheus_etcd_port: 4001
+  prometheus:
+    server:
+      target:
+        static:
+          etcd:
+            enabled: true
+            endpoint:
+            - address: ${_param:prometheus_etcd_host}
+              port: ${_param:prometheus_etcd_port}
diff --git a/prometheus/server/target/static/kqueen.yml b/prometheus/server/target/static/kqueen.yml
new file mode 100644
index 0000000..701f4cd
--- /dev/null
+++ b/prometheus/server/target/static/kqueen.yml
@@ -0,0 +1,14 @@
+parameters:
+  _param:
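+    # Assumes the KQueen API is exposed via the HAProxy bind address on its default port 5000.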
+    prometheus_kqueen_api_host: ${_param:haproxy_bind_address}
+    prometheus_kqueen_api_port: 5000
+  prometheus:
+    server:
+      target:
+        static:
+          kqueen:
+            enabled: true
+            endpoint:
+            - address: ${_param:prometheus_kqueen_api_host}
+              port: ${_param:prometheus_kqueen_api_port}
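
Verification note: once the formula renders these parameters into an
actual Prometheus rule file (the rendered path is deployment-specific),
the result can be sanity-checked with "promtool check rules
<rendered-file>" (Prometheus 2.x promtool).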