Merge "Add options auth.allow and auth.reject to all glusterfs volumes" into release/proposed/2019.2.0
diff --git a/defaults/docker_images.yml b/defaults/docker_images.yml
index 3f21778..25f61c1 100644
--- a/defaults/docker_images.yml
+++ b/defaults/docker_images.yml
@@ -29,7 +29,7 @@
     docker_image_alerta: "${_param:mcp_docker_registry}/mirantis/external/alerta-web:${_param:mcp_version}"
     docker_image_alertmanager: "${_param:mcp_docker_registry}/openstack-docker/alertmanager:2019.2.4"
     docker_image_grafana: "${_param:mcp_docker_registry}/openstack-docker/grafana:${_param:mcp_version}"
-    docker_image_prometheus_es_exporter: "${_param:mcp_docker_registry}/mirantis/external/braedon/prometheus-es-exporter:0.5.1"
+    docker_image_prometheus_es_exporter: "${_param:mcp_docker_registry}/openstack-docker/prometheus-es-exporter:0.5.1-mcp0"
     docker_image_prometheus: "${_param:mcp_docker_registry}/openstack-docker/prometheus:${_param:mcp_version}"
     docker_image_prometheus_gainsight: "${_param:mcp_docker_registry}/openstack-docker/gainsight:2019.2.3"
     docker_image_prometheus_gainsight_elasticsearch: "${_param:mcp_docker_registry}/openstack-docker/gainsight_elasticsearch:${_param:mcp_version}"
@@ -38,7 +38,7 @@
     docker_image_remote_agent: "${_param:mcp_docker_registry}/openstack-docker/telegraf:2019.2.4"
     docker_image_remote_collector: "${_param:mcp_docker_registry}/openstack-docker/heka:${_param:mcp_version}"
     docker_image_remote_storage_adapter: "${_param:mcp_docker_registry}/openstack-docker/remote_storage_adapter:${_param:mcp_version}"
-    docker_image_sf_notifier: "${_param:mcp_docker_registry}/openstack-docker/sf_notifier:2019.2.3"
+    docker_image_sf_notifier: "${_param:mcp_docker_registry}/openstack-docker/sf_notifier:0.2-mcp0"
     ##
     docker_image_cockroachdb: "${_param:mcp_docker_registry}/mirantis/external/cockroach/cockroach:v2.1.1"
     # keycloak
diff --git a/defaults/haproxy/elasticsearch.yml b/defaults/haproxy/elasticsearch.yml
new file mode 100644
index 0000000..07db053
--- /dev/null
+++ b/defaults/haproxy/elasticsearch.yml
@@ -0,0 +1,6 @@
+parameters:
+  _param:
+    haproxy_elasticsearch_http_bind_port: 9200
+    haproxy_elasticsearch_http_exposed_port: 9200
+    haproxy_elasticsearch_binary_bind_port: 9300
+    haproxy_elasticsearch_binary_exposed_port: 9300
diff --git a/defaults/haproxy/init.yml b/defaults/haproxy/init.yml
new file mode 100644
index 0000000..499e085
--- /dev/null
+++ b/defaults/haproxy/init.yml
@@ -0,0 +1,2 @@
+classes:
+- system.defaults.haproxy.elasticsearch
diff --git a/defaults/init.yml b/defaults/init.yml
index d2b2e0c..2763914 100644
--- a/defaults/init.yml
+++ b/defaults/init.yml
@@ -23,6 +23,7 @@
 - system.defaults.salt
 - system.defaults.stacklight
 - system.defaults.xtrabackup
+- system.defaults.haproxy
 parameters:
   _param:
     mcp_version: stable
diff --git a/defaults/openstack/init.yml b/defaults/openstack/init.yml
index 8d41c37..44b334b 100644
--- a/defaults/openstack/init.yml
+++ b/defaults/openstack/init.yml
@@ -37,7 +37,7 @@
     openstack_service_user_options:
       ignore_change_password_upon_first_use: True
       ignore_password_expiry: True
-      ignore_lockout_failure_attempts: False
+      ignore_lockout_failure_attempts: True
       lock_password: False
     # Cinder
     cinder_memcache_security_enabled: ${_param:openstack_memcache_security_enabled}
diff --git a/defaults/salt/init.yml b/defaults/salt/init.yml
index 2e19089..feb27d7 100644
--- a/defaults/salt/init.yml
+++ b/defaults/salt/init.yml
@@ -48,3 +48,11 @@
     salt_control_trusty_image: ${_param:mcp_static_images_url}/ubuntu-14-04-x64-mcp${_param:mcp_version}.qcow2
     salt_control_xenial_image: ${_param:mcp_static_images_url}/ubuntu-16-04-x64-mcp${_param:mcp_version}.qcow2
 
+    salt_master_api_permissions:
+    - '.*'
+    - '@local'
+    - '@wheel'   # to allow access to all wheel modules
+    - '@runner'  # to allow access to all runner modules
+    - '@jobs'    # to allow access to the jobs runner and/or wheel module
+
+    salt_minion_ca_authority: salt_master_ca
diff --git a/defaults/stacklight.yml b/defaults/stacklight.yml
index 1abbb5e..e969319 100644
--- a/defaults/stacklight.yml
+++ b/defaults/stacklight.yml
@@ -1,5 +1,9 @@
 parameters:
   _param:
+    # ELK settings
+    stacklight_elasticsearch_port: 9200
+    stacklight_notification_topic: stacklight_notifications
+
     # ELK stack versions
     elasticsearch_version: 5
     kibana_version: 5
diff --git a/docker/host.yml b/docker/host.yml
index a88ff2f..894f6ee 100644
--- a/docker/host.yml
+++ b/docker/host.yml
@@ -14,6 +14,7 @@
         - ${_param:cluster_vip_address}:5000
         - ${_param:cluster_public_host}:5000
       options:
+        bridge: none
         ipv6: true
         fixed-cidr-v6: fc00::/7
         storage-driver: overlay2
diff --git a/docker/swarm/stack/jenkins/slave_base.yml b/docker/swarm/stack/jenkins/slave_base.yml
index 1c2d6f8..91601ab 100644
--- a/docker/swarm/stack/jenkins/slave_base.yml
+++ b/docker/swarm/stack/jenkins/slave_base.yml
@@ -3,7 +3,6 @@
 parameters:
   _param:
     jenkins_master_host: ${_param:control_vip_address}
-    jenkins_secret: "7c40abc1a7df2d26dd6b2e4421af17218df75a16fcbd5e3aa6017d9f47eaeabe"
     jenkins_master_url: http://${_param:jenkins_master_host}:${_param:jenkins_master_port}
     jenkins_slave_user: ${_param:jenkins_client_user}
     jenkins_slave_password: ${_param:jenkins_client_password}
diff --git a/docker/swarm/stack/monitoring/prometheus/init.yml b/docker/swarm/stack/monitoring/prometheus/init.yml
index 65dd5b9..d7db52c 100644
--- a/docker/swarm/stack/monitoring/prometheus/init.yml
+++ b/docker/swarm/stack/monitoring/prometheus/init.yml
@@ -32,6 +32,7 @@
               volumes:
                 - ${prometheus:server:dir:config}:${_param:prometheus_server_config_directory}
                 - ${prometheus:server:dir:data}:${_param:prometheus_server_data_directory}
+                - /etc/ssl/certs/ca-certificates.crt:/etc/ssl/certs/ca-certificates.crt:ro
               environment:
                 PROMETHEUS_CONFIG_DIR: ${_param:prometheus_server_config_directory}
                 PROMETHEUS_DATA_DIR: ${_param:prometheus_server_data_directory}
diff --git a/docker/swarm/stack/monitoring/sf_notifier.yml b/docker/swarm/stack/monitoring/sf_notifier.yml
index a171ce8..b8b2dd6 100644
--- a/docker/swarm/stack/monitoring/sf_notifier.yml
+++ b/docker/swarm/stack/monitoring/sf_notifier.yml
@@ -29,8 +29,6 @@
               image: ${_param:docker_image_sf_notifier}
               ports:
                 - 15018:5000
-              volumes:
-                - ${prometheus:sf_notifier:dir:logs}:/var/log/sf-notifier
               environment:
                 SF_NOTIFIER_WORKERS: ${_param:sf_notifier_workers}
                 SF_NOTIFIER_BUFFER_SIZE: ${_param:sf_notifier_buffer_size}
diff --git a/elasticsearch/client/ssl.yml b/elasticsearch/client/ssl.yml
new file mode 100644
index 0000000..9f0f9c2
--- /dev/null
+++ b/elasticsearch/client/ssl.yml
@@ -0,0 +1,5 @@
+parameters:
+  elasticsearch:
+    client:
+      server:
+        scheme: https
diff --git a/elasticsearch/server/cluster.yml b/elasticsearch/server/cluster.yml
index 76774aa..155cfdf 100644
--- a/elasticsearch/server/cluster.yml
+++ b/elasticsearch/server/cluster.yml
@@ -5,7 +5,7 @@
   _param:
     java_environment_version: "8"
     java_environment_platform: openjdk
-    elasticsearch_cluster_name: elasticsearch
+    elasticsearch_cluster_name: ${_param:cluster_name}
   linux:
     system:
       sysctl:
@@ -17,7 +17,6 @@
   elasticsearch:
     server:
       version: ${_param:elasticsearch_version}
-      name: ${_param:elasticsearch_cluster_name}
       enabled: true
       master: true
       data: true
@@ -35,6 +34,7 @@
         recover_after_nodes: 2
         recover_after_time: 5m
       cluster:
+        name: ${_param:elasticsearch_cluster_name}
         multicast: false
         minimum_master_nodes: 2
         members:
diff --git a/fluentd/label/default_metric/prometheus_ssl.yml b/fluentd/label/default_metric/prometheus_ssl.yml
new file mode 100644
index 0000000..292c481
--- /dev/null
+++ b/fluentd/label/default_metric/prometheus_ssl.yml
@@ -0,0 +1,9 @@
+parameters:
+  fluentd:
+    agent:
+      config:
+        input:
+          prometheus:
+            metric:
+              ssl:
+                enabled: True
diff --git a/fluentd/label/default_output/elasticsearch_ssl.yml b/fluentd/label/default_output/elasticsearch_ssl.yml
new file mode 100644
index 0000000..da3a5a7
--- /dev/null
+++ b/fluentd/label/default_output/elasticsearch_ssl.yml
@@ -0,0 +1,9 @@
+parameters:
+  fluentd:
+    agent:
+      config:
+        label:
+          default_output:
+            match:
+              elasticsearch_output:
+                scheme: https
diff --git a/fluentd/label/notifications/audit.yml b/fluentd/label/notifications/audit.yml
new file mode 100644
index 0000000..49ea953
--- /dev/null
+++ b/fluentd/label/notifications/audit.yml
@@ -0,0 +1,49 @@
+parameters:
+  fluentd:
+    agent:
+      config:
+        label:
+          audit_messages:
+            filter:
+              get_payload_values:
+                tag: audit
+                type: record_transformer
+                enable_ruby: true
+                record:
+                  - name: Logger
+                    value: ${fluentd:dollar}{ record.dig("publisher_id") }
+                  - name: Severity
+                    value: ${fluentd:dollar}{ {'TRACE'=>7,'DEBUG'=>7,'INFO'=>6,'AUDIT'=>6,'WARNING'=>4,'ERROR'=>3,'CRITICAL'=>2}[record['priority']].to_i }
+                  - name: Timestamp
+                    value: ${fluentd:dollar}{ DateTime.strptime(record.dig("payload", "eventTime"), "%Y-%m-%dT%H:%M:%S.%N%z").strftime("%Y-%m-%dT%H:%M:%S.%3NZ") }
+                  - name: notification_type
+                    value: ${fluentd:dollar}{ record.dig("event_type") }
+                  - name: severity_label
+                    value: ${fluentd:dollar}{ record.dig("priority") }
+                  - name: environment_label
+                    value: ${_param:cluster_domain}
+
+                  - name: action
+                    value: ${fluentd:dollar}{ record.dig("payload", "action") }
+                  - name: event_type
+                    value: ${fluentd:dollar}{ record.dig("payload", "eventType") }
+                  - name: outcome
+                    value: ${fluentd:dollar}{ record.dig("payload", "outcome") }
+              pack_payload_to_json:
+                tag: audit
+                require:
+                  - get_payload_values
+                type: record_transformer
+                enable_ruby: true
+                remove_keys: '["payload", "timestamp", "publisher_id", "priority"]'
+                record:
+                  - name: Payload
+                    value: ${fluentd:dollar}{ record["payload"].to_json }
+            match:
+              audit_output:
+                tag: audit
+                type: elasticsearch
+                host: ${_param:stacklight_log_address}
+                port: ${_param:stacklight_elasticsearch_port}
+                es_index_name: audit
+                tag_key: Type
diff --git a/fluentd/label/notifications/init.yml b/fluentd/label/notifications/init.yml
new file mode 100644
index 0000000..e4e57f8
--- /dev/null
+++ b/fluentd/label/notifications/init.yml
@@ -0,0 +1,4 @@
+classes:
+- system.fluentd.label.notifications.input_rabbitmq
+- system.fluentd.label.notifications.notifications
+- system.fluentd.label.notifications.audit
diff --git a/fluentd/label/notifications/input_rabbitmq.yml b/fluentd/label/notifications/input_rabbitmq.yml
new file mode 100644
index 0000000..7f97648
--- /dev/null
+++ b/fluentd/label/notifications/input_rabbitmq.yml
@@ -0,0 +1,105 @@
+parameters:
+  fluentd:
+    agent:
+      config:
+        label:
+          rabbitmq_notifications:
+            input:
+              tail_rabbitmq_info:
+                tag: raw_notifications
+                type: rabbitmq
+                host: ${_param:openstack_message_queue_address}
+                user: openstack
+                pass: ${_param:rabbitmq_openstack_password}
+                vhost: /openstack
+                queue: ${_param:stacklight_notification_topic}.info
+                routing_key: ${_param:stacklight_notification_topic}.info
+                parser:
+                  type: json
+              tail_rabbitmq_warn:
+                tag: raw_notifications
+                type: rabbitmq
+                host: ${_param:openstack_message_queue_address}
+                user: openstack
+                pass: ${_param:rabbitmq_openstack_password}
+                vhost: /openstack
+                queue: ${_param:stacklight_notification_topic}.warn
+                routing_key: ${_param:stacklight_notification_topic}.warn
+                parser:
+                  type: json
+              tail_rabbitmq_error:
+                tag: raw_notifications
+                type: rabbitmq
+                host: ${_param:openstack_message_queue_address}
+                user: openstack
+                pass: ${_param:rabbitmq_openstack_password}
+                vhost: /openstack
+                queue: ${_param:stacklight_notification_topic}.error
+                routing_key: ${_param:stacklight_notification_topic}.error
+                parser:
+                  type: json
+            filter:
+              parse_json:
+                tag: raw_notifications
+                type: parser
+                key_name: oslo.message
+                reserve_data: false
+                hash_value_field: parsed
+                parser:
+                  type: json
+              remove_context:
+                tag: raw_notifications
+                require:
+                  - parse_json
+                type: record_transformer
+                enable_ruby: true
+                remove_keys: _dummy_1
+                record:
+                  - name: _dummy_1
+                    value: ${fluentd:dollar}{record['parsed'].delete_if { |k,_| k.include?('_context_') }; nil}
+              pack_parsed_to_json:
+                tag: raw_notifications
+                require:
+                  - remove_context
+                type: record_transformer
+                enable_ruby: true
+                record:
+                  - name: parsed
+                    value: ${fluentd:dollar}{record["parsed"].to_json}
+              unpack_on_top_level:
+                tag: raw_notifications
+                require:
+                  - pack_parsed_to_json
+                type: parser
+                key_name: parsed
+                reserve_data: false
+                parser:
+                  type: json
+              detect_audit_notification:
+                tag: raw_notifications
+                require:
+                  - unpack_on_top_level
+                type: record_transformer
+                enable_ruby: true
+                record:
+                  - name: notification_type
+                    value: '${fluentd:dollar}{ record["payload"]["eventType"] && record["payload"]["eventTime"] ? "audit" : "notification" }'
+            match:
+              rewrite_message_tag:
+                tag: raw_notifications
+                type: rewrite_tag_filter
+                rule:
+                  - name: notification_type
+                    regexp: 'audit'
+                    result: audit
+                  - name: notification_type
+                    regexp: '/.+/'
+                    result: notification
+              forward_notification:
+                tag: notification
+                type: relabel
+                label: notification_messages
+              forward_audit:
+                tag: audit
+                type: relabel
+                label: audit_messages
diff --git a/fluentd/label/notifications/notifications.yml b/fluentd/label/notifications/notifications.yml
new file mode 100644
index 0000000..22be3c6
--- /dev/null
+++ b/fluentd/label/notifications/notifications.yml
@@ -0,0 +1,122 @@
+parameters:
+  fluentd:
+    agent:
+      config:
+        label:
+          notification_messages:
+            filter:
+              parse_publisher_host:
+                tag: notification
+                type: parser
+                key_name: publisher_id
+                reserve_data: true
+                parser:
+                  type: regexp
+                  format: (?<publisher>\w+).(?<hostname>\w+)
+              save_hostname:
+                tag: notification
+                require:
+                  - parse_publisher_host
+                type: record_transformer
+                enable_ruby: true
+                record:
+                  - name: Hostname
+                    value: ${fluentd:dollar}{ record["hostname"] }
+              parse_source:
+                tag: notification
+                require:
+                  - save_hostname
+                type: parser
+                key_name: event_type
+                reserve_data: true
+                parser:
+                  type: regexp
+                  format: (?<event_type_logger>\w+).+
+              map_logger:
+                tag: notification
+                require:
+                  - parse_source
+                type: record_transformer
+                enable_ruby: true
+                remove_keys: event_type_logger
+                record:
+                  - name: Logger
+                    value: ${fluentd:dollar}{ {'volume'=>'cinder', 'snapshot'=>'cinder', 'image'=>'glance', 'orchestration'=>'heat', 'identity'=>'keystone', 'compute'=>'nova', 'compute_task'=>'nova', 'scheduler'=>'nova', 'keypair'=>'nova', 'floatingip' =>'neutron', 'security_group' =>'neutron', 'security_group_rule' =>'neutron', 'network' =>'neutron', 'port' =>'neutron', 'router' =>'neutron', 'subnet' =>'neutron', 'sahara' =>'sahara'}[record["event_type_logger"]] }
+              get_payload_values:
+                tag: notification
+                require:
+                  - map_logger
+                type: record_transformer
+                enable_ruby: true
+                record:
+                  - name: Timestamp
+                    value: ${fluentd:dollar}{ DateTime.strptime(record['timestamp'], '%Y-%m-%d %H:%M:%S.%N').strftime('%Y-%m-%dT%H:%M:%S.%3NZ') }
+                  - name: severity_label
+                    value: ${fluentd:dollar}{ record["priority"] }
+                  - name: Severity
+                    value: ${fluentd:dollar}{ {'TRACE'=>7,'DEBUG'=>7,'INFO'=>6,'AUDIT'=>6,'WARNING'=>4,'ERROR'=>3,'CRITICAL'=>2}[record['priority']].to_i }
+                  - name: Hostname
+                    value: '${fluentd:dollar}{ record["payload"].has_key?("host") ? record["payload"]["host"] : record["Hostname"] }'
+                  - name: environment_label
+                    value: ${_param:cluster_domain}
+
+                  - name: tenant_id
+                    value: ${fluentd:dollar}{ record.dig("payload", "tenant_id") }
+                  - name: user_id
+                    value: ${fluentd:dollar}{ record.dig("payload", "user_id") }
+                  - name: display_name
+                    value: ${fluentd:dollar}{ record.dig("payload", "display_name") }
+                  - name: vcpus
+                    value: ${fluentd:dollar}{ record.dig("payload", "vcpus") }
+                  - name: availability_zone
+                    value: ${fluentd:dollar}{ record.dig("payload", "availability_zone") }
+                  - name: instance_id
+                    value: ${fluentd:dollar}{ record.dig("payload", "instance_id") }
+                  - name: instance_type
+                    value: ${fluentd:dollar}{ record.dig("payload", "instance_type") }
+                  - name: image_name
+                    value: ${fluentd:dollar}{ record.dig("payload", "image_name") }
+                  - name: memory_mb
+                    value: ${fluentd:dollar}{ record.dig("payload", "memory_mb") }
+                  - name: disk_gb
+                    value: ${fluentd:dollar}{ record.dig("payload", "disk_gb") }
+                  - name: state
+                    value: ${fluentd:dollar}{ record.dig("payload", "state") }
+                  - name: old_state
+                    value: ${fluentd:dollar}{ record.dig("payload", "old_state") }
+                  - name: old_task_state
+                    value: ${fluentd:dollar}{ record.dig("payload", "old_task_state") }
+                  - name: new_task_state
+                    value: ${fluentd:dollar}{ record.dig("payload", "new_task_state") }
+                  - name: network_id
+                    value: ${fluentd:dollar}{ record.dig("payload", "network_id") }
+                  - name: subnet_id
+                    value: ${fluentd:dollar}{ record.dig("payload", "subnet_id") }
+                  - name: port_id
+                    value: ${fluentd:dollar}{ record.dig("payload", "port_id") }
+                  - name: volume_id
+                    value: ${fluentd:dollar}{ record.dig("payload", "volume_id") }
+                  - name: size
+                    value: ${fluentd:dollar}{ record.dig("payload", "size") }
+                  - name: status
+                    value: ${fluentd:dollar}{ record.dig("payload", "status") }
+                  - name: replication_status
+                    value: ${fluentd:dollar}{ record.dig("payload", "replication_status") }
+              pack_payload_to_json:
+                tag: notification
+                require:
+                  - get_payload_values
+                type: record_transformer
+                enable_ruby: true
+                remove_keys: '["timestamp", "publisher_id", "priority", "notification_type", "payload"]'
+                record:
+                  - name: Payload
+                    value: ${fluentd:dollar}{ record["payload"].to_json }
+            match:
+              notifications_output:
+                tag: notification
+                type: elasticsearch
+                host: ${_param:stacklight_log_address}
+                port: ${_param:stacklight_elasticsearch_port}
+                es_index_name: notification
+                tag_key: Type
diff --git a/glance/client/image/octavia.yml b/glance/client/image/octavia.yml
index 3160cdd..2a00375 100644
--- a/glance/client/image/octavia.yml
+++ b/glance/client/image/octavia.yml
@@ -3,6 +3,7 @@
 parameters:
   glance:
     client:
+      cloud_name: admin_identity
       identity:
         admin_identity:
           endpoint_type: internalURL
diff --git a/glance/control/cluster.yml b/glance/control/cluster.yml
index a75f8c5..3eb7866 100644
--- a/glance/control/cluster.yml
+++ b/glance/control/cluster.yml
@@ -82,4 +82,3 @@
       storage:
         engine: file
       images: []
-      show_multiple_locations: True
diff --git a/glance/control/single.yml b/glance/control/single.yml
index ee2ae1a..24e9c3f 100644
--- a/glance/control/single.yml
+++ b/glance/control/single.yml
@@ -31,7 +31,6 @@
         protocol: ${_param:internal_protocol}
       registry:
         protocol: ${_param:internal_protocol}
-      show_multiple_locations: True
       barbican:
         enabled: ${_param:barbican_integration_enabled}
       message_queue:
diff --git a/haproxy/proxy/listen/openstack/large_setup.yml b/haproxy/proxy/listen/openstack/large_setup.yml
index 947cfce..c517779 100644
--- a/haproxy/proxy/listen/openstack/large_setup.yml
+++ b/haproxy/proxy/listen/openstack/large_setup.yml
@@ -8,4 +8,4 @@
 - system.haproxy.proxy.listen.openstack.keystone.large
 - system.haproxy.proxy.listen.openstack.neutron_large
 - system.haproxy.proxy.listen.openstack.nova_large
-- system.haproxy.proxy.listen.openstack.novanc_large
+- system.haproxy.proxy.listen.openstack.novnc_large
diff --git a/haproxy/proxy/listen/stacklight/elasticsearch.yml b/haproxy/proxy/listen/stacklight/elasticsearch.yml
index 582de6a..d684861 100644
--- a/haproxy/proxy/listen/stacklight/elasticsearch.yml
+++ b/haproxy/proxy/listen/stacklight/elasticsearch.yml
@@ -1,10 +1,6 @@
 parameters:
   _param:
     haproxy_elasticsearch_bind_host: ${_param:cluster_vip_address}
-    haproxy_elasticsearch_http_bind_port: 9200
-    haproxy_elasticsearch_http_exposed_port: 9200
-    haproxy_elasticsearch_binary_bind_port: 9300
-    haproxy_elasticsearch_binary_exposed_port: 9300
   haproxy:
     proxy:
       listen:
@@ -17,7 +13,7 @@
             - dontlog-normal
           balance: roundrobin
           binds:
-            - address: ${_param:haproxy_elasticsearch_bind_host}
+            - address: ${_param:cluster_vip_address}
               port: ${_param:haproxy_elasticsearch_http_bind_port}
           servers:
             - name: ${_param:cluster_node01_hostname}
diff --git a/haproxy/proxy/listen/stacklight/elasticsearch_ssl.yml b/haproxy/proxy/listen/stacklight/elasticsearch_ssl.yml
new file mode 100644
index 0000000..a50280e
--- /dev/null
+++ b/haproxy/proxy/listen/stacklight/elasticsearch_ssl.yml
@@ -0,0 +1,55 @@
+parameters:
+  _param:
+    haproxy_elasticsearch_bind_host: ${_param:cluster_vip_address}
+  haproxy:
+    proxy:
+      listen:
+        elasticsearch:
+          mode: http
+          options:
+            - httplog
+            - http-keep-alive
+            - prefer-last-server
+            - dontlog-normal
+          balance: roundrobin
+          binds:
+            - address: ${_param:cluster_vip_address}
+              port: ${_param:haproxy_elasticsearch_http_bind_port}
+              ssl:
+                enabled: true
+                pem_file: /etc/elasticsearch/elasticsearch.pem
+          servers:
+            - name: ${_param:cluster_node01_hostname}
+              host: ${_param:cluster_node01_address}
+              port: ${_param:haproxy_elasticsearch_http_exposed_port}
+              params: 'check inter 10s fastinter 2s downinter 3s rise 3 fall 3'
+            - name: ${_param:cluster_node02_hostname}
+              host: ${_param:cluster_node02_address}
+              port: ${_param:haproxy_elasticsearch_http_exposed_port}
+              params: 'check inter 10s fastinter 2s downinter 3s rise 3 fall 3'
+            - name: ${_param:cluster_node03_hostname}
+              host: ${_param:cluster_node03_address}
+              port: ${_param:haproxy_elasticsearch_http_exposed_port}
+              params: 'check inter 10s fastinter 2s downinter 3s rise 3 fall 3'
+        elasticsearch_binary:
+          mode: tcp
+          options:
+            - tcpka
+            - tcplog
+          balance: source
+          binds:
+            - address: ${_param:cluster_vip_address}
+              port: ${_param:haproxy_elasticsearch_binary_bind_port}
+          servers:
+            - name: ${_param:cluster_node01_hostname}
+              host: ${_param:cluster_node01_address}
+              port: ${_param:haproxy_elasticsearch_binary_exposed_port}
+              params: 'check'
+            - name: ${_param:cluster_node02_hostname}
+              host: ${_param:cluster_node02_address}
+              port: ${_param:haproxy_elasticsearch_binary_exposed_port}
+              params: 'check'
+            - name: ${_param:cluster_node03_hostname}
+              host: ${_param:cluster_node03_address}
+              port: ${_param:haproxy_elasticsearch_binary_exposed_port}
+              params: 'check'
diff --git a/kibana/client/ssl.yml b/kibana/client/ssl.yml
new file mode 100644
index 0000000..76160c6
--- /dev/null
+++ b/kibana/client/ssl.yml
@@ -0,0 +1,5 @@
+parameters:
+  kibana:
+    client:
+      server:
+        scheme: https
diff --git a/kibana/server/single.yml b/kibana/server/single.yml
index 965f274..5c59588 100644
--- a/kibana/server/single.yml
+++ b/kibana/server/single.yml
@@ -13,4 +13,5 @@
         engine: elasticsearch
         host: ${_param:kibana_elasticsearch_host}
         port: 9200
+        scheme: http
 
diff --git a/kibana/server/ssl.yml b/kibana/server/ssl.yml
new file mode 100644
index 0000000..5b049f8
--- /dev/null
+++ b/kibana/server/ssl.yml
@@ -0,0 +1,5 @@
+parameters:
+  kibana:
+    server:
+      database:
+        scheme: https
diff --git a/prometheus/elasticsearch_exporter/queries/compute.yml b/prometheus/elasticsearch_exporter/queries/compute.yml
index 66904da..a17cb7f 100644
--- a/prometheus/elasticsearch_exporter/queries/compute.yml
+++ b/prometheus/elasticsearch_exporter/queries/compute.yml
@@ -27,7 +27,8 @@
                   "aggs": {
                     "host": {
                       "terms": {
-                        "field": "Hostname.keyword"
+                        "field": "Hostname.keyword",
+                        "min_doc_count": 0
                       }
                     }
                   }
@@ -56,7 +57,8 @@
               "aggs": {
                 "host": {
                   "terms": {
-                    "field": "Hostname.keyword"
+                    "field": "Hostname.keyword",
+                    "min_doc_count": 0
                   }
                 }
               }
@@ -83,7 +85,8 @@
               "aggs": {
                 "host": {
                   "terms": {
-                    "field": "Hostname.keyword"
+                    "field": "Hostname.keyword",
+                    "min_doc_count": 0
                   }
                 }
               }
@@ -110,7 +113,8 @@
               "aggs": {
                 "host": {
                   "terms": {
-                    "field": "Hostname.keyword"
+                    "field": "Hostname.keyword",
+                    "min_doc_count": 0
                   }
                 }
               }
diff --git a/prometheus/gainsight/query/openstack.yml b/prometheus/gainsight/query/openstack.yml
index 40a804b..b443712 100644
--- a/prometheus/gainsight/query/openstack.yml
+++ b/prometheus/gainsight/query/openstack.yml
@@ -16,3 +16,10 @@
         keystone_api: "'Keystone API','avg(avg_over_time(openstack_api_check_status{name=\"keystone\"}[24h]))'"
         glance_api: "'Glance API','avg(avg_over_time(openstack_api_check_status{name=\"glance\"}[24h]))'"
         neutron_api: "'Neutron API','avg(avg_over_time(openstack_api_check_status{name=\"neutron\"}[24h]))'"
+        nova_vm_all: "'Total VM number','avg_over_time(total:openstack_nova_instance_all[1d])'"
+        nova_vm_failed: "'Failed VM number','avg_over_time(total:openstack_nova_instance_failed[1d])'"
+        kpi_downtime: "'KPI Downtime','1 - avg_over_time(total:openstack_nova_instance_failed[1d]) / avg_over_time(total:openstack_nova_instance_all[1d])'"
+        compute_instance_create_start: "'VM creation start','sum(compute_instance_create_start_host_doc_count)'"
+        compute_instance_create_end: "'VM creation end','sum(compute_instance_create_end_host_doc_count)'"
+        compute_instance_create_error: "'VM creation error','sum(compute_instance_create_error_host_doc_count)'"
+
diff --git a/salt/minion/cert/elasticsearch.yml b/salt/minion/cert/elasticsearch.yml
new file mode 100644
index 0000000..0ac232d
--- /dev/null
+++ b/salt/minion/cert/elasticsearch.yml
@@ -0,0 +1,16 @@
+parameters:
+  salt:
+    minion:
+      cert:
+        elasticsearch:
+          host: ${_param:salt_minion_ca_host}
+          authority: ${_param:salt_minion_ca_authority}
+          common_name: elasticsearch
+          signing_policy: cert_server
+          alternative_names: IP:127.0.0.1,IP:${_param:single_address},IP:${_param:stacklight_log_address},DNS:${linux:system:name},DNS:${linux:network:fqdn}
+          key_file: /etc/elasticsearch/elasticsearch.key
+          cert_file: /etc/elasticsearch/elasticsearch.crt
+          ca_file: /etc/ssl/certs/ca-${_param:salt_minion_ca_authority}.pem
+          all_file: /etc/elasticsearch/elasticsearch.pem
+          mode: '0444'
+          enabled: true
diff --git a/salt/minion/cert/fluentd_prometheus.yml b/salt/minion/cert/fluentd_prometheus.yml
new file mode 100644
index 0000000..d7f4469
--- /dev/null
+++ b/salt/minion/cert/fluentd_prometheus.yml
@@ -0,0 +1,14 @@
+parameters:
+  salt:
+    minion:
+      cert:
+        fluentd_prometheus:
+          host: ${_param:salt_minion_ca_host}
+          authority: ${_param:salt_minion_ca_authority}
+          common_name: fluentd_prometheus
+          signing_policy: cert_server
+          alternative_names: IP:127.0.0.1,IP:${_param:single_address},DNS:${linux:system:name},DNS:${linux:network:fqdn}
+          key_file: ${fluentd:agent:dir:config}/fluentd-prometheus.key
+          cert_file: ${fluentd:agent:dir:config}/fluentd-prometheus.crt
+          mode: '0444'
+          enabled: true
diff --git a/salt/minion/cert/telegraf_agent.yml b/salt/minion/cert/telegraf_agent.yml
new file mode 100644
index 0000000..d54520c
--- /dev/null
+++ b/salt/minion/cert/telegraf_agent.yml
@@ -0,0 +1,14 @@
+parameters:
+  salt:
+    minion:
+      cert:
+        telegraf_agent:
+          host: ${_param:salt_minion_ca_host}
+          authority: ${_param:salt_minion_ca_authority}
+          common_name: telegraf_agent
+          signing_policy: cert_server
+          alternative_names: IP:127.0.0.1,IP:${_param:single_address},DNS:${linux:system:name},DNS:${linux:network:fqdn}
+          key_file: ${telegraf:agent:dir:config}/telegraf-agent.key
+          cert_file: ${telegraf:agent:dir:config}/telegraf-agent.crt
+          mode: '0444'
+          enabled: true
diff --git a/telegraf/agent/output/prometheus_client_ssl.yml b/telegraf/agent/output/prometheus_client_ssl.yml
new file mode 100644
index 0000000..f59335f
--- /dev/null
+++ b/telegraf/agent/output/prometheus_client_ssl.yml
@@ -0,0 +1,10 @@
+parameters:
+  telegraf:
+    agent:
+      output:
+        prometheus_client:
+          scheme: https
+          tls_cert: ${telegraf:agent:dir:config}/telegraf-agent.crt
+          tls_key: ${telegraf:agent:dir:config}/telegraf-agent.key
+          tls_config:
+            ca_file: /etc/ssl/certs/ca-certificates.crt