Add Elasticsearch 6 support
Remove predefined version from formula
Start using states instead of modules in client.sls
Update test repos to mirror.mirantis.com
Update tests to use ES6
Disable all x-pack features
Change-Id: I7f5a65b0777f2ebce9bd3aae79888d2b00d428b6
Prod-Related: PROD-25122
diff --git a/elasticsearch/client/init.sls b/elasticsearch/client/init.sls
index 18f4ffc..065bde1 100644
--- a/elasticsearch/client/init.sls
+++ b/elasticsearch/client/init.sls
@@ -27,7 +27,7 @@
elasticsearch_index_{{ index_name }}:
{%- if index.get('enabled', False) %}
- {% set operation = 'create' %}
+ {% set operation = 'present' %}
{%- if index.definition is defined %}
{% set definition = index.definition %}
@@ -67,14 +67,8 @@
{%- if index.get('force_operation', False) %}
elasticsearch_index_{{ index_name }}_{{ operation }}:
{% set curdate = None | strftime('%Y.%m.%d') %}
- module.run:
- - name: elasticsearch.index_{{ operation }}
- - index: {{ index_name }}-{{ curdate }}
-{%- else %}
-elasticsearch_index_{{ index_name }}_{{ operation }}:
- module.run:
- - name: elasticsearch.index_{{ operation }}
- - index: {{ index_name }}
+ elasticsearch.index_{{ operation }}:
+ - name: {{ index_name }}-{{ curdate }}
{%- endif %}
{%- endfor %}
diff --git a/elasticsearch/files/v6/elasticsearch b/elasticsearch/files/v6/elasticsearch
new file mode 100644
index 0000000..1836246
--- /dev/null
+++ b/elasticsearch/files/v6/elasticsearch
@@ -0,0 +1,76 @@
+{%- from "elasticsearch/map.jinja" import server with context -%}
+################################
+# Elasticsearch
+################################
+
+# Elasticsearch home directory
+#ES_HOME=/usr/share/elasticsearch
+
+# Elasticsearch configuration directory
+#CONF_DIR=/etc/elasticsearch
+
+# Elasticsearch data directory
+#DATA_DIR=/var/lib/elasticsearch
+
+# Elasticsearch logs directory
+#LOG_DIR=/var/log/elasticsearch
+
+# Elasticsearch PID directory
+#PID_DIR=/var/run/elasticsearch
+
+# Heap size defaults to 256m min, 1g max
+# Set ES_HEAP_SIZE to 50% of available RAM, but no more than 31g
+#ES_HEAP_SIZE={{ server.get('heap', {}).get('size', grains.get('mem_total', 1024)/2/1024)|round(0, 'ceil')|int }}g
+
+# Heap new generation
+#ES_HEAP_NEWSIZE=
+
+# Maximum direct memory
+#ES_DIRECT_SIZE=
+
+# Additional Java OPTS
+#ES_JAVA_OPTS=
+
+# Configure restart on package upgrade (true, every other setting will lead to not restarting)
+#ES_RESTART_ON_UPGRADE=true
+
+# Path to the GC log file
+#ES_GC_LOG_FILE=/var/log/elasticsearch/gc.log
+
+################################
+# Elasticsearch service
+################################
+
+# SysV init.d
+#
+# When executing the init script, this user will be used to run the elasticsearch service.
+# The default value is 'elasticsearch' and is declared in the init.d file.
+# Note that this setting is only used by the init script. If changed, make sure that
+# the configured user can read and write into the data, work, plugins and log directories.
+# For systemd service, the user is usually configured in file /usr/lib/systemd/system/elasticsearch.service
+#ES_USER=elasticsearch
+#ES_GROUP=elasticsearch
+
+# The number of seconds to wait before checking if Elasticsearch started successfully as a daemon process
+ES_STARTUP_SLEEP_TIME=5
+
+################################
+# System properties
+################################
+
+# Specifies the maximum file descriptor number that can be opened by this process
+# When using Systemd, this setting is ignored and the LimitNOFILE defined in
+# /usr/lib/systemd/system/elasticsearch.service takes precedence
+MAX_OPEN_FILES=131070
+
+# The maximum number of bytes of memory that may be locked into RAM
+# Set to "unlimited" if you use the 'bootstrap.memory_lock: true' option
+# in elasticsearch.yml (ES_HEAP_SIZE must also be set).
+# When using Systemd, the LimitMEMLOCK property must be set
+# in /usr/lib/systemd/system/elasticsearch.service
+MAX_LOCKED_MEMORY=unlimited
+
+# Maximum number of VMA (Virtual Memory Areas) a process can own
+# When using Systemd, this setting is ignored and the 'vm.max_map_count'
+# property is set at boot time in /usr/lib/sysctl.d/elasticsearch.conf
+#MAX_MAP_COUNT=262144
diff --git a/elasticsearch/files/v6/elasticsearch.yml b/elasticsearch/files/v6/elasticsearch.yml
new file mode 100644
index 0000000..7f70b42
--- /dev/null
+++ b/elasticsearch/files/v6/elasticsearch.yml
@@ -0,0 +1,472 @@
+{%- from "elasticsearch/map.jinja" import server with context %}
+
+##################### Elasticsearch Configuration Example #####################
+
+# This file contains an overview of various configuration settings,
+# targeted at operations staff. Application developers should
+# consult the guide at <http://elasticsearch.org/guide>.
+#
+# The installation procedure is covered at
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup.html>.
+#
+# Elasticsearch comes with reasonable defaults for most settings,
+# so you can try it out without bothering with configuration.
+#
+# Most of the time, these defaults are just fine for running a production
+# cluster. If you're fine-tuning your cluster, or wondering about the
+# effect of certain configuration options, please _do ask_ on the
+# mailing list or IRC channel [http://elasticsearch.org/community].
+
+# Any element in the configuration can be replaced with environment variables
+# by placing them in ${...} notation. For example:
+#
+# node.rack: ${RACK_ENV_VAR}
+
+# For information on supported formats and syntax for the config file, see
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup-configuration.html>
+
+
+################################### Cluster ###################################
+
+# Cluster name identifies your cluster for auto-discovery. If you're running
+# multiple clusters on the same network, make sure you're using unique names.
+#
+# cluster.name: elasticsearch
+{%- if server.get('cluster', {}).name is defined %}
+cluster.name: {{ server.cluster.name }}
+{% endif %}
+#################################### Node #####################################
+
+# Node names are generated dynamically on startup, so you're relieved
+# from configuring them manually. You can tie this node to a specific name:
+#
+# node.name: "Franz Kafka"
+node.name: {{ server.get('name', '${HOSTNAME}') }}
+
+# Every node can be configured to allow or deny being eligible as the master,
+# and to allow or deny to store the data.
+#
+# Allow this node to be eligible as a master node (enabled by default):
+#
+node.master: {{ server.get('master', True)|lower }}
+#
+# Allow this node to store data (enabled by default):
+#
+node.data: {{ server.get('data', True)|lower }}
+node.ingest: {{ server.get('ingest', True)|lower }}
+
+{%- if server.attrs is defined %}
+  {%- for name, values in server.attrs.items() %}
+  {%- if values.get('enabled', True) %}
+node.attr.{{ name }}: {{ values.get('value', True) | lower }}
+  {%- endif %}
+  {%- endfor %}
+{%- endif %}
+# You can exploit these settings to design advanced cluster topologies.
+#
+# 1. You want this node to never become a master node, only to hold data.
+# This will be the "workhorse" of your cluster.
+#
+# node.master: false
+# node.data: true
+#
+# 2. You want this node to only serve as a master: to not store any data and
+# to have free resources. This will be the "coordinator" of your cluster.
+#
+# node.master: true
+# node.data: false
+#
+# 3. You want this node to be neither master nor data node, but
+# to act as a "search load balancer" (fetching data from nodes,
+# aggregating results, etc.)
+#
+# node.master: false
+# node.data: false
+
+# Use the Cluster Health API [http://localhost:9200/_cluster/health], the
+# Node Info API [http://localhost:9200/_nodes] or GUI tools
+# such as <http://www.elasticsearch.org/overview/marvel/>,
+# <http://github.com/karmi/elasticsearch-paramedic>,
+# <http://github.com/lukas-vlcek/bigdesk> and
+# <http://mobz.github.com/elasticsearch-head> to inspect the cluster state.
+
+# A node can have generic attributes associated with it, which can later be used
+# for customized shard allocation filtering, or allocation awareness. An attribute
+# is a simple key value pair, similar to node.key: value, here is an example:
+#
+# node.rack: rack314
+{%- if server.rack is defined %}
+node.rack: {{ server.rack }}
+{%- endif %}
+
+# By default, multiple nodes are allowed to start from the same installation location
+# to disable it, set the following:
+# node.max_local_storage_nodes: 1
+
+{%- if server.get('threadpool', {}).get('bulk', {}).queue_size is defined %}
+# For bulk operations. Thread pool type is fixed with a size of # of available processors.
+thread_pool.bulk.queue_size: {{ server.threadpool.bulk.queue_size }}
+{%- endif %}
+
+############################### Remote Search #################################
+
+{%- if server.get("search", {}).get("remote", {}).connect is defined %}
+search.remote.connect: {{ server.search.remote.connect | lower }}
+{%- endif %}
+
+{%- if server.get("search", {}).get("remote", {}).attr is defined %}
+search.remote.node.attr: {{ server.search.remote.attr }}
+{%- endif %}
+
+{%- if server.get("search", {}).get("remote", {}).clusters is defined %}
+  {%- for name, values in server.search.remote.clusters.items() %}
+  {%- if values.get("enabled", True) %}
+search.remote.{{ name }}.seeds: {{ values.seeds | json }}
+  {%- endif %}
+  {%- endfor %}
+{%- endif %}
+
+#################################### Index ####################################
+
+# You can set a number of options (such as shard/replica options, mapping
+# or analyzer definitions, translog settings, ...) for indices globally,
+# in this file.
+#
+# Note, that it makes more sense to configure index settings specifically for
+# a certain index, either when creating it or by using the index templates API.
+#
+# See <http://elasticsearch.org/guide/en/elasticsearch/reference/current/index-modules.html> and
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/indices-create-index.html>
+# for more information.
+
+# Set the number of shards (splits) of an index (5 by default):
+#
+#index.number_of_shards: {{ server.get('index', {}).get('shards', 5) }}
+
+# Set the number of replicas (additional copies) of an index (1 by default):
+#
+#index.number_of_replicas: {{ server.get('index', {}).get('replicas', 1) }}
+
+# Note, that for development on a local machine, with small indices, it usually
+# makes sense to "disable" the distributed features:
+#
+# index.number_of_shards: 1
+# index.number_of_replicas: 0
+
+# These settings directly affect the performance of index and search operations
+# in your cluster. Assuming you have enough machines to hold shards and
+# replicas, the rule of thumb is:
+#
+# 1. Having more *shards* enhances the _indexing_ performance and allows to
+# _distribute_ a big index across machines.
+# 2. Having more *replicas* enhances the _search_ performance and improves the
+# cluster _availability_.
+#
+# The "number_of_shards" is a one-time setting for an index.
+#
+# The "number_of_replicas" can be increased or decreased anytime,
+# by using the Index Update Settings API.
+#
+# Elasticsearch takes care about load balancing, relocating, gathering the
+# results from nodes, etc. Experiment with different settings to fine-tune
+# your setup.
+
+# Use the Index Status API (<http://localhost:9200/A/_status>) to inspect
+# the index status.
+
+
+#################################### Paths ####################################
+
+# Path to directory containing configuration (this file and log4j2.properties):
+#
+# path.conf: /path/to/conf
+
+# Path to directory where to store index data allocated for this node.
+#
+# path.data: /path/to/data
+path.data: {{ server.get('path', {}).get('data', '/var/lib/elasticsearch') }}
+#
+# Can optionally include more than one location, causing data to be striped across
+# the locations (a la RAID 0) on a file level, favouring locations with most free
+# space on creation. For example:
+#
+# path.data: /path/to/data1,/path/to/data2
+
+# Path to temporary files:
+#
+# path.work: /path/to/work
+
+# Path to log files:
+#
+# path.logs: /path/to/logs
+path.logs: {{ server.get('path', {}).get('logs', '/var/log/elasticsearch') }}
+
+# Path to where plugins are installed:
+#
+# path.plugins: /path/to/plugins
+#
+{%- if server.snapshot is defined %}
+path.repo:
+  {%- for repo_name, repo in server.snapshot.items() %}
+  - {{ repo.path }}
+  {%- endfor %}
+{%- endif %}
+
+
+#################################### Plugin ###################################
+
+# If a plugin listed here is not installed for current node, the node will not start.
+#
+# plugin.mandatory: mapper-attachments,lang-groovy
+
+
+################################### Memory ####################################
+
+# Elasticsearch performs poorly when JVM starts swapping: you should ensure that
+# it _never_ swaps.
+#
+# Set this property to true to lock the memory:
+#
+# bootstrap.memory_lock: true
+{%- if server.mlockall is defined %}
+bootstrap.memory_lock: {{ server.mlockall|lower }}
+{%- endif %}
+
+# Make sure that the ES_MIN_MEM and ES_MAX_MEM environment variables are set
+# to the same value, and that the machine has enough memory to allocate
+# for Elasticsearch, leaving enough memory for the operating system itself.
+#
+# You should also make sure that the Elasticsearch process is allowed to lock
+# the memory, eg. by using `ulimit -l unlimited`.
+
+
+############################## Network And HTTP ###############################
+
+# Elasticsearch, by default, binds itself to the 0.0.0.0 address, and listens
+# on port [9200-9300] for HTTP traffic and on port [9300-9400] for node-to-node
+# communication. (the range means that if the port is busy, it will automatically
+# try the next port).
+
+# Set both 'bind_host' and 'publish_host':
+#
+# network.host: 192.168.0.1
+
+{%- if server.get('bind', {}).address is defined %}
+network.host: {{ server.bind.address }}
+{%- endif %}
+
+# Set specifically the address other nodes will use to communicate with this
+# node. If not set, it is automatically derived. It must point to an actual
+# IP address.
+{%- if server.publish_host is defined %}
+network.publish_host: {{ server.publish_host }}
+{%- endif %}
+
+
+# Set a custom port for the node to node communication (9300 by default):
+#
+# transport.tcp.port: 9300
+{%- if server.get("transport", {}).get("tcp", {}).port is defined %}
+transport.tcp.port: {{ server.transport.tcp.port }}
+{%- endif %}
+
+# Enable compression for all communication between nodes (disabled by default):
+#
+# transport.tcp.compress: true
+{%- if server.get("transport", {}).get("tcp", {}).compress is defined %}
+transport.tcp.compress: {{ server.transport.tcp.compress | lower }}
+{%- endif %}
+
+# Set a custom port to listen for HTTP traffic:
+#
+# http.port: 9200
+{%- if server.get('bind', {}).port is defined %}
+http.port: {{ server.bind.port }}
+{%- endif %}
+
+# Set a custom allowed content length:
+#
+# http.max_content_length: 100mb
+
+# Enable or disable cross-origin resource sharing
+{%- if server.get('cors', {}).enabled is defined %}
+http.cors.enabled: {{ server.cors.enabled|lower }}
+{%- endif %}
+
+# Which origins to allow.
+{%- if server.get('cors', {}).allow_origin is defined %}
+http.cors.allow-origin: {{ server.cors.allow_origin }}
+{%- endif %}
+
+# Disable HTTP completely:
+#
+# http.enabled: false
+http.enabled: true
+
+
+################################### Gateway ###################################
+
+# The gateway allows for persisting the cluster state between full cluster
+# restarts. Every change to the state (such as adding an index) will be stored
+# in the gateway, and when the cluster starts up for the first time,
+# it will read its state from the gateway.
+
+# There are several types of gateway implementations. For more information, see
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-gateway.html>.
+
+# The default gateway type is the "local" gateway (recommended):
+#
+# gateway.type: local
+
+# Settings below control how and when to start the initial recovery process on
+# a full cluster restart (to reuse as much local data as possible when using shared
+# gateway).
+
+# Allow recovery process after N nodes in a cluster are up:
+#
+# gateway.recover_after_nodes: 1
+{%- if server.get('gateway', {}).recover_after_nodes is defined %}
+gateway.recover_after_nodes: {{ server.gateway.recover_after_nodes }}
+{%- endif %}
+
+# Set the timeout to initiate the recovery process, once the N nodes
+# from previous setting are up (accepts time value):
+#
+# gateway.recover_after_time: 5m
+{%- if server.get('gateway', {}).recover_after_time is defined %}
+gateway.recover_after_time: {{ server.gateway.recover_after_time }}
+{%- endif %}
+
+# Set how many nodes are expected in this cluster. Once these N nodes
+# are up (and recover_after_nodes is met), begin recovery process immediately
+# (without waiting for recover_after_time to expire):
+#
+# gateway.expected_nodes: 2
+{%- if server.get('gateway', {}).expected_nodes is defined %}
+gateway.expected_nodes: {{ server.gateway.expected_nodes }}
+{%- endif %}
+
+
+############################# Recovery Throttling #############################
+
+# These settings allow to control the process of shards allocation between
+# nodes during initial recovery, replica allocation, rebalancing,
+# or when adding and removing nodes.
+
+# Set the number of concurrent recoveries happening on a node:
+#
+# 1. During the initial recovery
+#
+# cluster.routing.allocation.node_initial_primaries_recoveries: 4
+#
+# 2. During adding/removing nodes, rebalancing, etc
+#
+# cluster.routing.allocation.node_concurrent_recoveries: 2
+
+# Set to throttle throughput when recovering (eg. 100mb, by default 20mb):
+#
+# indices.recovery.max_bytes_per_sec: 20mb
+
+# Set to limit the number of open concurrent streams when
+# recovering a shard from a peer:
+#
+# indices.recovery.concurrent_streams: 5
+
+
+################################## Discovery ##################################
+
+# Discovery infrastructure ensures nodes can be found within a cluster
+# and master node is elected. Multicast discovery is the default.
+
+# Set to ensure a node sees N other master eligible nodes to be considered
+# operational within the cluster. Its recommended to set it to a higher value
+# than 1 when running more than 2 nodes in the cluster.
+#
+discovery.zen.minimum_master_nodes: {{ server.get('cluster', {}).minimum_master_nodes|default(1) }}
+
+# Set the time to wait for ping responses from other nodes when discovering.
+# Set this option to a higher value on a slow or congested network
+# to minimize discovery failures:
+#
+# discovery.zen.ping.timeout: 3s
+
+# For more information, see
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-zen.html>
+
+# Unicast discovery allows to explicitly control which nodes will be used
+# to discover the cluster. It can be used when multicast is not present,
+# or to restrict the cluster communication-wise.
+#
+# 1. Disable multicast discovery (enabled by default):
+#
+#discovery.zen.ping.multicast.enabled: {{ server.get('cluster', {}).get('multicast', True)|lower }}
+#
+# 2. Configure an initial list of master nodes in the cluster
+# to perform discovery when new nodes (master or data) are started:
+#
+{%- if server.get('cluster', {}).members is defined %}
+discovery.zen.ping.unicast.hosts: [{% for member in server.cluster.members %}"{{ member.host }}:{{ member.get('port', 9300) }}"{% if not loop.last %}, {% endif %}{% endfor %}]
+{%- endif %}
+
+# EC2 discovery allows to use AWS EC2 API in order to perform discovery.
+#
+# You have to install the cloud-aws plugin for enabling the EC2 discovery.
+#
+# For more information, see
+# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-ec2.html>
+#
+# See <http://elasticsearch.org/tutorials/elasticsearch-on-ec2/>
+# for a step-by-step tutorial.
+
+# GCE discovery allows to use Google Compute Engine API in order to perform discovery.
+#
+# You have to install the cloud-gce plugin for enabling the GCE discovery.
+#
+# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-gce>.
+
+# Azure discovery allows to use Azure API in order to perform discovery.
+#
+# You have to install the cloud-azure plugin for enabling the Azure discovery.
+#
+# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-azure>.
+
+################################## Slow Log ##################################
+
+# Shard level query and fetch threshold logging.
+
+#index.search.slowlog.threshold.query.warn: 10s
+#index.search.slowlog.threshold.query.info: 5s
+#index.search.slowlog.threshold.query.debug: 2s
+#index.search.slowlog.threshold.query.trace: 500ms
+
+#index.search.slowlog.threshold.fetch.warn: 1s
+#index.search.slowlog.threshold.fetch.info: 800ms
+#index.search.slowlog.threshold.fetch.debug: 500ms
+#index.search.slowlog.threshold.fetch.trace: 200ms
+
+#index.indexing.slowlog.threshold.index.warn: 10s
+#index.indexing.slowlog.threshold.index.info: 5s
+#index.indexing.slowlog.threshold.index.debug: 2s
+#index.indexing.slowlog.threshold.index.trace: 500ms
+
+################################## GC Logging ################################
+
+#monitor.jvm.gc.young.warn: 1000ms
+#monitor.jvm.gc.young.info: 700ms
+#monitor.jvm.gc.young.debug: 400ms
+
+#monitor.jvm.gc.old.warn: 10s
+#monitor.jvm.gc.old.info: 5s
+#monitor.jvm.gc.old.debug: 2s
+
+{%- if server.script is mapping %}
+script: {{ server.script|yaml }}
+{%- endif %}
+
+################################### X-Pack ###################################
+
+xpack.ml.enabled: false
+xpack.monitoring.enabled: false
+xpack.security.enabled: false
+xpack.security.audit.enabled: false
+xpack.watcher.enabled: false
diff --git a/elasticsearch/files/v6/es_template_audit.json b/elasticsearch/files/v6/es_template_audit.json
new file mode 100644
index 0000000..3d6b0f3
--- /dev/null
+++ b/elasticsearch/files/v6/es_template_audit.json
@@ -0,0 +1,125 @@
+{%- from "elasticsearch/map.jinja" import server with context %}
+{
+ "settings" : {
+ "number_of_shards" : {{ server.get('index', {}).get('shards', 5) }},
+ "number_of_replicas" : {{ server.get('index', {}).get('replicas', 1) }}
+ },
+ "mappings": {
+ "message": {
+ "properties": {
+ "Payload": {
+ "type": "text"
+ },
+ "Logger": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "Hostname": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "Pid": {
+ "index": false,
+ "type": "long"
+ },
+ "Severity": {
+ "index": false,
+ "type": "long"
+ },
+ "Type": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "severity_label": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "environment_label": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "region": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "action": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "event_type": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "outcome": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "notification_type": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ }
+ }
+ }
+ },
+ "template": "audit-*"
+}
diff --git a/elasticsearch/files/v6/es_template_kibana.json b/elasticsearch/files/v6/es_template_kibana.json
new file mode 100644
index 0000000..d41d4bc
--- /dev/null
+++ b/elasticsearch/files/v6/es_template_kibana.json
@@ -0,0 +1,266 @@
+{%- from "elasticsearch/map.jinja" import server with context %}
+{
+ "settings" : {
+ "number_of_shards" : {{ server.get('index', {}).get('shards', 5) }},
+ "number_of_replicas" : {{ server.get('index', {}).get('replicas', 1) }}
+ },
+ "mappings" : {
+ "doc": {
+ "properties": {
+ "type": {
+ "type": "keyword"
+ },
+ "updated_at": {
+ "type": "date"
+ },
+ "config": {
+ "properties": {
+ "buildNum": {
+ "type": "keyword"
+ }
+ }
+ },
+ "index-pattern": {
+ "properties": {
+ "fieldFormatMap": {
+ "type": "text"
+ },
+ "fields": {
+ "type": "text"
+ },
+ "intervalName": {
+ "type": "keyword"
+ },
+ "notExpandable": {
+ "type": "boolean"
+ },
+ "sourceFilters": {
+ "type": "text"
+ },
+ "timeFieldName": {
+ "type": "keyword"
+ },
+ "title": {
+ "type": "text"
+ }
+ }
+ },
+ "visualization": {
+ "properties": {
+ "description": {
+ "type": "text"
+ },
+ "kibanaSavedObjectMeta": {
+ "properties": {
+ "searchSourceJSON": {
+ "type": "text"
+ }
+ }
+ },
+ "savedSearchId": {
+ "type": "keyword"
+ },
+ "title": {
+ "type": "text"
+ },
+ "uiStateJSON": {
+ "type": "text"
+ },
+ "version": {
+ "type": "integer"
+ },
+ "visState": {
+ "type": "text"
+ }
+ }
+ },
+ "search": {
+ "properties": {
+ "columns": {
+ "type": "keyword"
+ },
+ "description": {
+ "type": "text"
+ },
+ "hits": {
+ "type": "integer"
+ },
+ "kibanaSavedObjectMeta": {
+ "properties": {
+ "searchSourceJSON": {
+ "type": "text"
+ }
+ }
+ },
+ "sort": {
+ "type": "keyword"
+ },
+ "title": {
+ "type": "text"
+ },
+ "version": {
+ "type": "integer"
+ }
+ }
+ },
+ "dashboard": {
+ "properties": {
+ "description": {
+ "type": "text"
+ },
+ "hits": {
+ "type": "integer"
+ },
+ "kibanaSavedObjectMeta": {
+ "properties": {
+ "searchSourceJSON": {
+ "type": "text"
+ }
+ }
+ },
+ "optionsJSON": {
+ "type": "text"
+ },
+ "panelsJSON": {
+ "type": "text"
+ },
+ "refreshInterval": {
+ "properties": {
+ "display": {
+ "type": "keyword"
+ },
+ "pause": {
+ "type": "boolean"
+ },
+ "section": {
+ "type": "integer"
+ },
+ "value": {
+ "type": "integer"
+ }
+ }
+ },
+ "timeFrom": {
+ "type": "keyword"
+ },
+ "timeRestore": {
+ "type": "boolean"
+ },
+ "timeTo": {
+ "type": "keyword"
+ },
+ "title": {
+ "type": "text"
+ },
+ "uiStateJSON": {
+ "type": "text"
+ },
+ "version": {
+ "type": "integer"
+ }
+ }
+ },
+ "url": {
+ "properties": {
+ "accessCount": {
+ "type": "long"
+ },
+ "accessDate": {
+ "type": "date"
+ },
+ "createDate": {
+ "type": "date"
+ },
+ "url": {
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 2048
+ }
+ }
+ }
+ }
+ },
+ "server": {
+ "properties": {
+ "uuid": {
+ "type": "keyword"
+ }
+ }
+ },
+ "timelion-sheet": {
+ "properties": {
+ "description": {
+ "type": "text"
+ },
+ "hits": {
+ "type": "integer"
+ },
+ "kibanaSavedObjectMeta": {
+ "properties": {
+ "searchSourceJSON": {
+ "type": "text"
+ }
+ }
+ },
+ "timelion_chart_height": {
+ "type": "integer"
+ },
+ "timelion_columns": {
+ "type": "integer"
+ },
+ "timelion_interval": {
+ "type": "keyword"
+ },
+ "timelion_other_interval": {
+ "type": "keyword"
+ },
+ "timelion_rows": {
+ "type": "integer"
+ },
+ "timelion_sheet": {
+ "type": "text"
+ },
+ "title": {
+ "type": "text"
+ },
+ "version": {
+ "type": "integer"
+ }
+ }
+ },
+ "graph-workspace": {
+ "properties": {
+ "description": {
+ "type": "text"
+ },
+ "kibanaSavedObjectMeta": {
+ "properties": {
+ "searchSourceJSON": {
+ "type": "text"
+ }
+ }
+ },
+ "numLinks": {
+ "type": "integer"
+ },
+ "numVertices": {
+ "type": "integer"
+ },
+ "title": {
+ "type": "text"
+ },
+ "version": {
+ "type": "integer"
+ },
+ "wsState": {
+ "type": "text"
+ }
+ }
+ }
+ }
+ }
+ },
+ "template": ".kibana"
+}
diff --git a/elasticsearch/files/v6/es_template_log.json b/elasticsearch/files/v6/es_template_log.json
new file mode 100644
index 0000000..a8fff7e
--- /dev/null
+++ b/elasticsearch/files/v6/es_template_log.json
@@ -0,0 +1,228 @@
+{%- from "elasticsearch/map.jinja" import server with context %}
+{
+ "settings" : {
+ "number_of_shards" : {{ server.get('index', {}).get('shards', 5) }},
+ "number_of_replicas" : {{ server.get('index', {}).get('replicas', 1) }}
+ },
+ "mappings": {
+ "message": {
+ "properties": {
+ "Logger": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "Hostname": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "Pid": {
+ "index": false,
+ "type": "long"
+ },
+ "Severity": {
+ "index": false,
+ "type": "long"
+ },
+ "Type": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "programname": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "python_module": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "severity_label": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "environment_label": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "region": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "request_id": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "tenant_id": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "user_id": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "instance_id": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "http_response_time": {
+ "type": "float"
+ },
+ "http_status": {
+ "type": "integer"
+ },
+ "http_method": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "http_url": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "http_client_ip_address": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "remote_addr": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "user_agent_browser": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "http_referer": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "user_agent_os": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "http_version": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ }
+ }
+ }
+ },
+ "template": "log-*"
+}
diff --git a/elasticsearch/files/v6/es_template_notification.json b/elasticsearch/files/v6/es_template_notification.json
new file mode 100644
index 0000000..ad463a9
--- /dev/null
+++ b/elasticsearch/files/v6/es_template_notification.json
@@ -0,0 +1,163 @@
+{%- from "elasticsearch/map.jinja" import server with context %}
+{
+ "settings" : {
+ "number_of_shards" : {{ server.get('index', {}).get('shards', 5) }},
+ "number_of_replicas" : {{ server.get('index', {}).get('replicas', 1) }}
+ },
+ "mappings": {
+ "message": {
+ "properties": {
+ "event_type": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "Payload": {
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "Logger": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "Hostname": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "hostname": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "Pid": {
+ "index": false,
+ "type": "long"
+ },
+ "Severity": {
+ "index": false,
+ "type": "long"
+ },
+ "Timestamp": {
+ "type": "date",
+ "format": "date_optional_time"
+ },
+ "Type": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "severity_label": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "environment_label": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "region": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "request_id": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "tenant_id": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "user_id": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ },
+ "launched_at": {
+ "type": "date",
+ "format": "date_optional_time"
+ },
+ "created_at": {
+ "type": "date",
+ "format": "date_optional_time"
+ },
+ "publisher": {
+ "index": false,
+ "type": "text",
+ "fields": {
+ "keyword": {
+ "type": "keyword",
+ "ignore_above": 256
+ }
+ }
+ }
+ }
+ }
+ },
+ "index_patterns": ["notification-*"]
+}
diff --git a/elasticsearch/files/v6/jvm.options b/elasticsearch/files/v6/jvm.options
new file mode 100644
index 0000000..f0dd096
--- /dev/null
+++ b/elasticsearch/files/v6/jvm.options
@@ -0,0 +1,112 @@
+{%- from "elasticsearch/map.jinja" import server with context -%}
+## JVM configuration
+
+################################################################
+## IMPORTANT: JVM heap size
+################################################################
+##
+## You should always set the min and max JVM heap
+## size to the same value. For example, to set
+## the heap to 4 GB, set:
+##
+## -Xms4g
+## -Xmx4g
+##
+## See https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html
+## for more information
+##
+################################################################
+
+# Xms represents the initial size of total heap space
+# Xmx represents the maximum size of total heap space
+
+-Xms{{ server.get('heap', {}).get('size', grains.get('mem_total', 1024)/2/1024)|round(0, 'ceil')|int }}g
+-Xmx{{ server.get('heap', {}).get('size', grains.get('mem_total', 1024)/2/1024)|round(0, 'ceil')|int }}g
+
+################################################################
+## Expert settings
+################################################################
+##
+## All settings below this section are considered
+## expert settings. Don't tamper with them unless
+## you understand what you are doing
+##
+################################################################
+
+## GC configuration
+-XX:+UseConcMarkSweepGC
+-XX:CMSInitiatingOccupancyFraction=75
+-XX:+UseCMSInitiatingOccupancyOnly
+
+## optimizations
+
+# pre-touch memory pages used by the JVM during initialization
+-XX:+AlwaysPreTouch
+
+## basic
+
+# force the server VM (remove on 32-bit client JVMs)
+-server
+
+# explicitly set the stack size (reduce to 320k on 32-bit client JVMs)
+-Xss1m
+
+# set to headless, just in case
+-Djava.awt.headless=true
+
+# ensure UTF-8 encoding by default (e.g. filenames)
+-Dfile.encoding=UTF-8
+
+# use our provided JNA always versus the system one
+-Djna.nosys=true
+
+# use old-style file permissions on JDK9
+-Djdk.io.permissionsUseCanonicalPath=true
+
+# flags to configure Netty
+-Dio.netty.noUnsafe=true
+-Dio.netty.noKeySetOptimization=true
+-Dio.netty.recycler.maxCapacityPerThread=0
+
+# log4j 2
+-Dlog4j.shutdownHookEnabled=false
+-Dlog4j2.disable.jmx=true
+-Dlog4j.skipJansi=true
+
+## heap dumps
+
+# generate a heap dump when an allocation from the Java heap fails
+# heap dumps are created in the working directory of the JVM
+-XX:+HeapDumpOnOutOfMemoryError
+
+# specify an alternative path for heap dumps
+# ensure the directory exists and has sufficient space
+#-XX:HeapDumpPath=${heap.dump.path}
+
+## GC logging
+
+#-XX:+PrintGCDetails
+#-XX:+PrintGCTimeStamps
+#-XX:+PrintGCDateStamps
+#-XX:+PrintClassHistogram
+#-XX:+PrintTenuringDistribution
+#-XX:+PrintGCApplicationStoppedTime
+
+# log GC status to a file with time stamps
+# ensure the directory exists
+#-Xloggc:${loggc}
+
+# By default, the GC log file will not rotate.
+# By uncommenting the lines below, the GC log file
+# will be rotated every 128MB at most 32 times.
+#-XX:+UseGCLogFileRotation
+#-XX:NumberOfGCLogFiles=32
+#-XX:GCLogFileSize=128M
+
+# Elasticsearch 5.0.0 will throw an exception on unquoted field names in JSON.
+# If documents were already indexed with unquoted fields in a previous version
+# of Elasticsearch, some operations may throw errors.
+#
+# WARNING: this option was removed in Elasticsearch 6.0.0 and has no
+# effect there; kept only as a commented reference for migrations.
+#-Delasticsearch.json.allow_unquoted_field_names=true
diff --git a/elasticsearch/files/v6/log4j2.properties b/elasticsearch/files/v6/log4j2.properties
new file mode 100644
index 0000000..dbb3a3a
--- /dev/null
+++ b/elasticsearch/files/v6/log4j2.properties
@@ -0,0 +1,184 @@
+{%- raw -%}
+status = error
+
+# log action execution errors for easier debugging
+logger.action.name = org.elasticsearch.action
+logger.action.level = debug
+
+appender.console.type = Console
+appender.console.name = console
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n
+
+appender.rolling.type = RollingFile
+appender.rolling.name = rolling
+appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log
+appender.rolling.layout.type = PatternLayout
+appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.-10000m%n
+appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}.log.gz
+appender.rolling.policies.type = Policies
+appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.rolling.policies.time.interval = 1
+appender.rolling.policies.time.modulate = true
+appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
+appender.rolling.policies.size.size = 128MB
+appender.rolling.strategy.type = DefaultRolloverStrategy
+appender.rolling.strategy.fileIndex = nomax
+appender.rolling.strategy.action.type = Delete
+appender.rolling.strategy.action.basepath = ${sys:es.logs.base_path}
+appender.rolling.strategy.action.condition.type = IfFileName
+appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-*
+appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize
+appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB
+
+rootLogger.level = info
+rootLogger.appenderRef.console.ref = console
+rootLogger.appenderRef.rolling.ref = rolling
+
+appender.deprecation_rolling.type = RollingFile
+appender.deprecation_rolling.name = deprecation_rolling
+appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log
+appender.deprecation_rolling.layout.type = PatternLayout
+appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.-10000m%n
+appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.log.gz
+appender.deprecation_rolling.policies.type = Policies
+appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
+appender.deprecation_rolling.policies.size.size = 1GB
+appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy
+appender.deprecation_rolling.strategy.max = 4
+
+logger.deprecation.name = org.elasticsearch.deprecation
+logger.deprecation.level = info
+logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
+logger.deprecation.additivity = false
+
+appender.index_search_slowlog_rolling.type = RollingFile
+appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
+appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog.log
+appender.index_search_slowlog_rolling.layout.type = PatternLayout
+appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.-10000m%n
+appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog-%d{yyyy-MM-dd}.log.gz
+appender.index_search_slowlog_rolling.policies.type = Policies
+appender.index_search_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.index_search_slowlog_rolling.policies.time.interval = 1
+appender.index_search_slowlog_rolling.policies.time.modulate = true
+
+logger.index_search_slowlog_rolling.name = index.search.slowlog
+logger.index_search_slowlog_rolling.level = trace
+logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling
+logger.index_search_slowlog_rolling.additivity = false
+
+appender.index_indexing_slowlog_rolling.type = RollingFile
+appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
+appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog.log
+appender.index_indexing_slowlog_rolling.layout.type = PatternLayout
+appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.-10000m%n
+appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog-%d{yyyy-MM-dd}.log.gz
+appender.index_indexing_slowlog_rolling.policies.type = Policies
+appender.index_indexing_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.index_indexing_slowlog_rolling.policies.time.interval = 1
+appender.index_indexing_slowlog_rolling.policies.time.modulate = true
+
+logger.index_indexing_slowlog.name = index.indexing.slowlog.index
+logger.index_indexing_slowlog.level = trace
+logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
+logger.index_indexing_slowlog.additivity = false
+
+
+appender.audit_rolling.type = RollingFile
+appender.audit_rolling.name = audit_rolling
+appender.audit_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_audit.log
+appender.audit_rolling.layout.type = PatternLayout
+appender.audit_rolling.layout.pattern = {\
+ "@timestamp":"%d{ISO8601}"\
+ %varsNotEmpty{, "node.name":"%enc{%map{node.name}}{JSON}"}\
+ %varsNotEmpty{, "node.id":"%enc{%map{node.id}}{JSON}"}\
+ %varsNotEmpty{, "host.name":"%enc{%map{host.name}}{JSON}"}\
+ %varsNotEmpty{, "host.ip":"%enc{%map{host.ip}}{JSON}"}\
+ %varsNotEmpty{, "event.type":"%enc{%map{event.type}}{JSON}"}\
+ %varsNotEmpty{, "event.action":"%enc{%map{event.action}}{JSON}"}\
+ %varsNotEmpty{, "user.name":"%enc{%map{user.name}}{JSON}"}\
+ %varsNotEmpty{, "user.run_by.name":"%enc{%map{user.run_by.name}}{JSON}"}\
+ %varsNotEmpty{, "user.run_as.name":"%enc{%map{user.run_as.name}}{JSON}"}\
+ %varsNotEmpty{, "user.realm":"%enc{%map{user.realm}}{JSON}"}\
+ %varsNotEmpty{, "user.run_by.realm":"%enc{%map{user.run_by.realm}}{JSON}"}\
+ %varsNotEmpty{, "user.run_as.realm":"%enc{%map{user.run_as.realm}}{JSON}"}\
+ %varsNotEmpty{, "user.roles":%map{user.roles}}\
+ %varsNotEmpty{, "origin.type":"%enc{%map{origin.type}}{JSON}"}\
+ %varsNotEmpty{, "origin.address":"%enc{%map{origin.address}}{JSON}"}\
+ %varsNotEmpty{, "realm":"%enc{%map{realm}}{JSON}"}\
+ %varsNotEmpty{, "url.path":"%enc{%map{url.path}}{JSON}"}\
+ %varsNotEmpty{, "url.query":"%enc{%map{url.query}}{JSON}"}\
+ %varsNotEmpty{, "request.body":"%enc{%map{request.body}}{JSON}"}\
+ %varsNotEmpty{, "action":"%enc{%map{action}}{JSON}"}\
+ %varsNotEmpty{, "request.name":"%enc{%map{request.name}}{JSON}"}\
+ %varsNotEmpty{, "indices":%map{indices}}\
+ %varsNotEmpty{, "opaque_id":"%enc{%map{opaque_id}}{JSON}"}\
+ %varsNotEmpty{, "transport.profile":"%enc{%map{transport.profile}}{JSON}"}\
+ %varsNotEmpty{, "rule":"%enc{%map{rule}}{JSON}"}\
+ %varsNotEmpty{, "event.category":"%enc{%map{event.category}}{JSON}"}\
+ }%n
+# "node.name" node name from the `elasticsearch.yml` settings
+# "node.id" node id which should not change between cluster restarts
+# "host.name" unresolved hostname of the local node
+# "host.ip" the local bound ip (i.e. the ip listening for connections)
+# "event.type" a received REST request is translated into one or more transport requests. This indicates which processing layer generated the event "rest" or "transport" (internal)
+# "event.action" the name of the audited event, eg. "authentication_failed", "access_granted", "run_as_granted", etc.
+# "user.name" the subject name as authenticated by a realm
+# "user.run_by.name" the original authenticated subject name that is impersonating another one.
+# "user.run_as.name" if this "event.action" is of a run_as type, this is the subject name to be impersonated as.
+# "user.realm" the name of the realm that authenticated "user.name"
+# "user.run_by.realm" the realm name of the impersonating subject ("user.run_by.name")
+# "user.run_as.realm" if this "event.action" is of a run_as type, this is the realm name the impersonated user is looked up from
+# "user.roles" the roles array of the user; these are the roles that are granting privileges
+# "origin.type" it is "rest" if the event is originating (is in relation to) a REST request; possible other values are "transport" and "ip_filter"
+# "origin.address" the remote address and port of the first network hop, i.e. a REST proxy or another cluster node
+# "realm" name of a realm that has generated an "authentication_failed" or an "authentication_successful"; the subject is not yet authenticated
+# "url.path" the URI component between the port and the query string; it is percent (URL) encoded
+# "url.query" the URI component after the path and before the fragment; it is percent (URL) encoded
+# "request.body" the content of the request body entity, JSON escaped
+# "action" an action is the most granular operation that is authorized and this identifies it in a namespaced way (internal)
+# "request.name" if the event is in connection to a transport message this is the name of the request class, similar to how rest requests are identified by the url path (internal)
+# "indices" the array of indices that the "action" is acting upon
+# "opaque_id" opaque value conveyed by the "X-Opaque-Id" request header
+# "transport.profile" name of the transport profile in case this is a "connection_granted" or "connection_denied" event
+# "rule" name of the applied rule if the "origin.type" is "ip_filter"
+# "event.category" fixed value "elasticsearch-audit"
+
+appender.audit_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_audit-%d{yyyy-MM-dd}.log
+appender.audit_rolling.policies.type = Policies
+appender.audit_rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.audit_rolling.policies.time.interval = 1
+appender.audit_rolling.policies.time.modulate = true
+
+appender.deprecated_audit_rolling.type = RollingFile
+appender.deprecated_audit_rolling.name = deprecated_audit_rolling
+appender.deprecated_audit_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_access.log
+appender.deprecated_audit_rolling.layout.type = PatternLayout
+appender.deprecated_audit_rolling.layout.pattern = [%d{ISO8601}] %m%n
+appender.deprecated_audit_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_access-%d{yyyy-MM-dd}.log
+appender.deprecated_audit_rolling.policies.type = Policies
+appender.deprecated_audit_rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.deprecated_audit_rolling.policies.time.interval = 1
+appender.deprecated_audit_rolling.policies.time.modulate = true
+
+logger.xpack_security_audit_logfile.name = org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail
+logger.xpack_security_audit_logfile.level = info
+logger.xpack_security_audit_logfile.appenderRef.audit_rolling.ref = audit_rolling
+logger.xpack_security_audit_logfile.additivity = false
+
+logger.xpack_security_audit_deprecated_logfile.name = org.elasticsearch.xpack.security.audit.logfile.DeprecatedLoggingAuditTrail
+# set this to "off" instead of "info" to disable the deprecated appender
+# in the 6.x releases both the new and the previous appenders are enabled
+# for the logfile auditing
+logger.xpack_security_audit_deprecated_logfile.level = info
+logger.xpack_security_audit_deprecated_logfile.appenderRef.deprecated_audit_rolling.ref = deprecated_audit_rolling
+logger.xpack_security_audit_deprecated_logfile.additivity = false
+
+logger.xmlsig.name = org.apache.xml.security.signature.XMLSignature
+logger.xmlsig.level = error
+logger.samlxml_decrypt.name = org.opensaml.xmlsec.encryption.support.Decrypter
+logger.samlxml_decrypt.level = fatal
+logger.saml2_decrypt.name = org.opensaml.saml.saml2.encryption.Decrypter
+logger.saml2_decrypt.level = fatal
+{%- endraw -%}
diff --git a/elasticsearch/map.jinja b/elasticsearch/map.jinja
index 930a2f9..93b3f07 100644
--- a/elasticsearch/map.jinja
+++ b/elasticsearch/map.jinja
@@ -7,16 +7,12 @@
- elasticsearch-curator
- cron
service: elasticsearch
- version: '1.4.4'
RedHat:
pkgs:
- elasticsearch
curator_pkgs:
- elasticsearch-curator
service: elasticsearch
- version: '1.4.4'
-default:
- version: 5
{%- endload %}
{%- set server = salt['grains.filter_by'](base_defaults, merge=salt['pillar.get']('elasticsearch:server'), base='default') %}
diff --git a/elasticsearch/server/init.sls b/elasticsearch/server/init.sls
index ee984cb..7995faf 100644
--- a/elasticsearch/server/init.sls
+++ b/elasticsearch/server/init.sls
@@ -36,11 +36,11 @@
- pkg: elasticsearch_packages
{%- endif %}
-{%- if server.version == 5 %}
+{%- if server.version in [5, 6] %}
elasticsearch_logging:
file.managed:
- name: /etc/elasticsearch/log4j2.properties
- - source: salt://elasticsearch/files/v5/log4j2.properties
+ - source: salt://elasticsearch/files/v{{ server.version }}/log4j2.properties
- template: jinja
- require:
- pkg: elasticsearch_packages
@@ -48,7 +48,7 @@
elasticsearch_jvm_options:
file.managed:
- name: /etc/elasticsearch/jvm.options
- - source: salt://elasticsearch/files/v5/jvm.options
+ - source: salt://elasticsearch/files/v{{ server.version }}/jvm.options
- template: jinja
- require:
- pkg: elasticsearch_packages
diff --git a/metadata/service/client.yml b/metadata/service/client.yml
index 72efe86..9202012 100644
--- a/metadata/service/client.yml
+++ b/metadata/service/client.yml
@@ -1,24 +1,20 @@
applications:
- elasticsearch
parameters:
- _param:
- elasticsearch_version: 5
elasticsearch:
client:
enabled: true
+ version: ${_param:elasticsearch_version}
index:
log:
force_operation: true
enabled: true
- template: elasticsearch/files/v${_param:elasticsearch_version}/es_template_log.json
+ template: elasticsearch/files/v${elasticsearch:client:version}/es_template_log.json
audit:
force_operation: true
enabled: true
- template: elasticsearch/files/v${_param:elasticsearch_version}/es_template_audit.json
+ template: elasticsearch/files/v${elasticsearch:client:version}/es_template_audit.json
notification:
force_operation: true
enabled: true
- template: elasticsearch/files/v${_param:elasticsearch_version}/es_template_notification.json
- kibana:
- enabled: true
- template: elasticsearch/files/v${_param:elasticsearch_version}/es_template_kibana.json
+ template: elasticsearch/files/v${elasticsearch:client:version}/es_template_notification.json
diff --git a/metadata/service/server/cluster.yml b/metadata/service/server/cluster.yml
index 1f9e961..d641218 100644
--- a/metadata/service/server/cluster.yml
+++ b/metadata/service/server/cluster.yml
@@ -3,8 +3,6 @@
classes:
- service.elasticsearch.support
parameters:
- _param:
- elasticsearch_version: 5
elasticsearch:
server:
enabled: true
diff --git a/metadata/service/server/local.yml b/metadata/service/server/local.yml
index 7980251..6cc7849 100644
--- a/metadata/service/server/local.yml
+++ b/metadata/service/server/local.yml
@@ -3,8 +3,6 @@
classes:
- service.elasticsearch.support
parameters:
- _param:
- elasticsearch_version: 5
elasticsearch:
server:
enabled: true
diff --git a/metadata/service/server/single.yml b/metadata/service/server/single.yml
index 01e7436..67c6cf5 100644
--- a/metadata/service/server/single.yml
+++ b/metadata/service/server/single.yml
@@ -3,8 +3,6 @@
classes:
- service.elasticsearch.support
parameters:
- _param:
- elasticsearch_version: 5
elasticsearch:
server:
enabled: true
diff --git a/tests/pillar/client.sls b/tests/pillar/client.sls
index 053d17a..216b590 100644
--- a/tests/pillar/client.sls
+++ b/tests/pillar/client.sls
@@ -1,6 +1,7 @@
elasticsearch:
client:
enabled: true
+ version: 5
server:
host: elasticsearch.host
- port: 9200
\ No newline at end of file
+ port: 9200
diff --git a/tests/pillar/cluster.sls b/tests/pillar/cluster.sls
index 18a0194..1972b40 100644
--- a/tests/pillar/cluster.sls
+++ b/tests/pillar/cluster.sls
@@ -16,4 +16,4 @@
index:
shards: 5
replicas: 1
- version: 2
\ No newline at end of file
+ version: 5
diff --git a/tests/pillar/curator.sls b/tests/pillar/curator.sls
index 7425a81..09c2be7 100644
--- a/tests/pillar/curator.sls
+++ b/tests/pillar/curator.sls
@@ -4,7 +4,7 @@
bind:
address: 0.0.0.0
port: 9200
- version: 2
+ version: 5
curator:
timeout: 900
logfile: /var/log/elasticsearch/curator.log
@@ -70,4 +70,4 @@
source: creation_date
direction: older
unit: days
- unit_count: 2
\ No newline at end of file
+ unit_count: 2
diff --git a/tests/pillar/repo_elasticsearch.sls b/tests/pillar/repo_elasticsearch.sls
index 06d43aa..6140c95 100644
--- a/tests/pillar/repo_elasticsearch.sls
+++ b/tests/pillar/repo_elasticsearch.sls
@@ -3,10 +3,8 @@
enabled: true
repo:
elasticsearch_repo:
- source: "deb http://packages.elastic.co/elasticsearch/2.x/debian stable main"
- architectures: amd64
+ source: "deb [arch=amd64] http://mirror.mirantis.com/testing/elasticsearch-5.x/{{ grains.get('oscodename') }} stable main"
+ key_url: "https://artifacts.elastic.co/GPG-KEY-elasticsearch"
+ elasticsearch_curator_repo:
+ source: "deb [arch=amd64] http://mirror.mirantis.com/nightly/elasticsearch-curator-5/{{ grains.get('oscodename') }} stable main"
key_url: "https://packages.elastic.co/GPG-KEY-elasticsearch"
- mirantis_openstack_repo:
- source: "deb http://mirror.fuel-infra.org/mcp-repos/1.0/{{ grains.get('oscodename') }} mitaka main"
- architectures: amd64
- key_url: "http://mirror.fuel-infra.org/mcp-repos/1.0/{{ grains.get('oscodename') }}/archive-mcp1.0.key"
diff --git a/tests/pillar/single.sls b/tests/pillar/single.sls
index eec8541..7fdf667 100644
--- a/tests/pillar/single.sls
+++ b/tests/pillar/single.sls
@@ -9,4 +9,4 @@
index:
shards: 1
replicas: 0
- version: 2
+ version: 5