# multiple clusters on the same network, make sure you're using unique names.
#
# cluster.name: elasticsearch
-{%- if server.clustername is defined %}
-
-cluster.name: {{ server.clustername }}
-
+{%- if server.get('cluster', {}).name is defined %}
+cluster.name: {{ server.cluster.name }}
{% endif %}
#################################### Node #####################################
#
# Allow this node to be eligible as a master node (enabled by default):
#
-# node.master: true
+node.master: {{ server.get('master', True)|lower }}
#
# Allow this node to store data (enabled by default):
#
-# node.data: true
+node.data: {{ server.get('data', True)|lower }}
# You can exploit these settings to design advanced cluster topologies.
#
# is a simple key value pair, similar to node.key: value, here is an example:
#
# node.rack: rack314
+{%- if server.rack is defined %}
+node.rack: {{ server.rack }}
+{%- endif %}
# By default, multiple nodes are allowed to start from the same installation location
# to disable it, set the following:
# node.max_local_storage_nodes: 1
+{%- if server.get('threadpool', {}).get('bulk', {}).queue_size is defined %}
+# For bulk operations. Thread pool type is fixed, with a size equal to the number of available processors.
+threadpool.bulk.queue_size: {{ server.threadpool.bulk.queue_size }}
+{%- endif %}
#################################### Index ####################################
# Set the number of shards (splits) of an index (5 by default):
#
-# index.number_of_shards: 5
+index.number_of_shards: {{ server.get('index', {}).get('shards', 5) }}
# Set the number of replicas (additional copies) of an index (1 by default):
#
-# index.number_of_replicas: 1
+index.number_of_replicas: {{ server.get('index', {}).get('replicas', 1) }}
# Note, that for development on a local machine, with small indices, it usually
# makes sense to "disable" the distributed features:
# Set this property to true to lock the memory:
#
# bootstrap.mlockall: true
+{%- if server.mlockall is defined %}
+bootstrap.mlockall: {{ server.mlockall|lower }}
+{%- endif %}
# Make sure that the ES_MIN_MEM and ES_MAX_MEM environment variables are set
# to the same value, and that the machine has enough memory to allocate
# communication. (the range means that if the port is busy, it will automatically
# try the next port).
-# Set the bind address specifically (IPv4 or IPv6):
-#
-#
-network.bind_host: {{ server.bind.address }}
-http.port: {{ server.bind.port }}
-http.enabled: true
-# Set the address other nodes will use to communicate with this node. If not
-# set, it is automatically derived. It must point to an actual IP address.
-#
-# network.publish_host: 192.168.0.1
-
# Set both 'bind_host' and 'publish_host':
#
# network.host: 192.168.0.1
+{%- if server.get('bind', {}).address is defined %}
+network.host: {{ server.bind.address }}
+{%- endif %}
+
+# Specifically set the address other nodes will use to communicate with
+# this node. If not set, it is automatically derived. It must point to an
+# actual IP address.
+{%- if server.publish_host is defined %}
+network.publish_host: {{ server.publish_host }}
+{%- endif %}
+
# Set a custom port for the node to node communication (9300 by default):
#
# transport.tcp.port: 9300
# Set a custom port to listen for HTTP traffic:
#
# http.port: 9200
+{%- if server.get('bind', {}).port is defined %}
+http.port: {{ server.bind.port }}
+{%- endif %}
# Set a custom allowed content length:
#
# Disable HTTP completely:
#
# http.enabled: false
+http.enabled: true
################################### Gateway ###################################
# Allow recovery process after N nodes in a cluster are up:
#
# gateway.recover_after_nodes: 1
+{%- if server.get('gateway', {}).recover_after_nodes is defined %}
+gateway.recover_after_nodes: {{ server.gateway.recover_after_nodes }}
+{%- endif %}
# Set the timeout to initiate the recovery process, once the N nodes
# from previous setting are up (accepts time value):
#
# gateway.recover_after_time: 5m
+{%- if server.get('gateway', {}).recover_after_time is defined %}
+gateway.recover_after_time: {{ server.gateway.recover_after_time }}
+{%- endif %}
# Set how many nodes are expected in this cluster. Once these N nodes
# are up (and recover_after_nodes is met), begin recovery process immediately
# (without waiting for recover_after_time to expire):
#
# gateway.expected_nodes: 2
+{%- if server.get('gateway', {}).expected_nodes is defined %}
+gateway.expected_nodes: {{ server.gateway.expected_nodes }}
+{%- endif %}
############################# Recovery Throttling #############################
# operational within the cluster. Its recommended to set it to a higher value
# than 1 when running more than 2 nodes in the cluster.
#
-# discovery.zen.minimum_master_nodes: 1
+discovery.zen.minimum_master_nodes: {{ server.get('cluster', {}).minimum_master_nodes|default(1) }}
# Set the time to wait for ping responses from other nodes when discovering.
# Set this option to a higher value on a slow or congested network
#
# 1. Disable multicast discovery (enabled by default):
#
-# discovery.zen.ping.multicast.enabled: false
+discovery.zen.ping.multicast.enabled: {{ server.get('cluster', {}).get('multicast', True)|lower }}
#
# 2. Configure an initial list of master nodes in the cluster
# to perform discovery when new nodes (master or data) are started:
#
-# discovery.zen.ping.unicast.hosts: ["host1", "host2:port"]
+{%- if server.get('cluster', {}).members is defined %}
+discovery.zen.ping.unicast.hosts: [{% for member in server.cluster.members %}"{{ member.host }}:{{ member.get('port', 9300) }}"{% if not loop.last %}, {% endif %}{% endfor %}]
+{%- endif %}
# EC2 discovery allows to use AWS EC2 API in order to perform discovery.
#