Merge remote-tracking branch 'upstream/master' into stacklight
diff --git a/README.rst b/README.rst
index cc8cfdb..08bd72a 100644
--- a/README.rst
+++ b/README.rst
@@ -24,6 +24,17 @@
           shards: 1
           replicas: 0
 
+Setup shared repository for snapshots:
+
+.. code-block:: yaml
+
+    elasticsearch:
+      server:
+        snapshot:
+          reponame:
+            path: /var/lib/glusterfs/repo
+            compress: true
+
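+Note that ``path.repo`` only whitelists the location in ``elasticsearch.yml``;
+the repository itself must still be registered through the snapshot API. A
+minimal sketch, assuming Elasticsearch listens on ``localhost:9200`` and the
+pillar above has been applied (``snapshot_1`` is an example name):
+
+.. code-block:: bash
+
+    # Register the filesystem repository declared in pillar
+    curl -XPUT 'http://localhost:9200/_snapshot/reponame' -d '{
+      "type": "fs",
+      "settings": {"location": "/var/lib/glusterfs/repo", "compress": true}
+    }'
+
+    # Take a snapshot of all indices and wait for completion
+    curl -XPUT 'http://localhost:9200/_snapshot/reponame/snapshot_1?wait_for_completion=true'
+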
 Cluster with manually defined members:
 
 .. code-block:: yaml
diff --git a/elasticsearch/files/elasticsearch.yml b/elasticsearch/files/elasticsearch.yml
index 4d21261..a51cf3c 100644
--- a/elasticsearch/files/elasticsearch.yml
+++ b/elasticsearch/files/elasticsearch.yml
@@ -182,6 +182,13 @@
 # Path to where plugins are installed:
 #
 # path.plugins: /path/to/plugins
+#
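+# When snapshot repositories are defined in pillar, their paths are
+# whitelisted here. With the README example this renders roughly as:
+#
+#   path.repo:
+#     - /var/lib/glusterfs/repo
+#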
+{%- if server.snapshot is defined %}
+path.repo:
+  {%- for repo_name, repo in server.snapshot.items() %}
+  - {{ repo.path }}
+  {%- endfor %}
+{%- endif %}
 
 
 #################################### Plugin ###################################
diff --git a/elasticsearch/files/logging.yml b/elasticsearch/files/logging.yml
new file mode 100644
index 0000000..3160510
--- /dev/null
+++ b/elasticsearch/files/logging.yml
@@ -0,0 +1,106 @@
+{%- from "elasticsearch/map.jinja" import server with context -%}
+# You can override this by setting a system property, for example -Des.logger.level=DEBUG
+es.logger.level: {{ server.get('log', {}).level|default('INFO') }}
+rootLogger: ${es.logger.level}, console, file
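+
+# Both the log level above and the rotation strategy of the appenders below
+# are pillar-driven; a sketch of the relevant (optional) pillar keys:
+#
+#   elasticsearch:
+#     server:
+#       log:
+#         level: DEBUG
+#         logrotate: false    # use dailyRollingFile instead of logrotate
+#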
+logger:
+  # log action execution errors for easier debugging
+  action: DEBUG
+
+  # deprecation logging; set to DEBUG to see deprecation messages
+  deprecation: INFO, deprecation_log_file
+
+  # reduce logging for AWS; too much is logged at the default INFO level
+  com.amazonaws: WARN
+  # AWS will try to do some sketchy JMX stuff, but it's not needed.
+  com.amazonaws.jmx.SdkMBeanRegistrySupport: ERROR
+  com.amazonaws.metrics.AwsSdkMetrics: ERROR
+
+  org.apache.http: INFO
+
+  # gateway
+  #gateway: DEBUG
+  #index.gateway: DEBUG
+
+  # peer shard recovery
+  #indices.recovery: DEBUG
+
+  # discovery
+  #discovery: TRACE
+
+  index.search.slowlog: TRACE, index_search_slow_log_file
+  index.indexing.slowlog: TRACE, index_indexing_slow_log_file
+
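+# additivity: false keeps slowlog and deprecation messages out of the root
+# logger's console/file appenders, so they only land in their dedicated files.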
+additivity:
+  index.search.slowlog: false
+  index.indexing.slowlog: false
+  deprecation: false
+
+appender:
+  console:
+    type: console
+    layout:
+      type: consolePattern
+      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
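+  # When logrotate is enabled (the default), use a plain file appender and let
+  # logrotate handle rotation; otherwise Elasticsearch rolls the file daily.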
+  file:
+    {%- if server.get('log', {}).logrotate|default(True) %}
+    type: file
+    {%- else %}
+    type: dailyRollingFile
+    {%- endif %}
+    file: ${path.logs}/${cluster.name}.log
+    datePattern: "'.'yyyy-MM-dd"
+    layout:
+      type: pattern
+      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %.10000m%n"
+
+  # Use the following log4j-extras RollingFileAppender to enable gzip compression of log files.
+  # For more information see https://logging.apache.org/log4j/extras/apidocs/org/apache/log4j/rolling/RollingFileAppender.html
+  #file:
+    #type: extrasRollingFile
+    #file: ${path.logs}/${cluster.name}.log
+    #rollingPolicy: timeBased
+    #rollingPolicy.FileNamePattern: ${path.logs}/${cluster.name}.log.%d{yyyy-MM-dd}.gz
+    #layout:
+      #type: pattern
+      #conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
+  deprecation_log_file:
+    {%- if server.get('log', {}).logrotate|default(True) %}
+    type: file
+    {%- else %}
+    type: dailyRollingFile
+    {%- endif %}
+    file: ${path.logs}/${cluster.name}_deprecation.log
+    datePattern: "'.'yyyy-MM-dd"
+    layout:
+      type: pattern
+      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
+  index_search_slow_log_file:
+    {%- if server.get('log', {}).logrotate|default(True) %}
+    type: file
+    {%- else %}
+    type: dailyRollingFile
+    {%- endif %}
+    file: ${path.logs}/${cluster.name}_index_search_slowlog.log
+    datePattern: "'.'yyyy-MM-dd"
+    layout:
+      type: pattern
+      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
+  index_indexing_slow_log_file:
+    {%- if server.get('log', {}).logrotate|default(True) %}
+    type: file
+    {%- else %}
+    type: dailyRollingFile
+    {%- endif %}
+    file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log
+    datePattern: "'.'yyyy-MM-dd"
+    layout:
+      type: pattern
+      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
+{#-
+  vim: syntax=jinja
+-#}
diff --git a/elasticsearch/files/logrotate.conf b/elasticsearch/files/logrotate.conf
new file mode 100644
index 0000000..73ea58d
--- /dev/null
+++ b/elasticsearch/files/logrotate.conf
@@ -0,0 +1,10 @@
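+# copytruncate lets Elasticsearch keep writing to its open file handles across
+# rotations, so no restart or signal is needed. To dry-run the rendered config:
+#
+#   logrotate -d /etc/logrotate.d/elasticsearch
+#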
+/var/log/elasticsearch/*.log {
+    daily
+    rotate 7
+    copytruncate
+    compress
+    delaycompress
+    missingok
+    notifempty
+    create 664 elasticsearch elasticsearch
+}
diff --git a/elasticsearch/server/init.sls b/elasticsearch/server/init.sls
index 36e9398..14cc9f8 100644
--- a/elasticsearch/server/init.sls
+++ b/elasticsearch/server/init.sls
@@ -27,12 +27,34 @@
   - require:
     - pkg: elasticsearch_packages
 
+elasticsearch_logging:
+  file.managed:
+  - name: /etc/elasticsearch/logging.yml
+  - source: salt://elasticsearch/files/logging.yml
+  - template: jinja
+  - require:
+    - pkg: elasticsearch_packages
+
+{%- if server.get('log', {}).logrotate|default(True) and not
+       salt['file.file_exists']('/etc/logrotate.d/elasticsearch') %}
+{#
+  Create the logrotate config only if it doesn't already exist, to avoid
+  conflicts with the logrotate formula or a package-shipped config
+#}
+elasticsearch_logrotate:
+  file.managed:
+  - name: /etc/logrotate.d/elasticsearch
+  - source: salt://elasticsearch/files/logrotate.conf
+  - template: jinja
+{%- endif %}
+
 elasticsearch_service:
   service.running:
   - enable: true
   - name: {{ server.service }}
   - watch:
     - file: elasticsearch_config
+    - file: elasticsearch_logging
     - file: elasticsearch_default
 
 {%- endif %}