1 {%- from "influxdb/map.jinja" import server with context -%}
2 ### Welcome to the InfluxDB configuration file.
4 # Once every 24 hours InfluxDB will report anonymous data to m.influxdb.com
5 # The data includes raft id (random 8 bytes), os, arch, version, and metadata.
6 # We don't track ip addresses of servers reporting. This is only used
7 # to track the number of instances running and the versions, which
8 # is very helpful for us.
9 # Change this option to true to disable reporting.
10 reporting-disabled = false
# we'll try to get the hostname automatically, but if the os returns something
13 # that isn't resolvable by other servers in the cluster, use this option to
14 # manually set the hostname
15 # hostname = "localhost"
20 ### Controls the parameters for the Raft consensus group that stores metadata
21 ### about the InfluxDB cluster.
24 {%- if server.meta.enabled %}
26 # Where the metadata/raft database is stored
27 dir = "/var/lib/influxdb/meta"
29 retention-autocreate = true
31 # If log messages are printed for the meta service
32 logging-enabled = true
35 # The default duration for leases.
36 lease-duration = "1m0s"
41 {%- if server.get('meta', {}).bind is defined %}
bind-address = "{{ server.meta.bind.address }}:{{ server.meta.bind.port }}"
http-bind-address = "{{ server.meta.bind.http_address }}:{{ server.meta.bind.http_port }}"
49 ### Controls where the actual shard data for InfluxDB lives and how it is
50 ### flushed from the WAL. "dir" may need to be changed to a suitable place
51 ### for your system, but the WAL settings are an advanced configuration. The
52 ### defaults should work for most systems.
55 {%- if server.data.enabled %}
57 # Controls if this node holds time series data shards in the cluster
60 dir = "/var/lib/influxdb/data"
62 # These are the WAL settings for the storage engine >= 0.9.3
63 wal-dir = "/var/lib/influxdb/wal"
64 wal-logging-enabled = true
65 data-logging-enabled = true
67 # Whether queries should be logged before execution. Very useful for troubleshooting, but will
68 # log any sensitive data contained within a query.
69 # query-log-enabled = true
71 # Settings for the TSM engine
73 # CacheMaxMemorySize is the maximum size a shard's cache can
74 # reach before it starts rejecting writes.
75 # cache-max-memory-size = 524288000
77 # CacheSnapshotMemorySize is the size at which the engine will
78 # snapshot the cache and write it to a TSM file, freeing up memory
79 # cache-snapshot-memory-size = 26214400
81 # CacheSnapshotWriteColdDuration is the length of time at
82 # which the engine will snapshot the cache and write it to
83 # a new TSM file if the shard hasn't received writes or deletes
84 # cache-snapshot-write-cold-duration = "1h"
86 # MinCompactionFileCount is the minimum number of TSM files
87 # that need to exist before a compaction cycle will run
88 # compact-min-file-count = 3
90 # CompactFullWriteColdDuration is the duration at which the engine
91 # will compact all TSM files in a shard if it hasn't received a
93 # compact-full-write-cold-duration = "24h"
95 # MaxPointsPerBlock is the maximum number of points in an encoded
96 # block in a TSM file. Larger numbers may yield better compression
97 # but could incur a performance penalty when querying
98 # max-points-per-block = 1000
107 ### Controls the clustering service configuration.
111 write-timeout = "10s"
112 max-concurrent-queries = 0
114 log-queries-after = "0"
116 max-select-series = 0
117 max-select-buckets = 0
122 ### Controls the enforcement of retention policies for evicting old data.
127 check-interval = "30m"
130 ### [shard-precreation]
132 ### Controls the precreation of shards, so they are available before data arrives.
133 ### Only shards that, after creation, will have both a start- and end-time in the
134 ### future, will ever be created. Shards are never precreated that would be wholly
135 ### or partially in the past.
139 check-interval = "10m"
140 advance-period = "30m"
143 ### Controls the system self-monitoring, statistics and diagnostics.
### The internal database for monitoring data is created automatically if
### it does not already exist. The target retention within this database
147 ### is called 'monitor' and is also created with a retention period of 7 days
### and a replication factor of 1, if it does not exist. In all cases
### this retention policy is configured as the default for the database.
152 store-enabled = true # Whether to record statistics internally.
153 store-database = "_internal" # The destination database for recorded statistics
154 store-interval = "10s" # The interval at which to record statistics
159 ### Controls the availability of the built-in, web-based admin interface. If HTTPS is
160 ### enabled for the admin interface, HTTPS must also be enabled on the [http] service.
163 {%- if server.admin.enabled %}
166 bind-address = "{{ server.admin.bind.address }}:{{ server.admin.bind.port }}"
167 {%- if server.admin.get('ssl', {}).get('enabled', False) %}
https-certificate = "{{ server.admin.ssl.cert_file }}"
https-private-key = "{{ server.admin.ssl.key_file }}"
172 https-enabled = false
182 ### Controls how the HTTP endpoints are configured. These are the primary
183 ### mechanism for getting data into and out of InfluxDB.
186 {%- if server.http.enabled %}
189 bind-address = "{{ server.http.bind.address }}:{{ server.http.bind.port }}"
192 write-tracing = false
193 pprof-enabled = false
194 {%- if server.http.get('ssl', {}).get('enabled', False) %}
196 https-certificate = "{{ server.http.ssl.cert_file }}"
197 https-private-key = "{{ server.http.ssl.key_file }}"
199 https-enabled = false
201 max-row-limit = 10000
202 {%- if server.http.output is defined %}
{%- for name, output in server.http.output.items() %}
205 { name="{{ name }}", location="{{ output.location }}", timeout="{{ output.timeout|default('10') }}s" },
217 ### Controls the subscriptions, which can be used to fork a copy of all data
218 ### received by the InfluxDB host.
223 # http-timeout = "30s"
224 # write-concurrency = 40
225 # write-buffer-size = 1000
230 ### Controls one or many listeners for Graphite data.
235 # database = "graphite"
236 # bind-address = ":2003"
238 # consistency-level = "one"
240 # These next lines control how batching works. You should have this enabled
241 # otherwise you could get dropped metrics or poor performance. Batching
242 # will buffer points in memory if you have many coming in.
244 # batch-size = 5000 # will flush if this many points get buffered
245 # batch-pending = 10 # number of batches that may be pending in memory
246 # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
247 # udp-read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
249 ### This string joins multiple matching 'measurement' values providing more control over the final measurement name.
252 ### Default tags that will be added to all metrics. These can be overridden at the template level
253 ### or by tags extracted from metric
254 # tags = ["region=us-east", "zone=1c"]
256 ### Each template line requires a template pattern. It can have an optional
257 ### filter before the template and separated by spaces. It can also have optional extra
258 ### tags following the template. Multiple tags should be separated by commas and no spaces
259 ### similar to the line protocol format. There can be only one default template.
261 # "*.app env.service.resource.measurement",
269 ### Controls one or many listeners for collectd data.
278 # These next lines control how batching works. You should have this enabled
279 # otherwise you could get dropped metrics or poor performance. Batching
280 # will buffer points in memory if you have many coming in.
282 # batch-size = 1000 # will flush if this many points get buffered
283 # batch-pending = 5 # number of batches that may be pending in memory
284 # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
285 # read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
290 ### Controls one or many listeners for OpenTSDB data.
295 # bind-address = ":4242"
296 # database = "opentsdb"
297 # retention-policy = ""
298 # consistency-level = "one"
299 # tls-enabled = false
301 # log-point-errors = true # Log an error for every malformed point.
303 # These next lines control how batching works. You should have this enabled
304 # otherwise you could get dropped metrics or poor performance. Only points
305 # metrics received over the telnet protocol undergo batching.
307 # batch-size = 1000 # will flush if this many points get buffered
308 # batch-pending = 5 # number of batches that may be pending in memory
309 # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
314 ### Controls the listeners for InfluxDB line protocol data via UDP.
317 {%- if server.udp.enabled %}
320 bind-address = "{{ server.udp.bind.address }}:{{ server.udp.bind.port }}"
322 # retention-policy = ""
324 # These next lines control how batching works. You should have this enabled
325 # otherwise you could get dropped metrics or poor performance. Batching
326 # will buffer points in memory if you have many coming in.
328 # batch-size = 1000 # will flush if this many points get buffered
329 # batch-pending = 5 # number of batches that may be pending in memory
330 # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
331 # read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
333 # set the expected UDP payload size; lower values tend to yield better performance, default is max UDP size 65536
334 # udp-payload-size = 65536
335 {%- if server.udp.output is defined %}
{%- for name, output in server.udp.output.items() %}
338 { name="{{ name }}", location="{{ output.location }}"{% if output.mtu is defined %}, mtu="{{ output.mtu }}"{% endif %} },
348 ### [continuous_queries]
350 ### Controls how continuous queries are run within InfluxDB.
356 # run-interval = "1s" # interval for how often continuous queries will be checked if they need to run