Merge "Drop Travis CI support"
diff --git a/README.rst b/README.rst
index 2a6883c..b936776 100644
--- a/README.rst
+++ b/README.rst
@@ -735,6 +735,43 @@
token: none
...
+XMPP Encryption
+---------------
+
+Configure encryption of XMPP traffic between the control and compute nodes.
+
+Compute nodes
+~~~~~~~~~~~~~
+
+.. code-block:: yaml
+
+  opencontrail:
+    compute:
+      xmpp:
+        tls:
+          enabled: False
+          auth:
+            enabled: False
+          cert_file: /etc/contrail/server.pem      # optional
+          key_file: /etc/contrail/privkey.pem      # optional
+          ca_cert_file: /etc/contrail/ca-cert.pem  # optional
+
+Control nodes
+~~~~~~~~~~~~~
+
+.. code-block:: yaml
+
+  opencontrail:
+    control:
+      xmpp:
+        tls:
+          enabled: False
+          auth:
+            enabled: False
+          cert_file: /etc/contrail/server.pem      # optional
+          key_file: /etc/contrail/privkey.pem      # optional
+          ca_cert_file: /etc/contrail/ca-cert.pem  # optional
+
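+Once the pillar data is in place, reapply the relevant states to render the
+new options. The state targets below follow the formula's usual naming;
+adjust the compound matchers to your deployment:
+
+.. code-block:: bash
+
+   salt -C 'I@opencontrail:control' state.sls opencontrail.control
+   salt -C 'I@opencontrail:compute' state.sls opencontrail.compute
+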
Kubernetes support
------------------
@@ -1265,7 +1302,7 @@
- [tenant3, 7]
-If you want to remove all shares from the ip floating pool, define only empty list in
+If you want to remove all shares from the floating IP pool, define only an empty
list of projects, like this:
.. code-block:: yaml
@@ -1444,37 +1481,3 @@
Trace XMPP requests
http://<compute-node>:8085/Snh_SandeshTraceRequest?x=XmppMessageTrace
-
-
-Documentation and Bugs
-======================
-
-To learn how to install and update salt-formulas, consult the documentation
-available online at:
-
- http://salt-formulas.readthedocs.io/
-
-In the unfortunate event that bugs are discovered, they should be reported to
-the appropriate issue tracker. Use Github issue tracker for specific salt
-formula:
-
- https://github.com/salt-formulas/salt-formula-opencontrail/issues
-
-For feature requests, bug reports or blueprints affecting entire ecosystem,
-use Launchpad salt-formulas project:
-
- https://launchpad.net/salt-formulas
-
-You can also join salt-formulas-users team and subscribe to mailing list:
-
- https://launchpad.net/~salt-formulas-users
-
-Developers wishing to work on the salt-formulas projects should always base
-their work on master branch and submit pull request against specific formula.
-
- https://github.com/salt-formulas/salt-formula-opencontrail
-
-Any questions or feedback is always welcome so feel free to join our IRC
-channel:
-
- #salt-formulas @ irc.freenode.net
diff --git a/_modules/contrail.py b/_modules/contrail.py
index 777c686..c9741e3 100644
--- a/_modules/contrail.py
+++ b/_modules/contrail.py
@@ -702,7 +702,7 @@
vnc_client = _auth(**kwargs)
vrouter_conf_objs = vnc_client._objects_list('global-vrouter-config', detail=True)
for vrouter_conf_obj in vrouter_conf_objs:
- ret[vrouter_conf_obj._display_name] = vrouter_conf_obj.__dict__
+ ret[vrouter_conf_obj.name] = vrouter_conf_obj.__dict__
return ret
diff --git a/_modules/contrail_health.py b/_modules/contrail_health.py
index 750cd8c..0b539c9 100644
--- a/_modules/contrail_health.py
+++ b/_modules/contrail_health.py
@@ -15,6 +15,10 @@
import logging
import os
import subprocess
+import time
+
+# Import Salt Libs
+import salt.utils.http
MODULE_NAME = 'contrail_health'
@@ -78,3 +82,36 @@
'by {0} module.'.format(MODULE_NAME))
return status_map
+
+
+def get_api_status(wait_for=180, tries=20):
+    '''
+    Check the status of the Contrail API service on the virtual IP defined
+    by pillars.
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt 'ntw01*' contrail_health.get_api_status [wait_for=300] \\
+            [tries=20]
+
+    wait_for
+        Total number of seconds to wait for an API response.
+
+    tries
+        Number of tries. After each unsuccessful try, the check sleeps for
+        (wait_for / tries) seconds.
+    '''
+    api_host = __pillar__['opencontrail'].get('client', {}).get('api', {}).get('host', {})
+    api_port = __pillar__['opencontrail']['client']['api']['port']
+    for _ in range(tries):
+        try:
+            data = salt.utils.http.query("http://{0}:{1}".format(api_host, api_port), status=True)
+        except Exception:
+            time.sleep(int(wait_for / tries))
+            continue
+        if data['status'] == 200:
+            return True
+
+    return False
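
For context, the new check is a plain poll-and-retry loop around
``salt.utils.http.query``. A standalone sketch of the same pattern, using
``requests`` purely for illustration (the module itself calls Salt's HTTP
helper, not ``requests``):

.. code-block:: python

    # Poll a URL until it answers HTTP 200, mirroring get_api_status() above;
    # 'requests' stands in for salt.utils.http.query in this sketch.
    import time
    import requests

    def wait_for_http_200(url, wait_for=180, tries=20):
        for _ in range(tries):
            try:
                if requests.get(url, timeout=wait_for // tries).status_code == 200:
                    return True
            except requests.RequestException:
                # Space the retries so the loop spans roughly wait_for seconds.
                time.sleep(wait_for // tries)
        return False
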
diff --git a/debian/control b/debian/control
index a7de37b..f206b81 100644
--- a/debian/control
+++ b/debian/control
@@ -1,11 +1,11 @@
Source: salt-formula-opencontrail
-Maintainer: Jakub Pavlik <jakub.pavlik@tcpcloud.eu>
+Maintainer: Mirantis Dev <dev@mirantis.com>
Section: admin
Priority: optional
Build-Depends: salt-master, python, python-yaml, debhelper (>= 9),
salt-formula-linux, salt-formula-prometheus
Standards-Version: 3.9.6
-Homepage: https://wiki.openstack.org/wiki/OpenStackSalt
+Homepage: https://www.mirantis.com
Vcs-Browser: https://github.com/openstack/salt-formula-opencontrail
Vcs-Git: https://github.com/openstack/salt-formula-opencontrail.git
diff --git a/debian/copyright b/debian/copyright
index cc96f85..7656ff2 100644
--- a/debian/copyright
+++ b/debian/copyright
@@ -1,12 +1,12 @@
Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: salt-formula-opencontrail
-Upstream-Contact: Jakub Pavlik <jakub.pavlik@tcpcloud.eu>
+Upstream-Contact: Mirantis Dev <dev@mirantis.com>
Source: https://github.com/openstack/salt-formula-opencontrail
Files: *
-Copyright: 2014-2016 tcp cloud
+Copyright: 2014-2019 Mirantis Inc. et al
License: Apache-2.0
- Copyright (C) 2014-2016 tcp cloud
+ Copyright (C) 2014-2019 Mirantis Inc. et al
.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/metadata.yml b/metadata.yml
index ba3b2d8..a20923f 100644
--- a/metadata.yml
+++ b/metadata.yml
@@ -3,6 +3,6 @@
source: "https://github.com/openstack/salt-formula-opencontrail"
dependencies:
- name: linux
- source: "https://github.com/salt-formulas/salt-formula-linux"
+ source: "https://gerrit.mcp.mirantis.com/salt-formulas/linux"
- name: prometheus
- source: "https://github.com/salt-formulas/salt-formula-prometheus"
+ source: "https://gerrit.mcp.mirantis.com/salt-formulas/prometheus"
diff --git a/metadata/service/client/cluster.yml b/metadata/service/client/cluster.yml
index 3cfd124..337e9d0 100644
--- a/metadata/service/client/cluster.yml
+++ b/metadata/service/client/cluster.yml
@@ -6,6 +6,8 @@
opencontrail_identity_port: 35357
opencontrail_identity_version: '2.0'
openstack_control_address: 127.0.0.1
+ opencontrail_admin_password: 'none'
+ opencontrail_admin_user: 'contrail'
opencontrail:
client:
enabled: True
@@ -16,8 +18,8 @@
port: ${_param:opencontrail_identity_port}
version: ${_param:opencontrail_identity_version}
tenant: admin
- user: admin
- password: ${_param:keystone_admin_password}
+ user: ${_param:opencontrail_admin_user}
+ password: ${_param:opencontrail_admin_password}
api:
engine: contrail
host: ${_param:opencontrail_control_address}
diff --git a/metadata/service/client/single.yml b/metadata/service/client/single.yml
index 19f73eb..8d3a509 100644
--- a/metadata/service/client/single.yml
+++ b/metadata/service/client/single.yml
@@ -5,6 +5,8 @@
opencontrail_version: 3.0
opencontrail_identity_port: 35357
opencontrail_identity_version: '2.0'
+ opencontrail_admin_password: 'none'
+ opencontrail_admin_user: 'contrail'
opencontrail:
client:
enabled: True
@@ -15,8 +17,8 @@
port: ${_param:opencontrail_identity_port}
version: ${_param:opencontrail_identity_version}
tenant: admin
- user: admin
- password: ${_param:keystone_admin_password}
+ user: ${_param:opencontrail_admin_user}
+ password: ${_param:opencontrail_admin_password}
api:
engine: contrail
host: ${_param:cluster_local_address}
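
The two new ``_param`` defaults above are placeholders meant to be overridden
higher in the Reclass hierarchy. A deployment that wants the previous
behaviour can map them back to the Keystone admin credentials, for example:

.. code-block:: yaml

    parameters:
      _param:
        opencontrail_admin_user: admin
        opencontrail_admin_password: ${_param:keystone_admin_password}
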
diff --git a/metadata/service/control/analytics.yml b/metadata/service/control/analytics.yml
index b51a1fe..be71d6d 100644
--- a/metadata/service/control/analytics.yml
+++ b/metadata/service/control/analytics.yml
@@ -7,9 +7,10 @@
opencontrail_version: 2.2
opencontrail_identity_port: 35357
opencontrail_identity_version: '2.0'
- keystone_admin_password: 'none'
+ opencontrail_admin_password: 'none'
+ opencontrail_admin_user: 'contrail'
keystone_service_token: 'none'
- redis_password: 'none'
+ opencontrail_redis_password: 'none'
openstack_region: RegionOne
opencontrail:
common:
@@ -19,7 +20,7 @@
host: ${_param:cluster_vip_address}
port: ${_param:opencontrail_identity_port}
token: '${_param:keystone_service_token}'
- password: '${_param:keystone_admin_password}'
+ password: '${_param:opencontrail_admin_password}'
network:
engine: neutron
host: ${_param:network_vip_address}
@@ -37,7 +38,7 @@
engine: redis
host: ${_param:cluster_local_address}
port: 6379
- password: ${_param:redis_password}
+ password: ${_param:opencontrail_redis_password}
discovery:
host: ${_param:network_vip_address}
data_ttl: 1
@@ -55,8 +56,8 @@
region: ${_param:openstack_region}
host: ${_param:cluster_vip_address}
port: ${_param:opencontrail_identity_port}
- user: admin
- password: '${_param:keystone_admin_password}'
+ user: ${_param:opencontrail_admin_user}
+ password: '${_param:opencontrail_admin_password}'
token: '${_param:keystone_service_token}'
tenant: admin
alarm_gen:
diff --git a/metadata/service/control/cluster.yml b/metadata/service/control/cluster.yml
index d98a19e..8b9eaf7 100644
--- a/metadata/service/control/cluster.yml
+++ b/metadata/service/control/cluster.yml
@@ -7,9 +7,10 @@
opencontrail_version: 2.2
opencontrail_identity_port: 35357
opencontrail_identity_version: '2.0'
- keystone_admin_password: 'none'
+ opencontrail_admin_password: 'none'
+ opencontrail_admin_user: 'contrail'
keystone_service_token: 'none'
- redis_password: 'none'
+ opencontrail_redis_password: 'none'
openstack_region: RegionOne
opencontrail:
common:
@@ -19,7 +20,7 @@
host: ${_param:cluster_vip_address}
port: ${_param:opencontrail_identity_port}
token: '${_param:keystone_service_token}'
- password: '${_param:keystone_admin_password}'
+ password: '${_param:opencontrail_admin_password}'
network:
engine: neutron
host: ${_param:cluster_vip_address}
@@ -58,8 +59,8 @@
region: ${_param:openstack_region}
host: ${_param:cluster_vip_address}
port: ${_param:opencontrail_identity_port}
- user: admin
- password: '${_param:keystone_admin_password}'
+ user: ${_param:opencontrail_admin_user}
+ password: '${_param:opencontrail_admin_password}'
token: '${_param:keystone_service_token}'
tenant: admin
members:
@@ -106,7 +107,7 @@
engine: redis
host: ${_param:cluster_local_address}
port: 6379
- password: ${_param:redis_password}
+ password: ${_param:opencontrail_redis_password}
discovery:
host: ${_param:cluster_vip_address}
data_ttl: 1
@@ -124,8 +125,8 @@
region: RegionOne
host: ${_param:cluster_vip_address}
port: ${_param:opencontrail_identity_port}
- user: admin
- password: '${_param:keystone_admin_password}'
+ user: ${_param:opencontrail_admin_user}
+ password: '${_param:opencontrail_admin_password}'
token: '${_param:keystone_service_token}'
tenant: admin
alarm_gen:
@@ -173,7 +174,7 @@
engine: redis
host: ${_param:cluster_local_address}
port: 6379
- password: ${_param:redis_password}
+ password: ${_param:opencontrail_redis_password}
members:
- host: ${_param:cluster_node01_address}
id: 1
@@ -189,8 +190,8 @@
version: ${_param:opencontrail_identity_version}
host: ${_param:cluster_vip_address}
port: ${_param:opencontrail_identity_port}
- user: admin
- password: '${_param:keystone_admin_password}'
+ user: ${_param:opencontrail_admin_user}
+ password: '${_param:opencontrail_admin_password}'
token: '${_param:keystone_service_token}'
tenant: admin
network:
diff --git a/metadata/service/control/container.yml b/metadata/service/control/container.yml
index 609a448..3d17d2d 100644
--- a/metadata/service/control/container.yml
+++ b/metadata/service/control/container.yml
@@ -40,8 +40,8 @@
region: RegionOne
host: ${_param:keystone_service_host}
port: 35357
- user: admin
- password: '${_param:keystone_admin_password}'
+ user: ${_param:opencontrail_admin_user}
+ password: '${_param:opencontrail_admin_password}'
token: '${_param:keystone_service_token}'
tenant: admin
opencontrail-collector:
@@ -99,8 +99,8 @@
version: '2.0'
host: ${_param:keystone_service_host}
port: 35357
- user: admin
- password: '${_param:keystone_admin_password}'
+ user: ${_param:opencontrail_admin_user}
+ password: '${_param:opencontrail_admin_password}'
token: '${_param:keystone_service_token}'
tenant: admin
network:
diff --git a/metadata/service/control/control.yml b/metadata/service/control/control.yml
index d033ccf..728e53a 100644
--- a/metadata/service/control/control.yml
+++ b/metadata/service/control/control.yml
@@ -7,10 +7,11 @@
opencontrail_version: 2.2
opencontrail_identity_port: 35357
opencontrail_identity_version: '2.0'
- keystone_admin_password: 'none'
+ opencontrail_admin_password: 'none'
+ opencontrail_admin_user: 'contrail'
keystone_service_token: 'none'
openstack_region: RegionOne
- redis_password: 'none'
+ opencontrail_redis_password: 'none'
opencontrail:
common:
version: ${_param:opencontrail_version}
@@ -19,7 +20,7 @@
host: ${_param:cluster_vip_address}
port: ${_param:opencontrail_identity_port}
token: '${_param:keystone_service_token}'
- password: '${_param:keystone_admin_password}'
+ password: '${_param:opencontrail_admin_password}'
network:
engine: neutron
host: ${_param:cluster_vip_address}
@@ -58,8 +59,8 @@
region: ${_param:openstack_region}
host: ${_param:cluster_vip_address}
port: ${_param:opencontrail_identity_port}
- user: admin
- password: '${_param:keystone_admin_password}'
+ user: ${_param:opencontrail_admin_user}
+ password: '${_param:opencontrail_admin_password}'
token: '${_param:keystone_service_token}'
tenant: admin
members:
@@ -133,7 +134,7 @@
cache:
engine: redis
host: ${_param:cluster_local_address}
- password: ${_param:redis_password}
+ password: ${_param:opencontrail_redis_password}
port: 6379
members:
- host: ${_param:cluster_node01_address}
@@ -151,8 +152,8 @@
region: RegionOne
host: ${_param:cluster_vip_address}
port: ${_param:opencontrail_identity_port}
- user: admin
- password: '${_param:keystone_admin_password}'
+ user: ${_param:opencontrail_admin_user}
+ password: '${_param:opencontrail_admin_password}'
token: '${_param:keystone_service_token}'
tenant: admin
network:
diff --git a/metadata/service/control/single.yml b/metadata/service/control/single.yml
index a63f28a..73ce29b 100644
--- a/metadata/service/control/single.yml
+++ b/metadata/service/control/single.yml
@@ -7,10 +7,11 @@
opencontrail_version: 2.2
opencontrail_identity_port: 35357
opencontrail_identity_version: '2.0'
- keystone_admin_password: 'none'
+ opencontrail_admin_password: 'none'
+ opencontrail_admin_user: 'contrail'
keystone_service_token: 'none'
openstack_region: RegionOne
- redis_password: 'none'
+ opencontrail_redis_password: 'none'
opencontrail:
common:
version: ${_param:opencontrail_version}
@@ -19,7 +20,7 @@
host: ${_param:single_address}
port: ${_param:opencontrail_identity_port}
token: '${_param:keystone_service_token}'
- password: '${_param:keystone_admin_password}'
+ password: '${_param:opencontrail_admin_password}'
network:
engine: neutron
host: ${_param:single_address}
@@ -58,8 +59,8 @@
region: ${_param:openstack_region}
host: ${_param:single_address}
port: ${_param:opencontrail_identity_port}
- user: admin
- password: '${_param:keystone_admin_password}'
+ user: ${_param:opencontrail_admin_user}
+ password: '${_param:opencontrail_admin_password}'
token: '${_param:keystone_service_token}'
tenant: admin
members:
@@ -90,7 +91,7 @@
engine: redis
host: ${_param:single_address}
port: 6379
- password: ${_param:redis_password}
+ password: ${_param:opencontrail_redis_password}
discovery:
host: ${_param:single_address}
data_ttl: 1
@@ -104,8 +105,8 @@
region: RegionOne
host: ${_param:single_address}
port: ${_param:opencontrail_identity_port}
- user: admin
- password: '${_param:keystone_admin_password}'
+ user: ${_param:opencontrail_admin_user}
+ password: '${_param:opencontrail_admin_password}'
token: '${_param:keystone_service_token}'
tenant: admin
alarm_gen:
@@ -147,7 +148,7 @@
engine: redis
host: ${_param:single_address}
port: 6379
- password: ${_param:redis_password}
+ password: ${_param:opencontrail_redis_password}
members:
- host: ${_param:single_address}
id: 1
@@ -159,8 +160,8 @@
version: ${_param:opencontrail_identity_version}
host: ${_param:single_address}
port: ${_param:opencontrail_identity_port}
- user: admin
- password: '${_param:keystone_admin_password}'
+ user: ${_param:opencontrail_admin_user}
+ password: '${_param:opencontrail_admin_password}'
token: '${_param:keystone_service_token}'
tenant: admin
network:
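
The same pattern applies to the renamed Redis parameter; if your model still
defines a shared ``redis_password``, it can be mapped through:

.. code-block:: yaml

    parameters:
      _param:
        opencontrail_redis_password: ${_param:redis_password}
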
diff --git a/metadata/service/test/single.yml b/metadata/service/test/single.yml
index dcd37d9..a2d101e 100644
--- a/metadata/service/test/single.yml
+++ b/metadata/service/test/single.yml
@@ -3,6 +3,8 @@
parameters:
_param:
opencontrail_identity_port: 35357
+ opencontrail_admin_password: 'none'
+ opencontrail_admin_user: 'contrail'
opencontrail:
test:
enabled: True
@@ -33,9 +35,9 @@
service_token: ${_param:keystone_service_token}
service_tenant: service
admin_tenant: admin
- admin_name: admin
- admin_password: ${_param:keystone_admin_password}
- password: ${_param:keystone_admin_password}
+ admin_name: ${_param:opencontrail_admin_user}
+ admin_password: ${_param:opencontrail_admin_password}
+ password: ${_param:opencontrail_admin_password}
cacert: '/etc/ssl/certs/ca-certificates.crt'
bind:
address: 0.0.0.0
diff --git a/opencontrail/collector.sls b/opencontrail/collector.sls
index 107ee5a..63f7d86 100644
--- a/opencontrail/collector.sls
+++ b/opencontrail/collector.sls
@@ -82,9 +82,8 @@
docker-compose-contrail-collector-env:
file.managed:
- name: /etc/docker/compose/opencontrail/contrail.env
- - contents:
- - "CONTRAIL_UID={{ salt['user.info']('contrail').get('uid', 0) }}"
- - "CONTRAIL_GID={{ salt['user.info']('contrail').get('gid', 0) }}"
+ - source: salt://opencontrail/files/{{ collector.version }}/contrail.env
+ - template: jinja
- makedirs: true
- require:
- user: user_contrail_collector
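
The ``contrail.env`` template referenced here is not part of this hunk; a
minimal version that preserves the values previously inlined by this state
would be:

.. code-block:: jinja

    CONTRAIL_UID={{ salt['user.info']('contrail').get('uid', 0) }}
    CONTRAIL_GID={{ salt['user.info']('contrail').get('gid', 0) }}
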
diff --git a/opencontrail/common.sls b/opencontrail/common.sls
index 76d2ec9..009ab26 100644
--- a/opencontrail/common.sls
+++ b/opencontrail/common.sls
@@ -97,39 +97,3 @@
/etc/contrail:
file.directory
-
-{%- if common.identity.engine == "keystone" %}
-/etc/contrail/ctrl-details:
- file.managed:
- - source: salt://opencontrail/files/{{ common.version }}/ctrl-details
- - template: jinja
- - require:
- - file: /etc/contrail
-
-{%- if common.version < 3.0 %}
-/etc/contrail/openstackrc:
- file.managed:
- - source: salt://opencontrail/files/{{ common.version }}/openstackrc
- - template: jinja
- - require:
- - file: /etc/contrail
-
-/etc/contrail/keystonerc:
- file.managed:
- - source: salt://opencontrail/files/{{ common.version }}/keystonerc
- - template: jinja
- - require:
- - file: /etc/contrail
-{%- endif %}
-
-{%- endif %}
-
-{%- if common.version == 4.0 and common.contrainer_folders is defined %}
-{%- for dir in common.contrainer_folders %}
-{{ dir }}:
- file.directory:
- - user: root
- - group: root
- - makedirs: true
-{%- endfor %}
-{%- endif %}
diff --git a/opencontrail/control.sls b/opencontrail/control.sls
index 1edfa54..36c1a9f 100644
--- a/opencontrail/control.sls
+++ b/opencontrail/control.sls
@@ -73,9 +73,8 @@
docker-compose-contrail-control-env:
file.managed:
- name: /etc/docker/compose/opencontrail/contrail.env
- - contents:
- - "CONTRAIL_UID={{ salt['user.info']('contrail').get('uid', 0) }}"
- - "CONTRAIL_GID={{ salt['user.info']('contrail').get('gid', 0) }}"
+ - source: salt://opencontrail/files/{{ control.version }}/contrail.env
+ - template: jinja
- makedirs: true
- require:
- user: user_contrail
diff --git a/opencontrail/database.sls b/opencontrail/database.sls
index 89c79e2..ecdc004 100644
--- a/opencontrail/database.sls
+++ b/opencontrail/database.sls
@@ -4,7 +4,7 @@
include:
- opencontrail.common
-{% if database.cassandra.version == 1 %}
+{% if database.cassandra_version == 1 %}
{{ database.cassandra_config }}cassandra.yaml:
file.managed:
@@ -29,7 +29,7 @@
{{ database.cassandra_config }}cassandra.yaml:
file.managed:
- - source: salt://opencontrail/files/{{ database.version }}/cassandra.yaml
+ - source: salt://opencontrail/files/cassandra/{{ database.cassandra_version }}/cassandra.yaml
- template: jinja
- makedirs: True
{% if grains.os_family == "RedHat" %}
@@ -41,7 +41,7 @@
{{ database.cassandra_config }}cassandra_analytics.yaml:
file.managed:
- - source: salt://opencontrail/files/{{ database.version }}/cassandra_analytics.yaml
+ - source: salt://opencontrail/files/cassandra/{{ database.cassandra_version }}/cassandra_analytics.yaml
- template: jinja
- makedirs: True
{% if grains.os_family == "RedHat" %}
@@ -49,15 +49,23 @@
- pkg: opencontrail_database_packages
{% endif %}
+{% if database.version >= 4.1 %}
+/etc/kafka/consumer.properties:
+{%- else %}
/usr/share/kafka/config/consumer.properties:
+{% endif %}
file.managed:
- - source: salt://opencontrail/files/{{ database.version }}/consumer.properties
+ - source: salt://opencontrail/files/kafka/{{ database.kafka_version }}/consumer.properties
- template: jinja
- makedirs: true
+{% if database.version >= 4.1 %}
+/etc/kafka/zookeeper.properties:
+{%- else %}
/usr/share/kafka/config/zookeeper.properties:
+{% endif %}
file.managed:
- - source: salt://opencontrail/files/{{ database.version }}/zookeeper.properties
+ - source: salt://opencontrail/files/kafka/{{ database.kafka_version }}/zookeeper.properties
- template: jinja
- makedirs: true
@@ -69,12 +77,25 @@
docker-compose-contrail-database-env:
file.managed:
- name: /etc/docker/compose/opencontrail/contrail.env
- - contents:
- - "CONTRAIL_UID={{ salt['user.info']('contrail').get('uid', 0) }}"
- - "CONTRAIL_GID={{ salt['user.info']('contrail').get('gid', 0) }}"
+ - source: salt://opencontrail/files/{{ database.version }}/contrail.env
+ - template: jinja
- makedirs: true
- require:
- user: user_contrail_database
+
+{% if database.version >= 4 %}
+{% for logdir in [database.get('configdb_log_dir'), database.get('analyticsdb_log_dir')] %}
+{% if logdir %}
+{{ logdir }}/cassandra:
+ file.directory:
+ - makedirs: True
+{{ logdir }}/zookeeper:
+ file.directory:
+ - makedirs: True
+{% endif %}
+{% endfor %}
+{% endif %}
+
{% endif %}
{{ database.cassandra_config }}logback.xml:
@@ -89,7 +110,7 @@
{{ database.cassandra_config }}cassandra-env.sh:
file.managed:
- - source: salt://opencontrail/files/{{ database.version }}/database/cassandra-env.sh
+ - source: salt://opencontrail/files/cassandra/{{ database.cassandra_version }}/cassandra-env.sh
- template: jinja
- makedirs: True
{% if grains.os_family == "RedHat" %}
@@ -100,7 +121,7 @@
{% if database.version >= 4.0 %}
{{ database.cassandra_config }}cassandra-env-analytics.sh:
file.managed:
- - source: salt://opencontrail/files/{{ database.version }}/database/cassandra-env-analytics.sh
+ - source: salt://opencontrail/files/cassandra/{{ database.cassandra_version }}/cassandra-env-analytics.sh
- template: jinja
- makedirs: True
{% if grains.os_family == "RedHat" %}
@@ -132,7 +153,11 @@
- /etc/zookeeper/conf/zoo.cfg
- /etc/default/zookeeper
{%- if database.version >= 3.0 %}
+ {%- if database.version < 4.1 %}
- /usr/share/kafka/config/server.properties
+ {%- else %}
+ - /etc/kafka/server.properties
+ {%- endif %}
{%- if database.version < 4.0 or grains.get('init') != 'systemd' %}
- /etc/contrail/supervisord_database_files/contrail-database-nodemgr.ini
{%- endif %}
@@ -188,9 +213,13 @@
{%- if database.version >= 3.0 %}
+{%- if database.version < 4.1 %}
/usr/share/kafka/config/server.properties:
+{%- else %}
+/etc/kafka/server.properties:
+{%- endif %}
file.managed:
- - source: salt://opencontrail/files/{{ database.version }}/server.properties
+ - source: salt://opencontrail/files/kafka/{{ database.kafka_version }}/server.properties
- template: jinja
- makedirs: true
@@ -263,7 +292,11 @@
- file: /etc/zookeeper/conf/log4j.properties
- file: /var/lib/cassandra/data
- file: /etc/contrail/supervisord_database_files/contrail-database-nodemgr.ini
+ {%- if database.version < 4.1 %}
- file: /usr/share/kafka/config/server.properties
+ {%- else %}
+ - file: /etc/kafka/server.properties
+ {%- endif %}
opencontrail_zookeeper_service:
service.running:
@@ -277,7 +310,11 @@
- file: /etc/default/zookeeper
- file: /etc/zookeeper/conf/log4j.properties
- file: /etc/contrail/supervisord_database_files/contrail-database-nodemgr.ini
+ {%- if database.version < 4.1 %}
- file: /usr/share/kafka/config/server.properties
+ {%- else %}
+ - file: /etc/kafka/server.properties
+ {%- endif %}
{%- if grains.get('virtual_subtype', None) == "Docker" %}
@@ -315,7 +352,11 @@
- file: /etc/contrail/contrail-database-nodemgr.conf
- file: /var/lib/zookeeper/myid
- file: /etc/zookeeper/conf/log4j.properties
+ {%- if database.version < 4.1 %}
- file: /usr/share/kafka/config/server.properties
+ {%- else %}
+ - file: /etc/kafka/server.properties
+ {%- endif %}
{%- endif %}
{%- endif %}
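
The reworked state keys its file sources off dedicated ``cassandra_version``
and ``kafka_version`` pillar values and optionally creates per-service log
directories. A sketch of the pillar shape it now expects (version numbers and
paths are illustrative):

.. code-block:: yaml

    opencontrail:
      database:
        version: 4.1
        cassandra_version: 2
        kafka_version: 2
        configdb_log_dir: /var/log/contrail/configdb        # optional
        analyticsdb_log_dir: /var/log/contrail/analyticsdb  # optional
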
diff --git a/opencontrail/files/2.2/contrail-vrouter-agent.conf b/opencontrail/files/2.2/contrail-vrouter-agent.conf
index 0aa2672..1c306d5 100644
--- a/opencontrail/files/2.2/contrail-vrouter-agent.conf
+++ b/opencontrail/files/2.2/contrail-vrouter-agent.conf
@@ -115,6 +115,9 @@
# Maximum flows allowed per VM (given as % of maximum system flows)
# max_vm_flows=100
+{%- if compute.max_vm_flows is defined %}
+max_vm_flows={{ compute.max_vm_flows }}
+{%- endif %}
# Maximum number of link-local flows allowed across all VMs
# max_system_linklocal_flows=4096
# Maximum number of link-local flows allowed per VM
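
The new ``max_vm_flows`` knob is read straight from the compute pillar; per
the surrounding comments it is a percentage of the maximum system flows (the
value below is illustrative):

.. code-block:: yaml

    opencontrail:
      compute:
        max_vm_flows: 75
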
diff --git a/opencontrail/files/3.0/contrail-control.conf b/opencontrail/files/3.0/contrail-control.conf
index cfcc314..1889a70 100644
--- a/opencontrail/files/3.0/contrail-control.conf
+++ b/opencontrail/files/3.0/contrail-control.conf
@@ -10,7 +10,7 @@
# bgp_port=179
# collectors= # Provided by discovery server
hostip={{ control.bind.address }} # Resolved IP of `hostname`
- #hostname=ctl-oc-1 # Retrieved as `hostname`
+ #hostname=ctl-oc-1 # Retrieved as `hostname`
{%- if control.name is defined %}
hostname={{ control.name }}
{%- else %}
@@ -25,11 +25,11 @@
log_level=SYS_NOTICE
log_local=1
# test_mode=0
-{%- if pillar.get('salt', {}).get('minion', {}).get('cert', {}).opencontrail_xmpp is defined %}
-xmpp_auth_enable=true
-xmpp_server_cert={{ pillar.salt.minion.cert.opencontrail_xmpp.get('cert_file', '/etc/contrail/ssl/certs/server.pem') }}
-xmpp_server_key={{ pillar.salt.minion.cert.opencontrail_xmpp.get('key_file', '/etc/contrail/ssl/private/server-privkey.pem') }}
-xmpp_ca_cert={{ pillar.salt.minion.cert.opencontrail_xmpp.get('ca_file', '/etc/contrail/ssl/certs/ca-cert.pem') }}
+{%- if control.xmpp.tls.enabled %}
+xmpp_auth_enable = {{ control.xmpp.tls.auth.enabled }}
+xmpp_server_cert = {{ control.xmpp.tls.cert_file }}
+xmpp_server_key = {{ control.xmpp.tls.key_file }}
+xmpp_ca_cert = {{ control.xmpp.tls.ca_cert_file }}
{%- endif %}
[DISCOVERY]
diff --git a/opencontrail/files/3.0/contrail-vrouter-agent.conf b/opencontrail/files/3.0/contrail-vrouter-agent.conf
index b7e861e..9170aee 100644
--- a/opencontrail/files/3.0/contrail-vrouter-agent.conf
+++ b/opencontrail/files/3.0/contrail-vrouter-agent.conf
@@ -91,13 +91,12 @@
gateway_mode={{ compute.gateway_mode }}
{%- endif %}
-{%- if pillar.get('salt', {}).get('minion', {}).get('cert', {}).opencontrail_xmpp is defined %}
-xmpp_auth_enable=true
-xmpp_server_cert={{ pillar.salt.minion.cert.opencontrail_xmpp.get('cert_file', '/etc/contrail/ssl/certs/server.pem') }}
-xmpp_server_key={{ pillar.salt.minion.cert.opencontrail_xmpp.get('key_file', '/etc/contrail/ssl/private/server-privkey.pem') }}
-xmpp_ca_cert={{ pillar.salt.minion.cert.opencontrail_xmpp.get('ca_file', '/etc/contrail/ssl/certs/ca-cert.pem') }}
+{%- if compute.xmpp.tls.enabled %}
+xmpp_auth_enable = {{ compute.xmpp.tls.auth.enabled }}
+xmpp_server_cert = {{ compute.xmpp.tls.cert_file }}
+xmpp_server_key = {{ compute.xmpp.tls.key_file }}
+xmpp_ca_cert = {{ compute.xmpp.tls.ca_cert_file }}
{%- endif %}
-
[DISCOVERY]
# If COLLECTOR and/or CONTROL-NODE and/or DNS is not specified this section is
# mandatory. Else this section is optional
@@ -162,6 +161,9 @@
# Maximum flows allowed per VM (given as % of maximum system flows)
# max_vm_flows=100
+{%- if compute.max_vm_flows is defined %}
+max_vm_flows={{ compute.max_vm_flows }}
+{%- endif %}
# Maximum number of link-local flows allowed across all VMs
# max_system_linklocal_flows=4096
# Maximum number of link-local flows allowed per VM
@@ -208,7 +210,11 @@
# this only if vhost interface is un-numbered in host-os. Agent will use one
# of the compute_node_address to run services that need IP Address in host-os
# (like metadata...)
-#compute_node_address = 10.204.216.28
+{%- if compute.bind is defined %}
+compute_node_address={{ compute.bind.address }}
+{%- else %}
+compute_node_address={{ compute.interface.address }}
+{%- endif %}
# We can have multiple gateway sections with different indices in the
# following format
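
With this change the 3.0 agent template always renders
``compute_node_address``, preferring ``compute.bind.address`` and falling
back to ``compute.interface.address``. A minimal pillar sketch (addresses
illustrative):

.. code-block:: yaml

    opencontrail:
      compute:
        interface:
          address: 10.0.0.10  # fallback for compute_node_address
        bind:
          address: 10.0.0.10  # preferred when defined
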
diff --git a/opencontrail/files/3.0/redis_webui.conf b/opencontrail/files/3.0/redis_webui.conf
new file mode 100644
index 0000000..8d2f848
--- /dev/null
+++ b/opencontrail/files/3.0/redis_webui.conf
@@ -0,0 +1,707 @@
+# Redis configuration file example
+
+# Note on units: when memory size is needed, it is possible to specify
+# it in the usual form of 1k 5GB 4M and so forth:
+#
+# 1k => 1000 bytes
+# 1kb => 1024 bytes
+# 1m => 1000000 bytes
+# 1mb => 1024*1024 bytes
+# 1g => 1000000000 bytes
+# 1gb => 1024*1024*1024 bytes
+#
+# units are case insensitive so 1GB 1Gb 1gB are all the same.
+
+################################## INCLUDES ###################################
+
+# Include one or more other config files here. This is useful if you
+# have a standard template that goes to all Redis server but also need
+# to customize a few per-server settings. Include files can include
+# other files, so use this wisely.
+#
+# Notice option "include" won't be rewritten by command "CONFIG REWRITE"
+# from admin or Redis Sentinel. Since Redis always uses the last processed
+# line as value of a configuration directive, you'd better put includes
+# at the beginning of this file to avoid overwriting config change at runtime.
+#
+# If instead you are interested in using includes to override configuration
+# options, it is better to use include as the last line.
+#
+# include /path/to/local.conf
+# include /path/to/other.conf
+
+################################ GENERAL #####################################
+
+# By default Redis does not run as a daemon. Use 'yes' if you need it.
+# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
+daemonize yes
+
+# When running daemonized, Redis writes a pid file in /var/run/redis.pid by
+# default. You can specify a custom pid file location here.
+pidfile /var/run/redis/redis-server.pid
+
+# Accept connections on the specified port, default is 6379.
+# If port 0 is specified Redis will not listen on a TCP socket.
+port 6379
+
+# By default Redis listens for connections from all the network interfaces
+# available on the server. It is possible to listen to just one or multiple
+# interfaces using the "bind" configuration directive, followed by one or
+# more IP addresses.
+#
+# Examples:
+#
+# bind 192.168.1.100 10.0.0.1
+bind 127.0.0.1
+
+# Specify the path for the unix socket that will be used to listen for
+# incoming connections. There is no default, so Redis will not listen
+# on a unix socket when not specified.
+#
+# unixsocket /var/run/redis/redis.sock
+# unixsocketperm 755
+
+# Close the connection after a client is idle for N seconds (0 to disable)
+timeout 0
+
+# TCP keepalive.
+#
+# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
+# of communication. This is useful for two reasons:
+#
+# 1) Detect dead peers.
+# 2) Keep the connection alive from the point of view of network
+# equipment in the middle.
+#
+# On Linux, the specified value (in seconds) is the period used to send ACKs.
+# Note that to close the connection the double of the time is needed.
+# On other kernels the period depends on the kernel configuration.
+#
+# A reasonable value for this option is 60 seconds.
+tcp-keepalive 0
+
+# Specify the server verbosity level.
+# This can be one of:
+# debug (a lot of information, useful for development/testing)
+# verbose (many rarely useful info, but not a mess like the debug level)
+# notice (moderately verbose, what you want in production probably)
+# warning (only very important / critical messages are logged)
+loglevel notice
+
+# Specify the log file name. Also the empty string can be used to force
+# Redis to log on the standard output. Note that if you use standard
+# output for logging but daemonize, logs will be sent to /dev/null
+logfile /var/log/redis/redis-server.log
+
+# To enable logging to the system logger, just set 'syslog-enabled' to yes,
+# and optionally update the other syslog parameters to suit your needs.
+# syslog-enabled no
+
+# Specify the syslog identity.
+# syslog-ident redis
+
+# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
+# syslog-facility local0
+
+# Set the number of databases. The default database is DB 0, you can select
+# a different one on a per-connection basis using SELECT <dbid> where
+# dbid is a number between 0 and 'databases'-1
+databases 16
+
+################################ SNAPSHOTTING ################################
+#
+# Save the DB on disk:
+#
+# save <seconds> <changes>
+#
+# Will save the DB if both the given number of seconds and the given
+# number of write operations against the DB occurred.
+#
+# In the example below the behaviour will be to save:
+# after 900 sec (15 min) if at least 1 key changed
+# after 300 sec (5 min) if at least 10 keys changed
+# after 60 sec if at least 10000 keys changed
+#
+# Note: you can disable saving at all commenting all the "save" lines.
+#
+# It is also possible to remove all the previously configured save
+# points by adding a save directive with a single empty string argument
+# like in the following example:
+#
+# save ""
+
+#save 900 1
+#save 300 10
+#save 60 10000
+
+# By default Redis will stop accepting writes if RDB snapshots are enabled
+# (at least one save point) and the latest background save failed.
+# This will make the user aware (in a hard way) that data is not persisting
+# on disk properly, otherwise chances are that no one will notice and some
+# disaster will happen.
+#
+# If the background saving process will start working again Redis will
+# automatically allow writes again.
+#
+# However if you have setup your proper monitoring of the Redis server
+# and persistence, you may want to disable this feature so that Redis will
+# continue to work as usual even if there are problems with disk,
+# permissions, and so forth.
+stop-writes-on-bgsave-error yes
+
+# Compress string objects using LZF when dump .rdb databases?
+# By default that's set to 'yes' as it's almost always a win.
+# If you want to save some CPU in the saving child set it to 'no' but
+# the dataset will likely be bigger if you have compressible values or keys.
+rdbcompression yes
+
+# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
+# This makes the format more resistant to corruption but there is a performance
+# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
+# for maximum performances.
+#
+# RDB files created with checksum disabled have a checksum of zero that will
+# tell the loading code to skip the check.
+rdbchecksum yes
+
+# The filename where to dump the DB
+#dbfilename dump.rdb
+
+# The working directory.
+#
+# The DB will be written inside this directory, with the filename specified
+# above using the 'dbfilename' configuration directive.
+#
+# The Append Only File will also be created inside this directory.
+#
+# Note that you must specify a directory here, not a file name.
+dir /var/lib/redis
+
+################################# REPLICATION #################################
+
+# Master-Slave replication. Use slaveof to make a Redis instance a copy of
+# another Redis server. Note that the configuration is local to the slave
+# so for example it is possible to configure the slave to save the DB with a
+# different interval, or to listen to another port, and so on.
+#
+# slaveof <masterip> <masterport>
+
+# If the master is password protected (using the "requirepass" configuration
+# directive below) it is possible to tell the slave to authenticate before
+# starting the replication synchronization process, otherwise the master will
+# refuse the slave request.
+#
+# masterauth <master-password>
+
+# When a slave loses its connection with the master, or when the replication
+# is still in progress, the slave can act in two different ways:
+#
+# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
+# still reply to client requests, possibly with out of date data, or the
+# data set may just be empty if this is the first synchronization.
+#
+# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
+# an error "SYNC with master in progress" to all the kind of commands
+# but to INFO and SLAVEOF.
+#
+slave-serve-stale-data yes
+
+# You can configure a slave instance to accept writes or not. Writing against
+# a slave instance may be useful to store some ephemeral data (because data
+# written on a slave will be easily deleted after resync with the master) but
+# may also cause problems if clients are writing to it because of a
+# misconfiguration.
+#
+# Since Redis 2.6 by default slaves are read-only.
+#
+# Note: read only slaves are not designed to be exposed to untrusted clients
+# on the internet. It's just a protection layer against misuse of the instance.
+# Still a read only slave exports by default all the administrative commands
+# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
+# security of read only slaves using 'rename-command' to shadow all the
+# administrative / dangerous commands.
+slave-read-only yes
+
+# Slaves send PINGs to server in a predefined interval. It's possible to change
+# this interval with the repl_ping_slave_period option. The default value is 10
+# seconds.
+#
+# repl-ping-slave-period 10
+
+# The following option sets the replication timeout for:
+#
+# 1) Bulk transfer I/O during SYNC, from the point of view of slave.
+# 2) Master timeout from the point of view of slaves (data, pings).
+# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
+#
+# It is important to make sure that this value is greater than the value
+# specified for repl-ping-slave-period otherwise a timeout will be detected
+# every time there is low traffic between the master and the slave.
+#
+# repl-timeout 60
+
+# Disable TCP_NODELAY on the slave socket after SYNC?
+#
+# If you select "yes" Redis will use a smaller number of TCP packets and
+# less bandwidth to send data to slaves. But this can add a delay for
+# the data to appear on the slave side, up to 40 milliseconds with
+# Linux kernels using a default configuration.
+#
+# If you select "no" the delay for data to appear on the slave side will
+# be reduced but more bandwidth will be used for replication.
+#
+# By default we optimize for low latency, but in very high traffic conditions
+# or when the master and slaves are many hops away, turning this to "yes" may
+# be a good idea.
+repl-disable-tcp-nodelay no
+
+# Set the replication backlog size. The backlog is a buffer that accumulates
+# slave data when slaves are disconnected for some time, so that when a slave
+# wants to reconnect again, often a full resync is not needed, but a partial
+# resync is enough, just passing the portion of data the slave missed while
+# disconnected.
+#
+# The bigger the replication backlog, the longer the time the slave can be
+# disconnected and later be able to perform a partial resynchronization.
+#
+# The backlog is only allocated once there is at least a slave connected.
+#
+# repl-backlog-size 1mb
+
+# After a master has no longer connected slaves for some time, the backlog
+# will be freed. The following option configures the amount of seconds that
+# need to elapse, starting from the time the last slave disconnected, for
+# the backlog buffer to be freed.
+#
+# A value of 0 means to never release the backlog.
+#
+# repl-backlog-ttl 3600
+
+# The slave priority is an integer number published by Redis in the INFO output.
+# It is used by Redis Sentinel in order to select a slave to promote into a
+# master if the master is no longer working correctly.
+#
+# A slave with a low priority number is considered better for promotion, so
+# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
+# pick the one with priority 10, that is the lowest.
+#
+# However a special priority of 0 marks the slave as not able to perform the
+# role of master, so a slave with priority of 0 will never be selected by
+# Redis Sentinel for promotion.
+#
+# By default the priority is 100.
+slave-priority 100
+
+# It is possible for a master to stop accepting writes if there are less than
+# N slaves connected, having a lag less or equal than M seconds.
+#
+# The N slaves need to be in "online" state.
+#
+# The lag in seconds, that must be <= the specified value, is calculated from
+# the last ping received from the slave, that is usually sent every second.
+#
+# This option does not GUARANTEE that N replicas will accept the write, but
+# will limit the window of exposure for lost writes in case not enough slaves
+# are available, to the specified number of seconds.
+#
+# For example to require at least 3 slaves with a lag <= 10 seconds use:
+#
+# min-slaves-to-write 3
+# min-slaves-max-lag 10
+#
+# Setting one or the other to 0 disables the feature.
+#
+# By default min-slaves-to-write is set to 0 (feature disabled) and
+# min-slaves-max-lag is set to 10.
+
+################################## SECURITY ###################################
+
+# Require clients to issue AUTH <PASSWORD> before processing any other
+# commands. This might be useful in environments in which you do not trust
+# others with access to the host running redis-server.
+#
+# This should stay commented out for backward compatibility and because most
+# people do not need auth (e.g. they run their own servers).
+#
+# Warning: since Redis is pretty fast an outside user can try up to
+# 150k passwords per second against a good box. This means that you should
+# use a very strong password otherwise it will be very easy to break.
+#
+# requirepass foobared
+
+# Command renaming.
+#
+# It is possible to change the name of dangerous commands in a shared
+# environment. For instance the CONFIG command may be renamed into something
+# hard to guess so that it will still be available for internal-use tools
+# but not available for general clients.
+#
+# Example:
+#
+# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
+#
+# It is also possible to completely kill a command by renaming it into
+# an empty string:
+#
+# rename-command CONFIG ""
+#
+# Please note that changing the name of commands that are logged into the
+# AOF file or transmitted to slaves may cause problems.
+
+################################### LIMITS ####################################
+
+# Set the max number of connected clients at the same time. By default
+# this limit is set to 10000 clients, however if the Redis server is not
+# able to configure the process file limit to allow for the specified limit
+# the max number of allowed clients is set to the current file limit
+# minus 32 (as Redis reserves a few file descriptors for internal uses).
+#
+# Once the limit is reached Redis will close all the new connections sending
+# an error 'max number of clients reached'.
+#
+# maxclients 10000
+
+# Don't use more memory than the specified amount of bytes.
+# When the memory limit is reached Redis will try to remove keys
+# according to the eviction policy selected (see maxmemory-policy).
+#
+# If Redis can't remove keys according to the policy, or if the policy is
+# set to 'noeviction', Redis will start to reply with errors to commands
+# that would use more memory, like SET, LPUSH, and so on, and will continue
+# to reply to read-only commands like GET.
+#
+# This option is usually useful when using Redis as an LRU cache, or to set
+# a hard memory limit for an instance (using the 'noeviction' policy).
+#
+# WARNING: If you have slaves attached to an instance with maxmemory on,
+# the size of the output buffers needed to feed the slaves are subtracted
+# from the used memory count, so that network problems / resyncs will
+# not trigger a loop where keys are evicted, and in turn the output
+# buffer of slaves is full with DELs of keys evicted triggering the deletion
+# of more keys, and so forth until the database is completely emptied.
+#
+# In short... if you have slaves attached it is suggested that you set a lower
+# limit for maxmemory so that there is some free RAM on the system for slave
+# output buffers (but this is not needed if the policy is 'noeviction').
+#
+# maxmemory <bytes>
+
+# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
+# is reached. You can select among five behaviors:
+#
+# volatile-lru -> remove the key with an expire set using an LRU algorithm
+# allkeys-lru -> remove any key according to the LRU algorithm
+# volatile-random -> remove a random key with an expire set
+# allkeys-random -> remove a random key, any key
+# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
+# noeviction -> don't expire at all, just return an error on write operations
+#
+# Note: with any of the above policies, Redis will return an error on write
+# operations, when there are not suitable keys for eviction.
+#
+# At the date of writing these commands are: set setnx setex append
+# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
+# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
+# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
+# getset mset msetnx exec sort
+#
+# The default is:
+#
+# maxmemory-policy volatile-lru
+
+# LRU and minimal TTL algorithms are not precise algorithms but approximated
+# algorithms (in order to save memory), so you can select as well the sample
+# size to check. For instance, by default Redis will check three keys and
+# pick the one that was used least recently; you can change the sample size
+# using the following configuration directive.
+#
+# maxmemory-samples 3
+
+############################## APPEND ONLY MODE ###############################
+
+# By default Redis asynchronously dumps the dataset on disk. This mode is
+# good enough in many applications, but an issue with the Redis process or
+# a power outage may result into a few minutes of writes lost (depending on
+# the configured save points).
+#
+# The Append Only File is an alternative persistence mode that provides
+# much better durability. For instance using the default data fsync policy
+# (see later in the config file) Redis can lose just one second of writes in a
+# dramatic event like a server power outage, or a single write if something
+# wrong with the Redis process itself happens, but the operating system is
+# still running correctly.
+#
+# AOF and RDB persistence can be enabled at the same time without problems.
+# If the AOF is enabled on startup Redis will load the AOF, that is the file
+# with the better durability guarantees.
+#
+# Please check http://redis.io/topics/persistence for more information.
+
+appendonly no
+
+# The name of the append only file (default: "appendonly.aof")
+
+appendfilename "appendonly.aof"
+
+# The fsync() call tells the Operating System to actually write data on disk
+# instead of waiting for more data in the output buffer. Some OSes will really flush
+# data on disk, some other OS will just try to do it ASAP.
+#
+# Redis supports three different modes:
+#
+# no: don't fsync, just let the OS flush the data when it wants. Faster.
+# always: fsync after every write to the append only log . Slow, Safest.
+# everysec: fsync only one time every second. Compromise.
+#
+# The default is "everysec", as that's usually the right compromise between
+# speed and data safety. It's up to you to understand if you can relax this to
+# "no" that will let the operating system flush the output buffer when
+# it wants, for better performances (but if you can live with the idea of
+# some data loss consider the default persistence mode that's snapshotting),
+# or on the contrary, use "always" that's very slow but a bit safer than
+# everysec.
+#
+# More details please check the following article:
+# http://antirez.com/post/redis-persistence-demystified.html
+#
+# If unsure, use "everysec".
+
+# appendfsync always
+appendfsync everysec
+# appendfsync no
+
+# When the AOF fsync policy is set to always or everysec, and a background
+# saving process (a background save or AOF log background rewriting) is
+# performing a lot of I/O against the disk, in some Linux configurations
+# Redis may block too long on the fsync() call. Note that there is no fix for
+# this currently, as even performing fsync in a different thread will block
+# our synchronous write(2) call.
+#
+# In order to mitigate this problem it's possible to use the following option
+# that will prevent fsync() from being called in the main process while a
+# BGSAVE or BGREWRITEAOF is in progress.
+#
+# This means that while another child is saving, the durability of Redis is
+# the same as "appendfsync none". In practical terms, this means that it is
+# possible to lose up to 30 seconds of log in the worst scenario (with the
+# default Linux settings).
+#
+# If you have latency problems turn this to "yes". Otherwise leave it as
+# "no" that is the safest pick from the point of view of durability.
+
+no-appendfsync-on-rewrite no
+
+# Automatic rewrite of the append only file.
+# Redis is able to automatically rewrite the log file implicitly calling
+# BGREWRITEAOF when the AOF log size grows by the specified percentage.
+#
+# This is how it works: Redis remembers the size of the AOF file after the
+# latest rewrite (if no rewrite has happened since the restart, the size of
+# the AOF at startup is used).
+#
+# This base size is compared to the current size. If the current size is
+# bigger than the specified percentage, the rewrite is triggered. Also
+# you need to specify a minimal size for the AOF file to be rewritten, this
+# is useful to avoid rewriting the AOF file even if the percentage increase
+# is reached but it is still pretty small.
+#
+# Specify a percentage of zero in order to disable the automatic AOF
+# rewrite feature.
+
+auto-aof-rewrite-percentage 100
+auto-aof-rewrite-min-size 64mb
+
+################################ LUA SCRIPTING ###############################
+
+# Max execution time of a Lua script in milliseconds.
+#
+# If the maximum execution time is reached Redis will log that a script is
+# still in execution after the maximum allowed time and will start to
+# reply to queries with an error.
+#
+# When a long running script exceeds the maximum execution time only the
+# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
+# used to stop a script that has not yet called write commands. The second
+# is the only way to shut down the server in case a write command was
+# already issued by the script but the user doesn't want to wait for the
+# natural termination of the script.
+#
+# Set it to 0 or a negative value for unlimited execution without warnings.
+lua-time-limit 15000
+
+################################## SLOW LOG ###################################
+
+# The Redis Slow Log is a system to log queries that exceeded a specified
+# execution time. The execution time does not include the I/O operations
+# like talking with the client, sending the reply and so forth,
+# but just the time needed to actually execute the command (this is the only
+# stage of command execution where the thread is blocked and can not serve
+# other requests in the meantime).
+#
+# You can configure the slow log with two parameters: one tells Redis
+# what is the execution time, in microseconds, to exceed in order for the
+# command to get logged, and the other parameter is the length of the
+# slow log. When a new command is logged the oldest one is removed from the
+# queue of logged commands.
+
+# The following time is expressed in microseconds, so 1000000 is equivalent
+# to one second. Note that a negative number disables the slow log, while
+# a value of zero forces the logging of every command.
+slowlog-log-slower-than 10000
+
+# There is no limit to this length. Just be aware that it will consume memory.
+# You can reclaim memory used by the slow log with SLOWLOG RESET.
+slowlog-max-len 128
+
+############################# Event notification ##############################
+
+# Redis can notify Pub/Sub clients about events happening in the key space.
+# This feature is documented at http://redis.io/topics/keyspace-events
+#
+# For instance if keyspace events notification is enabled, and a client
+# performs a DEL operation on key "foo" stored in the Database 0, two
+# messages will be published via Pub/Sub:
+#
+# PUBLISH __keyspace@0__:foo del
+# PUBLISH __keyevent@0__:del foo
+#
+# It is possible to select the events that Redis will notify among a set
+# of classes. Every class is identified by a single character:
+#
+# K Keyspace events, published with __keyspace@<db>__ prefix.
+# E Keyevent events, published with __keyevent@<db>__ prefix.
+# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
+# $ String commands
+# l List commands
+# s Set commands
+# h Hash commands
+# z Sorted set commands
+# x Expired events (events generated every time a key expires)
+# e Evicted events (events generated when a key is evicted for maxmemory)
+# A Alias for g$lshzxe, so that the "AKE" string means all the events.
+#
+# The "notify-keyspace-events" takes as argument a string that is composed
+# by zero or multiple characters. The empty string means that notifications
+# are disabled at all.
+#
+# Example: to enable list and generic events, from the point of view of the
+# event name, use:
+#
+# notify-keyspace-events Elg
+#
+# Example 2: to get the stream of the expired keys subscribing to channel
+# name __keyevent@0__:expired use:
+#
+# notify-keyspace-events Ex
+#
+# By default all notifications are disabled because most users don't need
+# this feature and the feature has some overhead. Note that if you don't
+# specify at least one of K or E, no events will be delivered.
+notify-keyspace-events ""
+
+############################### ADVANCED CONFIG ###############################
+
+# Hashes are encoded using a memory efficient data structure when they have a
+# small number of entries, and the biggest entry does not exceed a given
+# threshold. These thresholds can be configured using the following directives.
+hash-max-ziplist-entries 512
+hash-max-ziplist-value 64
+
+# Similarly to hashes, small lists are also encoded in a special way in order
+# to save a lot of space. The special representation is only used when
+# you are under the following limits:
+list-max-ziplist-entries 512
+list-max-ziplist-value 64
+
+# Sets have a special encoding in just one case: when a set is composed
+# of just strings that happens to be integers in radix 10 in the range
+# of 64 bit signed integers.
+# The following configuration setting sets the limit in the size of the
+# set in order to use this special memory saving encoding.
+set-max-intset-entries 512
+
+# Similarly to hashes and lists, sorted sets are also specially encoded in
+# order to save a lot of space. This encoding is only used when the length and
+# elements of a sorted set are below the following limits:
+zset-max-ziplist-entries 128
+zset-max-ziplist-value 64
+
+# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
+# order to help rehashing the main Redis hash table (the one mapping top-level
+# keys to values). The hash table implementation Redis uses (see dict.c)
+# performs a lazy rehashing: the more operations you run against a hash table
+# that is rehashing, the more rehashing "steps" are performed, so if the
+# server is idle the rehashing is never complete and some more memory is used
+# by the hash table.
+#
+# The default is to use this millisecond 10 times every second in order to
+# actively rehash the main dictionaries, freeing memory when possible.
+#
+# If unsure:
+# use "activerehashing no" if you have hard latency requirements and it is
+# not a good thing in your environment that Redis can reply from time to time
+# to queries with 2 milliseconds delay.
+#
+# use "activerehashing yes" if you don't have such hard requirements but
+# want to free memory asap when possible.
+activerehashing yes
+
+# The client output buffer limits can be used to force disconnection of clients
+# that are not reading data from the server fast enough for some reason (a
+# common reason is that a Pub/Sub client can't consume messages as fast as the
+# publisher can produce them).
+#
+# The limit can be set differently for the three different classes of clients:
+#
+# normal -> normal clients
+# slave -> slave clients and MONITOR clients
+# pubsub -> clients subscribed to at least one pubsub channel or pattern
+#
+# The syntax of every client-output-buffer-limit directive is the following:
+#
+# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
+#
+# A client is immediately disconnected once the hard limit is reached, or if
+# the soft limit is reached and remains reached for the specified number of
+# seconds (continuously).
+# So for instance if the hard limit is 32 megabytes and the soft limit is
+# 16 megabytes / 10 seconds, the client will get disconnected immediately
+# if the size of the output buffers reaches 32 megabytes, but will also get
+# disconnected if the client reaches 16 megabytes and continuously stays over
+# the limit for 10 seconds.
+#
+# By default normal clients are not limited because they don't receive data
+# without asking (in a push way), but just after a request, so only
+# asynchronous clients may create a scenario where data is requested faster
+# than it can be read.
+#
+# Instead there is a default limit for pubsub and slave clients, since
+# subscribers and slaves receive data in a push fashion.
+#
+# Both the hard and the soft limit can be disabled by setting them to zero.
+client-output-buffer-limit normal 0 0 0
+client-output-buffer-limit slave 256mb 64mb 60
+client-output-buffer-limit pubsub 32mb 8mb 60
+
+# Redis calls an internal function to perform many background tasks, like
+# closing connections of timed-out clients, purging expired keys that are
+# never requested, and so forth.
+#
+# Not all tasks are performed with the same frequency, but Redis checks for
+# tasks to perform according to the specified "hz" value.
+#
+# By default "hz" is set to 10. Raising the value will use more CPU when
+# Redis is idle, but at the same time will make Redis more responsive when
+# there are many keys expiring at the same time, and timeouts may be
+# handled with more precision.
+#
+# The range is between 1 and 500, however a value over 100 is usually not
+# a good idea. Most users should use the default of 10 and raise this up to
+# 100 only in environments where very low latency is required.
+hz 10
+
+# When a child rewrites the AOF file, if the following option is enabled
+# the file will be fsync-ed every 32 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+aof-rewrite-incremental-fsync yes
diff --git a/opencontrail/files/4.0/contrail-alarm-gen.conf b/opencontrail/files/4.0/contrail-alarm-gen.conf
index 87ada20..b5d7cff 100644
--- a/opencontrail/files/4.0/contrail-alarm-gen.conf
+++ b/opencontrail/files/4.0/contrail-alarm-gen.conf
@@ -10,6 +10,10 @@
kafka_broker_list = {% for member in collector.database.members %}{{ member.host }}:9092 {% endfor %}
partitions=30
zk_list = {% for member in collector.database.members %}{{ member.host }}:2182 {% endfor %}
+{%- if collector.version >= 4.1 %}
+
+[CONFIGDB]
+{%- endif %}
#rabbitmq_server_list = xx.xx.xx.xx
{%- if collector.get('message_queue', {}).members is defined %}
rabbitmq_server_list={{ collector.message_queue.members | join(',', attribute='host') }}
@@ -21,11 +25,17 @@
rabbitmq_user={{ collector.get('message_queue', {}).get('user', 'guest') }}
rabbitmq_password={{ collector.get('message_queue', {}).get('password', 'guest') }}
rabbitmq_vhost={{ collector.get('message_queue', {}).get('vhost', '') }}
+{%- if collector.version >= 4.1 %}
+config_db_server_list={% for member in collector.get('config', {}).get('members', []) %}{{ member.host }}:9161 {% endfor %}
+#config_db_username=
+#config_db_password=
+{% else %}
[API_SERVER]
# List of api-servers in ip:port format separated by space
api_server_list={% for member in collector.config.members %}{{ member.host }}:9100 {% endfor %}
#api_server_use_ssl=False
+{%- endif %}
[REDIS]
redis_server_port=6379
diff --git a/opencontrail/files/4.0/contrail-collector.conf b/opencontrail/files/4.0/contrail-collector.conf
index a8c8136..f7beae2 100644
--- a/opencontrail/files/4.0/contrail-collector.conf
+++ b/opencontrail/files/4.0/contrail-collector.conf
@@ -82,10 +82,26 @@
# number of kafka partitions
# kafka_partitions=30
+{%- if collector.version >= 4.1 %}
+[CONFIGDB]
+#rabbitmq_server_list = xx.xx.xx.xx
+{%- if collector.get('message_queue', {}).members is defined %}
+rabbitmq_server_list={% for member in collector.message_queue.members -%}{{ member.host }}:{{ member.get('port', 5672) }} {% endfor %}
+{%- elif collector.get('message_queue', {}).host is defined %}
+rabbitmq_server_list={{ collector.message_queue.host }}:{{ collector.message_queue.get('port', 5672) }}
+{%- endif %}
+rabbitmq_user={{ collector.get('message_queue', {}).get('user', 'guest') }}
+rabbitmq_password={{ collector.get('message_queue', {}).get('password', 'guest') }}
+rabbitmq_vhost={{ collector.get('message_queue', {}).get('vhost', '') }}
+config_db_server_list={% for member in collector.get('config', {}).get('members', []) %}{{ member.host }}:9041 {% endfor %}
+#config_db_username=
+#config_db_password=
+{% else %}
[API_SERVER]
# List of api-servers in ip:port format separated by space
api_server_list={% for member in collector.config.members %}{{ member.host }}:9100 {% endfor %}
# api_server_use_ssl=False
+{%- endif %}
[DATABASE]
# disk usage percentage
diff --git a/opencontrail/files/4.0/contrail-control.conf b/opencontrail/files/4.0/contrail-control.conf
index 19bf68b..ed67242 100644
--- a/opencontrail/files/4.0/contrail-control.conf
+++ b/opencontrail/files/4.0/contrail-control.conf
@@ -27,13 +27,12 @@
log_level=SYS_NOTICE
log_local=1
# test_mode=0
-{%- if pillar.get('salt', {}).get('minion', {}).get('cert', {}).opencontrail_xmpp is defined %}
-xmpp_auth_enable=true
-xmpp_server_cert={{ pillar.salt.minion.cert.opencontrail_xmpp.get('cert_file', '/etc/contrail/ssl/certs/server.pem') }}
-xmpp_server_key={{ pillar.salt.minion.cert.opencontrail_xmpp.get('key_file', '/etc/contrail/ssl/private/server-privkey.pem') }}
-xmpp_ca_cert={{ pillar.salt.minion.cert.opencontrail_xmpp.get('ca_file', '/etc/contrail/ssl/certs/ca-cert.pem') }}
+{%- if control.xmpp.tls.enabled %}
+xmpp_auth_enable={{ control.xmpp.tls.auth.enabled }}
+xmpp_server_cert={{ control.xmpp.tls.cert_file }}
+xmpp_server_key={{ control.xmpp.tls.key_file }}
+xmpp_ca_cert={{ control.xmpp.tls.ca_cert_file }}
{%- endif %}
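+{#- Illustrative pillar sketch (hypothetical values) for the block above;
+    the cert paths mirror the old defaults that this change removes:
+    opencontrail:control:xmpp:tls:
+      enabled: true
+      auth: {enabled: true}
+      cert_file: /etc/contrail/ssl/certs/server.pem
+      key_file: /etc/contrail/ssl/private/server-privkey.pem
+      ca_cert_file: /etc/contrail/ssl/certs/ca-cert.pem -#}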
-
# Sandesh send rate limit can be used to throttle system logs transmitted per
# second. System logs are dropped if the sending rate is exceeded
# sandesh_send_rate_limit=
diff --git a/opencontrail/files/4.0/contrail-vrouter-agent.conf b/opencontrail/files/4.0/contrail-vrouter-agent.conf
index 6724afa..a0a742b 100644
--- a/opencontrail/files/4.0/contrail-vrouter-agent.conf
+++ b/opencontrail/files/4.0/contrail-vrouter-agent.conf
@@ -60,7 +60,7 @@
# Local log file name
log_file=/var/log/contrail/contrail-vrouter-agent.log
-# Log severity levels. Possible values are SYS_EMERG, SYS_ALERT, SYS_CRIT,
+# Log severity levels. Possible values are SYS_EMERG, SYS_ALERT, SYS_CRIT,
# SYS_ERR, SYS_WARN, SYS_NOTICE, SYS_INFO and SYS_DEBUG. Default is SYS_DEBUG
log_level=SYS_NOTICE
@@ -88,13 +88,12 @@
# sandesh_send_rate_limit=
# Enable/Disable SSL based XMPP Authentication
-{%- if pillar.get('salt', {}).get('minion', {}).get('cert', {}).opencontrail_xmpp is defined %}
-xmpp_auth_enable=true
-xmpp_server_cert={{ pillar.salt.minion.cert.opencontrail_xmpp.get('cert_file', '/etc/contrail/ssl/certs/server.pem') }}
-xmpp_server_key={{ pillar.salt.minion.cert.opencontrail_xmpp.get('key_file', '/etc/contrail/ssl/private/server-privkey.pem') }}
-xmpp_ca_cert={{ pillar.salt.minion.cert.opencontrail_xmpp.get('ca_file', '/etc/contrail/ssl/certs/ca-cert.pem') }}
+{%- if compute.xmpp.tls.enabled %}
+xmpp_auth_enable={{ compute.xmpp.tls.auth.enabled }}
+xmpp_server_cert={{ compute.xmpp.tls.cert_file }}
+xmpp_server_key={{ compute.xmpp.tls.key_file }}
+xmpp_ca_cert={{ compute.xmpp.tls.ca_cert_file }}
{%- endif %}
-
# Gateway mode : can be server/ vcpe (default is none)
# gateway_mode=
{%- if compute.gateway_mode is defined %}
@@ -169,6 +168,9 @@
#
# Maximum flows allowed per VM (given as % of maximum system flows)
# max_vm_flows=
+{%- if compute.max_vm_flows is defined %}
+max_vm_flows={{ compute.max_vm_flows }}
+{%- endif %}
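+# (Illustrative only: a hypothetical pillar value of
+#  opencontrail:compute:max_vm_flows: 75 would render max_vm_flows=75,
+#  i.e. each VM may use at most 75% of the system flow table.)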
# Maximum number of link-local flows allowed across all VMs
# max_system_linklocal_flows=4096
@@ -252,7 +254,7 @@
# (like metadata...)
#compute_node_address = 10.204.216.28
-# We can have multiple gateway sections with different indices in the
+# We can have multiple gateway sections with different indices in the
# following format
[GATEWAY-0]
# Name of the routing_instance for which the gateway is being configured
@@ -389,4 +391,3 @@
# Fallback time in seconds to find EOC in case config inactivity is not seen.
# config_fallback_time=
# config_inactivity_time=
-
diff --git a/opencontrail/files/4.0/contrail.env b/opencontrail/files/4.0/contrail.env
new file mode 100644
index 0000000..eed4e23
--- /dev/null
+++ b/opencontrail/files/4.0/contrail.env
@@ -0,0 +1,5 @@
+CONTRAIL_UID={{ salt['user.info']('contrail').get('uid', 0) }}
+CONTRAIL_GID={{ salt['user.info']('contrail').get('gid', 0) }}
+{%- if pillar.opencontrail.config is defined %}
+API_WORKERS={{ pillar.opencontrail.config.get('api', {}).get('workers_count', 0) }}
+{%- endif %}
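+{#- For illustration only (hypothetical values): with a 'contrail' user of
+    uid/gid 1000 and a pillar of opencontrail:config:api:workers_count: 4,
+    this template would render roughly as:
+      CONTRAIL_UID=1000
+      CONTRAIL_GID=1000
+      API_WORKERS=4 -#}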
diff --git a/opencontrail/files/4.0/ctrl-details b/opencontrail/files/4.0/ctrl-details
deleted file mode 100644
index 7dac73e..0000000
--- a/opencontrail/files/4.0/ctrl-details
+++ /dev/null
@@ -1,16 +0,0 @@
-{%- from "opencontrail/map.jinja" import common with context %}
-CONTROLLER_MGMT={{ common.identity.host }}
-SERVICE_TOKEN={{ common.identity.token }}
-ADMIN_TOKEN={{ common.identity.password }}
-CONTROLLER={{ common.identity.host }}
-QUANTUM={{ common.network.host }}
-QUANTUM_PORT={{ common.network.port }}
-COMPUTE=None
-AUTH_PROTOCOL=http
-QUANTUM_PROTOCOL=http
-AMQP_SERVER={{ common.identity.host }}
-INTERNAL_VIP={{ common.identity.host }}
-HYPERVISOR=libvirt
-NOVA_PASSWORD={{ common.identity.password }}
-NEUTRON_PASSWORD={{ common.identity.password }}
-SERVICE_TENANT_NAME=service
diff --git a/opencontrail/files/4.0/keystonerc b/opencontrail/files/4.0/keystonerc
deleted file mode 100644
index b665fac..0000000
--- a/opencontrail/files/4.0/keystonerc
+++ /dev/null
@@ -1,4 +0,0 @@
-{%- from "opencontrail/map.jinja" import common with context %}
-export OS_USERNAME=admin
-export SERVICE_TOKEN={{ common.identity.token }}
-export OS_SERVICE_ENDPOINT=http://{{ common.identity.host }}:{{ common.identity.port }}/v2.0
diff --git a/opencontrail/files/4.0/openstackrc b/opencontrail/files/4.0/openstackrc
deleted file mode 100644
index 7c8ef03..0000000
--- a/opencontrail/files/4.0/openstackrc
+++ /dev/null
@@ -1,6 +0,0 @@
-{%- from "opencontrail/map.jinja" import common with context %}
-export OS_USERNAME=admin
-export OS_PASSWORD={{ common.identity.password }}
-export OS_TENANT_NAME=admin
-export OS_AUTH_URL=http://{{ common.identity.host }}:5000/v2.0/
-export OS_NO_CACHE=1
diff --git a/opencontrail/files/4.0/redis_webui.conf b/opencontrail/files/4.0/redis_webui.conf
new file mode 100644
index 0000000..75ac1aa
--- /dev/null
+++ b/opencontrail/files/4.0/redis_webui.conf
@@ -0,0 +1,714 @@
+{%- from "opencontrail/map.jinja" import web with context %}
+# Redis configuration file example
+
+# Note on units: when memory size is needed, it is possible to specify
+# it in the usual form of 1k 5GB 4M and so forth:
+#
+# 1k => 1000 bytes
+# 1kb => 1024 bytes
+# 1m => 1000000 bytes
+# 1mb => 1024*1024 bytes
+# 1g => 1000000000 bytes
+# 1gb => 1024*1024*1024 bytes
+#
+# units are case insensitive so 1GB 1Gb 1gB are all the same.
+
+################################## INCLUDES ###################################
+
+# Include one or more other config files here. This is useful if you
+# have a standard template that goes to all Redis servers but also need
+# to customize a few per-server settings. Include files can include
+# other files, so use this wisely.
+#
+# Notice option "include" won't be rewritten by command "CONFIG REWRITE"
+# from admin or Redis Sentinel. Since Redis always uses the last processed
+# line as value of a configuration directive, you'd better put includes
+# at the beginning of this file to avoid overwriting config changes at runtime.
+#
+# If instead you are interested in using includes to override configuration
+# options, it is better to use include as the last line.
+#
+# include /path/to/local.conf
+# include /path/to/other.conf
+
+################################ GENERAL #####################################
+
+# By default Redis does not run as a daemon. Use 'yes' if you need it.
+# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
+daemonize yes
+
+# When running daemonized, Redis writes a pid file in /var/run/redis.pid by
+# default. You can specify a custom pid file location here.
+pidfile /var/run/redis/redis-server.pid
+
+# Accept connections on the specified port, default is 6379.
+# If port 0 is specified Redis will not listen on a TCP socket.
+port 6379
+
+# By default Redis listens for connections from all the network interfaces
+# available on the server. It is possible to listen to just one or multiple
+# interfaces using the "bind" configuration directive, followed by one or
+# more IP addresses.
+#
+# Examples:
+#
+# bind 192.168.1.100 10.0.0.1
+bind 127.0.0.1
+
+# Specify the path for the unix socket that will be used to listen for
+# incoming connections. There is no default, so Redis will not listen
+# on a unix socket when not specified.
+#
+# unixsocket /var/run/redis/redis.sock
+# unixsocketperm 755
+
+# Close the connection after a client is idle for N seconds (0 to disable)
+timeout 0
+
+# TCP keepalive.
+#
+# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
+# of communication. This is useful for two reasons:
+#
+# 1) Detect dead peers.
+# 2) Keep the connection alive from the point of view of network
+# equipment in the middle.
+#
+# On Linux, the specified value (in seconds) is the period used to send ACKs.
+# Note that to close the connection, twice that time is needed.
+# On other kernels the period depends on the kernel configuration.
+#
+# A reasonable value for this option is 60 seconds.
+tcp-keepalive 0
+
+# Specify the server verbosity level.
+# This can be one of:
+# debug (a lot of information, useful for development/testing)
+# verbose (lots of rarely useful info, but not a mess like the debug level)
+# notice (moderately verbose, what you want in production probably)
+# warning (only very important / critical messages are logged)
+loglevel notice
+
+# Specify the log file name. Also the empty string can be used to force
+# Redis to log on the standard output. Note that if you use standard
+# output for logging but daemonize, logs will be sent to /dev/null
+logfile /var/log/redis/redis-server.log
+
+# To enable logging to the system logger, just set 'syslog-enabled' to yes,
+# and optionally update the other syslog parameters to suit your needs.
+# syslog-enabled no
+
+# Specify the syslog identity.
+# syslog-ident redis
+
+# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
+# syslog-facility local0
+
+# Set the number of databases. The default database is DB 0, you can select
+# a different one on a per-connection basis using SELECT <dbid> where
+# dbid is a number between 0 and 'databases'-1
+databases 16
+
+################################ SNAPSHOTTING ################################
+#
+# Save the DB on disk:
+#
+# save <seconds> <changes>
+#
+# Will save the DB if both the given number of seconds and the given
+# number of write operations against the DB occurred.
+#
+# In the example below the behaviour will be to save:
+# after 900 sec (15 min) if at least 1 key changed
+# after 300 sec (5 min) if at least 10 keys changed
+# after 60 sec if at least 10000 keys changed
+#
+# Note: you can disable saving entirely by commenting out all the "save" lines.
+#
+# It is also possible to remove all the previously configured save
+# points by adding a save directive with a single empty string argument
+# like in the following example:
+#
+# save ""
+
+#save 900 1
+#save 300 10
+#save 60 10000
+
+# By default Redis will stop accepting writes if RDB snapshots are enabled
+# (at least one save point) and the latest background save failed.
+# This will make the user aware (in a hard way) that data is not persisting
+# on disk properly, otherwise chances are that no one will notice and some
+# disaster will happen.
+#
+# Once the background saving process starts working again, Redis will
+# automatically allow writes again.
+#
+# However if you have set up proper monitoring of the Redis server
+# and persistence, you may want to disable this feature so that Redis will
+# continue to work as usual even if there are problems with disk,
+# permissions, and so forth.
+stop-writes-on-bgsave-error yes
+
+# Compress string objects using LZF when dumping .rdb databases?
+# By default that's set to 'yes' as it's almost always a win.
+# If you want to save some CPU in the saving child set it to 'no' but
+# the dataset will likely be bigger if you have compressible values or keys.
+rdbcompression yes
+
+# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
+# This makes the format more resistant to corruption but there is a performance
+# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
+# for maximum performance.
+#
+# RDB files created with checksum disabled have a checksum of zero that will
+# tell the loading code to skip the check.
+rdbchecksum yes
+
+# The filename where to dump the DB
+#dbfilename dump.rdb
+
+# The working directory.
+#
+# The DB will be written inside this directory, with the filename specified
+# above using the 'dbfilename' configuration directive.
+#
+# The Append Only File will also be created inside this directory.
+#
+# Note that you must specify a directory here, not a file name.
+dir /var/lib/redis
+
+################################# REPLICATION #################################
+
+# Master-Slave replication. Use slaveof to make a Redis instance a copy of
+# another Redis server. Note that the configuration is local to the slave
+# so for example it is possible to configure the slave to save the DB with a
+# different interval, or to listen to another port, and so on.
+#
+# slaveof <masterip> <masterport>
+
+# If the master is password protected (using the "requirepass" configuration
+# directive below) it is possible to tell the slave to authenticate before
+# starting the replication synchronization process, otherwise the master will
+# refuse the slave request.
+#
+# masterauth <master-password>
+{%- if web.get('cache', {}).get('password') %}
+masterauth {{ web.cache.password }}
+{%- endif %}
+
+# When a slave loses its connection with the master, or when the replication
+# is still in progress, the slave can act in two different ways:
+#
+# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
+# still reply to client requests, possibly with out of date data, or the
+# data set may just be empty if this is the first synchronization.
+#
+# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
+# an error "SYNC with master in progress" to all the kind of commands
+# but to INFO and SLAVEOF.
+#
+slave-serve-stale-data yes
+
+# You can configure a slave instance to accept writes or not. Writing against
+# a slave instance may be useful to store some ephemeral data (because data
+# written on a slave will be easily deleted after resync with the master) but
+# may also cause problems if clients are writing to it because of a
+# misconfiguration.
+#
+# Since Redis 2.6 by default slaves are read-only.
+#
+# Note: read only slaves are not designed to be exposed to untrusted clients
+# on the internet. It's just a protection layer against misuse of the instance.
+# Still a read only slave exports by default all the administrative commands
+# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
+# security of read only slaves using 'rename-command' to shadow all the
+# administrative / dangerous commands.
+slave-read-only yes
+
+# Slaves send PINGs to the server at a predefined interval. It's possible to change
+# this interval with the repl_ping_slave_period option. The default value is 10
+# seconds.
+#
+# repl-ping-slave-period 10
+
+# The following option sets the replication timeout for:
+#
+# 1) Bulk transfer I/O during SYNC, from the point of view of slave.
+# 2) Master timeout from the point of view of slaves (data, pings).
+# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
+#
+# It is important to make sure that this value is greater than the value
+# specified for repl-ping-slave-period otherwise a timeout will be detected
+# every time there is low traffic between the master and the slave.
+#
+# repl-timeout 60
+
+# Disable TCP_NODELAY on the slave socket after SYNC?
+#
+# If you select "yes" Redis will use a smaller number of TCP packets and
+# less bandwidth to send data to slaves. But this can add a delay for
+# the data to appear on the slave side, up to 40 milliseconds with
+# Linux kernels using a default configuration.
+#
+# If you select "no" the delay for data to appear on the slave side will
+# be reduced but more bandwidth will be used for replication.
+#
+# By default we optimize for low latency, but in very high traffic conditions
+# or when the master and slaves are many hops away, turning this to "yes" may
+# be a good idea.
+repl-disable-tcp-nodelay no
+
+# Set the replication backlog size. The backlog is a buffer that accumulates
+# slave data when slaves are disconnected for some time, so that when a slave
+# wants to reconnect again, often a full resync is not needed, but a partial
+# resync is enough, just passing the portion of data the slave missed while
+# disconnected.
+#
+# The bigger the replication backlog, the longer the time the slave can be
+# disconnected and later be able to perform a partial resynchronization.
+#
+# The backlog is only allocated once there is at least one slave connected.
+#
+# repl-backlog-size 1mb
+
+# After a master has had no connected slaves for some time, the backlog
+# will be freed. The following option configures the amount of seconds that
+# need to elapse, starting from the time the last slave disconnected, for
+# the backlog buffer to be freed.
+#
+# A value of 0 means to never release the backlog.
+#
+# repl-backlog-ttl 3600
+
+# The slave priority is an integer number published by Redis in the INFO output.
+# It is used by Redis Sentinel in order to select a slave to promote into a
+# master if the master is no longer working correctly.
+#
+# A slave with a low priority number is considered better for promotion, so
+# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
+# pick the one with priority 10, that is the lowest.
+#
+# However a special priority of 0 marks the slave as not able to perform the
+# role of master, so a slave with priority of 0 will never be selected by
+# Redis Sentinel for promotion.
+#
+# By default the priority is 100.
+slave-priority 100
+
+# It is possible for a master to stop accepting writes if there are fewer than
+# N slaves connected, with a lag less than or equal to M seconds.
+#
+# The N slaves need to be in "online" state.
+#
+# The lag in seconds, that must be <= the specified value, is calculated from
+# the last ping received from the slave, that is usually sent every second.
+#
+# This option does not GUARANTEE that N replicas will accept the write, but
+# will limit the window of exposure for lost writes in case not enough slaves
+# are available, to the specified number of seconds.
+#
+# For example to require at least 3 slaves with a lag <= 10 seconds use:
+#
+# min-slaves-to-write 3
+# min-slaves-max-lag 10
+#
+# Setting one or the other to 0 disables the feature.
+#
+# By default min-slaves-to-write is set to 0 (feature disabled) and
+# min-slaves-max-lag is set to 10.
+
+################################## SECURITY ###################################
+
+# Require clients to issue AUTH <PASSWORD> before processing any other
+# commands. This might be useful in environments in which you do not trust
+# others with access to the host running redis-server.
+#
+# This should stay commented out for backward compatibility and because most
+# people do not need auth (e.g. they run their own servers).
+#
+# Warning: since Redis is pretty fast an outside user can try up to
+# 150k passwords per second against a good box. This means that you should
+# use a very strong password otherwise it will be very easy to break.
+#
+# requirepass foobared
+{%- if web.get('cache', {}).get('password') %}
+requirepass {{ web.cache.password }}
+{%- endif %}
+
+# Command renaming.
+#
+# It is possible to change the name of dangerous commands in a shared
+# environment. For instance the CONFIG command may be renamed into something
+# hard to guess so that it will still be available for internal-use tools
+# but not available for general clients.
+#
+# Example:
+#
+# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
+#
+# It is also possible to completely kill a command by renaming it into
+# an empty string:
+#
+# rename-command CONFIG ""
+#
+# Please note that changing the name of commands that are logged into the
+# AOF file or transmitted to slaves may cause problems.
+
+################################### LIMITS ####################################
+
+# Set the max number of connected clients at the same time. By default
+# this limit is set to 10000 clients, however if the Redis server is not
+# able to configure the process file limit to allow for the specified limit
+# the max number of allowed clients is set to the current file limit
+# minus 32 (as Redis reserves a few file descriptors for internal uses).
+#
+# Once the limit is reached Redis will close all the new connections sending
+# an error 'max number of clients reached'.
+#
+# maxclients 10000
+
+# Don't use more memory than the specified amount of bytes.
+# When the memory limit is reached Redis will try to remove keys
+# according to the eviction policy selected (see maxmemory-policy).
+#
+# If Redis can't remove keys according to the policy, or if the policy is
+# set to 'noeviction', Redis will start to reply with errors to commands
+# that would use more memory, like SET, LPUSH, and so on, and will continue
+# to reply to read-only commands like GET.
+#
+# This option is usually useful when using Redis as an LRU cache, or to set
+# a hard memory limit for an instance (using the 'noeviction' policy).
+#
+# WARNING: If you have slaves attached to an instance with maxmemory on,
+# the size of the output buffers needed to feed the slaves is subtracted
+# from the used memory count, so that network problems / resyncs will
+# not trigger a loop where keys are evicted, and in turn the output
+# buffer of slaves is full with DELs of keys evicted triggering the deletion
+# of more keys, and so forth until the database is completely emptied.
+#
+# In short... if you have slaves attached it is suggested that you set a lower
+# limit for maxmemory so that there is some free RAM on the system for slave
+# output buffers (but this is not needed if the policy is 'noeviction').
+#
+# maxmemory <bytes>
+
+# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
+# is reached. You can select among five behaviors:
+#
+# volatile-lru -> remove the key with an expire set using an LRU algorithm
+# allkeys-lru -> remove any key according to the LRU algorithm
+# volatile-random -> remove a random key with an expire set
+# allkeys-random -> remove a random key, any key
+# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
+# noeviction -> don't expire at all, just return an error on write operations
+#
+# Note: with any of the above policies, Redis will return an error on write
+# operations, when there are no suitable keys for eviction.
+#
+# As of this writing these commands are: set setnx setex append
+# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
+# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
+# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
+# getset mset msetnx exec sort
+#
+# The default is:
+#
+# maxmemory-policy volatile-lru
+
+# LRU and minimal TTL algorithms are not precise algorithms but approximated
+# algorithms (in order to save memory), so you can select as well the sample
+# size to check. For instance by default Redis will check three keys and
+# pick the one that was used least recently, you can change the sample size
+# using the following configuration directive.
+#
+# maxmemory-samples 3
+
+############################## APPEND ONLY MODE ###############################
+
+# By default Redis asynchronously dumps the dataset on disk. This mode is
+# good enough in many applications, but an issue with the Redis process or
+# a power outage may result in a few minutes of lost writes (depending on
+# the configured save points).
+#
+# The Append Only File is an alternative persistence mode that provides
+# much better durability. For instance using the default data fsync policy
+# (see later in the config file) Redis can lose just one second of writes in a
+# dramatic event like a server power outage, or a single write if something
+# wrong with the Redis process itself happens, but the operating system is
+# still running correctly.
+#
+# AOF and RDB persistence can be enabled at the same time without problems.
+# If the AOF is enabled on startup Redis will load the AOF, that is the file
+# with the better durability guarantees.
+#
+# Please check http://redis.io/topics/persistence for more information.
+
+appendonly no
+
+# The name of the append only file (default: "appendonly.aof")
+
+appendfilename "appendonly.aof"
+
+# The fsync() call tells the Operating System to actually write data on disk
+# instead of waiting for more data in the output buffer. Some OSes will
+# really flush data to disk, while others will just try to do it ASAP.
+#
+# Redis supports three different modes:
+#
+# no: don't fsync, just let the OS flush the data when it wants. Faster.
+# always: fsync after every write to the append only log. Slow, safest.
+# everysec: fsync only one time every second. Compromise.
+#
+# The default is "everysec", as that's usually the right compromise between
+# speed and data safety. It's up to you to understand if you can relax this to
+# "no" that will let the operating system flush the output buffer when
+# it wants, for better performance (but if you can live with the idea of
+# some data loss consider the default persistence mode that's snapshotting),
+# or on the contrary, use "always" that's very slow but a bit safer than
+# everysec.
+#
+# For more details please check the following article:
+# http://antirez.com/post/redis-persistence-demystified.html
+#
+# If unsure, use "everysec".
+
+# appendfsync always
+appendfsync everysec
+# appendfsync no
+
+# When the AOF fsync policy is set to always or everysec, and a background
+# saving process (a background save or AOF log background rewriting) is
+# performing a lot of I/O against the disk, in some Linux configurations
+# Redis may block too long on the fsync() call. Note that there is no fix for
+# this currently, as even performing fsync in a different thread will block
+# our synchronous write(2) call.
+#
+# In order to mitigate this problem it's possible to use the following option
+# that will prevent fsync() from being called in the main process while a
+# BGSAVE or BGREWRITEAOF is in progress.
+#
+# This means that while another child is saving, the durability of Redis is
+# the same as "appendfsync none". In practical terms, this means that it is
+# possible to lose up to 30 seconds of log in the worst scenario (with the
+# default Linux settings).
+#
+# If you have latency problems turn this to "yes". Otherwise leave it as
+# "no" that is the safest pick from the point of view of durability.
+
+no-appendfsync-on-rewrite no
+
+# Automatic rewrite of the append only file.
+# Redis is able to automatically rewrite the log file implicitly calling
+# BGREWRITEAOF when the AOF log size grows by the specified percentage.
+#
+# This is how it works: Redis remembers the size of the AOF file after the
+# latest rewrite (if no rewrite has happened since the restart, the size of
+# the AOF at startup is used).
+#
+# This base size is compared to the current size. If the current size is
+# bigger by more than the specified percentage, the rewrite is triggered. Also
+# you need to specify a minimal size for the AOF file to be rewritten, this
+# is useful to avoid rewriting the AOF file even if the percentage increase
+# is reached but it is still pretty small.
+#
+# Specify a percentage of zero in order to disable the automatic AOF
+# rewrite feature.
+
+auto-aof-rewrite-percentage 100
+auto-aof-rewrite-min-size 64mb
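+
+# Worked example (illustrative): if the AOF measured 100mb after the last
+# rewrite, the settings above trigger the next automatic BGREWRITEAOF once
+# the file grows past 200mb (100% growth), comfortably above the 64mb floor.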
+
+################################ LUA SCRIPTING ###############################
+
+# Max execution time of a Lua script in milliseconds.
+#
+# If the maximum execution time is reached Redis will log that a script is
+# still in execution after the maximum allowed time and will start to
+# reply to queries with an error.
+#
+# When a long running script exceeds the maximum execution time only the
+# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
+# used to stop a script that has not yet called write commands. The second
+# is the only way to shut down the server in case a write command was
+# already issued by the script but the user doesn't want to wait for the
+# natural termination of the script.
+#
+# Set it to 0 or a negative value for unlimited execution without warnings.
+lua-time-limit 15000
+
+################################## SLOW LOG ###################################
+
+# The Redis Slow Log is a system to log queries that exceeded a specified
+# execution time. The execution time does not include the I/O operations
+# like talking with the client, sending the reply and so forth,
+# but just the time needed to actually execute the command (this is the only
+# stage of command execution where the thread is blocked and can not serve
+# other requests in the meantime).
+#
+# You can configure the slow log with two parameters: one tells Redis
+# what is the execution time, in microseconds, to exceed in order for the
+# command to get logged, and the other parameter is the length of the
+# slow log. When a new command is logged the oldest one is removed from the
+# queue of logged commands.
+
+# The following time is expressed in microseconds, so 1000000 is equivalent
+# to one second. Note that a negative number disables the slow log, while
+# a value of zero forces the logging of every command.
+slowlog-log-slower-than 10000
+
+# There is no limit to this length. Just be aware that it will consume memory.
+# You can reclaim memory used by the slow log with SLOWLOG RESET.
+slowlog-max-len 128
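+
+# Illustrative usage (a sketch): "redis-cli slowlog get 10" lists the ten
+# most recent slow entries and "redis-cli slowlog reset" clears the log.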
+
+############################# Event notification ##############################
+
+# Redis can notify Pub/Sub clients about events happening in the key space.
+# This feature is documented at http://redis.io/topics/keyspace-events
+#
+# For instance if keyspace events notification is enabled, and a client
+# performs a DEL operation on key "foo" stored in the Database 0, two
+# messages will be published via Pub/Sub:
+#
+# PUBLISH __keyspace@0__:foo del
+# PUBLISH __keyevent@0__:del foo
+#
+# It is possible to select the events that Redis will notify among a set
+# of classes. Every class is identified by a single character:
+#
+# K Keyspace events, published with __keyspace@<db>__ prefix.
+# E Keyevent events, published with __keyevent@<db>__ prefix.
+# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
+# $ String commands
+# l List commands
+# s Set commands
+# h Hash commands
+# z Sorted set commands
+# x Expired events (events generated every time a key expires)
+# e Evicted events (events generated when a key is evicted for maxmemory)
+# A Alias for g$lshzxe, so that the "AKE" string means all the events.
+#
+# The "notify-keyspace-events" takes as argument a string that is composed
+# by zero or multiple characters. The empty string means that notifications
+# are disabled at all.
+#
+# Example: to enable list and generic events, from the point of view of the
+# event name, use:
+#
+# notify-keyspace-events Elg
+#
+# Example 2: to get the stream of the expired keys subscribing to channel
+# name __keyevent@0__:expired use:
+#
+# notify-keyspace-events Ex
+#
+# By default all notifications are disabled because most users don't need
+# this feature and the feature has some overhead. Note that if you don't
+# specify at least one of K or E, no events will be delivered.
+notify-keyspace-events ""
+
+############################### ADVANCED CONFIG ###############################
+
+# Hashes are encoded using a memory efficient data structure when they have a
+# small number of entries, and the biggest entry does not exceed a given
+# threshold. These thresholds can be configured using the following directives.
+hash-max-ziplist-entries 512
+hash-max-ziplist-value 64
+
+# Similarly to hashes, small lists are also encoded in a special way in order
+# to save a lot of space. The special representation is only used when
+# you are under the following limits:
+list-max-ziplist-entries 512
+list-max-ziplist-value 64
+
+# Sets have a special encoding in just one case: when a set is composed
+# of just strings that happen to be integers in radix 10 in the range
+# of 64 bit signed integers.
+# The following configuration setting sets the limit on the size of the
+# set in order to use this special memory saving encoding.
+set-max-intset-entries 512
+
+# Similarly to hashes and lists, sorted sets are also specially encoded in
+# order to save a lot of space. This encoding is only used when the length and
+# elements of a sorted set are below the following limits:
+zset-max-ziplist-entries 128
+zset-max-ziplist-value 64
+
+# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
+# order to help rehashing the main Redis hash table (the one mapping top-level
+# keys to values). The hash table implementation Redis uses (see dict.c)
+# performs a lazy rehashing: the more operations you run against a hash table
+# that is rehashing, the more rehashing "steps" are performed, so if the
+# server is idle the rehashing is never complete and some more memory is used
+# by the hash table.
+#
+# The default is to use this millisecond 10 times every second in order to
+# actively rehash the main dictionaries, freeing memory when possible.
+#
+# If unsure:
+# use "activerehashing no" if you have hard latency requirements and it is
+# not a good thing in your environment that Redis can reply from time to time
+# to queries with a 2 millisecond delay.
+#
+# use "activerehashing yes" if you don't have such hard requirements but
+# want to free memory asap when possible.
+activerehashing yes
+
+# The client output buffer limits can be used to force disconnection of clients
+# that are not reading data from the server fast enough for some reason (a
+# common reason is that a Pub/Sub client can't consume messages as fast as the
+# publisher can produce them).
+#
+# The limit can be set differently for the three different classes of clients:
+#
+# normal -> normal clients
+# slave -> slave clients and MONITOR clients
+# pubsub -> clients subscribed to at least one pubsub channel or pattern
+#
+# The syntax of every client-output-buffer-limit directive is the following:
+#
+# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
+#
+# A client is immediately disconnected once the hard limit is reached, or if
+# the soft limit is reached and remains reached for the specified number of
+# seconds (continuously).
+# So for instance if the hard limit is 32 megabytes and the soft limit is
+# 16 megabytes / 10 seconds, the client will get disconnected immediately
+# if the size of the output buffers reaches 32 megabytes, but will also get
+# disconnected if the client reaches 16 megabytes and continuously stays over
+# the limit for 10 seconds.
+#
+# By default normal clients are not limited because they don't receive data
+# without asking (in a push way), but just after a request, so only
+# asynchronous clients may create a scenario where data is requested faster
+# than it can be read.
+#
+# Instead there is a default limit for pubsub and slave clients, since
+# subscribers and slaves receive data in a push fashion.
+#
+# Both the hard and the soft limit can be disabled by setting them to zero.
+client-output-buffer-limit normal 0 0 0
+client-output-buffer-limit slave 256mb 64mb 60
+client-output-buffer-limit pubsub 32mb 8mb 60
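+
+# Illustrative variation (not enabled here): slow pub/sub consumers could be
+# given more headroom by relaxing the class limit, e.g.:
+#
+# client-output-buffer-limit pubsub 64mb 16mb 120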
+
+# Redis calls an internal function to perform many background tasks, like
+# closing connections of timed-out clients, purging expired keys that are
+# never requested, and so forth.
+#
+# Not all tasks are performed with the same frequency, but Redis checks for
+# tasks to perform according to the specified "hz" value.
+#
+# By default "hz" is set to 10. Raising the value will use more CPU when
+# Redis is idle, but at the same time will make Redis more responsive when
+# there are many keys expiring at the same time, and timeouts may be
+# handled with more precision.
+#
+# The range is between 1 and 500, however a value over 100 is usually not
+# a good idea. Most users should use the default of 10 and raise this up to
+# 100 only in environments where very low latency is required.
+hz 10
+
+# When a child rewrites the AOF file, if the following option is enabled
+# the file will be fsync-ed every 32 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+aof-rewrite-incremental-fsync yes
diff --git a/opencontrail/files/4.1 b/opencontrail/files/4.1
new file mode 120000
index 0000000..0c16b90
--- /dev/null
+++ b/opencontrail/files/4.1
@@ -0,0 +1 @@
+4.0/
\ No newline at end of file
diff --git a/opencontrail/files/3.0/database/cassandra-env.sh b/opencontrail/files/cassandra/2.1/cassandra-env.sh
similarity index 100%
rename from opencontrail/files/3.0/database/cassandra-env.sh
rename to opencontrail/files/cassandra/2.1/cassandra-env.sh
diff --git a/opencontrail/files/3.0/cassandra.yaml b/opencontrail/files/cassandra/2.1/cassandra.yaml
similarity index 100%
rename from opencontrail/files/3.0/cassandra.yaml
rename to opencontrail/files/cassandra/2.1/cassandra.yaml
diff --git a/opencontrail/files/4.0/database/cassandra-env-analytics.sh b/opencontrail/files/cassandra/2.2/cassandra-env-analytics.sh
similarity index 100%
rename from opencontrail/files/4.0/database/cassandra-env-analytics.sh
rename to opencontrail/files/cassandra/2.2/cassandra-env-analytics.sh
diff --git a/opencontrail/files/4.0/database/cassandra-env.sh b/opencontrail/files/cassandra/2.2/cassandra-env.sh
similarity index 100%
rename from opencontrail/files/4.0/database/cassandra-env.sh
rename to opencontrail/files/cassandra/2.2/cassandra-env.sh
diff --git a/opencontrail/files/4.0/cassandra.yaml b/opencontrail/files/cassandra/2.2/cassandra.yaml
similarity index 100%
rename from opencontrail/files/4.0/cassandra.yaml
rename to opencontrail/files/cassandra/2.2/cassandra.yaml
diff --git a/opencontrail/files/4.0/cassandra_analytics.yaml b/opencontrail/files/cassandra/2.2/cassandra_analytics.yaml
similarity index 100%
rename from opencontrail/files/4.0/cassandra_analytics.yaml
rename to opencontrail/files/cassandra/2.2/cassandra_analytics.yaml
diff --git a/opencontrail/files/cassandra/3.10/cassandra-env-analytics.sh b/opencontrail/files/cassandra/3.10/cassandra-env-analytics.sh
new file mode 100644
index 0000000..00acb2d
--- /dev/null
+++ b/opencontrail/files/cassandra/3.10/cassandra-env-analytics.sh
@@ -0,0 +1,319 @@
+{%- from "opencontrail/map.jinja" import database with context %}# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+calculate_heap_sizes()
+{
+ case "`uname`" in
+ Linux)
+ system_memory_in_mb=`free -m | awk '/:/ {print $2;exit}'`
+ system_cpu_cores=`egrep -c 'processor([[:space:]]+):.*' /proc/cpuinfo`
+ ;;
+ FreeBSD)
+ system_memory_in_bytes=`sysctl hw.physmem | awk '{print $2}'`
+ system_memory_in_mb=`expr $system_memory_in_bytes / 1024 / 1024`
+ system_cpu_cores=`sysctl hw.ncpu | awk '{print $2}'`
+ ;;
+ SunOS)
+ system_memory_in_mb=`prtconf | awk '/Memory size:/ {print $3}'`
+ system_cpu_cores=`psrinfo | wc -l`
+ ;;
+ Darwin)
+ system_memory_in_bytes=`sysctl hw.memsize | awk '{print $2}'`
+ system_memory_in_mb=`expr $system_memory_in_bytes / 1024 / 1024`
+ system_cpu_cores=`sysctl hw.ncpu | awk '{print $2}'`
+ ;;
+ *)
+ # assume reasonable defaults for e.g. a modern desktop or
+ # cheap server
+ system_memory_in_mb="2048"
+ system_cpu_cores="2"
+ ;;
+ esac
+
+ # some systems like the raspberry pi don't report cores, use at least 1
+ if [ "$system_cpu_cores" -lt "1" ]
+ then
+ system_cpu_cores="1"
+ fi
+
+ # set max heap size based on the following
+ # max(min(1/2 ram, 1024MB), min(1/4 ram, 8GB))
+ # calculate 1/2 ram and cap to 1024MB
+ # calculate 1/4 ram and cap to 8192MB
+ # pick the max
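+ # Worked example (illustrative): on a 16384MB box, 1/2 ram = 8192MB is
+ # capped to 1024MB while 1/4 ram = 4096MB stays under the 8192MB cap, so
+ # MAX_HEAP_SIZE ends up as max(1024, 4096) = 4096M.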
+ half_system_memory_in_mb=`expr $system_memory_in_mb / 2`
+ quarter_system_memory_in_mb=`expr $half_system_memory_in_mb / 2`
+ if [ "$half_system_memory_in_mb" -gt "1024" ]
+ then
+ half_system_memory_in_mb="1024"
+ fi
+ if [ "$quarter_system_memory_in_mb" -gt "8192" ]
+ then
+ quarter_system_memory_in_mb="8192"
+ fi
+ if [ "$half_system_memory_in_mb" -gt "$quarter_system_memory_in_mb" ]
+ then
+ max_heap_size_in_mb="$half_system_memory_in_mb"
+ else
+ max_heap_size_in_mb="$quarter_system_memory_in_mb"
+ fi
+ MAX_HEAP_SIZE="${max_heap_size_in_mb}M"
+
+ # Young gen: min(max_sensible_per_modern_cpu_core * num_cores, 1/4 * heap size)
+ max_sensible_yg_per_core_in_mb="100"
+ max_sensible_yg_in_mb=`expr $max_sensible_yg_per_core_in_mb "*" $system_cpu_cores`
+
+ desired_yg_in_mb=`expr $max_heap_size_in_mb / 4`
+
+ if [ "$desired_yg_in_mb" -gt "$max_sensible_yg_in_mb" ]
+ then
+ HEAP_NEWSIZE="${max_sensible_yg_in_mb}M"
+ else
+ HEAP_NEWSIZE="${desired_yg_in_mb}M"
+ fi
+}
+
+# Determine the sort of JVM we'll be running on.
+java_ver_output=`"${JAVA:-java}" -version 2>&1`
+jvmver=`echo "$java_ver_output" | grep '[openjdk|java] version' | awk -F'"' 'NR==1 {print $2}' | cut -d\- -f1`
+JVM_VERSION=${jvmver%_*}
+JVM_PATCH_VERSION=${jvmver#*_}
+
+if [ "$JVM_VERSION" \< "1.8" ] ; then
+ echo "Cassandra 3.0 and later require Java 8u40 or later."
+ exit 1;
+fi
+
+if [ "$JVM_VERSION" \< "1.8" ] && [ "$JVM_PATCH_VERSION" -lt 40 ] ; then
+ echo "Cassandra 3.0 and later require Java 8u40 or later."
+ exit 1;
+fi
+
+jvm=`echo "$java_ver_output" | grep -A 1 'java version' | awk 'NR==2 {print $1}'`
+case "$jvm" in
+ OpenJDK)
+ JVM_VENDOR=OpenJDK
+ # this will be "64-Bit" or "32-Bit"
+ JVM_ARCH=`echo "$java_ver_output" | awk 'NR==3 {print $2}'`
+ ;;
+ "Java(TM)")
+ JVM_VENDOR=Oracle
+ # this will be "64-Bit" or "32-Bit"
+ JVM_ARCH=`echo "$java_ver_output" | awk 'NR==3 {print $3}'`
+ ;;
+ *)
+ # Help fill in other JVM values
+ JVM_VENDOR=other
+ JVM_ARCH=unknown
+ ;;
+esac
+
+{% set cassandra_gc_logging = database.get('cassandra_gc_logging', 'rotation') %}
+{% if cassandra_gc_logging %}
+JVM_OPTS="$JVM_OPTS -XX:+PrintGCDetails"
+JVM_OPTS="$JVM_OPTS -XX:+PrintGCDateStamps"
+JVM_OPTS="$JVM_OPTS -XX:+PrintHeapAtGC"
+JVM_OPTS="$JVM_OPTS -XX:+PrintTenuringDistribution"
+JVM_OPTS="$JVM_OPTS -XX:+PrintGCApplicationStoppedTime"
+JVM_OPTS="$JVM_OPTS -XX:+PrintPromotionFailure"
+JVM_OPTS="$JVM_OPTS -XX:PrintFLSStatistics=1"
+{% if cassandra_gc_logging == 'legacy' %}
+JVM_OPTS="$JVM_OPTS -Xloggc:/var/log/cassandra/gc-`date +%s`.log"
+{% elif cassandra_gc_logging == 'rotation' %}
+#GC log path has to be defined here because it needs to access CASSANDRA_HOME
+JVM_OPTS="$JVM_OPTS -Xloggc:/var/log/cassandra/gc.log"
+JVM_OPTS="$JVM_OPTS -XX:+UseGCLogFileRotation"
+JVM_OPTS="$JVM_OPTS -XX:NumberOfGCLogFiles=10"
+JVM_OPTS="$JVM_OPTS -XX:GCLogFileSize=10M"
+{% endif %}
+{% endif %}
+
+# Here we create the arguments that will get passed to the jvm when
+# starting cassandra.
+
+# Read user-defined JVM options from jvm.options file
+JVM_OPTS_FILE=$CASSANDRA_CONF/jvm.options
+for opt in `grep "^-" $JVM_OPTS_FILE`
+do
+ JVM_OPTS="$JVM_OPTS $opt"
+done
+
+# Check what parameters were defined on jvm.options file to avoid conflicts
+echo $JVM_OPTS | grep -q Xmn
+DEFINED_XMN=$?
+echo $JVM_OPTS | grep -q Xmx
+DEFINED_XMX=$?
+echo $JVM_OPTS | grep -q Xms
+DEFINED_XMS=$?
+echo $JVM_OPTS | grep -q UseConcMarkSweepGC
+USING_CMS=$?
+echo $JVM_OPTS | grep -q UseG1GC
+USING_G1=$?
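+# (Note: grep -q exits 0 when the pattern is found, so a value of 0 in the
+# DEFINED_*/USING_* variables above means the option IS present in
+# jvm.options; the checks below rely on that inverted-looking convention.)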
+
+# Override these to set the amount of memory to allocate to the JVM at
+# start-up. For production use you may wish to adjust this for your
+# environment. MAX_HEAP_SIZE is the total amount of memory dedicated
+# to the Java heap. HEAP_NEWSIZE refers to the size of the young
+# generation. Both MAX_HEAP_SIZE and HEAP_NEWSIZE should be either set
+# or not (if you set one, set the other).
+#
+# The main trade-off for the young generation is that the larger it
+# is, the longer GC pause times will be. The shorter it is, the more
+# expensive GC will be (usually).
+#
+# The example HEAP_NEWSIZE assumes a modern 8-core+ machine for decent pause
+# times. If in doubt, and if you do not particularly want to tweak, go with
+# 100 MB per physical CPU core.
+
+#MAX_HEAP_SIZE="4G"
+#HEAP_NEWSIZE="800M"
+
+{%- if database.max_heap_size is defined %}
+MAX_HEAP_SIZE="{{ database.max_heap_size }}"
+{%- endif %}
+{%- if database.heap_newsize is defined %}
+HEAP_NEWSIZE="{{ database.heap_newsize }}"
+{%- endif %}
+
+# Set this to control the amount of arenas per-thread in glibc
+#export MALLOC_ARENA_MAX=4
+
+# only calculate the size if it's not set manually
+if [ "x$MAX_HEAP_SIZE" = "x" ] && [ "x$HEAP_NEWSIZE" = "x" -o $USING_G1 -eq 0 ]; then
+ calculate_heap_sizes
+elif [ "x$MAX_HEAP_SIZE" = "x" ] || [ "x$HEAP_NEWSIZE" = "x" -a $USING_G1 -ne 0 ]; then
+ echo "please set or unset MAX_HEAP_SIZE and HEAP_NEWSIZE in pairs when using CMS GC (see cassandra-env.sh)"
+ exit 1
+fi
+
+if [ "x$MALLOC_ARENA_MAX" = "x" ] ; then
+ export MALLOC_ARENA_MAX=4
+fi
+
+# We only set -Xms and -Xmx if they were not defined on jvm.options file
+# If defined, both Xmx and Xms should be defined together.
+if [ $DEFINED_XMX -ne 0 ] && [ $DEFINED_XMS -ne 0 ]; then
+ JVM_OPTS="$JVM_OPTS -Xms${MAX_HEAP_SIZE}"
+ JVM_OPTS="$JVM_OPTS -Xmx${MAX_HEAP_SIZE}"
+elif [ $DEFINED_XMX -ne 0 ] || [ $DEFINED_XMS -ne 0 ]; then
+ echo "Please set or unset -Xmx and -Xms flags in pairs on jvm.options file."
+ exit 1
+fi
+
+# We only set -Xmn flag if it was not defined in jvm.options file
+# and if the CMS GC is being used
+# If defined, both Xmn and Xmx should be defined together.
+if [ $DEFINED_XMN -eq 0 ] && [ $DEFINED_XMX -ne 0 ]; then
+ echo "Please set or unset -Xmx and -Xmn flags in pairs on jvm.options file."
+ exit 1
+elif [ $DEFINED_XMN -ne 0 ] && [ $USING_CMS -eq 0 ]; then
+ JVM_OPTS="$JVM_OPTS -Xmn${HEAP_NEWSIZE}"
+fi
+
+if [ "$JVM_ARCH" = "64-Bit" ] && [ $USING_CMS -eq 0 ]; then
+ JVM_OPTS="$JVM_OPTS -XX:+UseCondCardMark"
+fi
+
+# provides hints to the JIT compiler
+JVM_OPTS="$JVM_OPTS -XX:CompileCommandFile=$CASSANDRA_CONF/hotspot_compiler"
+
+# add the jamm javaagent
+JVM_OPTS="$JVM_OPTS -javaagent:$CASSANDRA_HOME/lib/jamm-0.3.0.jar"
+
+# set jvm HeapDumpPath with CASSANDRA_HEAPDUMP_DIR
+if [ "x$CASSANDRA_HEAPDUMP_DIR" != "x" ]; then
+ JVM_OPTS="$JVM_OPTS -XX:HeapDumpPath=$CASSANDRA_HEAPDUMP_DIR/cassandra-`date +%s`-pid$$.hprof"
+fi
+
+# jmx: metrics and administration interface
+#
+# add this if you're having trouble connecting:
+# JVM_OPTS="$JVM_OPTS -Djava.rmi.server.hostname=<public name>"
+#
+# see
+# https://blogs.oracle.com/jmxetc/entry/troubleshooting_connection_problems_in_jconsole
+# for more on configuring JMX through firewalls, etc. (Short version:
+# get it working with no firewall first.)
+#
+# Cassandra ships with JMX accessible *only* from localhost.
+# To enable remote JMX connections, uncomment lines below
+# with authentication and/or ssl enabled. See https://wiki.apache.org/cassandra/JmxSecurity
+#
+if [ "x$LOCAL_JMX" = "x" ]; then
+ LOCAL_JMX=yes
+fi
+
+# Specifies the default port over which Cassandra will be available for
+# JMX connections.
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+JMX_PORT="7199"
+
+if [ "$LOCAL_JMX" = "yes" ]; then
+ JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.local.port=$JMX_PORT"
+ JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.authenticate=false"
+else
+ JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.remote.port=$JMX_PORT"
+ # if ssl is enabled the same port cannot be used for both jmx and rmi so either
+ # pick another value for this property or comment out to use a random port (though see CASSANDRA-7087 for origins)
+ JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT"
+
+ # turn on JMX authentication. See below for further options
+ JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.authenticate=true"
+
+ # jmx ssl options
+ #JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl=true"
+ #JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl.need.client.auth=true"
+ #JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl.enabled.protocols=<enabled-protocols>"
+ #JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl.enabled.cipher.suites=<enabled-cipher-suites>"
+ #JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.keyStore=/path/to/keystore"
+ #JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.keyStorePassword=<keystore-password>"
+ #JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.trustStore=/path/to/truststore"
+ #JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.trustStorePassword=<truststore-password>"
+fi
+
+# jmx authentication and authorization options. By default, auth is only
+# activated for remote connections but it can also be enabled for local-only JMX
+## Basic file based authn & authz
+JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password"
+#JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access"
+## Custom auth settings which can be used as alternatives to JMX's out of the box auth utilities.
+## JAAS login modules can be used for authentication by uncommenting these two properties.
+## Cassandra ships with a LoginModule implementation - org.apache.cassandra.auth.CassandraLoginModule -
+## which delegates to the IAuthenticator configured in cassandra.yaml. See the sample JAAS configuration
+## file cassandra-jaas.config
+#JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.remote.login.config=CassandraLogin"
+#JVM_OPTS="$JVM_OPTS -Djava.security.auth.login.config=$CASSANDRA_HOME/conf/cassandra-jaas.config"
+
+## Cassandra also ships with a helper for delegating JMX authz calls to the configured IAuthorizer,
+## uncomment this to use it. Requires one of the two authentication options to be enabled
+#JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.authorizer=org.apache.cassandra.auth.jmx.AuthorizationProxy"
+
+# To use mx4j, an HTML interface for JMX, add mx4j-tools.jar to the lib/
+# directory.
+# See http://wiki.apache.org/cassandra/Operations#Monitoring_with_MX4J
+# By default mx4j listens on 0.0.0.0:8081. Uncomment the following lines
+# to control its listen address and port.
+#MX4J_ADDRESS="-Dmx4jaddress=127.0.0.1"
+#MX4J_PORT="-Dmx4jport=8081"
+
+# Cassandra uses SIGAR to capture OS metrics (CASSANDRA-7838).
+# For SIGAR we have to set the java.library.path
+# to the location of the native libraries.
+JVM_OPTS="$JVM_OPTS -Djava.library.path=$CASSANDRA_HOME/lib/sigar-bin"
+
+JVM_OPTS="$JVM_OPTS $MX4J_ADDRESS"
+JVM_OPTS="$JVM_OPTS $MX4J_PORT"
+JVM_OPTS="$JVM_OPTS $JVM_EXTRA_OPTS"
diff --git a/opencontrail/files/cassandra/3.10/cassandra-env.sh b/opencontrail/files/cassandra/3.10/cassandra-env.sh
new file mode 100644
index 0000000..ba547f2
--- /dev/null
+++ b/opencontrail/files/cassandra/3.10/cassandra-env.sh
@@ -0,0 +1,324 @@
+{%- from "opencontrail/map.jinja" import database with context %}# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+calculate_heap_sizes()
+{
+ case "`uname`" in
+ Linux)
+ system_memory_in_mb=`free -m | awk '/:/ {print $2;exit}'`
+ system_cpu_cores=`egrep -c 'processor([[:space:]]+):.*' /proc/cpuinfo`
+ ;;
+ FreeBSD)
+ system_memory_in_bytes=`sysctl hw.physmem | awk '{print $2}'`
+ system_memory_in_mb=`expr $system_memory_in_bytes / 1024 / 1024`
+ system_cpu_cores=`sysctl hw.ncpu | awk '{print $2}'`
+ ;;
+ SunOS)
+ system_memory_in_mb=`prtconf | awk '/Memory size:/ {print $3}'`
+ system_cpu_cores=`psrinfo | wc -l`
+ ;;
+ Darwin)
+ system_memory_in_bytes=`sysctl hw.memsize | awk '{print $2}'`
+ system_memory_in_mb=`expr $system_memory_in_bytes / 1024 / 1024`
+ system_cpu_cores=`sysctl hw.ncpu | awk '{print $2}'`
+ ;;
+ *)
+ # assume reasonable defaults for e.g. a modern desktop or
+ # cheap server
+ system_memory_in_mb="2048"
+ system_cpu_cores="2"
+ ;;
+ esac
+
+ # some systems like the raspberry pi don't report cores, use at least 1
+ if [ "$system_cpu_cores" -lt "1" ]
+ then
+ system_cpu_cores="1"
+ fi
+
+ # set max heap size based on the following
+ # max(min(1/2 ram, 1024MB), min(1/4 ram, 8GB))
+ # calculate 1/2 ram and cap to 1024MB
+ # calculate 1/4 ram and cap to 8192MB
+ # pick the max
+ half_system_memory_in_mb=`expr $system_memory_in_mb / 2`
+ quarter_system_memory_in_mb=`expr $half_system_memory_in_mb / 2`
+ if [ "$half_system_memory_in_mb" -gt "1024" ]
+ then
+ half_system_memory_in_mb="1024"
+ fi
+ if [ "$quarter_system_memory_in_mb" -gt "8192" ]
+ then
+ quarter_system_memory_in_mb="8192"
+ fi
+ if [ "$half_system_memory_in_mb" -gt "$quarter_system_memory_in_mb" ]
+ then
+ max_heap_size_in_mb="$half_system_memory_in_mb"
+ else
+ max_heap_size_in_mb="$quarter_system_memory_in_mb"
+ fi
+ MAX_HEAP_SIZE="${max_heap_size_in_mb}M"
+
+ # Young gen: min(max_sensible_per_modern_cpu_core * num_cores, 1/4 * heap size)
+ max_sensible_yg_per_core_in_mb="100"
+ max_sensible_yg_in_mb=`expr $max_sensible_yg_per_core_in_mb "*" $system_cpu_cores`
+
+ desired_yg_in_mb=`expr $max_heap_size_in_mb / 4`
+
+ if [ "$desired_yg_in_mb" -gt "$max_sensible_yg_in_mb" ]
+ then
+ HEAP_NEWSIZE="${max_sensible_yg_in_mb}M"
+ else
+ HEAP_NEWSIZE="${desired_yg_in_mb}M"
+ fi
+}
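+
+# Worked example of the sizing above (illustrative): a host with
+# 16384 MB RAM and 8 cores gives half = 8192 MB (capped to 1024 MB) and
+# quarter = 4096 MB, so MAX_HEAP_SIZE = max(1024, 4096) = 4096M and
+# HEAP_NEWSIZE = min(100 MB * 8 cores, 4096 / 4 MB) = 800M.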
+
+# Determine the sort of JVM we'll be running on.
+java_ver_output=`"${JAVA:-java}" -version 2>&1`
+jvmver=`echo "$java_ver_output" | grep -E '(openjdk|java) version' | awk -F'"' 'NR==1 {print $2}' | cut -d\- -f1`
+JVM_VERSION=${jvmver%_*}
+JVM_PATCH_VERSION=${jvmver#*_}
+
+if [ "$JVM_VERSION" \< "1.8" ] ; then
+ echo "Cassandra 3.0 and later require Java 8u40 or later."
+ exit 1;
+fi
+
+if [ "$JVM_VERSION" \< "1.8" ] && [ "$JVM_PATCH_VERSION" -lt 40 ] ; then
+ echo "Cassandra 3.0 and later require Java 8u40 or later."
+ exit 1;
+fi
+
+jvm=`echo "$java_ver_output" | grep -A 1 'java version' | awk 'NR==2 {print $1}'`
+case "$jvm" in
+ OpenJDK)
+ JVM_VENDOR=OpenJDK
+ # this will be "64-Bit" or "32-Bit"
+ JVM_ARCH=`echo "$java_ver_output" | awk 'NR==3 {print $2}'`
+ ;;
+ "Java(TM)")
+ JVM_VENDOR=Oracle
+ # this will be "64-Bit" or "32-Bit"
+ JVM_ARCH=`echo "$java_ver_output" | awk 'NR==3 {print $3}'`
+ ;;
+ *)
+ # Help fill in other JVM values
+ JVM_VENDOR=other
+ JVM_ARCH=unknown
+ ;;
+esac
+
+{% set cassandra_gc_logging = database.get('cassandra_gc_logging', 'rotation') %}
+{% if cassandra_gc_logging %}
+JVM_OPTS="$JVM_OPTS -XX:+PrintGCDetails"
+JVM_OPTS="$JVM_OPTS -XX:+PrintGCDateStamps"
+JVM_OPTS="$JVM_OPTS -XX:+PrintHeapAtGC"
+JVM_OPTS="$JVM_OPTS -XX:+PrintTenuringDistribution"
+JVM_OPTS="$JVM_OPTS -XX:+PrintGCApplicationStoppedTime"
+JVM_OPTS="$JVM_OPTS -XX:+PrintPromotionFailure"
+JVM_OPTS="$JVM_OPTS -XX:PrintFLSStatistics=1"
+{% if cassandra_gc_logging == 'legacy' %}
+JVM_OPTS="$JVM_OPTS -Xloggc:/var/log/cassandra/gc-`date +%s`.log"
+{% elif cassandra_gc_logging == 'rotation' %}
+# GC log path has to be defined here because it needs to access CASSANDRA_HOME
+JVM_OPTS="$JVM_OPTS -Xloggc:/var/log/cassandra/gc.log"
+JVM_OPTS="$JVM_OPTS -XX:+UseGCLogFileRotation"
+JVM_OPTS="$JVM_OPTS -XX:NumberOfGCLogFiles=10"
+JVM_OPTS="$JVM_OPTS -XX:GCLogFileSize=10M"
+{% endif %}
+{% endif %}
+
+# Here we create the arguments that will get passed to the jvm when
+# starting cassandra.
+
+# Read user-defined JVM options from jvm.options file
+JVM_OPTS_FILE=$CASSANDRA_CONF/jvm.options
+for opt in `grep "^-" $JVM_OPTS_FILE`
+do
+ JVM_OPTS="$JVM_OPTS $opt"
+done
+
+# Check what parameters were defined on jvm.options file to avoid conflicts
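+# (grep -q exits 0 when the pattern is found, so a value of 0 in the
+# variables below means the flag *was* defined in jvm.options.)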
+echo $JVM_OPTS | grep -q Xmn
+DEFINED_XMN=$?
+echo $JVM_OPTS | grep -q Xmx
+DEFINED_XMX=$?
+echo $JVM_OPTS | grep -q Xms
+DEFINED_XMS=$?
+echo $JVM_OPTS | grep -q UseConcMarkSweepGC
+USING_CMS=$?
+echo $JVM_OPTS | grep -q UseG1GC
+USING_G1=$?
+
+# Override these to set the amount of memory to allocate to the JVM at
+# start-up. For production use you may wish to adjust this for your
+# environment. MAX_HEAP_SIZE is the total amount of memory dedicated
+# to the Java heap. HEAP_NEWSIZE refers to the size of the young
+# generation. Both MAX_HEAP_SIZE and HEAP_NEWSIZE should be either set
+# or not (if you set one, set the other).
+#
+# The main trade-off for the young generation is that the larger it
+# is, the longer GC pause times will be. The shorter it is, the more
+# expensive GC will be (usually).
+#
+# The example HEAP_NEWSIZE assumes a modern 8-core+ machine for decent pause
+# times. If in doubt, and if you do not particularly want to tweak, go with
+# 100 MB per physical CPU core.
+
+#MAX_HEAP_SIZE="4G"
+#HEAP_NEWSIZE="800M"
+
+{%- if database.max_heap_size is defined %}
+MAX_HEAP_SIZE="{{ database.max_heap_size }}"
+{%- endif %}
+{%- if database.heap_newsize is defined %}
+HEAP_NEWSIZE="{{ database.heap_newsize }}"
+{%- endif %}
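+
+# The two overrides above come from pillar; a hypothetical example:
+#
+#   opencontrail:
+#     database:
+#       max_heap_size: "4G"
+#       heap_newsize: "800M"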
+
+# Set this to control the amount of arenas per-thread in glibc
+#export MALLOC_ARENA_MAX=4
+
+# only calculate the size if it's not set manually
+if [ "x$MAX_HEAP_SIZE" = "x" ] && [ "x$HEAP_NEWSIZE" = "x" -o $USING_G1 -eq 0 ]; then
+ calculate_heap_sizes
+elif [ "x$MAX_HEAP_SIZE" = "x" ] || [ "x$HEAP_NEWSIZE" = "x" -a $USING_G1 -ne 0 ]; then
+ echo "please set or unset MAX_HEAP_SIZE and HEAP_NEWSIZE in pairs when using CMS GC (see cassandra-env.sh)"
+ exit 1
+fi
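+# (HEAP_NEWSIZE only matters for CMS; with G1 in use ($USING_G1 -eq 0
+# above) a missing HEAP_NEWSIZE is deliberately not an error.)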
+
+if [ "x$MALLOC_ARENA_MAX" = "x" ] ; then
+ export MALLOC_ARENA_MAX=4
+fi
+
+# We only set -Xms and -Xmx if they were not defined on jvm.options file
+# If defined, both Xmx and Xms should be defined together.
+if [ $DEFINED_XMX -ne 0 ] && [ $DEFINED_XMS -ne 0 ]; then
+ JVM_OPTS="$JVM_OPTS -Xms${MAX_HEAP_SIZE}"
+ JVM_OPTS="$JVM_OPTS -Xmx${MAX_HEAP_SIZE}"
+elif [ $DEFINED_XMX -ne 0 ] || [ $DEFINED_XMS -ne 0 ]; then
+ echo "Please set or unset -Xmx and -Xms flags in pairs on jvm.options file."
+ exit 1
+fi
+
+# We only set -Xmn flag if it was not defined in jvm.options file
+# and if the CMS GC is being used
+# If defined, both Xmn and Xmx should be defined together.
+if [ $DEFINED_XMN -eq 0 ] && [ $DEFINED_XMX -ne 0 ]; then
+ echo "Please set or unset -Xmx and -Xmn flags in pairs on jvm.options file."
+ exit 1
+elif [ $DEFINED_XMN -ne 0 ] && [ $USING_CMS -eq 0 ]; then
+ JVM_OPTS="$JVM_OPTS -Xmn${HEAP_NEWSIZE}"
+fi
+
+if [ "$JVM_ARCH" = "64-Bit" ] && [ $USING_CMS -eq 0 ]; then
+ JVM_OPTS="$JVM_OPTS -XX:+UseCondCardMark"
+fi
+
+# provides hints to the JIT compiler
+JVM_OPTS="$JVM_OPTS -XX:CompileCommandFile=$CASSANDRA_CONF/hotspot_compiler"
+
+# add the jamm javaagent
+JVM_OPTS="$JVM_OPTS -javaagent:$CASSANDRA_HOME/lib/jamm-0.3.0.jar"
+
+# set jvm HeapDumpPath with CASSANDRA_HEAPDUMP_DIR
+if [ "x$CASSANDRA_HEAPDUMP_DIR" != "x" ]; then
+ JVM_OPTS="$JVM_OPTS -XX:HeapDumpPath=$CASSANDRA_HEAPDUMP_DIR/cassandra-`date +%s`-pid$$.hprof"
+fi
+
+# jmx: metrics and administration interface
+#
+# add this if you're having trouble connecting:
+# JVM_OPTS="$JVM_OPTS -Djava.rmi.server.hostname=<public name>"
+#
+# see
+# https://blogs.oracle.com/jmxetc/entry/troubleshooting_connection_problems_in_jconsole
+# for more on configuring JMX through firewalls, etc. (Short version:
+# get it working with no firewall first.)
+#
+# Cassandra ships with JMX accessible *only* from localhost.
+# To enable remote JMX connections, uncomment lines below
+# with authentication and/or ssl enabled. See https://wiki.apache.org/cassandra/JmxSecurity
+#
+if [ "x$LOCAL_JMX" = "x" ]; then
+ LOCAL_JMX=yes
+fi
+
+# Specifies the default port over which Cassandra will be available for
+# JMX connections.
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+JMX_PORT="7198"
+
+if [ "$LOCAL_JMX" = "yes" ]; then
+ JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.local.port=$JMX_PORT"
+ JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.authenticate=false"
+else
+ JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.remote.port=$JMX_PORT"
+ # if ssl is enabled the same port cannot be used for both jmx and rmi so either
+ # pick another value for this property or comment out to use a random port (though see CASSANDRA-7087 for origins)
+ JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT"
+
+ # turn on JMX authentication. See below for further options
+ JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.authenticate=true"
+
+ # jmx ssl options
+ #JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl=true"
+ #JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl.need.client.auth=true"
+ #JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl.enabled.protocols=<enabled-protocols>"
+ #JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl.enabled.cipher.suites=<enabled-cipher-suites>"
+ #JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.keyStore=/path/to/keystore"
+ #JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.keyStorePassword=<keystore-password>"
+ #JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.trustStore=/path/to/truststore"
+ #JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.trustStorePassword=<truststore-password>"
+fi
+
+# jmx authentication and authorization options. By default, auth is only
+# activated for remote connections but it can also be enabled for local-only JMX
+## Basic file based authn & authz
+JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password"
+#JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.access.file=/etc/cassandra/jmxremote.access"
+## Custom auth settings which can be used as alternatives to JMX's out of the box auth utilities.
+## JAAS login modules can be used for authentication by uncommenting these two properties.
+## Cassandra ships with a LoginModule implementation - org.apache.cassandra.auth.CassandraLoginModule -
+## which delegates to the IAuthenticator configured in cassandra.yaml. See the sample JAAS configuration
+## file cassandra-jaas.config
+#JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.remote.login.config=CassandraLogin"
+#JVM_OPTS="$JVM_OPTS -Djava.security.auth.login.config=$CASSANDRA_HOME/conf/cassandra-jaas.config"
+
+## Cassandra also ships with a helper for delegating JMX authz calls to the configured IAuthorizer,
+## uncomment this to use it. Requires one of the two authentication options to be enabled
+#JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.authorizer=org.apache.cassandra.auth.jmx.AuthorizationProxy"
+
+# To use mx4j, an HTML interface for JMX, add mx4j-tools.jar to the lib/
+# directory.
+# See http://wiki.apache.org/cassandra/Operations#Monitoring_with_MX4J
+# By default mx4j listens on 0.0.0.0:8081. Uncomment the following lines
+# to control its listen address and port.
+#MX4J_ADDRESS="-Dmx4jaddress=127.0.0.1"
+#MX4J_PORT="-Dmx4jport=8081"
+
+# Cassandra uses SIGAR to capture OS metrics (CASSANDRA-7838).
+# For SIGAR we have to set the java.library.path
+# to the location of the native libraries.
+JVM_OPTS="$JVM_OPTS -Djava.library.path=$CASSANDRA_HOME/lib/sigar-bin"
+
+JVM_OPTS="$JVM_OPTS $MX4J_ADDRESS"
+JVM_OPTS="$JVM_OPTS $MX4J_PORT"
+JVM_OPTS="$JVM_OPTS $JVM_EXTRA_OPTS"
+
+# Add cassandra pid file, even if not used. It will stay in params and
+# contrail-nodemgr can detect cassandra PID from process list.
+
+JVM_OPTS="$JVM_OPTS -Dcassandra-pidfile=/var/run/cassandra/cassandra.pid"
diff --git a/opencontrail/files/cassandra/3.10/cassandra.yaml b/opencontrail/files/cassandra/3.10/cassandra.yaml
new file mode 100644
index 0000000..ebabb4a
--- /dev/null
+++ b/opencontrail/files/cassandra/3.10/cassandra.yaml
@@ -0,0 +1,1208 @@
+{%- from "opencontrail/map.jinja" import database with context %}
+# Cassandra storage config YAML
+
+# NOTE:
+# See http://wiki.apache.org/cassandra/StorageConfiguration for
+# full explanations of configuration directives
+# /NOTE
+
+# The name of the cluster. This is mainly used to prevent machines in
+# one logical cluster from joining another.
+cluster_name: '{{ database.name }}'
+
+# This defines the number of tokens randomly assigned to this node on the ring
+# The more tokens, relative to other nodes, the larger the proportion of data
+# that this node will store. You probably want all nodes to have the same number
+# of tokens assuming they have equal hardware capability.
+#
+# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility,
+# and will use the initial_token as described below.
+#
+# Specifying initial_token will override this setting on the node's initial start;
+# on subsequent starts, this setting will apply even if initial_token is set.
+#
+# If you already have a cluster with 1 token per node, and wish to migrate to
+# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations
+num_tokens: 256
+
+# Triggers automatic allocation of num_tokens tokens for this node. The allocation
+# algorithm attempts to choose tokens in a way that optimizes replicated load over
+# the nodes in the datacenter for the replication strategy used by the specified
+# keyspace.
+#
+# The load assigned to each node will be close to proportional to its number of
+# vnodes.
+#
+# Only supported with the Murmur3Partitioner.
+# allocate_tokens_for_keyspace: KEYSPACE
+
+# initial_token allows you to specify tokens manually. While you can use it with
+# vnodes (num_tokens > 1, above) -- in which case you should provide a
+# comma-separated list -- it's primarily used when adding nodes to legacy clusters
+# that do not have vnodes enabled.
+# initial_token:
+
+# See http://wiki.apache.org/cassandra/HintedHandoff
+# May either be "true" or "false" to enable globally
+hinted_handoff_enabled: true
+
+# When hinted_handoff_enabled is true, a black list of data centers that will not
+# perform hinted handoff
+# hinted_handoff_disabled_datacenters:
+# - DC1
+# - DC2
+
+# this defines the maximum amount of time a dead host will have hints
+# generated. After it has been dead this long, new hints for it will not be
+# created until it has been seen alive and gone down again.
+max_hint_window_in_ms: 10800000 # 3 hours
+
+# Maximum throttle in KBs per second, per delivery thread. This will be
+# reduced proportionally to the number of nodes in the cluster. (If there
+# are two nodes in the cluster, each delivery thread will use the maximum
+# rate; if there are three, each will throttle to half of the maximum,
+# since we expect two nodes to be delivering hints simultaneously.)
+hinted_handoff_throttle_in_kb: 1024
+
+# Number of threads with which to deliver hints;
+# Consider increasing this number when you have multi-dc deployments, since
+# cross-dc handoff tends to be slower
+max_hints_delivery_threads: 2
+
+# Directory where Cassandra should store hints.
+# If not set, the default directory is $CASSANDRA_HOME/data/hints.
+# hints_directory: /var/lib/cassandra/hints
+
+# How often hints should be flushed from the internal buffers to disk.
+# Will *not* trigger fsync.
+hints_flush_period_in_ms: 10000
+
+# Maximum size for a single hints file, in megabytes.
+max_hints_file_size_in_mb: 128
+
+# Compression to apply to the hint files. If omitted, hints files
+# will be written uncompressed. LZ4, Snappy, and Deflate compressors
+# are supported.
+#hints_compression:
+# - class_name: LZ4Compressor
+# parameters:
+# -
+
+# Maximum throttle in KBs per second, total. This will be
+# reduced proportionally to the number of nodes in the cluster.
+batchlog_replay_throttle_in_kb: 1024
+
+# Authentication backend, implementing IAuthenticator; used to identify users
+# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator,
+# PasswordAuthenticator}.
+#
+# - AllowAllAuthenticator performs no checks - set it to disable authentication.
+# - PasswordAuthenticator relies on username/password pairs to authenticate
+# users. It keeps usernames and hashed passwords in system_auth.roles table.
+# Please increase system_auth keyspace replication factor if you use this authenticator.
+# If using PasswordAuthenticator, CassandraRoleManager must also be used (see below)
+authenticator: AllowAllAuthenticator
+
+# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions
+# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer,
+# CassandraAuthorizer}.
+#
+# - AllowAllAuthorizer allows any action to any user - set it to disable authorization.
+# - CassandraAuthorizer stores permissions in system_auth.role_permissions table. Please
+# increase system_auth keyspace replication factor if you use this authorizer.
+authorizer: AllowAllAuthorizer
+
+# Part of the Authentication & Authorization backend, implementing IRoleManager; used
+# to maintain grants and memberships between roles.
+# Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager,
+# which stores role information in the system_auth keyspace. Most functions of the
+# IRoleManager require an authenticated login, so unless the configured IAuthenticator
+# actually implements authentication, most of this functionality will be unavailable.
+#
+# - CassandraRoleManager stores role data in the system_auth keyspace. Please
+# increase system_auth keyspace replication factor if you use this role manager.
+role_manager: CassandraRoleManager
+
+# Validity period for roles cache (fetching granted roles can be an expensive
+# operation depending on the role manager, CassandraRoleManager is one example)
+# Granted roles are cached for authenticated sessions in AuthenticatedUser and
+# after the period specified here, become eligible for (async) reload.
+# Defaults to 2000, set to 0 to disable caching entirely.
+# Will be disabled automatically for AllowAllAuthenticator.
+roles_validity_in_ms: 2000
+
+# Refresh interval for roles cache (if enabled).
+# After this interval, cache entries become eligible for refresh. Upon next
+# access, an async reload is scheduled and the old value returned until it
+# completes. If roles_validity_in_ms is non-zero, then this must be
+# also.
+# Defaults to the same value as roles_validity_in_ms.
+# roles_update_interval_in_ms: 2000
+
+# Validity period for permissions cache (fetching permissions can be an
+# expensive operation depending on the authorizer, CassandraAuthorizer is
+# one example). Defaults to 2000, set to 0 to disable.
+# Will be disabled automatically for AllowAllAuthorizer.
+permissions_validity_in_ms: 2000
+
+# Refresh interval for permissions cache (if enabled).
+# After this interval, cache entries become eligible for refresh. Upon next
+# access, an async reload is scheduled and the old value returned until it
+# completes. If permissions_validity_in_ms is non-zero, then this must be
+# also.
+# Defaults to the same value as permissions_validity_in_ms.
+# permissions_update_interval_in_ms: 2000
+
+# Validity period for credentials cache. This cache is tightly coupled to
+# the provided PasswordAuthenticator implementation of IAuthenticator. If
+# another IAuthenticator implementation is configured, this cache will not
+# be automatically used and so the following settings will have no effect.
+# Please note, credentials are cached in their encrypted form, so while
+# activating this cache may reduce the number of queries made to the
+# underlying table, it may not bring a significant reduction in the
+# latency of individual authentication attempts.
+# Defaults to 2000, set to 0 to disable credentials caching.
+credentials_validity_in_ms: 2000
+
+# Refresh interval for credentials cache (if enabled).
+# After this interval, cache entries become eligible for refresh. Upon next
+# access, an async reload is scheduled and the old value returned until it
+# completes. If credentials_validity_in_ms is non-zero, then this must be
+# also.
+# Defaults to the same value as credentials_validity_in_ms.
+# credentials_update_interval_in_ms: 2000
+
+# The partitioner is responsible for distributing groups of rows (by
+# partition key) across nodes in the cluster. You should leave this
+# alone for new clusters. The partitioner can NOT be changed without
+# reloading all data, so when upgrading you should set this to the
+# same partitioner you were already using.
+#
+# Besides Murmur3Partitioner, partitioners included for backwards
+# compatibility include RandomPartitioner, ByteOrderedPartitioner, and
+# OrderPreservingPartitioner.
+#
+partitioner: org.apache.cassandra.dht.Murmur3Partitioner
+
+# Directories where Cassandra should store data on disk. Cassandra
+# will spread data evenly across them, subject to the granularity of
+# the configured compaction strategy.
+# If not set, the default directory is $CASSANDRA_HOME/data/data.
+data_file_directories:
+ - /var/lib/cassandra/data
+
+# commit log. when running on magnetic HDD, this should be a
+# separate spindle than the data directories.
+# If not set, the default directory is $CASSANDRA_HOME/data/commitlog.
+commitlog_directory: /var/lib/cassandra/commitlog
+
+# Enable / disable CDC functionality on a per-node basis. This modifies the logic used
+# for write path allocation rejection (standard: never reject. cdc: reject Mutation
+# containing a CDC-enabled table if at space limit in cdc_raw_directory).
+cdc_enabled: false
+
+# CommitLogSegments are moved to this directory on flush if cdc_enabled: true and the
+# segment contains mutations for a CDC-enabled table. This should be placed on a
+# separate spindle than the data directories. If not set, the default directory is
+# $CASSANDRA_HOME/data/cdc_raw.
+# cdc_raw_directory: /var/lib/cassandra/cdc_raw
+
+# Policy for data disk failures:
+#
+# die
+# shut down gossip and client transports and kill the JVM for any fs errors or
+# single-sstable errors, so the node can be replaced.
+#
+# stop_paranoid
+# shut down gossip and client transports even for single-sstable errors,
+# kill the JVM for errors during startup.
+#
+# stop
+# shut down gossip and client transports, leaving the node effectively dead, but
+# can still be inspected via JMX, kill the JVM for errors during startup.
+#
+# best_effort
+# stop using the failed disk and respond to requests based on
+# remaining available sstables. This means you WILL see obsolete
+# data at CL.ONE!
+#
+# ignore
+# ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
+disk_failure_policy: stop
+
+# Policy for commit disk failures:
+#
+# die
+# shut down gossip and Thrift and kill the JVM, so the node can be replaced.
+#
+# stop
+# shut down gossip and Thrift, leaving the node effectively dead, but
+# can still be inspected via JMX.
+#
+# stop_commit
+# shutdown the commit log, letting writes collect but
+# continuing to service reads, as in pre-2.0.5 Cassandra
+#
+# ignore
+# ignore fatal errors and let the batches fail
+commit_failure_policy: stop
+
+# Maximum size of the native protocol prepared statement cache
+#
+# Valid values are either "auto" (omitting the value) or a value greater than 0.
+#
+# Note that specifying too large a value will result in long running GCs and possibly
+# out-of-memory errors. Keep the value at a small fraction of the heap.
+#
+# If you constantly see "prepared statements discarded in the last minute because
+# cache limit reached" messages, the first step is to investigate the root cause
+# of these messages and check whether prepared statements are used correctly -
+# i.e. use bind markers for variable parts.
+#
+# Only change the default value if you really have more prepared statements than
+# fit in the cache. In most cases it is not necessary to change this value.
+# Constantly re-preparing statements is a performance penalty.
+#
+# Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater
+prepared_statements_cache_size_mb:
+
+# Maximum size of the Thrift prepared statement cache
+#
+# If you do not use Thrift at all, it is safe to leave this value at "auto".
+#
+# See description of 'prepared_statements_cache_size_mb' above for more information.
+#
+# Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater
+thrift_prepared_statements_cache_size_mb:
+
+# Maximum size of the key cache in memory.
+#
+# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
+# minimum, sometimes more. The key cache is fairly tiny for the amount of
+# time it saves, so it's worthwhile to use it at large numbers.
+# The row cache saves even more time, but must contain the entire row,
+# so it is extremely space-intensive. It's best to only use the
+# row cache if you have hot rows or static rows.
+#
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.
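+# (For example, with a 4 GB heap the "auto" default is
+# min(0.05 * 4096 MB, 100 MB) = 100 MB.)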
+key_cache_size_in_mb:
+
+# Duration in seconds after which Cassandra should
+# save the key cache. Caches are saved to saved_caches_directory as
+# specified in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and is relatively cheap in
+# terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 14400 or 4 hours.
+key_cache_save_period: 14400
+
+# Number of keys from the key cache to save
+# Disabled by default, meaning all keys are going to be saved
+# key_cache_keys_to_save: 100
+
+# Row cache implementation class name. Available implementations:
+#
+# org.apache.cassandra.cache.OHCProvider
+# Fully off-heap row cache implementation (default).
+#
+# org.apache.cassandra.cache.SerializingCacheProvider
+# This is the row cache implementation available
+# in previous releases of Cassandra.
+# row_cache_class_name: org.apache.cassandra.cache.OHCProvider
+
+# Maximum size of the row cache in memory.
+# Please note that OHC cache implementation requires some additional off-heap memory to manage
+# the map structures and some in-flight memory during operations before/after cache entries can be
+# accounted against the cache capacity. This overhead is usually small compared to the whole capacity.
+# Do not specify more memory than the system can afford in the worst usual situation and leave some
+# headroom for the OS block level cache. Never allow your system to swap.
+#
+# Default value is 0, to disable row caching.
+row_cache_size_in_mb: 0
+
+# Duration in seconds after which Cassandra should save the row cache.
+# Caches are saved to saved_caches_directory as specified in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and is relatively cheap in
+# terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 0 to disable saving the row cache.
+row_cache_save_period: 0
+
+# Number of keys from the row cache to save.
+# Specify 0 (which is the default), meaning all keys are going to be saved
+# row_cache_keys_to_save: 100
+
+# Maximum size of the counter cache in memory.
+#
+# Counter cache helps to reduce counter locks' contention for hot counter cells.
+# In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before
+# write entirely. With RF > 1 a counter cache hit will still help to reduce the duration
+# of the lock hold, helping with hot counter cell updates, but will not allow skipping
+# the read entirely. Only the local (clock, count) tuple of a counter cell is kept
+# in memory, not the whole counter, so it's relatively cheap.
+#
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is empty to make it "auto" (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache.
+# NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache.
+counter_cache_size_in_mb:
+
+# Duration in seconds after which Cassandra should
+# save the counter cache (keys only). Caches are saved to saved_caches_directory as
+# specified in this configuration file.
+#
+# Default is 7200 or 2 hours.
+counter_cache_save_period: 7200
+
+# Number of keys from the counter cache to save
+# Disabled by default, meaning all keys are going to be saved
+# counter_cache_keys_to_save: 100
+
+# saved caches
+# If not set, the default directory is $CASSANDRA_HOME/data/saved_caches.
+saved_caches_directory: /var/lib/cassandra/saved_caches
+
+# commitlog_sync may be either "periodic" or "batch."
+#
+# When in batch mode, Cassandra won't ack writes until the commit log
+# has been fsynced to disk. It will wait
+# commitlog_sync_batch_window_in_ms milliseconds between fsyncs.
+# This window should be kept short because the writer threads will
+# be unable to do extra work while waiting. (You may need to increase
+# concurrent_writes for the same reason.)
+#
+# commitlog_sync: batch
+# commitlog_sync_batch_window_in_ms: 2
+#
+# the other option is "periodic" where writes may be acked immediately
+# and the CommitLog is simply synced every commitlog_sync_period_in_ms
+# milliseconds.
+commitlog_sync: periodic
+commitlog_sync_period_in_ms: 10000
+
+# The size of the individual commitlog file segments. A commitlog
+# segment may be archived, deleted, or recycled once all the data
+# in it (potentially from each columnfamily in the system) has been
+# flushed to sstables.
+#
+# The default size is 32, which is almost always fine, but if you are
+# archiving commitlog segments (see commitlog_archiving.properties),
+# then you probably want a finer granularity of archiving; 8 or 16 MB
+# is reasonable.
+# Max mutation size is also configurable via max_mutation_size_in_kb setting in
+# cassandra.yaml. The default is half the size of commitlog_segment_size_in_mb * 1024.
+#
+# NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must
+# be set to at least twice the size of max_mutation_size_in_kb / 1024
+#
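+# (With the default 32 MB segment size, max_mutation_size_in_kb thus
+# defaults to 32 * 1024 / 2 = 16384 KB.)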
+commitlog_segment_size_in_mb: 32
+
+# Compression to apply to the commit log. If omitted, the commit log
+# will be written uncompressed. LZ4, Snappy, and Deflate compressors
+# are supported.
+# commitlog_compression:
+# - class_name: LZ4Compressor
+# parameters:
+# -
+
+# any class that implements the SeedProvider interface and has a
+# constructor that takes a Map<String, String> of parameters will do.
+seed_provider:
+ # Addresses of hosts that are deemed contact points.
+ # Cassandra nodes use this list of hosts to find each other and learn
+ # the topology of the ring. You must change this if you are running
+ # multiple nodes!
+ - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+ parameters:
+ # seeds is actually a comma-delimited list of addresses.
+ # Ex: "<ip1>,<ip2>,<ip3>"
+ - seeds: "{%- for member in database.members %}{{ member.host }},{%- endfor %}"
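+# Example rendering (hypothetical pillar with two members at
+# 10.0.0.11 and 10.0.0.12): the loop above yields
+# - seeds: "10.0.0.11,10.0.0.12,"
+# (note the trailing comma produced by the Jinja for-loop).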
+
+# For workloads with more data than can fit in memory, Cassandra's
+# bottleneck will be reads that need to fetch data from
+# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
+# order to allow the operations to enqueue low enough in the stack
+# that the OS and drives can reorder them. Same applies to
+# "concurrent_counter_writes", since counter writes read the current
+# values before incrementing and writing them back.
+#
+# On the other hand, since writes are almost never IO bound, the ideal
+# number of "concurrent_writes" is dependent on the number of cores in
+# your system; (8 * number_of_cores) is a good rule of thumb.
+concurrent_reads: 32
+concurrent_writes: 32
+concurrent_counter_writes: 32
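+# (Following the rules of thumb above, a node with 2 data drives and
+# 4 cores would use concurrent_reads: 32 and concurrent_writes: 32.)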
+
+# For materialized view writes, as there is a read involved, this should
+# be limited by the lesser of concurrent_reads and concurrent_writes.
+concurrent_materialized_view_writes: 32
+
+# Maximum memory to use for sstable chunk cache and buffer pooling.
+# 32MB of this are reserved for pooling buffers, the rest is used as an
+# cache that holds uncompressed sstable chunks.
+# Defaults to the smaller of 1/4 of heap or 512MB. This pool is allocated off-heap,
+# so is in addition to the memory allocated for heap. The cache also has on-heap
+# overhead which is roughly 128 bytes per chunk (i.e. 0.2% of the reserved size
+# if the default 64k chunk size is used).
+# Memory is only allocated when needed.
+# file_cache_size_in_mb: 512
+
+# Flag indicating whether to allocate on or off heap when the sstable buffer
+# pool is exhausted, that is when it has exceeded the maximum memory
+# file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request.
+
+# buffer_pool_use_heap_if_exhausted: true
+
+# The strategy for optimizing disk read
+# Possible values are:
+# ssd (for solid state disks, the default)
+# spinning (for spinning disks)
+# disk_optimization_strategy: ssd
+
+# Total permitted memory to use for memtables. Cassandra will stop
+# accepting writes when the limit is exceeded until a flush completes,
+# and will trigger a flush based on memtable_cleanup_threshold
+# If omitted, Cassandra will set both to 1/4 the size of the heap.
+# memtable_heap_space_in_mb: 2048
+# memtable_offheap_space_in_mb: 2048
+
+# memtable_cleanup_threshold is deprecated. The default calculation
+# is the only reasonable choice. See the comments on memtable_flush_writers
+# for more information.
+#
+# Ratio of occupied non-flushing memtable size to total permitted size
+# that will trigger a flush of the largest memtable. Larger mct will
+# mean larger flushes and hence less compaction, but also less concurrent
+# flush activity which can make it difficult to keep your disks fed
+# under heavy write load.
+#
+# memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1)
+# memtable_cleanup_threshold: 0.11
+
+# Specify the way Cassandra allocates and manages memtable memory.
+# Options are:
+#
+# heap_buffers
+# on heap nio buffers
+#
+# offheap_buffers
+# off heap (direct) nio buffers
+#
+# offheap_objects
+# off heap objects
+memtable_allocation_type: heap_buffers
+
+# Total space to use for commit logs on disk.
+#
+# If space gets above this value, Cassandra will flush every dirty CF
+# in the oldest segment and remove it. So a small total commitlog space
+# will tend to cause more flush activity on less-active columnfamilies.
+#
+# The default value is the smaller of 8192, and 1/4 of the total space
+# of the commitlog volume.
+#
+# commitlog_total_space_in_mb: 8192
+
+# This sets the number of memtable flush writer threads per disk
+# as well as the total number of memtables that can be flushed concurrently.
+# These are generally a combination of compute and IO bound.
+#
+# Memtable flushing is more CPU efficient than memtable ingest and a single thread
+# can keep up with the ingest rate of a whole server on a single fast disk
+# until it temporarily becomes IO bound under contention typically with compaction.
+# At that point you need multiple flush threads. At some point in the future
+# it may become CPU bound all the time.
+#
+# You can tell if flushing is falling behind using the MemtablePool.BlockedOnAllocation
+# metric which should be 0, but will be non-zero if threads are blocked waiting on flushing
+# to free memory.
+#
+# memtable_flush_writers defaults to two for a single data directory.
+# This means that two memtables can be flushed concurrently to the single data directory.
+# If you have multiple data directories the default is one memtable flushing at a time
+# but the flush will use a thread per data directory so you will get two or more writers.
+#
+# Two is generally enough to flush on a fast disk [array] mounted as a single data directory.
+# Adding more flush writers will result in smaller more frequent flushes that introduce more
+# compaction overhead.
+#
+# There is a direct tradeoff between number of memtables that can be flushed concurrently
+# and flush size and frequency. More is not better you just need enough flush writers
+# to never stall waiting for flushing to free memory.
+#
+#memtable_flush_writers: 2
+
+# Total space to use for change-data-capture logs on disk.
+#
+# If space gets above this value, Cassandra will throw WriteTimeoutException
+# on Mutations including tables with CDC enabled. A CDCCompactor is responsible
+# for parsing the raw CDC logs and deleting them when parsing is completed.
+#
+# The default value is the min of 4096 mb and 1/8th of the total space
+# of the drive where cdc_raw_directory resides.
+# cdc_total_space_in_mb: 4096
+
+# When we hit our cdc_raw limit and the CDCCompactor is either running behind
+# or experiencing backpressure, we check at the following interval to see if any
+# new space for cdc-tracked tables has been made available. Default to 250ms
+# cdc_free_space_check_interval_ms: 250
+
+# A fixed memory pool size in MB for SSTable index summaries. If left
+# empty, this will default to 5% of the heap size. If the memory usage of
+# all index summaries exceeds this limit, SSTables with low read rates will
+# shrink their index summaries in order to meet this limit. However, this
+# is a best-effort process. In extreme conditions Cassandra may need to use
+# more than this amount of memory.
+index_summary_capacity_in_mb:
+
+# How frequently index summaries should be resampled. This is done
+# periodically to redistribute memory from the fixed-size pool to sstables
+# proportional to their recent read rates. Setting to -1 will disable this
+# process, leaving existing index summaries at their current sampling level.
+index_summary_resize_interval_in_minutes: 60
+
+# Whether to, when doing sequential writing, fsync() at intervals in
+# order to force the operating system to flush the dirty
+# buffers. Enable this to avoid sudden dirty buffer flushing from
+# impacting read latencies. Almost always a good idea on SSDs; not
+# necessarily on platters.
+trickle_fsync: false
+trickle_fsync_interval_in_kb: 10240
+
+# TCP port, for commands and data
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+storage_port: 7010
+
+# SSL port, for encrypted communication. Unused unless enabled in
+# encryption_options
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+ssl_storage_port: 7011
+
+# Address or interface to bind to and tell other Cassandra nodes to connect to.
+# You _must_ change this if you want multiple nodes to be able to communicate!
+#
+# Set listen_address OR listen_interface, not both.
+#
+# Leaving it blank leaves it up to InetAddress.getLocalHost(). This
+# will always do the Right Thing _if_ the node is properly configured
+# (hostname, name resolution, etc), and the Right Thing is to use the
+# address associated with the hostname (it might not be).
+#
+# Setting listen_address to 0.0.0.0 is always wrong.
+#
+{% if database.bind.interface is defined %}
+listen_interface: {{ database.bind.interface }}
+{% else %}
+listen_address: {{ database.bind.host }}
+{% endif %}
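+# A hypothetical pillar fragment feeding the block above:
+#
+#   opencontrail:
+#     database:
+#       bind:
+#         host: 10.0.0.11
+#         # or, instead of host:
+#         # interface: eth0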
+
+# Set listen_address OR listen_interface, not both. Interfaces must correspond
+# to a single address, IP aliasing is not supported.
+# listen_interface: eth0
+
+# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
+# you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4
+# address will be used. If true the first ipv6 address will be used. Defaults to false preferring
+# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
+# listen_interface_prefer_ipv6: false
+
+# Address to broadcast to other Cassandra nodes
+# Leaving this blank will set it to the same value as listen_address
+# broadcast_address: 1.2.3.4
+
+# When using multiple physical network interfaces, set this
+# to true to listen on broadcast_address in addition to
+# the listen_address, allowing nodes to communicate in both
+# interfaces.
+# Ignore this property if the network configuration automatically
+# routes between the public and private networks such as EC2.
+# listen_on_broadcast_address: false
+
+# Internode authentication backend, implementing IInternodeAuthenticator;
+# used to allow/disallow connections from peer nodes.
+# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator
+
+# Whether to start the native transport server.
+# Please note that the address on which the native transport is bound is the
+# same as the rpc_address. The port however is different and specified below.
+start_native_transport: true
+# port for the CQL native transport to listen for clients on
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+native_transport_port: {{ database.bind.get('port_configdb', database.bind.get('port', 9042)) }}
+# Enabling native transport encryption in client_encryption_options allows you to either use
+# encryption for the standard port or to use a dedicated, additional port along with the unencrypted
+# standard native_transport_port.
+# Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption
+# for native_transport_port. Setting native_transport_port_ssl to a different value
+# from native_transport_port will use encryption for native_transport_port_ssl while
+# keeping native_transport_port unencrypted.
+# native_transport_port_ssl: 9142
+# The maximum threads for handling requests when the native transport is used.
+# This is similar to rpc_max_threads though the default differs slightly (and
+# there is no native_transport_min_threads, idle threads will always be stopped
+# after 30 seconds).
+# native_transport_max_threads: 128
+#
+# The maximum size of allowed frame. Frame (requests) larger than this will
+# be rejected as invalid. The default is 256MB. If you're changing this parameter,
+# you may want to adjust max_value_size_in_mb accordingly.
+# native_transport_max_frame_size_in_mb: 256
+
+# The maximum number of concurrent client connections.
+# The default is -1, which means unlimited.
+# native_transport_max_concurrent_connections: -1
+
+# The maximum number of concurrent client connections per source ip.
+# The default is -1, which means unlimited.
+# native_transport_max_concurrent_connections_per_ip: -1
+
+# Whether to start the thrift rpc server.
+start_rpc: true
+
+# The address or interface to bind the Thrift RPC service and native transport
+# server to.
+#
+# Set rpc_address OR rpc_interface, not both.
+#
+# Leaving rpc_address blank has the same effect as on listen_address
+# (i.e. it will be based on the configured hostname of the node).
+#
+# Note that unlike listen_address, you can specify 0.0.0.0, but you must also
+# set broadcast_rpc_address to a value other than 0.0.0.0.
+#
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+{% if database.bind.interface is defined %}
+rpc_interface: {{ database.bind.interface }}
+{% else %}
+rpc_address: {{ database.bind.host }}
+{% endif %}
+
+# Set rpc_address OR rpc_interface, not both. Interfaces must correspond
+# to a single address, IP aliasing is not supported.
+# rpc_interface: eth1
+
+# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
+# you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4
+# address will be used. If true the first ipv6 address will be used. Defaults to false preferring
+# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
+# rpc_interface_prefer_ipv6: false
+
+# port for Thrift to listen for clients on
+rpc_port: {{ database.bind.get('rpc_port_configdb', database.bind.get('rpc_port', 9160)) }}
+
+# RPC address to broadcast to drivers and other Cassandra nodes. This cannot
+# be set to 0.0.0.0. If left blank, this will be set to the value of
+# rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must
+# be set.
+# broadcast_rpc_address: 1.2.3.4
+
+# enable or disable keepalive on rpc/native connections
+rpc_keepalive: true
+
+# Cassandra provides two out-of-the-box options for the RPC Server:
+#
+# sync
+# One thread per thrift connection. For a very large number of clients, memory
+# will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size
+# per thread, and that will correspond to your use of virtual memory (but physical memory
+# may be limited depending on use of stack space).
+#
+# hsha
+# Stands for "half synchronous, half asynchronous." All thrift clients are handled
+# asynchronously using a small number of threads that does not vary with the number
+# of thrift clients (and thus scales well to many clients). The rpc requests are still
+# synchronous (one thread per active request). If hsha is selected then it is essential
+# that rpc_max_threads is changed from the default value of unlimited.
+#
+# The default is sync because on Windows hsha is about 30% slower. On Linux,
+# sync/hsha performance is about the same, with hsha of course using less memory.
+#
+# Alternatively, can provide your own RPC server by providing the fully-qualified class name
+# of an o.a.c.t.TServerFactory that can create an instance of it.
+rpc_server_type: sync
+
+# Uncomment rpc_min|max_thread to set request pool size limits.
+#
+# Regardless of your choice of RPC server (see above), the number of maximum requests in the
+# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync
+# RPC server, it also dictates the number of clients that can be connected at all).
+#
+# The default is unlimited and thus provides no protection against clients overwhelming the server. You are
+# encouraged to set a maximum that makes sense for you in production, but do keep in mind that
+# rpc_max_threads represents the maximum number of client requests this server may execute concurrently.
+#
+# rpc_min_threads: 16
+# rpc_max_threads: 2048
+
+# uncomment to set socket buffer sizes on rpc connections
+# rpc_send_buff_size_in_bytes:
+# rpc_recv_buff_size_in_bytes:
+
+# Uncomment to set socket buffer size for internode communication
+# Note that when setting this, the buffer size is limited by net.core.wmem_max
+# and when it is not set it is defined by net.ipv4.tcp_wmem
+# See also:
+# /proc/sys/net/core/wmem_max
+# /proc/sys/net/core/rmem_max
+# /proc/sys/net/ipv4/tcp_wmem
+# /proc/sys/net/ipv4/tcp_rmem
+# and 'man tcp'
+# internode_send_buff_size_in_bytes:
+
+# Uncomment to set socket buffer size for internode communication
+# Note that when setting this, the buffer size is limited by net.core.rmem_max
+# and when it is not set it is defined by net.ipv4.tcp_rmem
+# internode_recv_buff_size_in_bytes:
+
+# Frame size for thrift (maximum message length).
+thrift_framed_transport_size_in_mb: 15
+
+# Set to true to have Cassandra create a hard link to each sstable
+# flushed or streamed locally in a backups/ subdirectory of the
+# keyspace data. Removing these links is the operator's
+# responsibility.
+incremental_backups: false
+
+# Whether or not to take a snapshot before each compaction. Be
+# careful using this option, since Cassandra won't clean up the
+# snapshots for you. Mostly useful if you're paranoid when there
+# is a data format change.
+snapshot_before_compaction: false
+
+# Whether or not a snapshot is taken of the data before keyspace truncation
+# or dropping of column families. The STRONGLY advised default of true
+# should be used to provide data safety. If you set this flag to false, you will
+# lose data on truncation or drop.
+auto_snapshot: true
+
+# Granularity of the collation index of rows within a partition.
+# Increase if your rows are large, or if you have a very large
+# number of rows per partition. The competing goals are these:
+#
+# - a smaller granularity means more index entries are generated
+# and looking up rows within the partition by collation column
+# is faster
+# - but, Cassandra will keep the collation index in memory for hot
+# rows (as part of the key cache), so a larger granularity means
+# you can cache more hot rows
+column_index_size_in_kb: 64
+
+# Per sstable indexed key cache entries (the collation index in memory
+# mentioned above) exceeding this size will not be held on heap.
+# This means that only partition information is held on heap and the
+# index entries are read from disk.
+#
+# Note that this size refers to the size of the
+# serialized index information and not the size of the partition.
+column_index_cache_size_in_kb: 2
+
+# Number of simultaneous compactions to allow, NOT including
+# validation "compactions" for anti-entropy repair. Simultaneous
+# compactions can help preserve read performance in a mixed read/write
+# workload, by mitigating the tendency of small sstables to accumulate
+# during a single long running compactions. The default is usually
+# fine and if you experience problems with compaction running too
+# slowly or too fast, you should look at
+# compaction_throughput_mb_per_sec first.
+#
+# concurrent_compactors defaults to the smaller of (number of disks,
+# number of cores), with a minimum of 2 and a maximum of 8.
+#
+# If your data directories are backed by SSD, you should increase this
+# to the number of cores.
+#concurrent_compactors: 1
+{% if database.concurrent_compactors is defined %}
+concurrent_compactors: {{ database.concurrent_compactors }}
+{% endif %}
+
+# Throttles compaction to the given total throughput across the entire
+# system. The faster you insert data, the faster you need to compact in
+# order to keep the sstable count down, but in general, setting this to
+# 16 to 32 times the rate you are inserting data is more than sufficient.
+# Setting this to 0 disables throttling. Note that this accounts for all types
+# of compaction, including validation compaction.
+compaction_throughput_mb_per_sec: {{ database.compaction_throughput_mb_per_sec }}
+
+# When compacting, the replacement sstable(s) can be opened before they
+# are completely written, and used in place of the prior sstables for
+# any range that has been written. This helps to smoothly transfer reads
+# between the sstables, reducing page cache churn and keeping hot rows hot
+sstable_preemptive_open_interval_in_mb: 50
+
+# Throttles all outbound streaming file transfers on this node to the
+# given total throughput in Mbps. This is necessary because Cassandra does
+# mostly sequential IO when streaming data during bootstrap or repair, which
+# can lead to saturating the network connection and degrading rpc performance.
+# When unset, the default is 200 Mbps or 25 MB/s.
+# stream_throughput_outbound_megabits_per_sec: 200
+
+# Throttles all streaming file transfer between the datacenters,
+# this setting allows users to throttle inter dc stream throughput in addition
+# to throttling all network stream traffic as configured with
+# stream_throughput_outbound_megabits_per_sec
+# When unset, the default is 200 Mbps or 25 MB/s
+# inter_dc_stream_throughput_outbound_megabits_per_sec: 200
+
+# How long the coordinator should wait for read operations to complete
+read_request_timeout_in_ms: 10000
+# How long the coordinator should wait for seq or index scans to complete
+range_request_timeout_in_ms: 10000
+# How long the coordinator should wait for writes to complete
+write_request_timeout_in_ms: 10000
+# How long the coordinator should wait for counter writes to complete
+counter_write_request_timeout_in_ms: 5000
+# How long a coordinator should continue to retry a CAS operation
+# that contends with other proposals for the same row
+cas_contention_timeout_in_ms: 1000
+# How long the coordinator should wait for truncates to complete
+# (This can be much longer, because unless auto_snapshot is disabled
+# we need to flush first so we can snapshot before removing the data.)
+truncate_request_timeout_in_ms: 60000
+# The default timeout for other, miscellaneous operations
+request_timeout_in_ms: 10000
+
+# How long before a node logs slow queries. Select queries that take longer than
+# this timeout to execute, will generate an aggregated log message, so that slow queries
+# can be identified. Set this value to zero to disable slow query logging.
+slow_query_log_timeout_in_ms: 500
+
+# Enable operation timeout information exchange between nodes to accurately
+# measure request timeouts. If disabled, replicas will assume that requests
+# were forwarded to them instantly by the coordinator, which means that
+# under overload conditions we will waste that much extra time processing
+# already-timed-out requests.
+#
+# Warning: before enabling this property make sure ntp is installed
+# and the times are synchronized between the nodes.
+cross_node_timeout: false
+
+# Set keep-alive period for streaming
+# This node will send a keep-alive message periodically with this period.
+# If the node does not receive a keep-alive message from the peer for
+# 2 keep-alive cycles the stream session times out and fails.
+# Default value is 300s (5 minutes), which means a stalled stream
+# times out in 10 minutes by default.
+# streaming_keep_alive_period_in_secs: 300
+
+# phi value that must be reached for a host to be marked down.
+# most users should never need to adjust this.
+# phi_convict_threshold: 8
+
+# endpoint_snitch -- Set this to a class that implements
+# IEndpointSnitch. The snitch has two functions:
+#
+# - it teaches Cassandra enough about your network topology to route
+# requests efficiently
+# - it allows Cassandra to spread replicas around your cluster to avoid
+# correlated failures. It does this by grouping machines into
+# "datacenters" and "racks." Cassandra will do its best not to have
+# more than one replica on the same "rack" (which may not actually
+# be a physical location)
+#
+# CASSANDRA WILL NOT ALLOW YOU TO SWITCH TO AN INCOMPATIBLE SNITCH
+# ONCE DATA IS INSERTED INTO THE CLUSTER. This would cause data loss.
+# This means that if you start with the default SimpleSnitch, which
+# locates every node on "rack1" in "datacenter1", your only options
+# if you need to add another datacenter are GossipingPropertyFileSnitch
+# (and the older PFS). From there, if you want to migrate to an
+# incompatible snitch like Ec2Snitch you can do it by adding new nodes
+# under Ec2Snitch (which will locate them in a new "datacenter") and
+# decommissioning the old ones.
+#
+# Out of the box, Cassandra provides:
+#
+# SimpleSnitch:
+# Treats Strategy order as proximity. This can improve cache
+# locality when disabling read repair. Only appropriate for
+# single-datacenter deployments.
+#
+# GossipingPropertyFileSnitch
+# This should be your go-to snitch for production use. The rack
+# and datacenter for the local node are defined in
+# cassandra-rackdc.properties and propagated to other nodes via
+# gossip. If cassandra-topology.properties exists, it is used as a
+# fallback, allowing migration from the PropertyFileSnitch.
+#
+# PropertyFileSnitch:
+# Proximity is determined by rack and data center, which are
+# explicitly configured in cassandra-topology.properties.
+#
+# Ec2Snitch:
+# Appropriate for EC2 deployments in a single Region. Loads Region
+# and Availability Zone information from the EC2 API. The Region is
+# treated as the datacenter, and the Availability Zone as the rack.
+# Only private IPs are used, so this will not work across multiple
+# Regions.
+#
+# Ec2MultiRegionSnitch:
+# Uses public IPs as broadcast_address to allow cross-region
+# connectivity. (Thus, you should set seed addresses to the public
+# IP as well.) You will need to open the storage_port or
+# ssl_storage_port on the public IP firewall. (For intra-Region
+# traffic, Cassandra will switch to the private IP after
+# establishing a connection.)
+#
+# RackInferringSnitch:
+# Proximity is determined by rack and data center, which are
+# assumed to correspond to the 3rd and 2nd octet of each node's IP
+# address, respectively. Unless this happens to match your
+# deployment conventions, this is best used as an example of
+# writing a custom Snitch class and is provided in that spirit.
+#
+# You can use a custom Snitch by setting this to the full class name
+# of the snitch, which will be assumed to be on your classpath.
+endpoint_snitch: SimpleSnitch
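+
+# For example (illustrative, not the shipped default): for production you
+# would typically switch to
+#
+# endpoint_snitch: GossipingPropertyFileSnitch
+#
+# and describe the local node in cassandra-rackdc.properties, e.g.:
+#
+# dc=dc1
+# rack=rack1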
+
+# controls how often to perform the more expensive part of host score
+# calculation
+dynamic_snitch_update_interval_in_ms: 100
+# controls how often to reset all host scores, allowing a bad host to
+# possibly recover
+dynamic_snitch_reset_interval_in_ms: 600000
+# if set greater than zero and read_repair_chance is < 1.0, this will allow
+# 'pinning' of replicas to hosts in order to increase cache capacity.
+# The badness threshold will control how much worse the pinned host has to be
+# before the dynamic snitch will prefer other replicas over it. This is
+# expressed as a double which represents a percentage. Thus, a value of
+# 0.2 means Cassandra would continue to prefer the static snitch values
+# until the pinned host was 20% worse than the fastest.
+dynamic_snitch_badness_threshold: 0.1
+
+# request_scheduler -- Set this to a class that implements
+# RequestScheduler, which will schedule incoming client requests
+# according to the specific policy. This is useful for multi-tenancy
+# with a single Cassandra cluster.
+# NOTE: This is specifically for requests from the client and does
+# not affect inter node communication.
+# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
+# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
+# client requests to a node with a separate queue for each
+# request_scheduler_id. The scheduler is further customized by
+# request_scheduler_options as described below.
+request_scheduler: org.apache.cassandra.scheduler.NoScheduler
+
+# Scheduler Options vary based on the type of scheduler
+#
+# NoScheduler
+# Has no options
+#
+# RoundRobin
+# throttle_limit
+# The throttle_limit is the number of in-flight
+# requests per client. Requests beyond
+# that limit are queued up until
+# running requests can complete.
+# The value of 80 here is twice the number of
+# concurrent_reads + concurrent_writes.
+# default_weight
+# default_weight is optional and allows for
+# overriding the default which is 1.
+# weights
+# Weights are optional and will default to 1 or the
+# overridden default_weight. The weight translates into how
+# many requests are handled during each turn of the
+# RoundRobin, based on the scheduler id.
+#
+# request_scheduler_options:
+# throttle_limit: 80
+# default_weight: 5
+# weights:
+# Keyspace1: 1
+# Keyspace2: 5
+
+# request_scheduler_id -- An identifier based on which to perform
+# the request scheduling. Currently the only valid option is keyspace.
+# request_scheduler_id: keyspace
+
+# Enable or disable inter-node encryption
+# JVM defaults for supported SSL socket protocols and cipher suites can
+# be replaced using custom encryption options. This is not recommended
+# unless you have policies in place that dictate certain settings, or
+# need to disable vulnerable ciphers or protocols in case the JVM cannot
+# be updated.
+# FIPS compliant settings can be configured at JVM level and should not
+# involve changing encryption settings here:
+# https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/FIPS.html
+# *NOTE* No custom encryption options are enabled at the moment
+# The available internode options are: all, none, dc, rack
+#
+# If set to dc, Cassandra will encrypt the traffic between the DCs
+# If set to rack, Cassandra will encrypt the traffic between the racks
+#
+# The passwords used in these options must match the passwords used when generating
+# the keystore and truststore. For instructions on generating these files, see:
+# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
+#
+server_encryption_options:
+ internode_encryption: none
+ keystore: conf/.keystore
+ keystore_password: cassandra
+ truststore: conf/.truststore
+ truststore_password: cassandra
+ # More advanced defaults below:
+ # protocol: TLS
+ # algorithm: SunX509
+ # store_type: JKS
+ # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
+ # require_client_auth: false
+ # require_endpoint_verification: false
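+ #
+ # Illustrative example (paths and passwords are placeholders, not values
+ # shipped by this formula): to encrypt only cross-datacenter traffic with
+ # your own stores, you could set:
+ # internode_encryption: dc
+ # keystore: /etc/cassandra/server-keystore.jks
+ # keystore_password: <keystore-password>
+ # truststore: /etc/cassandra/server-truststore.jks
+ # truststore_password: <truststore-password>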
+
+# enable or disable client/server encryption.
+client_encryption_options:
+ enabled: false
+ # If enabled and optional is set to true encrypted and unencrypted connections are handled.
+ optional: false
+ keystore: conf/.keystore
+ keystore_password: cassandra
+ # require_client_auth: false
+ # Set truststore and truststore_password if require_client_auth is true
+ # truststore: conf/.truststore
+ # truststore_password: cassandra
+ # More advanced defaults below:
+ # protocol: TLS
+ # algorithm: SunX509
+ # store_type: JKS
+ # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
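+ #
+ # Illustrative example (store paths and passwords are placeholders): to
+ # require TLS from CQL clients, and client certificates as well, you could
+ # combine enabled: true and require_client_auth: true with:
+ # keystore: /etc/cassandra/client-keystore.jks
+ # keystore_password: <keystore-password>
+ # truststore: /etc/cassandra/client-truststore.jks
+ # truststore_password: <truststore-password>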
+
+# internode_compression controls whether traffic between nodes is
+# compressed.
+# Can be:
+#
+# all
+# all traffic is compressed
+#
+# dc
+# traffic between different datacenters is compressed
+#
+# none
+# nothing is compressed.
+internode_compression: all
+
+# Enable or disable tcp_nodelay for inter-dc communication.
+# Disabling it will result in larger (but fewer) network packets being sent,
+# reducing overhead from the TCP protocol itself, at the cost of increasing
+# latency if you block for cross-datacenter responses.
+inter_dc_tcp_nodelay: true
+
+# TTL for different trace types used during logging of the repair process.
+tracetype_query_ttl: 86400
+tracetype_repair_ttl: 604800
+
+# By default, Cassandra logs GC Pauses greater than 200 ms at INFO level
+# This threshold can be adjusted to minimize logging if necessary
+# gc_log_threshold_in_ms: 200
+
+# If unset, all GC Pauses greater than gc_log_threshold_in_ms will log at
+# INFO level
+
+# UDFs (user defined functions) are disabled by default.
+# As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code.
+enable_user_defined_functions: false
+
+# Enables scripted UDFs (JavaScript UDFs).
+# Java UDFs are always enabled, if enable_user_defined_functions is true.
+# Enable this option to be able to use UDFs with "language javascript" or any custom JSR-223 provider.
+# This option has no effect, if enable_user_defined_functions is false.
+enable_scripted_user_defined_functions: false
+
+# The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation.
+# Lowering this value on Windows can provide much tighter latency and better throughput, however
+# some virtualized environments may see a negative performance impact from changing this setting
+# below their system default. The sysinternals 'clockres' tool can confirm your system's default
+# setting.
+windows_timer_interval: 1
+
+
+# Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from
+# a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by
+# the "key_alias" is the only key that will be used for encrypt opertaions; previously used keys
+# can still (and should!) be in the keystore and will be used on decrypt operations
+# (to handle the case of key rotation).
+#
+# It is strongly recommended to download and install Java Cryptography Extension (JCE)
+# Unlimited Strength Jurisdiction Policy Files for your version of the JDK.
+# (current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html)
+#
+# Currently, only the following file types are supported for transparent data encryption, although
+# more are coming in future cassandra releases: commitlog, hints
+transparent_data_encryption_options:
+ enabled: false
+ chunk_length_kb: 64
+ cipher: AES/CBC/PKCS5Padding
+ key_alias: testing:1
+ # CBC IV length for AES needs to be 16 bytes (which is also the default size)
+ # iv_length: 16
+ key_provider:
+ - class_name: org.apache.cassandra.security.JKSKeyProvider
+ parameters:
+ - keystore: conf/.keystore
+ keystore_password: cassandra
+ store_type: JCEKS
+ key_password: cassandra
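+
+# Illustrative only (alias, path and passwords are placeholders): a JCEKS
+# keystore holding an AES key for the provider above can be created with the
+# JDK keytool, along the lines of:
+#
+# keytool -genseckey -keyalg AES -keysize 128 -alias "testing:1" \
+#   -keystore conf/.keystore -storetype jceks -storepass cassandra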
+
+
+#####################
+# SAFETY THRESHOLDS #
+#####################
+
+# When executing a scan, within or across a partition, we need to keep the
+# tombstones seen in memory so we can return them to the coordinator, which
+# will use them to make sure other replicas also know about the deleted rows.
+# With workloads that generate a lot of tombstones, this can cause performance
+# problems and even exhaust the server heap.
+# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets)
+# Adjust the thresholds here if you understand the dangers and want to
+# scan more tombstones anyway. These thresholds may also be adjusted at runtime
+# using the StorageService mbean.
+tombstone_warn_threshold: 1000
+tombstone_failure_threshold: 100000
+
+# Log WARN on any multiple-partition batch size exceeding this value. 5kb per batch by default.
+# Caution should be taken on increasing the size of this threshold as it can lead to node instability.
+batch_size_warn_threshold_in_kb: 5
+
+# Fail any multiple-partition batch exceeding this value. 50kb (10x warn threshold) by default.
+batch_size_fail_threshold_in_kb: 50
+
+# Log WARN on any batches not of type LOGGED that span across more partitions than this limit
+unlogged_batch_across_partitions_warn_threshold: 10
+
+# Log a warning when compacting partitions larger than this value
+compaction_large_partition_warning_threshold_mb: 100
+
+# GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level
+# Adjust the threshold based on your application throughput requirement
+# By default, Cassandra logs GC Pauses greater than 200 ms at INFO level
+gc_warn_threshold_in_ms: 1000
+
+# Maximum size of any value in SSTables. Safety measure to detect SSTable corruption
+# early. Any value size larger than this threshold will result in marking an SSTable
+# as corrupted.
+# max_value_size_in_mb: 256
+
+# Back-pressure settings #
+# If enabled, the coordinator will apply the back-pressure strategy specified below to each mutation
+# sent to replicas, with the aim of reducing pressure on overloaded replicas.
+back_pressure_enabled: false
+# The back-pressure strategy applied.
+# The default implementation, RateBasedBackPressure, takes three arguments:
+# high ratio, factor, and flow type, and uses the ratio between incoming mutation responses and outgoing mutation requests.
+# If below high ratio, outgoing mutations are rate limited according to the incoming rate decreased by the given factor;
+# if above high ratio, the rate limiting is increased by the given factor;
+# this factor is usually best configured between 1 and 10; use larger values for a faster recovery
+# at the expense of potentially more dropped mutations;
+# the rate limiting is applied according to the flow type: if FAST, it's rate limited at the speed of the fastest replica,
+# if SLOW at the speed of the slowest one.
+# New strategies can be added. Implementors need to implement org.apache.cassandra.net.BackpressureStrategy and
+# provide a public constructor accepting a Map<String, Object>.
+back_pressure_strategy:
+ - class_name: org.apache.cassandra.net.RateBasedBackPressure
+ parameters:
+ - high_ratio: 0.90
+ factor: 5
+ flow: FAST
diff --git a/opencontrail/files/cassandra/3.10/cassandra_analytics.yaml b/opencontrail/files/cassandra/3.10/cassandra_analytics.yaml
new file mode 100644
index 0000000..db38900
--- /dev/null
+++ b/opencontrail/files/cassandra/3.10/cassandra_analytics.yaml
@@ -0,0 +1,1208 @@
+{%- from "opencontrail/map.jinja" import database with context %}
+# Cassandra storage config YAML
+
+# NOTE:
+# See http://wiki.apache.org/cassandra/StorageConfiguration for
+# full explanations of configuration directives
+# /NOTE
+
+# The name of the cluster. This is mainly used to prevent machines in
+# one logical cluster from joining another.
+cluster_name: '{{ database.name }}'
+
+# This defines the number of tokens randomly assigned to this node on the ring
+# The more tokens, relative to other nodes, the larger the proportion of data
+# that this node will store. You probably want all nodes to have the same number
+# of tokens assuming they have equal hardware capability.
+#
+# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility,
+# and will use the initial_token as described below.
+#
+# Specifying initial_token will override this setting on the node's initial start;
+# on subsequent starts, this setting will apply even if initial token is set.
+#
+# If you already have a cluster with 1 token per node, and wish to migrate to
+# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations
+num_tokens: 256
+
+# Triggers automatic allocation of num_tokens tokens for this node. The allocation
+# algorithm attempts to choose tokens in a way that optimizes replicated load over
+# the nodes in the datacenter for the replication strategy used by the specified
+# keyspace.
+#
+# The load assigned to each node will be close to proportional to its number of
+# vnodes.
+#
+# Only supported with the Murmur3Partitioner.
+# allocate_tokens_for_keyspace: KEYSPACE
+
+# initial_token allows you to specify tokens manually. While you can use it with
+# vnodes (num_tokens > 1, above) -- in which case you should provide a
+# comma-separated list -- it's primarily used when adding nodes to legacy clusters
+# that do not have vnodes enabled.
+# initial_token:
+
+# See http://wiki.apache.org/cassandra/HintedHandoff
+# May either be "true" or "false" to enable globally
+hinted_handoff_enabled: true
+
+# When hinted_handoff_enabled is true, a black list of data centers that will not
+# perform hinted handoff
+# hinted_handoff_disabled_datacenters:
+# - DC1
+# - DC2
+
+# this defines the maximum amount of time a dead host will have hints
+# generated. After it has been dead this long, new hints for it will not be
+# created until it has been seen alive and gone down again.
+max_hint_window_in_ms: 10800000 # 3 hours
+
+# Maximum throttle in KBs per second, per delivery thread. This will be
+# reduced proportionally to the number of nodes in the cluster. (If there
+# are two nodes in the cluster, each delivery thread will use the maximum
+# rate; if there are three, each will throttle to half of the maximum,
+# since we expect two nodes to be delivering hints simultaneously.)
+hinted_handoff_throttle_in_kb: 1024
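+
+# Worked example of the scaling above: with the 1024 KB/s default in a
+# three-node cluster, each delivery thread throttles to 1024 / 2 = 512 KB/s,
+# since two nodes are expected to deliver hints simultaneously.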
+
+# Number of threads with which to deliver hints;
+# Consider increasing this number when you have multi-dc deployments, since
+# cross-dc handoff tends to be slower
+max_hints_delivery_threads: 2
+
+# Directory where Cassandra should store hints.
+# If not set, the default directory is $CASSANDRA_HOME/data/hints.
+# hints_directory: /var/lib/cassandra/hints
+
+# How often hints should be flushed from the internal buffers to disk.
+# Will *not* trigger fsync.
+hints_flush_period_in_ms: 10000
+
+# Maximum size for a single hints file, in megabytes.
+max_hints_file_size_in_mb: 128
+
+# Compression to apply to the hint files. If omitted, hints files
+# will be written uncompressed. LZ4, Snappy, and Deflate compressors
+# are supported.
+#hints_compression:
+# - class_name: LZ4Compressor
+# parameters:
+# -
+
+# Maximum throttle in KBs per second, total. This will be
+# reduced proportionally to the number of nodes in the cluster.
+batchlog_replay_throttle_in_kb: 1024
+
+# Authentication backend, implementing IAuthenticator; used to identify users
+# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator,
+# PasswordAuthenticator}.
+#
+# - AllowAllAuthenticator performs no checks - set it to disable authentication.
+# - PasswordAuthenticator relies on username/password pairs to authenticate
+# users. It keeps usernames and hashed passwords in system_auth.roles table.
+# Please increase system_auth keyspace replication factor if you use this authenticator.
+# If using PasswordAuthenticator, CassandraRoleManager must also be used (see below)
+authenticator: AllowAllAuthenticator
+
+# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions
+# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer,
+# CassandraAuthorizer}.
+#
+# - AllowAllAuthorizer allows any action to any user - set it to disable authorization.
+# - CassandraAuthorizer stores permissions in system_auth.role_permissions table. Please
+# increase system_auth keyspace replication factor if you use this authorizer.
+authorizer: AllowAllAuthorizer
+
+# Part of the Authentication & Authorization backend, implementing IRoleManager; used
+# to maintain grants and memberships between roles.
+# Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager,
+# which stores role information in the system_auth keyspace. Most functions of the
+# IRoleManager require an authenticated login, so unless the configured IAuthenticator
+# actually implements authentication, most of this functionality will be unavailable.
+#
+# - CassandraRoleManager stores role data in the system_auth keyspace. Please
+# increase system_auth keyspace replication factor if you use this role manager.
+role_manager: CassandraRoleManager
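+
+# Illustrative example: to switch on internal password authentication and
+# authorization (the role manager can stay as above), you would set
+#
+# authenticator: PasswordAuthenticator
+# authorizer: CassandraAuthorizer
+#
+# and increase the replication factor of the system_auth keyspace as advised
+# in the comments above.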
+
+# Validity period for roles cache (fetching granted roles can be an expensive
+# operation depending on the role manager, CassandraRoleManager is one example)
+# Granted roles are cached for authenticated sessions in AuthenticatedUser and
+# after the period specified here, become eligible for (async) reload.
+# Defaults to 2000, set to 0 to disable caching entirely.
+# Will be disabled automatically for AllowAllAuthenticator.
+roles_validity_in_ms: 2000
+
+# Refresh interval for roles cache (if enabled).
+# After this interval, cache entries become eligible for refresh. Upon next
+# access, an async reload is scheduled and the old value is returned until it
+# completes. If roles_validity_in_ms is non-zero, then this must be
+# non-zero as well.
+# Defaults to the same value as roles_validity_in_ms.
+# roles_update_interval_in_ms: 2000
+
+# Validity period for permissions cache (fetching permissions can be an
+# expensive operation depending on the authorizer, CassandraAuthorizer is
+# one example). Defaults to 2000, set to 0 to disable.
+# Will be disabled automatically for AllowAllAuthorizer.
+permissions_validity_in_ms: 2000
+
+# Refresh interval for permissions cache (if enabled).
+# After this interval, cache entries become eligible for refresh. Upon next
+# access, an async reload is scheduled and the old value is returned until it
+# completes. If permissions_validity_in_ms is non-zero, then this must be
+# non-zero as well.
+# Defaults to the same value as permissions_validity_in_ms.
+# permissions_update_interval_in_ms: 2000
+
+# Validity period for credentials cache. This cache is tightly coupled to
+# the provided PasswordAuthenticator implementation of IAuthenticator. If
+# another IAuthenticator implementation is configured, this cache will not
+# be automatically used and so the following settings will have no effect.
+# Please note, credentials are cached in their encrypted form, so while
+# activating this cache may reduce the number of queries made to the
+# underlying table, it may not bring a significant reduction in the
+# latency of individual authentication attempts.
+# Defaults to 2000, set to 0 to disable credentials caching.
+credentials_validity_in_ms: 2000
+
+# Refresh interval for credentials cache (if enabled).
+# After this interval, cache entries become eligible for refresh. Upon next
+# access, an async reload is scheduled and the old value is returned until it
+# completes. If credentials_validity_in_ms is non-zero, then this must be
+# non-zero as well.
+# Defaults to the same value as credentials_validity_in_ms.
+# credentials_update_interval_in_ms: 2000
+
+# The partitioner is responsible for distributing groups of rows (by
+# partition key) across nodes in the cluster. You should leave this
+# alone for new clusters. The partitioner can NOT be changed without
+# reloading all data, so when upgrading you should set this to the
+# same partitioner you were already using.
+#
+# Besides Murmur3Partitioner, partitioners included for backwards
+# compatibility include RandomPartitioner, ByteOrderedPartitioner, and
+# OrderPreservingPartitioner.
+#
+partitioner: org.apache.cassandra.dht.Murmur3Partitioner
+
+# Directories where Cassandra should store data on disk. Cassandra
+# will spread data evenly across them, subject to the granularity of
+# the configured compaction strategy.
+# If not set, the default directory is $CASSANDRA_HOME/data/data.
+data_file_directories:
+ - /var/lib/cassandra/data
+
+# commit log. when running on magnetic HDD, this should be a
+# separate spindle than the data directories.
+# If not set, the default directory is $CASSANDRA_HOME/data/commitlog.
+commitlog_directory: /var/lib/cassandra/commitlog
+
+# Enable / disable CDC functionality on a per-node basis. This modifies the logic used
+# for write path allocation rejection (standard: never reject. cdc: reject Mutation
+# containing a CDC-enabled table if at space limit in cdc_raw_directory).
+cdc_enabled: false
+
+# CommitLogSegments are moved to this directory on flush if cdc_enabled: true and the
+# segment contains mutations for a CDC-enabled table. This should be placed on a
+# separate spindle than the data directories. If not set, the default directory is
+# $CASSANDRA_HOME/data/cdc_raw.
+# cdc_raw_directory: /var/lib/cassandra/cdc_raw
+
+# Policy for data disk failures:
+#
+# die
+# shut down gossip and client transports and kill the JVM for any fs errors or
+# single-sstable errors, so the node can be replaced.
+#
+# stop_paranoid
+# shut down gossip and client transports even for single-sstable errors,
+# kill the JVM for errors during startup.
+#
+# stop
+# shut down gossip and client transports, leaving the node effectively dead, but
+# can still be inspected via JMX, kill the JVM for errors during startup.
+#
+# best_effort
+# stop using the failed disk and respond to requests based on
+# remaining available sstables. This means you WILL see obsolete
+# data at CL.ONE!
+#
+# ignore
+# ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
+disk_failure_policy: stop
+
+# Policy for commit disk failures:
+#
+# die
+# shut down gossip and Thrift and kill the JVM, so the node can be replaced.
+#
+# stop
+# shut down gossip and Thrift, leaving the node effectively dead, but
+# can still be inspected via JMX.
+#
+# stop_commit
+# shut down the commit log, letting writes collect but
+# continuing to service reads, as in pre-2.0.5 Cassandra
+#
+# ignore
+# ignore fatal errors and let the batches fail
+commit_failure_policy: stop
+
+# Maximum size of the native protocol prepared statement cache
+#
+# Valid values are either "auto" (omitting the value) or a value greater 0.
+#
+# Note that specifying too large a value will result in long-running GCs and possibly
+# out-of-memory errors. Keep the value at a small fraction of the heap.
+#
+# If you constantly see "prepared statements discarded in the last minute because
+# cache limit reached" messages, the first step is to investigate the root cause
+# of these messages and check whether prepared statements are used correctly -
+# i.e. use bind markers for variable parts.
+#
+# Only change the default value if you really have more prepared statements than
+# fit in the cache. In most cases it is not necessary to change this value.
+# Constantly re-preparing statements is a performance penalty.
+#
+# Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater
+prepared_statements_cache_size_mb:
+
+# Maximum size of the Thrift prepared statement cache
+#
+# If you do not use Thrift at all, it is safe to leave this value at "auto".
+#
+# See description of 'prepared_statements_cache_size_mb' above for more information.
+#
+# Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater
+thrift_prepared_statements_cache_size_mb:
+
+# Maximum size of the key cache in memory.
+#
+# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
+# minimum, sometimes more. The key cache is fairly tiny for the amount of
+# time it saves, so it's worthwhile to use it at large numbers.
+# The row cache saves even more time, but must contain the entire row,
+# so it is extremely space-intensive. It's best to only use the
+# row cache if you have hot rows or static rows.
+#
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.
+key_cache_size_in_mb:
+
+# Duration in seconds after which Cassandra should
+# save the key cache. Caches are saved to saved_caches_directory as
+# specified in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and saving is relatively cheap in
+# terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 14400 or 4 hours.
+key_cache_save_period: 14400
+
+# Number of keys from the key cache to save
+# Disabled by default, meaning all keys are going to be saved
+# key_cache_keys_to_save: 100
+
+# Row cache implementation class name. Available implementations:
+#
+# org.apache.cassandra.cache.OHCProvider
+# Fully off-heap row cache implementation (default).
+#
+# org.apache.cassandra.cache.SerializingCacheProvider
+# This is the row cache implementation available
+# in previous releases of Cassandra.
+# row_cache_class_name: org.apache.cassandra.cache.OHCProvider
+
+# Maximum size of the row cache in memory.
+# Please note that OHC cache implementation requires some additional off-heap memory to manage
+# the map structures and some in-flight memory during operations before/after cache entries can be
+# accounted against the cache capacity. This overhead is usually small compared to the whole capacity.
+# Do not specify more memory than the system can afford in the worst usual situation and leave some
+# headroom for OS block level cache. Never allow your system to swap.
+#
+# Default value is 0, to disable row caching.
+row_cache_size_in_mb: 0
+
+# Duration in seconds after which Cassandra should save the row cache.
+# Caches are saved to saved_caches_directory as specified in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and saving is relatively cheap in
+# terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 0 to disable saving the row cache.
+row_cache_save_period: 0
+
+# Number of keys from the row cache to save.
+# Specify 0 (which is the default), meaning all keys are going to be saved
+# row_cache_keys_to_save: 100
+
+# Maximum size of the counter cache in memory.
+#
+# Counter cache helps to reduce counter locks' contention for hot counter cells.
+# In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before
+# write entirely. With RF > 1 a counter cache hit will still help to reduce the duration
+# of the lock hold, helping with hot counter cell updates, but will not allow skipping
+# the read entirely. Only the local (clock, count) tuple of a counter cell is kept
+# in memory, not the whole counter, so it's relatively cheap.
+#
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is empty to make it "auto" (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache.
+# NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache.
+counter_cache_size_in_mb:
+
+# Duration in seconds after which Cassandra should
+# save the counter cache (keys only). Caches are saved to saved_caches_directory as
+# specified in this configuration file.
+#
+# Default is 7200 or 2 hours.
+counter_cache_save_period: 7200
+
+# Number of keys from the counter cache to save
+# Disabled by default, meaning all keys are going to be saved
+# counter_cache_keys_to_save: 100
+
+# saved caches
+# If not set, the default directory is $CASSANDRA_HOME/data/saved_caches.
+saved_caches_directory: /var/lib/cassandra/saved_caches
+
+# commitlog_sync may be either "periodic" or "batch."
+#
+# When in batch mode, Cassandra won't ack writes until the commit log
+# has been fsynced to disk. It will wait
+# commitlog_sync_batch_window_in_ms milliseconds between fsyncs.
+# This window should be kept short because the writer threads will
+# be unable to do extra work while waiting. (You may need to increase
+# concurrent_writes for the same reason.)
+#
+# commitlog_sync: batch
+# commitlog_sync_batch_window_in_ms: 2
+#
+# the other option is "periodic" where writes may be acked immediately
+# and the CommitLog is simply synced every commitlog_sync_period_in_ms
+# milliseconds.
+commitlog_sync: periodic
+commitlog_sync_period_in_ms: 10000
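+
+# Worked example of the trade-off above: with the periodic settings shown,
+# the commit log is fsynced every 10 seconds, so on a sudden power loss up
+# to ~10 seconds of acknowledged writes could be lost on this node; batch
+# mode avoids this at the cost of write latency.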
+
+# The size of the individual commitlog file segments. A commitlog
+# segment may be archived, deleted, or recycled once all the data
+# in it (potentially from each columnfamily in the system) has been
+# flushed to sstables.
+#
+# The default size is 32, which is almost always fine, but if you are
+# archiving commitlog segments (see commitlog_archiving.properties),
+# then you probably want a finer granularity of archiving; 8 or 16 MB
+# is reasonable.
+# Max mutation size is also configurable via max_mutation_size_in_kb setting in
+# cassandra.yaml. The default is half the size of commitlog_segment_size_in_mb * 1024.
+#
+# NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must
+# be set to at least twice the size of max_mutation_size_in_kb / 1024
+#
+commitlog_segment_size_in_mb: 32
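+
+# Worked example of the note above: with the default 32 MB segments,
+# max_mutation_size_in_kb defaults to (32 * 1024) / 2 = 16384 KB; conversely,
+# an explicit max_mutation_size_in_kb of 16384 requires
+# commitlog_segment_size_in_mb >= 2 * 16384 / 1024 = 32.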
+
+# Compression to apply to the commit log. If omitted, the commit log
+# will be written uncompressed. LZ4, Snappy, and Deflate compressors
+# are supported.
+# commitlog_compression:
+# - class_name: LZ4Compressor
+# parameters:
+# -
+
+# any class that implements the SeedProvider interface and has a
+# constructor that takes a Map<String, String> of parameters will do.
+seed_provider:
+ # Addresses of hosts that are deemed contact points.
+ # Cassandra nodes use this list of hosts to find each other and learn
+ # the topology of the ring. You must change this if you are running
+ # multiple nodes!
+ - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+ parameters:
+ # seeds is actually a comma-delimited list of addresses.
+ # Ex: "<ip1>,<ip2>,<ip3>"
+ - seeds: "{%- for member in database.members %}{{ member.host }},{%- endfor %}"
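+ # With, for instance, two members in the database pillar (hosts are
+ # illustrative), the template above renders to:
+ # - seeds: "10.0.0.1,10.0.0.2,"
+ # Note that the template emits a trailing comma after the last host.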
+
+# For workloads with more data than can fit in memory, Cassandra's
+# bottleneck will be reads that need to fetch data from
+# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
+# order to allow the operations to enqueue low enough in the stack
+# that the OS and drives can reorder them. Same applies to
+# "concurrent_counter_writes", since counter writes read the current
+# values before incrementing and writing them back.
+#
+# On the other hand, since writes are almost never IO bound, the ideal
+# number of "concurrent_writes" is dependent on the number of cores in
+# your system; (8 * number_of_cores) is a good rule of thumb.
+concurrent_reads: 32
+concurrent_writes: 32
+concurrent_counter_writes: 32
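+
+# Worked example of the rules of thumb above (hardware is illustrative):
+# a node with 2 data drives and 8 cores would suggest
+# concurrent_reads = 16 * 2 = 32 and concurrent_writes = 8 * 8 = 64.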
+
+# For materialized view writes, as there is a read involved, this should
+# be limited by the lesser of concurrent reads and concurrent writes.
+concurrent_materialized_view_writes: 32
+
+# Maximum memory to use for sstable chunk cache and buffer pooling.
+# 32MB of this is reserved for pooling buffers, the rest is used as a
+# cache that holds uncompressed sstable chunks.
+# Defaults to the smaller of 1/4 of heap or 512MB. This pool is allocated off-heap,
+# so is in addition to the memory allocated for heap. The cache also has on-heap
+# overhead which is roughly 128 bytes per chunk (i.e. 0.2% of the reserved size
+# if the default 64k chunk size is used).
+# Memory is only allocated when needed.
+# file_cache_size_in_mb: 512
+
+# Flag indicating whether to allocate on or off heap when the sstable buffer
+# pool is exhausted, that is when it has exceeded the maximum memory
+# file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request.
+
+# buffer_pool_use_heap_if_exhausted: true
+
+# The strategy for optimizing disk read
+# Possible values are:
+# ssd (for solid state disks, the default)
+# spinning (for spinning disks)
+# disk_optimization_strategy: ssd
+
+# Total permitted memory to use for memtables. Cassandra will stop
+# accepting writes when the limit is exceeded until a flush completes,
+# and will trigger a flush based on memtable_cleanup_threshold
+# If omitted, Cassandra will set both to 1/4 the size of the heap.
+# memtable_heap_space_in_mb: 2048
+# memtable_offheap_space_in_mb: 2048
+
+# memtable_cleanup_threshold is deprecated. The default calculation
+# is the only reasonable choice. See the comments on memtable_flush_writers
+# for more information.
+#
+# Ratio of occupied non-flushing memtable size to total permitted size
+# that will trigger a flush of the largest memtable. Larger mct will
+# mean larger flushes and hence less compaction, but also less concurrent
+# flush activity which can make it difficult to keep your disks fed
+# under heavy write load.
+#
+# memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1)
+# memtable_cleanup_threshold: 0.11
+
+# Specify the way Cassandra allocates and manages memtable memory.
+# Options are:
+#
+# heap_buffers
+# on heap nio buffers
+#
+# offheap_buffers
+# off heap (direct) nio buffers
+#
+# offheap_objects
+# off heap objects
+memtable_allocation_type: heap_buffers
+
+# Total space to use for commit logs on disk.
+#
+# If space gets above this value, Cassandra will flush every dirty CF
+# in the oldest segment and remove it. So a small total commitlog space
+# will tend to cause more flush activity on less-active columnfamilies.
+#
+# The default value is the smaller of 8192, and 1/4 of the total space
+# of the commitlog volume.
+#
+# commitlog_total_space_in_mb: 8192
+
+# This sets the number of memtable flush writer threads per disk
+# as well as the total number of memtables that can be flushed concurrently.
+# These are generally a combination of compute and IO bound.
+#
+# Memtable flushing is more CPU efficient than memtable ingest and a single thread
+# can keep up with the ingest rate of a whole server on a single fast disk
+# until it temporarily becomes IO bound under contention typically with compaction.
+# At that point you need multiple flush threads. At some point in the future
+# it may become CPU bound all the time.
+#
+# You can tell if flushing is falling behind using the MemtablePool.BlockedOnAllocation
+# metric which should be 0, but will be non-zero if threads are blocked waiting on flushing
+# to free memory.
+#
+# memtable_flush_writers defaults to two for a single data directory.
+# This means that two memtables can be flushed concurrently to the single data directory.
+# If you have multiple data directories the default is one memtable flushing at a time
+# but the flush will use a thread per data directory so you will get two or more writers.
+#
+# Two is generally enough to flush on a fast disk [array] mounted as a single data directory.
+# Adding more flush writers will result in smaller, more frequent flushes that introduce more
+# compaction overhead.
+#
+# There is a direct tradeoff between number of memtables that can be flushed concurrently
+# and flush size and frequency. More is not better; you just need enough flush writers
+# to never stall waiting for flushing to free memory.
+#
+#memtable_flush_writers: 2
+
+# Total space to use for change-data-capture logs on disk.
+#
+# If space gets above this value, Cassandra will throw WriteTimeoutException
+# on Mutations including tables with CDC enabled. A CDCCompactor is responsible
+# for parsing the raw CDC logs and deleting them when parsing is completed.
+#
+# The default value is the min of 4096 MB and 1/8th of the total space
+# of the drive where cdc_raw_directory resides.
+# cdc_total_space_in_mb: 4096
+
+# When we hit our cdc_raw limit and the CDCCompactor is either running behind
+# or experiencing backpressure, we check at the following interval to see if any
+# new space for cdc-tracked tables has been made available. Defaults to 250ms
+# cdc_free_space_check_interval_ms: 250
+
+# A fixed memory pool size in MB for SSTable index summaries. If left
+# empty, this will default to 5% of the heap size. If the memory usage of
+# all index summaries exceeds this limit, SSTables with low read rates will
+# shrink their index summaries in order to meet this limit. However, this
+# is a best-effort process. In extreme conditions Cassandra may need to use
+# more than this amount of memory.
+index_summary_capacity_in_mb:
+
+# How frequently index summaries should be resampled. This is done
+# periodically to redistribute memory from the fixed-size pool to sstables
+# proportional to their recent read rates. Setting to -1 will disable this
+# process, leaving existing index summaries at their current sampling level.
+index_summary_resize_interval_in_minutes: 60
+
+# Whether to, when doing sequential writing, fsync() at intervals in
+# order to force the operating system to flush the dirty
+# buffers. Enable this to avoid sudden dirty buffer flushing from
+# impacting read latencies. Almost always a good idea on SSDs; not
+# necessarily on platters.
+trickle_fsync: false
+trickle_fsync_interval_in_kb: 10240
+
+# TCP port, for commands and data
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+storage_port: 7000
+
+# SSL port, for encrypted communication. Unused unless enabled in
+# encryption_options
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+ssl_storage_port: 7001
+
+# Address or interface to bind to and tell other Cassandra nodes to connect to.
+# You _must_ change this if you want multiple nodes to be able to communicate!
+#
+# Set listen_address OR listen_interface, not both.
+#
+# Leaving it blank leaves it up to InetAddress.getLocalHost(). This
+# will always do the Right Thing _if_ the node is properly configured
+# (hostname, name resolution, etc), and the Right Thing is to use the
+# address associated with the hostname (it might not be).
+#
+# Setting listen_address to 0.0.0.0 is always wrong.
+#
+{% if database.bind.interface is defined %}
+listen_interface: {{ database.bind.interface }}
+{% else %}
+listen_address: {{ database.bind.host }}
+{% endif %}
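+
+# For reference, the Jinja block above is driven by pillar data along these
+# lines (keys shown with illustrative values):
+#
+# opencontrail:
+#   database:
+#     bind:
+#       interface: eth0    # used when defined; otherwise host is used
+#       host: 10.0.0.1
+#       port: 9042         # optional, see native_transport_port below
+#       rpc_port: 9160     # optional, see rpc_port below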
+
+# Set listen_address OR listen_interface, not both. Interfaces must correspond
+# to a single address, IP aliasing is not supported.
+# listen_interface: eth0
+
+# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
+# you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4
+# address will be used. If true the first ipv6 address will be used. Defaults to false preferring
+# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
+# listen_interface_prefer_ipv6: false
+
+# Address to broadcast to other Cassandra nodes
+# Leaving this blank will set it to the same value as listen_address
+# broadcast_address: 1.2.3.4
+
+# When using multiple physical network interfaces, set this
+# to true to listen on broadcast_address in addition to
+# the listen_address, allowing nodes to communicate in both
+# interfaces.
+# Ignore this property if the network configuration automatically
+# routes between the public and private networks such as EC2.
+# listen_on_broadcast_address: false
+
+# Internode authentication backend, implementing IInternodeAuthenticator;
+# used to allow/disallow connections from peer nodes.
+# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator
+
+# Whether to start the native transport server.
+# Please note that the address on which the native transport is bound is the
+# same as the rpc_address. The port however is different and specified below.
+start_native_transport: true
+# port for the CQL native transport to listen for clients on
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+native_transport_port: {{ database.bind.get('port', 9042) }}
+# Enabling native transport encryption in client_encryption_options allows you to either use
+# encryption for the standard port or to use a dedicated, additional port along with the unencrypted
+# standard native_transport_port.
+# Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption
+# for native_transport_port. Setting native_transport_port_ssl to a different value
+# from native_transport_port will use encryption for native_transport_port_ssl while
+# keeping native_transport_port unencrypted.
+# native_transport_port_ssl: 9142
+# The maximum threads for handling requests when the native transport is used.
+# This is similar to rpc_max_threads though the default differs slightly (and
+# there is no native_transport_min_threads, idle threads will always be stopped
+# after 30 seconds).
+# native_transport_max_threads: 128
+#
+# The maximum allowed frame size. Frames (requests) larger than this will
+# be rejected as invalid. The default is 256MB. If you're changing this parameter,
+# you may want to adjust max_value_size_in_mb accordingly.
+# native_transport_max_frame_size_in_mb: 256
+
+# The maximum number of concurrent client connections.
+# The default is -1, which means unlimited.
+# native_transport_max_concurrent_connections: -1
+
+# The maximum number of concurrent client connections per source ip.
+# The default is -1, which means unlimited.
+# native_transport_max_concurrent_connections_per_ip: -1
+
+# Whether to start the thrift rpc server.
+start_rpc: true
+
+# The address or interface to bind the Thrift RPC service and native transport
+# server to.
+#
+# Set rpc_address OR rpc_interface, not both.
+#
+# Leaving rpc_address blank has the same effect as on listen_address
+# (i.e. it will be based on the configured hostname of the node).
+#
+# Note that unlike listen_address, you can specify 0.0.0.0, but you must also
+# set broadcast_rpc_address to a value other than 0.0.0.0.
+#
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+{% if database.bind.interface is defined %}
+rpc_interface: {{ database.bind.interface }}
+{% else %}
+rpc_address: {{ database.bind.host }}
+{% endif %}
+
+# Set rpc_address OR rpc_interface, not both. Interfaces must correspond
+# to a single address, IP aliasing is not supported.
+# rpc_interface: eth1
+
+# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
+# you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4
+# address will be used. If true the first ipv6 address will be used. Defaults to false preferring
+# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
+# rpc_interface_prefer_ipv6: false
+
+# port for Thrift to listen for clients on
+rpc_port: {{ database.bind.get('rpc_port', 9160) }}
+
+# RPC address to broadcast to drivers and other Cassandra nodes. This cannot
+# be set to 0.0.0.0. If left blank, this will be set to the value of
+# rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must
+# be set.
+# broadcast_rpc_address: 1.2.3.4
+
+# enable or disable keepalive on rpc/native connections
+rpc_keepalive: true
+
+# Cassandra provides two out-of-the-box options for the RPC Server:
+#
+# sync
+# One thread per thrift connection. For a very large number of clients, memory
+# will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size
+# per thread, and that will correspond to your use of virtual memory (but physical memory
+# may be limited depending on use of stack space).
+#
+# hsha
+# Stands for "half synchronous, half asynchronous." All thrift clients are handled
+# asynchronously using a small number of threads that does not vary with the number
+# of thrift clients (and thus scales well to many clients). The rpc requests are still
+# synchronous (one thread per active request). If hsha is selected then it is essential
+# that rpc_max_threads is changed from the default value of unlimited.
+#
+# The default is sync because on Windows hsha is about 30% slower. On Linux,
+# sync/hsha performance is about the same, with hsha of course using less memory.
+#
+# Alternatively, you can supply your own RPC server by providing the fully-qualified class name
+# of an o.a.c.t.TServerFactory that can create an instance of it.
+rpc_server_type: sync
+
+# Uncomment rpc_min|max_thread to set request pool size limits.
+#
+# Regardless of your choice of RPC server (see above), the number of maximum requests in the
+# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync
+# RPC server, it also dictates the number of clients that can be connected at all).
+#
+# The default is unlimited and thus provides no protection against clients overwhelming the server. You are
+# encouraged to set a maximum that makes sense for you in production, but do keep in mind that
+# rpc_max_threads represents the maximum number of client requests this server may execute concurrently.
+#
+# rpc_min_threads: 16
+# rpc_max_threads: 2048
+
+# uncomment to set socket buffer sizes on rpc connections
+# rpc_send_buff_size_in_bytes:
+# rpc_recv_buff_size_in_bytes:
+
+# Uncomment to set socket buffer size for internode communication
+# Note that when setting this, the buffer size is limited by net.core.wmem_max
+# and when not setting it, it is defined by net.ipv4.tcp_wmem
+# See also:
+# /proc/sys/net/core/wmem_max
+# /proc/sys/net/core/rmem_max
+# /proc/sys/net/ipv4/tcp_wmem
+# /proc/sys/net/ipv4/tcp_rmem
+# and 'man tcp'
+# internode_send_buff_size_in_bytes:
+
+# Uncomment to set socket buffer size for internode communication
+# Note that when setting this, the buffer size is limited by net.core.rmem_max
+# and when not setting it, it is defined by net.ipv4.tcp_rmem
+# internode_recv_buff_size_in_bytes:
+
+# Frame size for thrift (maximum message length).
+thrift_framed_transport_size_in_mb: 15
+
+# Set to true to have Cassandra create a hard link to each sstable
+# flushed or streamed locally in a backups/ subdirectory of the
+# keyspace data. Removing these links is the operator's
+# responsibility.
+incremental_backups: false
+
+# Whether or not to take a snapshot before each compaction. Be
+# careful using this option, since Cassandra won't clean up the
+# snapshots for you. Mostly useful if you're paranoid when there
+# is a data format change.
+snapshot_before_compaction: false
+
+# Whether or not a snapshot is taken of the data before keyspace truncation
+# or dropping of column families. The STRONGLY advised default of true
+# should be used to provide data safety. If you set this flag to false, you will
+# lose data on truncation or drop.
+auto_snapshot: true
+
+# Granularity of the collation index of rows within a partition.
+# Increase if your rows are large, or if you have a very large
+# number of rows per partition. The competing goals are these:
+#
+# - a smaller granularity means more index entries are generated
+# and looking up rows within the partition by collation column
+# is faster
+# - but, Cassandra will keep the collation index in memory for hot
+# rows (as part of the key cache), so a larger granularity means
+# you can cache more hot rows
+column_index_size_in_kb: 64
+
+# Per sstable indexed key cache entries (the collation index in memory
+# mentioned above) exceeding this size will not be held on heap.
+# This means that only partition information is held on heap and the
+# index entries are read from disk.
+#
+# Note that this size refers to the size of the
+# serialized index information and not the size of the partition.
+column_index_cache_size_in_kb: 2
+
+# Number of simultaneous compactions to allow, NOT including
+# validation "compactions" for anti-entropy repair. Simultaneous
+# compactions can help preserve read performance in a mixed read/write
+# workload, by mitigating the tendency of small sstables to accumulate
+# during a single long-running compaction. The default is usually
+# fine and if you experience problems with compaction running too
+# slowly or too fast, you should look at
+# compaction_throughput_mb_per_sec first.
+#
+# concurrent_compactors defaults to the smaller of (number of disks,
+# number of cores), with a minimum of 2 and a maximum of 8.
+#
+# If your data directories are backed by SSD, you should increase this
+# to the number of cores.
+#concurrent_compactors: 1
+{% if database.concurrent_compactors is defined %}
+concurrent_compactors: {{ database.concurrent_compactors }}
+{% endif %}
+
+# Throttles compaction to the given total throughput across the entire
+# system. The faster you insert data, the faster you need to compact in
+# order to keep the sstable count down, but in general, setting this to
+# 16 to 32 times the rate you are inserting data is more than sufficient.
+# Setting this to 0 disables throttling. Note that this accounts for all types
+# of compaction, including validation compaction.
+compaction_throughput_mb_per_sec: {{ database.compaction_throughput_mb_per_sec }}
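+
+# Both compaction tunables above come from pillar data, along these lines
+# (values are illustrative):
+#
+# opencontrail:
+#   database:
+#     concurrent_compactors: 4
+#     compaction_throughput_mb_per_sec: 16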
+
+# When compacting, the replacement sstable(s) can be opened before they
+# are completely written, and used in place of the prior sstables for
+# any range that has been written. This helps to smoothly transfer reads
+# between the sstables, reducing page cache churn and keeping hot rows hot
+sstable_preemptive_open_interval_in_mb: 50
+
+# Throttles all outbound streaming file transfers on this node to the
+# given total throughput in Mbps. This is necessary because Cassandra does
+# mostly sequential IO when streaming data during bootstrap or repair, which
+# can lead to saturating the network connection and degrading rpc performance.
+# When unset, the default is 200 Mbps or 25 MB/s.
+# stream_throughput_outbound_megabits_per_sec: 200
+
+# Throttles all streaming file transfers between the datacenters;
+# this setting allows users to throttle inter dc stream throughput in addition
+# to throttling all network stream traffic as configured with
+# stream_throughput_outbound_megabits_per_sec
+# When unset, the default is 200 Mbps or 25 MB/s
+# inter_dc_stream_throughput_outbound_megabits_per_sec: 200
+
+# How long the coordinator should wait for read operations to complete
+read_request_timeout_in_ms: 10000
+# How long the coordinator should wait for seq or index scans to complete
+range_request_timeout_in_ms: 10000
+# How long the coordinator should wait for writes to complete
+write_request_timeout_in_ms: 10000
+# How long the coordinator should wait for counter writes to complete
+counter_write_request_timeout_in_ms: 5000
+# How long a coordinator should continue to retry a CAS operation
+# that contends with other proposals for the same row
+cas_contention_timeout_in_ms: 1000
+# How long the coordinator should wait for truncates to complete
+# (This can be much longer, because unless auto_snapshot is disabled
+# we need to flush first so we can snapshot before removing the data.)
+truncate_request_timeout_in_ms: 60000
+# The default timeout for other, miscellaneous operations
+request_timeout_in_ms: 10000
+
+# How long before a node logs slow queries. Select queries that take longer than
+# this timeout to execute will generate an aggregated log message, so that slow queries
+# can be identified. Set this value to zero to disable slow query logging.
+slow_query_log_timeout_in_ms: 500
+
+# Enable operation timeout information exchange between nodes to accurately
+# measure request timeouts. If disabled, replicas will assume that requests
+# were forwarded to them instantly by the coordinator, which means that
+# under overload conditions we will waste that much extra time processing
+# already-timed-out requests.
+#
+# Warning: before enabling this property make sure NTP is installed
+# and the times are synchronized between the nodes.
+cross_node_timeout: false
+
+# Set keep-alive period for streaming
+# This node will send a keep-alive message periodically with this period.
+# If the node does not receive a keep-alive message from the peer for
+# 2 keep-alive cycles the stream session times out and fails
+# Default value is 300s (5 minutes), which means a stalled stream
+# times out in 10 minutes by default
+# streaming_keep_alive_period_in_secs: 300
+
+# phi value that must be reached for a host to be marked down.
+# most users should never need to adjust this.
+# phi_convict_threshold: 8
+
+# endpoint_snitch -- Set this to a class that implements
+# IEndpointSnitch. The snitch has two functions:
+#
+# - it teaches Cassandra enough about your network topology to route
+# requests efficiently
+# - it allows Cassandra to spread replicas around your cluster to avoid
+# correlated failures. It does this by grouping machines into
+# "datacenters" and "racks." Cassandra will do its best not to have
+# more than one replica on the same "rack" (which may not actually
+# be a physical location)
+#
+# CASSANDRA WILL NOT ALLOW YOU TO SWITCH TO AN INCOMPATIBLE SNITCH
+# ONCE DATA IS INSERTED INTO THE CLUSTER. This would cause data loss.
+# This means that if you start with the default SimpleSnitch, which
+# locates every node on "rack1" in "datacenter1", your only options
+# if you need to add another datacenter are GossipingPropertyFileSnitch
+# (and the older PFS). From there, if you want to migrate to an
+# incompatible snitch like Ec2Snitch you can do it by adding new nodes
+# under Ec2Snitch (which will locate them in a new "datacenter") and
+# decommissioning the old ones.
+#
+# Out of the box, Cassandra provides:
+#
+# SimpleSnitch:
+# Treats Strategy order as proximity. This can improve cache
+# locality when disabling read repair. Only appropriate for
+# single-datacenter deployments.
+#
+# GossipingPropertyFileSnitch:
+# This should be your go-to snitch for production use. The rack
+# and datacenter for the local node are defined in
+# cassandra-rackdc.properties and propagated to other nodes via
+# gossip. If cassandra-topology.properties exists, it is used as a
+# fallback, allowing migration from the PropertyFileSnitch.
+#
+# PropertyFileSnitch:
+# Proximity is determined by rack and data center, which are
+# explicitly configured in cassandra-topology.properties.
+#
+# Ec2Snitch:
+# Appropriate for EC2 deployments in a single Region. Loads Region
+# and Availability Zone information from the EC2 API. The Region is
+# treated as the datacenter, and the Availability Zone as the rack.
+# Only private IPs are used, so this will not work across multiple
+# Regions.
+#
+# Ec2MultiRegionSnitch:
+# Uses public IPs as broadcast_address to allow cross-region
+# connectivity. (Thus, you should set seed addresses to the public
+# IP as well.) You will need to open the storage_port or
+# ssl_storage_port on the public IP firewall. (For intra-Region
+# traffic, Cassandra will switch to the private IP after
+# establishing a connection.)
+#
+# RackInferringSnitch:
+# Proximity is determined by rack and data center, which are
+# assumed to correspond to the 3rd and 2nd octet of each node's IP
+# address, respectively. Unless this happens to match your
+# deployment conventions, this is best used as an example of
+# writing a custom Snitch class and is provided in that spirit.
+#
+# You can use a custom Snitch by setting this to the full class name
+# of the snitch, which will be assumed to be on your classpath.
+endpoint_snitch: SimpleSnitch
+
+# controls how often to perform the more expensive part of host score
+# calculation
+dynamic_snitch_update_interval_in_ms: 100
+# controls how often to reset all host scores, allowing a bad host to
+# possibly recover
+dynamic_snitch_reset_interval_in_ms: 600000
+# if set greater than zero and read_repair_chance is < 1.0, this will allow
+# 'pinning' of replicas to hosts in order to increase cache capacity.
+# The badness threshold will control how much worse the pinned host has to be
+# before the dynamic snitch will prefer other replicas over it. This is
+# expressed as a double which represents a percentage. Thus, a value of
+# 0.2 means Cassandra would continue to prefer the static snitch values
+# until the pinned host was 20% worse than the fastest.
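+# With the 0.1 configured below, for example, replicas stay pinned until the
+# pinned host scores 10% worse than the fastest.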
+dynamic_snitch_badness_threshold: 0.1
+
+# request_scheduler -- Set this to a class that implements
+# RequestScheduler, which will schedule incoming client requests
+# according to the specific policy. This is useful for multi-tenancy
+# with a single Cassandra cluster.
+# NOTE: This is specifically for requests from the client and does
+# not affect inter node communication.
+# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
+# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
+# client requests to a node with a separate queue for each
+# request_scheduler_id. The scheduler is further customized by
+# request_scheduler_options as described below.
+request_scheduler: org.apache.cassandra.scheduler.NoScheduler
+
+# Scheduler Options vary based on the type of scheduler
+#
+# NoScheduler
+# Has no options
+#
+# RoundRobin
+# throttle_limit
+# The throttle_limit is the number of in-flight
+# requests per client. Requests beyond
+# that limit are queued up until
+# running requests can complete.
+# The value of 80 here is twice the number of
+# concurrent_reads + concurrent_writes.
+# default_weight
+# default_weight is optional and allows for
+# overriding the default which is 1.
+# weights
+# Weights are optional and will default to 1 or the
+# overridden default_weight. The weight translates into how
+# many requests are handled during each turn of the
+# RoundRobin, based on the scheduler id.
+#
+# request_scheduler_options:
+# throttle_limit: 80
+# default_weight: 5
+# weights:
+# Keyspace1: 1
+# Keyspace2: 5
+
+# request_scheduler_id -- An identifier based on which to perform
+# the request scheduling. Currently the only valid option is keyspace.
+# request_scheduler_id: keyspace
+
+# Enable or disable inter-node encryption
+# JVM defaults for supported SSL socket protocols and cipher suites can
+# be replaced using custom encryption options. This is not recommended
+# unless you have policies in place that dictate certain settings, or
+# need to disable vulnerable ciphers or protocols in case the JVM cannot
+# be updated.
+# FIPS compliant settings can be configured at JVM level and should not
+# involve changing encryption settings here:
+# https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/FIPS.html
+# *NOTE* No custom encryption options are enabled at the moment
+# The available internode options are: all, none, dc, rack
+#
+# If set to dc, Cassandra will encrypt the traffic between the DCs
+# If set to rack, Cassandra will encrypt the traffic between the racks
+#
+# The passwords used in these options must match the passwords used when generating
+# the keystore and truststore. For instructions on generating these files, see:
+# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
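+#
+# A minimal example (hypothetical alias; the passwords must match the
+# options below) using the JDK keytool:
+#   keytool -genkeypair -keyalg RSA -alias node1 \
+#           -keystore conf/.keystore -storepass cassandra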
+#
+server_encryption_options:
+ internode_encryption: none
+ keystore: conf/.keystore
+ keystore_password: cassandra
+ truststore: conf/.truststore
+ truststore_password: cassandra
+ # More advanced defaults below:
+ # protocol: TLS
+ # algorithm: SunX509
+ # store_type: JKS
+ # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
+ # require_client_auth: false
+ # require_endpoint_verification: false
+
+# enable or disable client/server encryption.
+client_encryption_options:
+ enabled: false
+    # If enabled and optional is set to true, both encrypted and unencrypted connections are handled.
+ optional: false
+ keystore: conf/.keystore
+ keystore_password: cassandra
+ # require_client_auth: false
+    # Set truststore and truststore_password if require_client_auth is true
+ # truststore: conf/.truststore
+ # truststore_password: cassandra
+ # More advanced defaults below:
+ # protocol: TLS
+ # algorithm: SunX509
+ # store_type: JKS
+ # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
+
+# internode_compression controls whether traffic between nodes is
+# compressed.
+# Can be:
+#
+# all
+# all traffic is compressed
+#
+# dc
+# traffic between different datacenters is compressed
+#
+# none
+# nothing is compressed.
+internode_compression: all
+
+# Enable or disable tcp_nodelay for inter-dc communication.
+# Disabling it will result in larger (but fewer) network packets being sent,
+# reducing overhead from the TCP protocol itself, at the cost of increasing
+# latency if you block for cross-datacenter responses.
+inter_dc_tcp_nodelay: true
+
+# TTL for different trace types used during logging of the repair process.
+tracetype_query_ttl: 86400
+tracetype_repair_ttl: 604800
+
+# By default, Cassandra logs GC Pauses greater than 200 ms at INFO level
+# This threshold can be adjusted to minimize logging if necessary
+# gc_log_threshold_in_ms: 200
+
+# If unset, all GC Pauses greater than gc_log_threshold_in_ms will log at
+# INFO level
+
+# UDFs (user defined functions) are disabled by default.
+# As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code.
+enable_user_defined_functions: false
+
+# Enables scripted UDFs (JavaScript UDFs).
+# Java UDFs are always enabled, if enable_user_defined_functions is true.
+# Enable this option to be able to use UDFs with "language javascript" or any custom JSR-223 provider.
+# This option has no effect, if enable_user_defined_functions is false.
+enable_scripted_user_defined_functions: false
+
+# The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation.
+# Lowering this value on Windows can provide much tighter latency and better throughput, however
+# some virtualized environments may see a negative performance impact from changing this setting
+# below their system default. The sysinternals 'clockres' tool can confirm your system's default
+# setting.
+windows_timer_interval: 1
+
+
+# Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from
+# a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by
+# the "key_alias" is the only key that will be used for encrypt operations; previously used keys
+# can still (and should!) be in the keystore and will be used on decrypt operations
+# (to handle the case of key rotation).
+#
+# It is strongly recommended to download and install Java Cryptography Extension (JCE)
+# Unlimited Strength Jurisdiction Policy Files for your version of the JDK.
+# (current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html)
+#
+# Currently, only the following file types are supported for transparent data encryption, although
+# more are coming in future cassandra releases: commitlog, hints
+transparent_data_encryption_options:
+ enabled: false
+ chunk_length_kb: 64
+ cipher: AES/CBC/PKCS5Padding
+ key_alias: testing:1
+ # CBC IV length for AES needs to be 16 bytes (which is also the default size)
+ # iv_length: 16
+ key_provider:
+ - class_name: org.apache.cassandra.security.JKSKeyProvider
+ parameters:
+ - keystore: conf/.keystore
+ keystore_password: cassandra
+ store_type: JCEKS
+ key_password: cassandra
+
+
+#####################
+# SAFETY THRESHOLDS #
+#####################
+
+# When executing a scan, within or across a partition, we need to keep the
+# tombstones seen in memory so we can return them to the coordinator, which
+# will use them to make sure other replicas also know about the deleted rows.
+# With workloads that generate a lot of tombstones, this can cause performance
+# problems and even exhaust the server heap.
+# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets)
+# Adjust the thresholds here if you understand the dangers and want to
+# scan more tombstones anyway. These thresholds may also be adjusted at runtime
+# using the StorageService mbean.
+tombstone_warn_threshold: 1000
+tombstone_failure_threshold: 100000
+
+# Log WARN on any multiple-partition batch size exceeding this value. 5kb per batch by default.
+# Caution should be taken when increasing this threshold, as it can lead to node instability.
+batch_size_warn_threshold_in_kb: 5
+
+# Fail any multiple-partition batch exceeding this value. 50kb (10x warn threshold) by default.
+batch_size_fail_threshold_in_kb: 50
+
+# Log WARN on any batches not of type LOGGED that span more partitions than this limit
+unlogged_batch_across_partitions_warn_threshold: 10
+
+# Log a warning when compacting partitions larger than this value
+compaction_large_partition_warning_threshold_mb: 100
+
+# GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level
+# Adjust the threshold based on your application throughput requirement
+# By default, Cassandra logs GC Pauses greater than 200 ms at INFO level
+gc_warn_threshold_in_ms: 1000
+
+# Maximum size of any value in SSTables. Safety measure to detect SSTable corruption
+# early. Any value size larger than this threshold will result in marking an SSTable
+# as corrupted.
+# max_value_size_in_mb: 256
+
+# Back-pressure settings #
+# If enabled, the coordinator will apply the back-pressure strategy specified below to each mutation
+# sent to replicas, with the aim of reducing pressure on overloaded replicas.
+back_pressure_enabled: false
+# The back-pressure strategy applied.
+# The default implementation, RateBasedBackPressure, takes three arguments:
+# high ratio, factor, and flow type, and uses the ratio between incoming mutation responses and outgoing mutation requests.
+# If below high ratio, outgoing mutations are rate limited according to the incoming rate decreased by the given factor;
+# if above high ratio, the rate limiting is increased by the given factor;
+# this factor is usually best configured between 1 and 10; use larger values for a faster recovery
+# at the expense of potentially more dropped mutations;
+# the rate limiting is applied according to the flow type: if FAST, it's rate limited at the speed of the fastest replica,
+# if SLOW at the speed of the slowest one.
+# New strategies can be added. Implementors need to implement org.apache.cassandra.net.BackpressureStrategy and
+# provide a public constructor accepting a Map<String, Object>.
+back_pressure_strategy:
+ - class_name: org.apache.cassandra.net.RateBasedBackPressure
+ parameters:
+ - high_ratio: 0.90
+ factor: 5
+ flow: FAST
diff --git a/opencontrail/files/3.0/server.properties b/opencontrail/files/kafka/0.8.2.0/server.properties
similarity index 100%
rename from opencontrail/files/3.0/server.properties
rename to opencontrail/files/kafka/0.8.2.0/server.properties
diff --git a/opencontrail/files/4.0/consumer.properties b/opencontrail/files/kafka/0.9.0.1/consumer.properties
similarity index 100%
rename from opencontrail/files/4.0/consumer.properties
rename to opencontrail/files/kafka/0.9.0.1/consumer.properties
diff --git a/opencontrail/files/4.0/server.properties b/opencontrail/files/kafka/0.9.0.1/server.properties
similarity index 100%
rename from opencontrail/files/4.0/server.properties
rename to opencontrail/files/kafka/0.9.0.1/server.properties
diff --git a/opencontrail/files/4.0/zookeeper.properties b/opencontrail/files/kafka/0.9.0.1/zookeeper.properties
similarity index 100%
copy from opencontrail/files/4.0/zookeeper.properties
copy to opencontrail/files/kafka/0.9.0.1/zookeeper.properties
diff --git a/opencontrail/files/kafka/1.1.1/consumer.properties b/opencontrail/files/kafka/1.1.1/consumer.properties
new file mode 100644
index 0000000..41f48df
--- /dev/null
+++ b/opencontrail/files/kafka/1.1.1/consumer.properties
@@ -0,0 +1,27 @@
+{%- from "opencontrail/map.jinja" import database with context %}
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# see org.apache.kafka.clients.consumer.ConsumerConfig for more details
+
+# list of brokers used for bootstrapping knowledge about the rest of the cluster
+# format: host1:port1,host2:port2 ...
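+# (rendered from the opencontrail database.members pillar; e.g. two members
+# 10.0.0.1 and 10.0.0.2 - hypothetical addresses - would yield
+# "10.0.0.1:9092,10.0.0.2:9092")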
+bootstrap.servers={% for member in database.members %}{{ member.host }}:9092{% if not loop.last %},{% endif %}{% endfor %}
+
+# consumer group id
+group.id=test-consumer-group
+
+# What to do when there is no initial offset in Kafka or if the current
+# offset does not exist any more on the server: latest, earliest, none
+#auto.offset.reset=
diff --git a/opencontrail/files/kafka/1.1.1/server.properties b/opencontrail/files/kafka/1.1.1/server.properties
new file mode 100644
index 0000000..1fb905c
--- /dev/null
+++ b/opencontrail/files/kafka/1.1.1/server.properties
@@ -0,0 +1,176 @@
+{%- from "opencontrail/map.jinja" import database with context %}
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# see kafka.server.KafkaConfig for additional details and defaults
+
+############################# Server Basics #############################
+
+# The id of the broker. This must be set to a unique integer for each broker.
+broker.id={{ database.id }}
+
+delete.topic.enable=true
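+# Keep two replicas of each topic when the cluster has more than one member,
+# otherwise fall back to a single replica.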
+default.replication.factor={% if database.members|length>1 %}2{% else %}1{% endif %}
+
+############################# Socket Server Settings #############################
+
+# The address the socket server listens on. It will get the value returned from
+# java.net.InetAddress.getCanonicalHostName() if not configured.
+# FORMAT:
+# listeners = listener_name://host_name:port
+# EXAMPLE:
+# listeners = PLAINTEXT://your.host.name:9092
+listeners=PLAINTEXT://{{ database.bind.host }}:9092
+
+# Hostname and port the broker will advertise to producers and consumers. If not set,
+# it uses the value for "listeners" if configured. Otherwise, it will use the value
+# returned from java.net.InetAddress.getCanonicalHostName().
+#advertised.listeners=PLAINTEXT://your.host.name:9092
+
+# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
+#listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
+
+# The number of threads that the server uses for receiving requests from the network and sending responses to the network
+num.network.threads=3
+
+# The number of threads that the server uses for processing requests, which may include disk I/O
+num.io.threads=8
+
+# The send buffer (SO_SNDBUF) used by the socket server
+socket.send.buffer.bytes=102400
+
+# The receive buffer (SO_RCVBUF) used by the socket server
+socket.receive.buffer.bytes=102400
+
+# The maximum size of a request that the socket server will accept (protection against OOM)
+socket.request.max.bytes=104857600
+
+
+############################# Log Basics #############################
+
+# A comma separated list of directories under which to store log files
+log.dirs=/var/lib/kafka
+
+# The default number of log partitions per topic. More partitions allow greater
+# parallelism for consumption, but this will also result in more files across
+# the brokers.
+num.partitions=30
+
+# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
+# Increasing this value is recommended for installations with data dirs located in a RAID array.
+num.recovery.threads.per.data.dir=1
+
+############################# Internal Topic Settings #############################
+# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
+# For anything other than development testing, a value greater than 1, such as 3, is recommended to ensure availability.
+offsets.topic.replication.factor={% if database.members|length>1 %}2{% else %}1{% endif %}
+transaction.state.log.replication.factor={% if database.members|length>1 %}2{% else %}1{% endif %}
+transaction.state.log.min.isr={% if database.members|length>1 %}2{% else %}1{% endif %}
+
+############################# Log Flush Policy #############################
+
+# Messages are immediately written to the filesystem but by default we only fsync() to sync
+# the OS cache lazily. The following configurations control the flush of data to disk.
+# There are a few important trade-offs here:
+# 1. Durability: Unflushed data may be lost if you are not using replication.
+# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
+# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
+# The settings below allow one to configure the flush policy to flush data after a period of time or
+# every N messages (or both). This can be done globally and overridden on a per-topic basis.
+
+# The number of messages to accept before forcing a flush of data to disk
+#log.flush.interval.messages=10000
+
+# The maximum amount of time a message can sit in a log before we force a flush
+#log.flush.interval.ms=1000
+
+############################# Log Retention Policy #############################
+
+# The following configurations control the disposal of log segments. The policy can
+# be set to delete segments after a period of time, or after a given size has accumulated.
+# A segment will be deleted whenever *either* of these criteria is met. Deletion always happens
+# from the end of the log.
+
+# The minimum age of a log file to be eligible for deletion due to age
+log.retention.hours=24
+
+# A size-based retention policy for logs. Segments are pruned from the log unless the remaining
+# segments drop below log.retention.bytes. Functions independently of log.retention.hours.
+log.retention.bytes=268435456
+
+# The maximum size of a log segment file. When this size is reached a new log segment will be created.
+log.segment.bytes=268435456
+
+# The interval at which log segments are checked to see if they can be deleted according
+# to the retention policies
+log.retention.check.interval.ms=300000
+
+log.cleanup.policy=delete
+log.cleaner.threads=2
+log.cleaner.dedupe.buffer.size=250000000
+
+############################# Zookeeper #############################
+
+# Zookeeper connection string (see zookeeper docs for details).
+# This is a comma-separated list of host:port pairs, each corresponding to a
+# ZooKeeper server, e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
+# You can also append an optional chroot string to the URLs to specify the
+# root directory for all kafka znodes.
+zookeeper.connect={% for member in database.members %}{{ member.host }}:2182{% if not loop.last %},{% endif %}{% endfor %}
+
+# Timeout in ms for connecting to zookeeper
+zookeeper.connection.timeout.ms=6000
+
+##################### Confluent Metrics Reporter #######################
+# Confluent Control Center and Confluent Auto Data Balancer integration
+#
+# Uncomment the following lines to publish monitoring data for
+# Confluent Control Center and Confluent Auto Data Balancer
+# If you are using a dedicated metrics cluster, also adjust the settings
+# to point to your metrics Kafka cluster.
+#metric.reporters=io.confluent.metrics.reporter.ConfluentMetricsReporter
+#confluent.metrics.reporter.bootstrap.servers=localhost:9092
+#
+# Uncomment the following line if the metrics cluster has a single broker
+#confluent.metrics.reporter.topic.replicas=1
+
+##################### Confluent Proactive Support ######################
+# If set to true, and confluent-support-metrics package is installed
+# then the feature to collect and report support metrics
+# ("Metrics") is enabled. If set to false, the feature is disabled.
+#
+confluent.support.metrics.enable=false
+
+
+# The customer ID under which support metrics will be collected and
+# reported.
+#
+# When the customer ID is set to "anonymous" (the default), then only a
+# reduced set of metrics is being collected and reported.
+#
+# Confluent customers
+# -------------------
+# If you are a Confluent customer, then you should replace the default
+# value with your actual Confluent customer ID. Doing so will ensure
+# that additional support metrics will be collected and reported.
+#
+confluent.support.customer.id=anonymous
+
+############################# Group Coordinator Settings #############################
+
+# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
+# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
+# The default value for this is 3 seconds.
+group.initial.rebalance.delay.ms=3
\ No newline at end of file
diff --git a/opencontrail/files/4.0/zookeeper.properties b/opencontrail/files/kafka/1.1.1/zookeeper.properties
similarity index 100%
rename from opencontrail/files/4.0/zookeeper.properties
rename to opencontrail/files/kafka/1.1.1/zookeeper.properties
diff --git a/opencontrail/map.jinja b/opencontrail/map.jinja
index 7fa87e8..064808f 100644
--- a/opencontrail/map.jinja
+++ b/opencontrail/map.jinja
@@ -60,6 +60,14 @@
{%- else %}
['contrail-vrouter-agent', 'contrail-vrouter-nodemgr']
{%- endif %}
+ xmpp:
+ tls:
+ enabled: False
+ auth:
+ enabled: False
+ cert_file: /etc/contrail/ssl/certs/server.pem
+ key_file: /etc/contrail/ssl/private/server-privkey.pem
+ ca_cert_file: /etc/contrail/ssl/certs/ca-cert.pem
dpdk:
enabled: False
RedHat:
@@ -71,6 +79,14 @@
{%- else %}
['contrail-vrouter-agent', 'contrail-vrouter-nodemgr']
{%- endif %}
+ xmpp:
+ tls:
+ enabled: False
+ auth:
+ enabled: False
+ cert_file: /etc/contrail/ssl/certs/server.pem
+ key_file: /etc/contrail/ssl/private/server-privkey.pem
+ ca_cert_file: /etc/contrail/ssl/certs/ca-cert.pem
dpdk:
enabled: False
config:
@@ -118,6 +134,14 @@
container_name:
{{ pillar.docker.client.compose.opencontrail.service.controller.container_name }}
{%- endif%}
+ xmpp:
+ tls:
+ enabled: False
+ auth:
+ enabled: False
+ cert_file: /etc/contrail/ssl/certs/server.pem
+ key_file: /etc/contrail/ssl/private/server-privkey.pem
+ ca_cert_file: /etc/contrail/ssl/certs/ca-cert.pem
RedHat:
pkgs:
['contrail-openstack-control']
@@ -131,12 +155,30 @@
container_name:
{{ pillar.docker.client.compose.opencontrail.service.controller.container_name }}
{%- endif%}
+ xmpp:
+ tls:
+ enabled: False
+ auth:
+ enabled: False
+ cert_file: /etc/contrail/ssl/certs/server.pem
+ key_file: /etc/contrail/ssl/private/server-privkey.pem
+ ca_cert_file: /etc/contrail/ssl/certs/ca-cert.pem
database:
Debian:
pkgs:
{%- set openjdkPackage = 'openjdk-8-jre-headless' if grains.get('oscodename') == 'xenial' else 'openjdk-7-jre-headless' %}
{%- set databasePackage = 'contrail-openstack-database' if version >= 4.0 else 'contrail-database' %}
['cassandra', 'zookeeper', '{{ openjdkPackage }}' , '{{ databasePackage }}']
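+    {#- Pick the Cassandra and Kafka config versions that match this OpenContrail release #}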
+ {%- if version >= 4.1 %}
+ cassandra_version: '3.10'
+ kafka_version: '1.1.1'
+ {%- elif version == 4.0 %}
+ cassandra_version: '2.2'
+ kafka_version: '0.9.0.1'
+ {%- else %}
+ cassandra_version: '2.1'
+ kafka_version: '0.8.2.0'
+ {%- endif%}
cassandra_config: '/etc/cassandra/'
compaction_throughput_mb_per_sec: 16
services:
@@ -152,6 +194,16 @@
RedHat:
pkgs:
['contrail-openstack-database', 'zookeeper', 'supervisor', 'java-1.7.0-openjdk-headless']
+ {%- if version >= 4.1 %}
+ cassandra_version: '3.10'
+ kafka_version: '1.1.1'
+ {%- elif version == 4.0 %}
+ cassandra_version: '2.2'
+ kafka_version: '0.9.0.1'
+ {%- else %}
+ cassandra_version: '2.1'
+ kafka_version: '0.8.2.0'
+ {%- endif%}
cassandra_config: '/etc/cassandra/conf/'
compaction_throughput_mb_per_sec: 16
services:
@@ -168,6 +220,7 @@
Debian:
pkgs:
['contrail-web-controller', 'gettext-base', 'nodejs-legacy']
+ redis_config_dir: '/etc/redis/'
services:
{%- if version < 4.0 %}
['contrail-webui-webserver', 'contrail-webui-jobserver']
@@ -184,6 +237,7 @@
RedHat:
pkgs:
['contrail-web-controller']
+ redis_config_dir: '/etc/'
services:
['supervisor-webui']
database:
@@ -303,6 +357,16 @@
pkgs:
{%- set openjdkPackage = 'openjdk-8-jre-headless' if grains.get('oscodename') == 'xenial' else 'openjdk-7-jre-headless' %}
['cassandra', 'zookeeper', '{{ openjdkPackage }}', 'contrail-openstack-database']
+ {%- if version >= 4.1 %}
+ cassandra_version: '3.10'
+ kafka_version: '1.1.1'
+ {%- elif version == 4.0 %}
+ cassandra_version: '2.2'
+ kafka_version: '0.9.0.1'
+ {%- else %}
+ cassandra_version: '2.1'
+ kafka_version: '0.8.2.0'
+ {%- endif%}
cassandra_config: '/etc/cassandra/'
compaction_throughput_mb_per_sec: 16
services:
@@ -314,6 +378,16 @@
RedHat:
pkgs:
['contrail-openstack-database', 'zookeeper', 'supervisor', 'java-1.7.0-openjdk-headless']
+ {%- if version >= 4.1 %}
+ cassandra_version: '3.10'
+ kafka_version: '1.1.1'
+ {%- elif version == 4.0 %}
+ cassandra_version: '2.2'
+ kafka_version: '0.9.0.1'
+ {%- else %}
+ cassandra_version: '2.1'
+ kafka_version: '0.8.2.0'
+ {%- endif%}
cassandra_config: '/etc/cassandra/conf/'
compaction_throughput_mb_per_sec: 16
services:
@@ -326,6 +400,7 @@
Debian:
pkgs:
['contrail-web-controller', 'contrail-openstack-webui', 'nodejs', 'gettext-base']
+ redis_config_dir: '/etc/redis/'
services:
{%- if version < 4.0 %}
['supervisor-webui']
@@ -342,6 +417,7 @@
RedHat:
pkgs:
['contrail-web-controller']
+ redis_config_dir: '/etc/'
services:
['supervisor-webui']
database:
diff --git a/opencontrail/meta/config.yml b/opencontrail/meta/config.yml
index 32cfea0..8c74d06 100644
--- a/opencontrail/meta/config.yml
+++ b/opencontrail/meta/config.yml
@@ -58,13 +58,13 @@
{%- if pillar.opencontrail.database is defined %}
{%- from "opencontrail/map.jinja" import database with context %}
cassandra.yaml:
- source: "salt://opencontrail/files/{{ database.version }}/cassandra.yaml"
+ source: "salt://opencontrail/files/cassandra/{{ database.cassandra_version }}/cassandra.yaml"
template: jinja
logback.xml:
source: "salt://opencontrail/files/{{ database.version }}/database/logback.xml"
template: jinja
cassandra-env.sh:
- source: "salt://opencontrail/files/{{ database.version }}/database/cassandra-env.sh"
+ source: "salt://opencontrail/files/cassandra/{{ database.cassandra_version }}/cassandra-env.sh"
template: jinja
contrail-database-nodemgr.conf:
source: "salt://opencontrail/files/{{ database.version }}/contrail-database-nodemgr.conf"
@@ -76,7 +76,7 @@
source: "salt://opencontrail/files/{{ database.version }}/zookeeper"
template: jinja
server.properties:
- source: "salt://opencontrail/files/{{ database.version }}/server.properties"
+ source: "salt://opencontrail/files/kafka/{{ database.kafka_version }}/server.properties"
template: jinja
{%- endif %}
{%- if pillar.opencontrail.collector is defined %}
diff --git a/opencontrail/meta/fluentd.yml b/opencontrail/meta/fluentd.yml
index 2a8ae54..24a7a60 100644
--- a/opencontrail/meta/fluentd.yml
+++ b/opencontrail/meta/fluentd.yml
@@ -139,12 +139,16 @@
{%- if version < 4.0 %}
path: /var/log/cassandra/*.log
{% else %}
- path: /var/log/contrail/cassandra-status.log
+ path:
+ - /var/log/contrail/cassandra-status.log
+ - /var/log/configdb/cassandra/*.log
+ - /var/log/analyticsdb/cassandra/*.log
{% endif %}
tag: temp.cassandra
pos_file: {{ positiondb }}/cassandra.pos
path_key: log_location
parser:
+ # Cassandra log custom format https://regex101.com/r/f8C4RV/1
type: multiline
format: '/(?<Timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}\s[0-9]{2}:[0-9]{2}:[0-9]{2},[0-9]{3})\s+-\s+(?<java_severity_label>[A-Z]*)\s+-\s+(?<Payload>.*)/'
format_firstline: '/^[0-9]{4}-[0-9]{2}-[0-9]{2}\s[0-9]{2}:[0-9]{2}:[0-9]{2},[0-9]{3}/'
@@ -153,10 +157,17 @@
time_format: '%Y-%m-%d %H:%M:%S,%N'
tail_zookeeper_log_files:
type: tail
+ {%- if version < 4.0 %}
path: /var/log/zookeeper/zookeeper.log
+ {% else %}
+ path:
+ - /var/log/configdb/zookeeper/zookeeper.log
+ - /var/log/analyticsdb/zookeeper/zookeeper.log
+ {% endif %}
tag: opencontrail.zookeeper
pos_file: {{ positiondb }}/zookeeper.pos
parser:
+ # Zookeeper log custom format https://regex101.com/r/NoXbiq/1/
type: multiline
format: '/(?<Timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}\s[0-9]{2}:[0-9]{2}:[0-9]{2},[0-9]{3})\s+-\s+(?<java_severity_label>[A-Z]*)\s+(?<Payload>.*)/'
format_firstline: '/^[0-9]{4}-[0-9]{2}-[0-9]{2}\s[0-9]{2}:[0-9]{2}:[0-9]{2},[0-9]{3}/'
diff --git a/opencontrail/web.sls b/opencontrail/web.sls
index ae2092f..376ae1c 100644
--- a/opencontrail/web.sls
+++ b/opencontrail/web.sls
@@ -9,6 +9,7 @@
- require_in:
- /etc/contrail/contrail-webui-userauth.js
- /etc/contrail/config.global.js
+ - file: {{ web.redis_config_dir }}/redis_webui.conf
{%- endif %}
/etc/contrail/config.global.js:
@@ -21,6 +22,12 @@
- source: salt://opencontrail/files/{{ web.version }}/contrail-webui-userauth.js
- template: jinja
+{{ web.redis_config_dir }}/redis_webui.conf:
+ file.managed:
+ - source: salt://opencontrail/files/{{ web.version }}/redis_webui.conf
+ - template: jinja
+ - makedirs: True
+
{%- if not web.get('config_only', False) %}
opencontrail_web_services:
@@ -33,6 +40,7 @@
- watch:
- file: /etc/contrail/config.global.js
- file: /etc/contrail/contrail-webui-userauth.js
+ - file: {{ web.redis_config_dir }}/redis_webui.conf
{%- if grains.get('virtual_subtype', None) == "Docker" %}
@@ -74,6 +82,7 @@
- watch:
- file: /etc/contrail/config.global.js
- file: /etc/contrail/contrail-webui-userauth.js
+ - file: {{ web.redis_config_dir }}/redis_webui.conf
{%- endif %}
{%- endif %}
diff --git a/tests/pillar/control3.sls b/tests/pillar/control3.sls
index 7492795..dff472e 100644
--- a/tests/pillar/control3.sls
+++ b/tests/pillar/control3.sls
@@ -88,6 +88,14 @@
id: 2
- host: 127.0.0.1
id: 3
+ xmpp:
+ tls:
+ enabled: True
+ auth:
+ enabled: True
+ cert_file: /etc/contrail/server.pem
+ key_file: /etc/contrail/privkey.pem
+ ca_cert_file: /etc/contrail/ca-cert.pem
database:
version: 3.0
cassandra:
diff --git a/tests/pillar/control40.sls b/tests/pillar/control40.sls
index 5351ef8..f98a28f 100644
--- a/tests/pillar/control40.sls
+++ b/tests/pillar/control40.sls
@@ -98,6 +98,14 @@
id: 2
- host: 127.0.0.1
id: 3
+ xmpp:
+ tls:
+ enabled: True
+ auth:
+ enabled: True
+ cert_file: /etc/contrail/server.pem
+ key_file: /etc/contrail/privkey.pem
+ ca_cert_file: /etc/contrail/ca-cert.pem
database:
version: 4.0
config_only: true
diff --git a/tests/pillar/vrouter3.sls b/tests/pillar/vrouter3.sls
index 807b510..fbf2550 100644
--- a/tests/pillar/vrouter3.sls
+++ b/tests/pillar/vrouter3.sls
@@ -48,3 +48,11 @@
user: admin
password: "supersecretpassword123"
tenant: admin
+ xmpp:
+ tls:
+ enabled: True
+ auth:
+ enabled: True
+ cert_file: /etc/contrail/server.pem
+ key_file: /etc/contrail/privkey.pem
+ ca_cert_file: /etc/contrail/ca-cert.pem
\ No newline at end of file
diff --git a/tests/pillar/vrouter40.sls b/tests/pillar/vrouter40.sls
index 16cdcff..e1ebc84 100644
--- a/tests/pillar/vrouter40.sls
+++ b/tests/pillar/vrouter40.sls
@@ -52,3 +52,11 @@
user: admin
password: "supersecretpassword123"
tenant: admin
+ xmpp:
+ tls:
+ enabled: True
+ auth:
+ enabled: True
+ cert_file: /etc/contrail/server.pem
+ key_file: /etc/contrail/privkey.pem
+ ca_cert_file: /etc/contrail/ca-cert.pem
\ No newline at end of file