Merge branch 'master' into pr/27
diff --git a/.kitchen.yml b/.kitchen.yml
index dbb9474..90b13c5 100644
--- a/.kitchen.yml
+++ b/.kitchen.yml
@@ -14,6 +14,13 @@
formula: galera
grains:
noservices: True
+ dependencies:
+ - name: mysql
+ repo: git
+ source: https://github.com/salt-formulas/salt-formula-mysql.git
+ - name: linux
+ repo: git
+ source: https://github.com/salt-formulas/salt-formula-linux.git
state_top:
base:
"*":
@@ -27,14 +34,6 @@
- galeracluster_debian_repo
pillars-from-files:
galeracluster_debian_repo.sls: tests/pillar/repo_galeracluster.sls
- dependencies:
- - name: mysql
- repo: git
- source: https://github.com/salt-formulas/salt-formula-mysql.git
- dependencies:
- - name: linux
- repo: git
- source: https://github.com/salt-formulas/salt-formula-linux.git
verifier:
name: inspec
diff --git a/README.rst b/README.rst
index 609ec3d..5f85850 100644
--- a/README.rst
+++ b/README.rst
@@ -56,20 +56,57 @@
user: root
password: pass
+Additional mysql users:
+
+.. code-block:: yaml
+
+ mysql:
+ server:
+ users:
+ - name: clustercheck
+ password: clustercheck
+ database: '*.*'
+ grants: PROCESS
+ - name: inspector
+ host: 127.0.0.1
+ password: password
+        databases:
+          # list of per-database grant definitions
+          - database: mydb
+            table: mytable
+            grant_option: True
+            grants:
+              - all privileges
+
+Additional check params:
+========================
+
+.. code-block:: yaml
+
+  galera:
+    clustercheck:
+      enabled: True
+      user: clustercheck
+      password: clustercheck
+      available_when_donor: 0
+      available_when_readonly: 1
+      port: 9200
+
Configurable soft parameters
============================
+
- **galera_innodb_buffer_pool_size** - the default value is 3138M
- **galera_max_connections** - the default value is 20000
Usage:
-
.. code-block:: yaml
_param:
galera_innodb_buffer_pool_size: 1024M
galera_max_connections: 200
+
Usage
=====
diff --git a/galera/clustercheck.sls b/galera/clustercheck.sls
new file mode 100644
index 0000000..a311a84
--- /dev/null
+++ b/galera/clustercheck.sls
@@ -0,0 +1,45 @@
+{%- from "galera/map.jinja" import clustercheck with context %}
+
+{%- if clustercheck.get('enabled', False) %}
+/usr/local/bin/mysql_clustercheck:
+ file.managed:
+ - source: salt://galera/files/clustercheck.sh
+ - user: root
+ - group: root
+ - mode: 755
+ - dir_mode: 755
+ - makedirs: True
+
+/etc/xinetd.d/mysql_clustercheck:
+ file.managed:
+ - source: salt://galera/files/xinet.d.conf
+ - template: jinja
+ - makedirs: True
+ - defaults:
+ name: mysqlchk
+ user: nobody
+ server: '/usr/local/bin/mysql_clustercheck'
+ server_args: '{{ clustercheck.get('user', 'clustercheck') }} {{ clustercheck.get('password', 'clustercheck') }} available_when_donor={{ clustercheck.get('available_when_donor', 0) }} /dev/null available_when_readonly={{ clustercheck.get('available_when_readonly', 0) }} {{ clustercheck.config }}'
+ port: {{ clustercheck.get('port', 9200) }}
+ flags: REUSE
+ per_source: UNLIMITED
+ - require:
+ - file: /usr/local/bin/mysql_clustercheck
+{%- if not grains.get('noservices', False) %}
+ - watch_in:
+ - galera_xinetd_service
+{%- endif %}
+
+galera_xinetd_package:
+ pkg.installed:
+ - name: xinetd
+
+{%- if not grains.get('noservices', False) %}
+galera_xinetd_service:
+ service.running:
+ - name: xinetd
+ - require:
+ - pkg: xinetd
+{%- endif %}
+{%- endif %}
+
diff --git a/galera/files/clustercheck.sh b/galera/files/clustercheck.sh
new file mode 100644
index 0000000..efed950
--- /dev/null
+++ b/galera/files/clustercheck.sh
@@ -0,0 +1,138 @@
+#!/bin/bash
+#
+# Script to make a proxy (ie HAProxy) capable of monitoring MySQL Cluster nodes properly
+#
+# Author: Olaf van Zandwijk <olaf.vanzandwijk@nedap.com>
+# Author: Raghavendra Prabhu <raghavendra.prabhu@percona.com>
+# Author: Petr Michalec <pmichalec@mirantis.com>
+#
+# Documentation and download: https://github.com/epcim/percona-clustercheck
+#
+# Based on the original script from Unai Rodriguez
+#
+
+function httpReply(){
+ HTTP_STATUS="${1}"
+ RESPONSE_CONTENT="${2}"
+
+ # https://serverfault.com/questions/504756/curl-failure-when-receiving-data-from-peer-using-percona-xtradb-cluster-check
+ sleep 0.1
+ if [[ "${HTTP_STATUS}" == "503" ]]
+ then
+ echo -en "HTTP/1.1 503 Service Unavailable\r\n"
+ elif [[ "${HTTP_STATUS}" == "404" ]]
+ then
+ echo -en "HTTP/1.1 404 Not Found\r\n"
+ elif [[ "${HTTP_STATUS}" == "401" ]]
+ then
+ echo -en "HTTP/1.1 401 Unauthorized\r\n"
+ elif [[ "${HTTP_STATUS}" == "200" ]]
+ then
+ echo -en "HTTP/1.1 200 OK\r\n"
+ else
+ echo -en "HTTP/1.1 ${HTTP_STATUS}\r\n"
+ fi
+
+ echo -en "Content-Type: text/plain\r\n"
+ echo -en "Connection: close\r\n"
+ echo -en "Content-Length: ${#RESPONSE_CONTENT}\r\n"
+ echo -en "\r\n"
+ echo -en "${RESPONSE_CONTENT}"
+ echo -en "\r\n"
+ sleep 0.1
+}
+
+if [[ $1 == '-h' || $1 == '--help' ]];then
+ echo "Usage: $0 <user> <pass> <available_when_donor=0|1> <log_file> <available_when_readonly=0|1> <defaults_extra_file> <timeout>"
+ exit
+fi
+
+# if the disabled file is present, return 503. This allows
+# admins to manually remove a node from a cluster easily.
+if [ -e "/var/tmp/clustercheck.disabled" ]; then
+ # Shell return-code is 1
+ httpReply "503" "MySQL Cluster Node is manually disabled.\r\n"
+ exit 1
+fi
+
+MYSQL_USERNAME="${1-clustercheckuser}"
+MYSQL_PASSWORD="${2-clustercheckpassword!}"
+AVAILABLE_WHEN_DONOR="${3#available_when_donor=}"; AVAILABLE_WHEN_DONOR="${AVAILABLE_WHEN_DONOR:-0}"  # accept "1" or "available_when_donor=1"
+ERR_FILE="${4:-/dev/null}"
+AVAILABLE_WHEN_READONLY="${5#available_when_readonly=}"; AVAILABLE_WHEN_READONLY="${AVAILABLE_WHEN_READONLY:-1}"  # accept "1" or "available_when_readonly=1"
+DEFAULTS_EXTRA_FILE=${6:-/etc/my.cnf}
+# Timeout exists for instances where mysqld may be hung
+# Default value considers the Galera timeouts
+TIMEOUT=${7:-18}
+
+EXTRA_ARGS=""
+if [[ -n "$MYSQL_USERNAME" ]]; then
+ EXTRA_ARGS="$EXTRA_ARGS --user=${MYSQL_USERNAME}"
+fi
+if [[ -n "$MYSQL_PASSWORD" ]]; then
+ EXTRA_ARGS="$EXTRA_ARGS --password=${MYSQL_PASSWORD}"
+fi
+if [[ -r $DEFAULTS_EXTRA_FILE ]];then
+ MYSQL_CMDLINE="mysql --defaults-extra-file=$DEFAULTS_EXTRA_FILE -nNE --connect-timeout=$TIMEOUT \
+ ${EXTRA_ARGS}"
+else
+ MYSQL_CMDLINE="mysql -nNE --connect-timeout=$TIMEOUT ${EXTRA_ARGS}"
+fi
+#
+# Perform the query to check the wsrep_local_state
+#
+WSREP_STATUS=$($MYSQL_CMDLINE -e "SHOW STATUS LIKE 'wsrep_local_state';" \
+ 2>${ERR_FILE} | tail -1 2>>${ERR_FILE}; exit ${PIPESTATUS[0]})
+mysql_ret=$?
+
+if [[ $mysql_ret -eq 1 || $mysql_ret -eq 127 ]]; then
+ # hash or command can be used here, but command is POSIX
+    command -v mysql; mysql_ret=$?
+ if [[ $mysql_ret -eq 1 ]]; then
+ # mysql program not found
+ # => return HTTP 404
+ # Shell return-code is 3
+ httpReply "404" "Mysql command not found or service is not running.\r\n"
+ exit 2
+ fi
+
+ # Failed mysql login
+ # => return HTTP 401
+ # Shell return-code is 2
+ httpReply "401" "Access denied to database.\r\n"
+ exit 2
+fi
+
+
+
+if [[ "${WSREP_STATUS}" == "4" ]] || [[ "${WSREP_STATUS}" == "2" && ${AVAILABLE_WHEN_DONOR} == 1 ]]
+then
+ # Check only when set to 0 to avoid latency in response.
+ if [[ $AVAILABLE_WHEN_READONLY -eq 0 ]];then
+ READ_ONLY=$($MYSQL_CMDLINE -e "SHOW GLOBAL VARIABLES LIKE 'read_only';" \
+ 2>${ERR_FILE} | tail -1 2>>${ERR_FILE})
+
+ if [[ "${READ_ONLY}" == "ON" ]];then
+ # MySQL Cluster node local state is 'Synced', but it is in
+ # read-only mode. The variable AVAILABLE_WHEN_READONLY is set to 0.
+ # => return HTTP 503
+ # Shell return-code is 1
+ httpReply "503" "MySQL Cluster Node is read-only.\r\n"
+ exit 1
+ fi
+ fi
+ # MySQL Cluster node local state is 'Synced' => return HTTP 200
+ # Shell return-code is 0
+ httpReply "200" "MySQL Cluster Node is synced.\r\n"
+ exit 0
+else
+ # MySQL Cluster node local state is not 'Synced' => return HTTP 503
+ # Shell return-code is 1
+ if [[ -z "${WSREP_STATUS}" ]]
+ then
+ httpReply "503" "Received empty reply from MySQL Cluster Node.\r\nMight be a permission issue, check the credentials used by ${0}\r\n"
+ else
+ httpReply "503" "MySQL Cluster Node is not synced.\r\n"
+ fi
+ exit 1
+fi
diff --git a/galera/files/xinet.d.conf b/galera/files/xinet.d.conf
new file mode 100644
index 0000000..fd6f321
--- /dev/null
+++ b/galera/files/xinet.d.conf
@@ -0,0 +1,24 @@
+# default: {{ default_state|default('on') }}
+# description: {{ name }}
+
+service {{ name }}
+{
+ disable = {{ disable|default('no') }}
+ {%- if flags is defined %}
+ flags = {{ flags }}
+ {%- endif %}
+ socket_type = {{ socket_type|default('stream') }}
+ port = {{ port }}
+ wait = {{ wait|default('no') }}
+ user = {{ user }}
+ server = {{ server }}
+ {%- if server_args is defined %}
+ server_args = {{ server_args }}
+ {%- endif %}
+ log_on_failure += {{ log_on_failure|default('USERID') }}
+ only_from = {{ only_from|default('0.0.0.0/0') }}
+ type = {{ type|default('UNLISTED') }}
+ {%- if per_source is defined %}
+ per_source = {{ per_source }}
+ {%- endif %}
+}
diff --git a/galera/init.sls b/galera/init.sls
index 94e1414..bc55f36 100644
--- a/galera/init.sls
+++ b/galera/init.sls
@@ -7,6 +7,9 @@
{%- if pillar.galera.slave is defined %}
- galera.slave
{%- endif %}
+{%- if pillar.galera.clustercheck is defined %}
+- galera.clustercheck
+{%- endif %}
{%- if pillar.galera.monitor is defined %}
- galera.monitor
{%- endif %}
diff --git a/galera/map.jinja b/galera/map.jinja
index e7fea3c..2db08d1 100644
--- a/galera/map.jinja
+++ b/galera/map.jinja
@@ -63,3 +63,20 @@
'config': '/etc/mysql/my.cnf',
},
}, grain='oscodename', merge=pillar.galera.get('slave', {})), base='default') %}
+
+{% set clustercheck = salt['grains.filter_by']({
+ 'Debian': {
+ 'config': '/etc/mysql/my.cnf',
+ 'enabled': False,
+    'user': 'clustercheck',
+    'password': 'clustercheck',
+ 'port': '9200'
+ },
+ 'RedHat': {
+ 'config': '/etc/my.cnf',
+ 'enabled': False,
+    'user': 'clustercheck',
+    'password': 'clustercheck',
+ 'port': '9200'
+ },
+}, merge=pillar.galera.get('clustercheck', {})) %}
diff --git a/galera/master.sls b/galera/master.sls
index 47af87d..8b2ee5f 100644
--- a/galera/master.sls
+++ b/galera/master.sls
@@ -115,6 +115,7 @@
- defaults:
service: {{ master|yaml }}
- template: jinja
+ - timeout: 1800
galera_bootstrap_script:
file.managed:
@@ -147,6 +148,7 @@
- require:
- file: galera_run_dir
- file: galera_init_script
+ - timeout: 1800
galera_bootstrap_set_root_password:
cmd.run:
diff --git a/galera/server.sls b/galera/server.sls
index 78975a0..4b84654 100644
--- a/galera/server.sls
+++ b/galera/server.sls
@@ -1,5 +1,5 @@
{%- if pillar.get('mysql', {}).server is defined %}
-
+{%- from "mysql/map.jinja" import mysql_connection_args as connection with context %}
{%- set server = pillar.mysql.server %}
{%- for database_name, database in server.get('database', {}).iteritems() %}
@@ -7,14 +7,30 @@
mysql_database_{{ database_name }}:
mysql_database.present:
- name: {{ database_name }}
+ - character_set: {{ database.get('encoding', 'utf8') }}
+ #- connection_user: {{ connection.user }}
+ #- connection_pass: {{ connection.password }}
+ #- connection_charset: {{ connection.charset }}
+ {%- if grains.get('noservices') %}
+ - onlyif: /bin/false
+ {%- endif %}
-{%- for user in database.users %}
-
+{%- for user in database.get('users', []) %}
mysql_user_{{ user.name }}_{{ database_name }}_{{ user.host }}:
mysql_user.present:
- host: '{{ user.host }}'
- name: '{{ user.name }}'
+ {%- if user.password is defined %}
- password: {{ user.password }}
+ {%- else %}
+ - allow_passwordless: true
+ {%- endif %}
+ #- connection_user: {{ connection.user }}
+ #- connection_pass: {{ connection.password }}
+ #- connection_charset: {{ connection.charset }}
+ {%- if grains.get('noservices') %}
+ - onlyif: /bin/false
+ {%- endif %}
mysql_grants_{{ user.name }}_{{ database_name }}_{{ user.host }}:
mysql_grants.present:
@@ -22,14 +38,18 @@
- database: '{{ database_name }}.*'
- user: '{{ user.name }}'
- host: '{{ user.host }}'
+ #- connection_user: {{ connection.user }}
+ #- connection_pass: {{ connection.password }}
+ #- connection_charset: {{ connection.charset }}
- require:
- mysql_user: mysql_user_{{ user.name }}_{{ database_name }}_{{ user.host }}
- mysql_database: mysql_database_{{ database_name }}
-
+ {%- if grains.get('noservices') %}
+ - onlyif: /bin/false
+ {%- endif %}
{%- endfor %}
{%- if database.initial_data is defined %}
-
/root/mysql/scripts/restore_{{ database_name }}.sh:
file.managed:
- source: salt://mysql/conf/restore.sh
@@ -49,25 +69,73 @@
- cwd: /root
- require:
- file: /root/mysql/scripts/restore_{{ database_name }}.sh
-
{%- endif %}
{%- endfor %}
{%- for user in server.get('users', []) %}
-
-mysql_user_{{ user.name }}_{{ user.host }}:
+{%- for host in user.get('hosts', user.get('host', 'localhost'))|sequence %}
+mysql_user_{{ user.name }}_{{ host }}:
mysql_user.present:
- - host: '{{ user.host }}'
+ - host: '{{ host }}'
- name: '{{ user.name }}'
- {%- if user.password is defined %}
- - password: {{ user.password }}
+ {%- if user['password_hash'] is defined %}
+ - password_hash: '{{ user.password_hash }}'
+ {%- elif user['password'] is defined and user['password'] != None %}
+ - password: '{{ user.password }}'
{%- else %}
- allow_passwordless: True
{%- endif %}
+ #- connection_user: {{ connection.user }}
+ #- connection_pass: {{ connection.password }}
+ #- connection_charset: {{ connection.charset }}
{%- if grains.get('noservices') %}
- onlyif: /bin/false
{%- endif %}
+
+{%- if 'grants' in user %}
+mysql_user_{{ user.name }}_{{ host }}_grants:
+ mysql_grants.present:
+ - name: {{ user.name }}
+ - grant: {{ user['grants']|sequence|join(",") }}
+ - database: '{{ user.get('database','*.*') }}'
+ - grant_option: {{ user['grant_option'] | default(False) }}
+ - user: {{ user.name }}
+ - host: '{{ host }}'
+ #- connection_user: {{ connection.user }}
+ #- connection_pass: {{ connection.password }}
+ #- connection_charset: {{ connection.charset }}
+ - require:
+ - mysql_user_{{ user.name }}_{{ host }}
+ {%- if grains.get('noservices') %}
+ - onlyif: /bin/false
+ {%- endif %}
+{%- endif %}
+
+{%- if 'databases' in user %}
+{%- for db in user['databases'] %}
+mysql_user_{{ user.name }}_{{ host }}_grants_db_{{ db.database }}_{{ loop.index0 }}:
+ mysql_grants.present:
+ - name: {{ user.name ~ '_' ~ db['database'] ~ '_' ~ db['table'] | default('all') }}
+ - grant: {{ db['grants']|sequence|join(",") }}
+ - database: '{{ db['database'] }}.{{ db['table'] | default('*') }}'
+ - grant_option: {{ db['grant_option'] | default(False) }}
+ - user: {{ user.name }}
+ - host: '{{ host }}'
+ #- connection_user: {{ connection.user }}
+ #- connection_pass: {{ connection.password }}
+ #- connection_charset: {{ connection.charset }}
+ - require:
+ - mysql_user_{{ user.name }}_{{ host }}
+ # the following line is not mandatory as database might not be managed by salt formula
+ #- mysql_database_{{ db.database }}
+ {%- if grains.get('noservices') %}
+ - onlyif: /bin/false
+ {%- endif %}
+{%- endfor %}
+{%- endif %}
{%- endfor %}
-{%- endif %}
\ No newline at end of file
+{%- endfor %}
+
+{%- endif %}
diff --git a/galera/slave.sls b/galera/slave.sls
index 3f5502a..547fbad 100644
--- a/galera/slave.sls
+++ b/galera/slave.sls
@@ -147,6 +147,7 @@
- require:
- file: galera_run_dir
- file: galera_init_script
+ - timeout: 1800
galera_bootstrap_set_root_password:
cmd.run:
@@ -205,6 +206,7 @@
- require:
- file: galera_bootstrap_init_config
- file: galera_bootstrap_script
+ - timeout: 1800
galera_bootstrap_finish_flag:
file.touch:
@@ -235,3 +237,4 @@
{%- endif %}
{%- endif %}
+
diff --git a/tests/integration/master_cluster/checks_clustercheck_spec.rb b/tests/integration/master_cluster/checks_clustercheck_spec.rb
new file mode 100644
index 0000000..d10fc19
--- /dev/null
+++ b/tests/integration/master_cluster/checks_clustercheck_spec.rb
@@ -0,0 +1,10 @@
+describe file('/etc/xinetd.d/mysql_clustercheck') do
+  it { should exist }
+ its('content') { should match /server.*\/usr\/local\/bin\/mysql_clustercheck/ }
+ its('content') { should match /server_args.*clustercheck password available_when_donor=1 \/dev\/null available_when_readonly=1/ }
+end
+
+describe file('/usr/local/bin/mysql_clustercheck') do
+  it { should exist }
+  it { should be_executable }
+end
diff --git a/tests/integration/slave_cluster/checks_clustercheck_spec.rb b/tests/integration/slave_cluster/checks_clustercheck_spec.rb
new file mode 100644
index 0000000..d10fc19
--- /dev/null
+++ b/tests/integration/slave_cluster/checks_clustercheck_spec.rb
@@ -0,0 +1,10 @@
+describe file('/etc/xinetd.d/mysql_clustercheck') do
+  it { should exist }
+ its('content') { should match /server.*\/usr\/local\/bin\/mysql_clustercheck/ }
+ its('content') { should match /server_args.*clustercheck password available_when_donor=1 \/dev\/null available_when_readonly=1/ }
+end
+
+describe file('/usr/local/bin/mysql_clustercheck') do
+  it { should exist }
+  it { should be_executable }
+end
diff --git a/tests/pillar/master_cluster.sls b/tests/pillar/master_cluster.sls
index 5b596d9..a8f0633 100644
--- a/tests/pillar/master_cluster.sls
+++ b/tests/pillar/master_cluster.sls
@@ -7,7 +7,7 @@
port: 3306
maintenance_password: password
admin:
- user: user
+ user: root
password: password
members:
- host: 127.0.0.1
@@ -16,10 +16,20 @@
port: 4567
- host: 127.0.0.1
port: 4567
+ clustercheck:
+ enabled: True
+ user: clustercheck
+ password: password
+ available_when_donor: 1
+ available_when_readonly: 1
+ port: 9200
max_connections: 20000
innodb_buffer_pool_size: 3138M
mysql:
server:
+ database:
+ mydb:
+ encoding: 'utf8'
users:
- name: haproxy
host: localhost
@@ -27,3 +37,17 @@
host: '%'
- name: haproxy
host: 127.0.0.1
+ - name: clustercheck
+ #host: localhost
+ password: password
+ database: '*.*'
+ grants: PROCESS
+ - name: inspector
+ host: 127.0.0.1
+ password: password
+ databases:
+ - database: mydb
+ table: mytable
+ grant_option: True
+ grants:
+ - all privileges
diff --git a/tests/pillar/slave_cluster.sls b/tests/pillar/slave_cluster.sls
index 40ec4de..38c8420 100644
--- a/tests/pillar/slave_cluster.sls
+++ b/tests/pillar/slave_cluster.sls
@@ -7,7 +7,7 @@
port: 3306
maintenance_password: password
admin:
- user: user
+ user: root
password: password
members:
- host: 127.0.0.1
@@ -16,6 +16,13 @@
port: 4567
- host: 127.0.0.1
port: 4567
+ clustercheck:
+ enabled: True
+ user: clustercheck
+ password: password
+ available_when_donor: 1
+ available_when_readonly: 1
+ port: 9200
max_connections: 20000
innodb_buffer_pool_size: 3138M
mysql:
@@ -26,4 +33,9 @@
- name: haproxy
host: '%'
- name: haproxy
- host: 127.0.0.1
\ No newline at end of file
+ host: 127.0.0.1
+ - name: clustercheck
+ #host: localhost
+ password: password
+ database: '*.*'
+ grants: PROCESS