initial commit
Change-Id: I3ef2ec17155991b83f34344a96f9c543f47d2f29
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
new file mode 100644
index 0000000..afe98e0
--- /dev/null
+++ b/CHANGELOG.rst
@@ -0,0 +1,9 @@
+
+===================
+xtrabackup-formula
+===================
+
+0.0.1
+-----
+
+- Initial commit to Community format
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..cc41a65
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,15 @@
+
+ Copyright (c) 2013 Salt Stack Formulas
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..1043fbe
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,118 @@
+DESTDIR=/
+SALTENVDIR=/usr/share/salt-formulas/env
+RECLASSDIR=/usr/share/salt-formulas/reclass
+FORMULANAME=$(shell grep name: metadata.yml|head -1|cut -d : -f 2|grep -Eo '[a-z0-9\-\_]*')
+VERSION=$(shell grep version: metadata.yml|head -1|cut -d : -f 2|grep -Eo '[a-z0-9\.\-\_]*')
+VERSION_MAJOR := $(shell echo $(VERSION)|cut -d . -f 1-2)
+VERSION_MINOR := $(shell echo $(VERSION)|cut -d . -f 3)
+
+NEW_MAJOR_VERSION ?= $(shell date +%Y.%m|sed 's,\.0,\.,g')
+NEW_MINOR_VERSION ?= $(shell /bin/bash -c 'echo $$[ $(VERSION_MINOR) + 1 ]')
+
+MAKE_PID := $(shell echo $$PPID)
+JOB_FLAG := $(filter -j%, $(subst -j ,-j,$(shell ps T | grep "^\s*$(MAKE_PID).*$(MAKE)")))
+
+ifneq ($(subst -j,,$(JOB_FLAG)),)
+JOBS := $(subst -j,,$(JOB_FLAG))
+else
+JOBS := 1
+endif
+
+KITCHEN_LOCAL_YAML?=.kitchen.yml
+KITCHEN_OPTS?="--concurrency=$(JOBS)"
+KITCHEN_OPTS_CREATE?=""
+KITCHEN_OPTS_CONVERGE?=""
+KITCHEN_OPTS_VERIFY?=""
+KITCHEN_OPTS_TEST?=""
+
+all:
+ @echo "make install - Install into DESTDIR"
+ @echo "make test - Run tests"
+ @echo "make kitchen - Run Kitchen CI tests (create, converge, verify)"
+ @echo "make clean - Cleanup after tests run"
+ @echo "make release-major - Generate new major release"
+ @echo "make release-minor - Generate new minor release"
+ @echo "make changelog - Show changes since last release"
+
+install:
+ # Formula
+ [ -d $(DESTDIR)/$(SALTENVDIR) ] || mkdir -p $(DESTDIR)/$(SALTENVDIR)
+ cp -a $(FORMULANAME) $(DESTDIR)/$(SALTENVDIR)/
+ [ ! -d _modules ] || cp -a _modules $(DESTDIR)/$(SALTENVDIR)/
+ [ ! -d _states ] || cp -a _states $(DESTDIR)/$(SALTENVDIR)/ || true
+ [ ! -d _grains ] || cp -a _grains $(DESTDIR)/$(SALTENVDIR)/ || true
+ # Metadata
+ [ -d $(DESTDIR)/$(RECLASSDIR)/service/$(FORMULANAME) ] || mkdir -p $(DESTDIR)/$(RECLASSDIR)/service/$(FORMULANAME)
+ cp -a metadata/service/* $(DESTDIR)/$(RECLASSDIR)/service/$(FORMULANAME)
+
+test:
+ [ ! -d tests ] || (cd tests; ./run_tests.sh)
+
+release-major: check-changes
+ @echo "Current version is $(VERSION), new version is $(NEW_MAJOR_VERSION)"
+ @[ $(VERSION_MAJOR) != $(NEW_MAJOR_VERSION) ] || (echo "Major version $(NEW_MAJOR_VERSION) already released, nothing to do. Do you want release-minor?" && exit 1)
+ echo "$(NEW_MAJOR_VERSION)" > VERSION
+ sed -i 's,version: .*,version: "$(NEW_MAJOR_VERSION)",g' metadata.yml
+ [ ! -f debian/changelog ] || dch -v $(NEW_MAJOR_VERSION) -m --force-distribution -D `dpkg-parsechangelog -S Distribution` "New version"
+ make genchangelog-$(NEW_MAJOR_VERSION)
+ (git add -u; git commit -m "Version $(NEW_MAJOR_VERSION)")
+ git tag -s -m $(NEW_MAJOR_VERSION) $(NEW_MAJOR_VERSION)
+
+release-minor: check-changes
+ @echo "Current version is $(VERSION), new version is $(VERSION_MAJOR).$(NEW_MINOR_VERSION)"
+ echo "$(VERSION_MAJOR).$(NEW_MINOR_VERSION)" > VERSION
+ sed -i 's,version: .*,version: "$(VERSION_MAJOR).$(NEW_MINOR_VERSION)",g' metadata.yml
+ [ ! -f debian/changelog ] || dch -v $(VERSION_MAJOR).$(NEW_MINOR_VERSION) -m --force-distribution -D `dpkg-parsechangelog -S Distribution` "New version"
+ make genchangelog-$(VERSION_MAJOR).$(NEW_MINOR_VERSION)
+ (git add -u; git commit -m "Version $(VERSION_MAJOR).$(NEW_MINOR_VERSION)")
+	git tag -s -m $(VERSION_MAJOR).$(NEW_MINOR_VERSION) $(VERSION_MAJOR).$(NEW_MINOR_VERSION)
+
+check-changes:
+ @git log --pretty=oneline --decorate $(VERSION)..HEAD | grep -Eqc '.*' || (echo "No new changes since version $(VERSION)"; exit 1)
+
+changelog:
+ git log --pretty=short --invert-grep --grep="Merge pull request" --decorate $(VERSION)..HEAD
+
+genchangelog: genchangelog-$(VERSION_MAJOR).$(NEW_MINOR_VERSION)
+
+genchangelog-%:
+ $(eval NEW_VERSION := $(patsubst genchangelog-%,%,$@))
+ (echo "=========\nChangelog\n=========\n"; \
+ (echo $(NEW_VERSION);git tag) | sort -r | grep -E '^[0-9\.]+' | while read i; do \
+ cur=$$i; \
+ test $$i = $(NEW_VERSION) && i=HEAD; \
+ prev=`(echo $(NEW_VERSION);git tag)|sort|grep -E '^[0-9\.]+'|grep -B1 "$$cur\$$"|head -1`; \
+ echo "Version $$cur\n=============================\n"; \
+ git log --pretty=short --invert-grep --grep="Merge pull request" --decorate $$prev..$$i; \
+ echo; \
+ done) > CHANGELOG.rst
+
+kitchen-check:
+ @[ -e $(KITCHEN_LOCAL_YAML) ] || (echo "Kitchen tests not available, there's no $(KITCHEN_LOCAL_YAML)." && exit 1)
+
+kitchen: kitchen-check kitchen-create kitchen-converge kitchen-verify kitchen-list
+
+kitchen-create: kitchen-check
+ kitchen create ${KITCHEN_OPTS} ${KITCHEN_OPTS_CREATE}
+ [ "$(shell echo $(KITCHEN_LOCAL_YAML)|grep -Eo docker)" = "docker" ] || sleep 120
+
+kitchen-converge: kitchen-check
+ kitchen converge ${KITCHEN_OPTS} ${KITCHEN_OPTS_CONVERGE} &&\
+ kitchen converge ${KITCHEN_OPTS} ${KITCHEN_OPTS_CONVERGE}
+
+kitchen-verify: kitchen-check
+ [ ! -d tests/integration ] || kitchen verify -t tests/integration ${KITCHEN_OPTS} ${KITCHEN_OPTS_VERIFY}
+ [ -d tests/integration ] || kitchen verify ${KITCHEN_OPTS} ${KITCHEN_OPTS_VERIFY}
+
+kitchen-test: kitchen-check
+ [ ! -d tests/integration ] || kitchen test -t tests/integration ${KITCHEN_OPTS} ${KITCHEN_OPTS_TEST}
+ [ -d tests/integration ] || kitchen test ${KITCHEN_OPTS} ${KITCHEN_OPTS_TEST}
+
+kitchen-list: kitchen-check
+ kitchen list
+
+clean:
+ [ ! -x "$(shell which kitchen)" ] || kitchen destroy
+ [ ! -d .kitchen ] || rm -rf .kitchen
+ [ ! -d tests/build ] || rm -rf tests/build
+ [ ! -d build ] || rm -rf build
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..a1bec6b
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,181 @@
+
+===================
+xtrabackup formula
+===================
+
+Xtrabackup allows you to back up and restore databases, either from full backups alone or from full backups together with their incrementals.
+
+
+Sample pillars
+==============
+
+Backup client with ssh/rsync remote host
+
+.. code-block:: yaml
+
+ xtrabackup:
+ client:
+ enabled: true
+ full_backups_to_keep: 3
+ hours_before_full: 48
+ hours_before_incr: 12
+ database:
+ user: username
+ password: password
+ target:
+ host: cfg01
+
+
+.. note:: The full_backups_to_keep parameter states how many full backups (and
+   their incrementals) will be stored locally on the xtrabackup client. More
+   options for relocating local backups are available via salt-formula-backupninja.
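+
+As a sketch, with ``hours_before_incr: 12`` as in the sample above, the cron job
+this formula manages on the client runs the backup script every 12 hours, roughly
+equivalent to the following crontab entry (illustrative only):
+
+.. code-block:: text
+
+    0 */12 * * * /usr/local/bin/innobackupex-runner.sh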
+
+
+Backup client with local backup only
+
+.. code-block:: yaml
+
+ xtrabackup:
+ client:
+ enabled: true
+ full_backups_to_keep: 3
+ hours_before_full: 48
+ hours_before_incr: 12
+ database:
+ user: username
+ password: password
+
+.. note:: The full_backups_to_keep parameter states how many full backups will be
+   stored locally on the xtrabackup client.
+
+
+Backup client with ssh/rsync remote host with compression:
+
+.. code-block:: yaml
+
+ xtrabackup:
+ client:
+ enabled: true
+ full_backups_to_keep: 3
+ hours_before_full: 48
+ hours_before_incr: 12
+ compression: true
+ compression_threads: 2
+ database:
+ user: username
+ password: password
+ target:
+ host: cfg01
+
+.. note:: More options for relocating local backups are available via salt-formula-backupninja.
+
+
+Backup server rsync
+
+.. code-block:: yaml
+
+ xtrabackup:
+ server:
+ enabled: true
+ hours_before_full: 48
+ full_backups_to_keep: 5
+ key:
+ xtrabackup_pub_key:
+ enabled: true
+ key: key
+
+
+.. note:: The hours_before_full parameter should have the same value as configured
+   on the xtrabackup client.
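+
+The key value is an SSH public key that the formula adds to the ``xtrabackup``
+user's authorized keys on the server. A minimal sketch of producing such a keypair
+on the client (assuming the backup cron job runs as root, as this formula
+configures) could look like the following; the paths are only illustrative:
+
+.. code-block:: bash
+
+    # On the xtrabackup client, as root (illustrative)
+    ssh-keygen -t rsa -b 4096 -N '' -f /root/.ssh/id_rsa
+    # Use the printed public key as the server's xtrabackup_pub_key key value
+    cat /root/.ssh/id_rsa.pub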
+
+
+Client restore from local backups:
+
+.. code-block:: yaml
+
+ xtrabackup:
+ client:
+ enabled: true
+ full_backups_to_keep: 5
+ hours_before_full: 48
+ hours_before_incr: 12
+ restore_full_latest: 1
+ restore_from: local
+ compression: true
+ compressThreads: 2
+ database:
+ user: username
+ password: password
+ target:
+ host: cfg01
+ qpress:
+ source: tar
+ name: url
+
+
+.. note:: A restore_full_latest value of 1 restores the database from the latest
+   full backup and its incrementals; a value of 2 restores from the second most
+   recent full backup and its incrementals, and so on.
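+
+For illustration, with the default ``backup_dir`` of ``/var/backups/mysql/xtrabackup``
+the backups are laid out roughly as below (timestamps are examples only), and
+``restore_full_latest`` simply counts full backups from the newest one down:
+
+.. code-block:: text
+
+    /var/backups/mysql/xtrabackup/
+        full/
+            2017-05-23_19-48-10/          # restore_full_latest: 2
+            2017-05-24_19-48-10/          # restore_full_latest: 1
+        incr/
+            2017-05-24_19-48-10/          # incrementals of the newest full
+                2017-05-24_19-55-35/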
+
+
+Client restore from remote backups:
+
+.. code-block:: yaml
+
+ xtrabackup:
+ client:
+ enabled: true
+ full_backups_to_keep: 5
+ hours_before_full: 48
+ hours_before_incr: 12
+ restore_full_latest: 1
+ restore_from: remote
+ compression: true
+ compressThreads: 2
+ database:
+ user: username
+ password: password
+ target:
+ host: cfg01
+ qpress:
+ source: tar
+ name: url
+
+
+.. note:: A restore_full_latest value of 1 restores the database from the latest
+   full backup and its incrementals; a value of 2 restores from the second most
+   recent full backup and its incrementals, and so on.
+
+
+More information
+================
+
+* https://labs.riseup.net/code/projects/xtrabackup/wiki/Configuration
+* http://www.debian-administration.org/articles/351
+* http://duncanlock.net/blog/2013/08/27/comprehensive-linux-backups-with-etckeeper-xtrabackup/
+* https://github.com/riseuplabs/puppet-xtrabackup
+* http://www.ushills.co.uk/2008/02/backup-with-xtrabackup.html
+
+
+Documentation and Bugs
+======================
+
+To learn how to install and update salt-formulas, consult the documentation
+available online at:
+
+ http://salt-formulas.readthedocs.io/
+
+In the unfortunate event that bugs are discovered, they should be reported to
+the appropriate issue tracker. Use the GitHub issue tracker for this specific
+salt formula:
+
+ https://github.com/salt-formulas/salt-formula-xtrabackup/issues
+
+For feature requests, bug reports, or blueprints affecting the entire ecosystem,
+use the Launchpad salt-formulas project:
+
+ https://launchpad.net/salt-formulas
+
+You can also join the salt-formulas-users team and subscribe to the mailing list:
+
+ https://launchpad.net/~salt-formulas-users
+
+Developers wishing to work on the salt-formulas projects should always base
+their work on the master branch and submit pull requests against the specific formula.
+
+ https://github.com/salt-formulas/salt-formula-xtrabackup
+
+Any questions or feedback are always welcome, so feel free to join our IRC
+channel:
+
+ #salt-formulas @ irc.freenode.net
+
diff --git a/VERSION b/VERSION
new file mode 100644
index 0000000..3b04cfb
--- /dev/null
+++ b/VERSION
@@ -0,0 +1 @@
+0.2
diff --git a/debian/changelog b/debian/changelog
new file mode 100644
index 0000000..279a08b
--- /dev/null
+++ b/debian/changelog
@@ -0,0 +1,6 @@
+salt-formula-xtrabackup (0.2) xenial; urgency=medium
+
+ * First public release
+
+ -- Jiri Broulik <jbroulik@mirantis.com> Tue, 06 Oct 2015 16:38:36 +0200
+
diff --git a/debian/compat b/debian/compat
new file mode 100644
index 0000000..ec63514
--- /dev/null
+++ b/debian/compat
@@ -0,0 +1 @@
+9
diff --git a/debian/control b/debian/control
new file mode 100644
index 0000000..579ac6e
--- /dev/null
+++ b/debian/control
@@ -0,0 +1,15 @@
+Source: salt-formula-xtrabackup
+Maintainer: Salt Formulas Community <salt-formulas@freelists.org>
+Section: admin
+Priority: optional
+Build-Depends: salt-master, python, python-yaml, debhelper (>= 9)
+Standards-Version: 3.9.6
+Homepage: http://www.mirantis.com
+Vcs-Browser: https://github.com/salt-formulas/salt-formula-xtrabackup
+Vcs-Git: https://github.com/salt-formulas/salt-formula-xtrabackup.git
+
+Package: salt-formula-xtrabackup
+Architecture: all
+Depends: ${misc:Depends}, salt-master, reclass
+Description: xtrabackup salt formula
+ Install and configure the xtrabackup backup system.
diff --git a/debian/copyright b/debian/copyright
new file mode 100644
index 0000000..8197f98
--- /dev/null
+++ b/debian/copyright
@@ -0,0 +1,15 @@
+Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: salt-formula-xtrabackup
+Upstream-Contact: Salt Formulas Community <salt-formulas@freelists.org>
+Source: https://github.com/salt-formulas/salt-formula-xtrabackup
+
+Files: *
+Copyright: 2017 Mirantis inc.
+License: Apache-2.0
+ Copyright (C) 2017 Mirantis inc.
+ .
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ .
+ On a Debian system you can find a copy of this license in
+ /usr/share/common-licenses/Apache-2.0.
diff --git a/debian/docs b/debian/docs
new file mode 100644
index 0000000..d585829
--- /dev/null
+++ b/debian/docs
@@ -0,0 +1,3 @@
+README.rst
+CHANGELOG.rst
+VERSION
diff --git a/debian/rules b/debian/rules
new file mode 100755
index 0000000..abde6ef
--- /dev/null
+++ b/debian/rules
@@ -0,0 +1,5 @@
+#!/usr/bin/make -f
+
+%:
+ dh $@
+
diff --git a/debian/source/format b/debian/source/format
new file mode 100644
index 0000000..89ae9db
--- /dev/null
+++ b/debian/source/format
@@ -0,0 +1 @@
+3.0 (native)
diff --git a/metadata.yml b/metadata.yml
new file mode 100644
index 0000000..6709492
--- /dev/null
+++ b/metadata.yml
@@ -0,0 +1,3 @@
+name: "xtrabackup"
+version: "0.2"
+source: "https://github.com/salt-formulas/salt-formula-xtrabackup"
diff --git a/metadata/service/client/single.yml b/metadata/service/client/single.yml
new file mode 100644
index 0000000..c6edea0
--- /dev/null
+++ b/metadata/service/client/single.yml
@@ -0,0 +1,25 @@
+applications:
+- xtrabackup
+classes:
+- service.xtrabackup.support
+parameters:
+ _param:
+ xtrabackup_qpress_source: tar
+ xtrabackup_qpress_source_name: http://www.quicklz.com/qpress-11-linux-x64.tar
+ xtrabackup_remote_server: cfg01
+ xtrabackup:
+ client:
+ enabled: true
+ full_backups_to_keep: 3
+ hours_before_full: 48
+ hours_before_incr: 12
+ compression: true
+ compression_threads: 2
+ database:
+ user: root
+ password: ${_param:galera_server_admin_password}
+ target:
+ host: ${_param:xtrabackup_remote_server}
+ qpress:
+ source: ${_param:xtrabackup_qpress_source}
+ name: ${_param:xtrabackup_qpress_source_name}
diff --git a/metadata/service/server/single.yml b/metadata/service/server/single.yml
new file mode 100644
index 0000000..ea49766
--- /dev/null
+++ b/metadata/service/server/single.yml
@@ -0,0 +1,14 @@
+applications:
+- xtrabackup
+parameters:
+ _param:
+ xtrabackup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCzLUiHKAjfFXiZ3fsgx35uXF6VivfC5WFafu4QMalxmj6W+s277oCfdWA8Du5f1wtQXM73VQ5nHkXhM2UIfUMarsyhXK+BxKVrcgEBNHdKlDytaecUPyuOxTDdGWhN/DPv5/vL8NYWweEYBbNbLgU0Td7Rvm52TUXKThIDjeF7XDxX4ShXWipBSwU4boOUBtR8KWfga8fsqeBN+eacuAQFR3MrrOfVvAuWW6Bsf047cmd+V6Qv0raoW73Nu4M/ZAdTsaR5k62a0cHsSRoi3hCmNRqw+CZaQi8prQU6t26eWPEtznjp5EkPF+LLh8LxUoCfWqWT+Lxe8QQwT1nx/LCN
+ xtrabackup:
+ server:
+ enabled: true
+ hours_before_full: 48
+ full_backups_to_keep: 5
+ key:
+ xtrabackup_pub_key:
+ enabled: true
+ key: ${_param:xtrabackup_public_key}
diff --git a/metadata/service/support.yml b/metadata/service/support.yml
new file mode 100644
index 0000000..a92e876
--- /dev/null
+++ b/metadata/service/support.yml
@@ -0,0 +1,13 @@
+parameters:
+ xtrabackup:
+ _support:
+ xtrabackup:
+ enabled: true
+ collectd:
+ enabled: false
+ heka:
+ enabled: true
+ sensu:
+ enabled: true
+ sphinx:
+ enabled: true
diff --git a/tests/pillar/client.sls b/tests/pillar/client.sls
new file mode 100644
index 0000000..d97ee54
--- /dev/null
+++ b/tests/pillar/client.sls
@@ -0,0 +1,16 @@
+xtrabackup:
+ client:
+ enabled: true
+ full_backups_to_keep: 3
+ hours_before_full: 48
+ hours_before_incr: 12
+ compression: true
+ compression_threads: 2
+ database:
+ user: user
+ password: password
+ target:
+ host: host01
+ qpress:
+ source: tar
+ name: url
\ No newline at end of file
diff --git a/tests/pillar/server.sls b/tests/pillar/server.sls
new file mode 100644
index 0000000..cccdd3e
--- /dev/null
+++ b/tests/pillar/server.sls
@@ -0,0 +1,9 @@
+xtrabackup:
+ server:
+ enabled: true
+ hours_before_full: 48
+ full_backups_to_keep: 5
+ key:
+ xtrabackup_pub_key:
+ enabled: true
+ key: pub_key
\ No newline at end of file
diff --git a/tests/run_tests.sh b/tests/run_tests.sh
new file mode 100755
index 0000000..3f42101
--- /dev/null
+++ b/tests/run_tests.sh
@@ -0,0 +1,162 @@
+#!/usr/bin/env bash
+
+set -e
+[ -n "$DEBUG" ] && set -x
+
+CURDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+METADATA=${CURDIR}/../metadata.yml
+FORMULA_NAME=$(cat $METADATA | python -c "import sys,yaml; print yaml.load(sys.stdin)['name']")
+
+## Overrideable parameters
+PILLARDIR=${PILLARDIR:-${CURDIR}/pillar}
+BUILDDIR=${BUILDDIR:-${CURDIR}/build}
+VENV_DIR=${VENV_DIR:-${BUILDDIR}/virtualenv}
+DEPSDIR=${BUILDDIR}/deps
+
+SALT_FILE_DIR=${SALT_FILE_DIR:-${BUILDDIR}/file_root}
+SALT_PILLAR_DIR=${SALT_PILLAR_DIR:-${BUILDDIR}/pillar_root}
+SALT_CONFIG_DIR=${SALT_CONFIG_DIR:-${BUILDDIR}/salt}
+SALT_CACHE_DIR=${SALT_CACHE_DIR:-${SALT_CONFIG_DIR}/cache}
+
+SALT_OPTS="${SALT_OPTS} --retcode-passthrough --local -c ${SALT_CONFIG_DIR}"
+
+if [ "x${SALT_VERSION}" != "x" ]; then
+ PIP_SALT_VERSION="==${SALT_VERSION}"
+fi
+
+## Functions
+log_info() {
+ echo "[INFO] $*"
+}
+
+log_err() {
+ echo "[ERROR] $*" >&2
+}
+
+setup_virtualenv() {
+ log_info "Setting up Python virtualenv"
+ virtualenv $VENV_DIR
+ source ${VENV_DIR}/bin/activate
+ pip install salt${PIP_SALT_VERSION}
+}
+
+setup_pillar() {
+ [ ! -d ${SALT_PILLAR_DIR} ] && mkdir -p ${SALT_PILLAR_DIR}
+ echo "base:" > ${SALT_PILLAR_DIR}/top.sls
+    for pillar in ${PILLARDIR}/*.sls; do
+ state_name=$(basename ${pillar%.sls})
+ echo -e " ${state_name}:\n - ${state_name}" >> ${SALT_PILLAR_DIR}/top.sls
+ done
+}
+
+setup_salt() {
+ [ ! -d ${SALT_FILE_DIR} ] && mkdir -p ${SALT_FILE_DIR}
+ [ ! -d ${SALT_CONFIG_DIR} ] && mkdir -p ${SALT_CONFIG_DIR}
+ [ ! -d ${SALT_CACHE_DIR} ] && mkdir -p ${SALT_CACHE_DIR}
+
+ echo "base:" > ${SALT_FILE_DIR}/top.sls
+ for pillar in ${PILLARDIR}/*.sls; do
+ state_name=$(basename ${pillar%.sls})
+ echo -e " ${state_name}:\n - ${FORMULA_NAME}" >> ${SALT_FILE_DIR}/top.sls
+ done
+
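+    # Write a minimal masterless salt-call minion config pointing at the generated file and pillar roots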
+ cat << EOF > ${SALT_CONFIG_DIR}/minion
+file_client: local
+cachedir: ${SALT_CACHE_DIR}
+verify_env: False
+
+file_roots:
+ base:
+ - ${SALT_FILE_DIR}
+ - ${CURDIR}/..
+ - /usr/share/salt-formulas/env
+
+pillar_roots:
+ base:
+ - ${SALT_PILLAR_DIR}
+ - ${PILLARDIR}
+EOF
+}
+
+fetch_dependency() {
+ dep_name="$(echo $1|cut -d : -f 1)"
+ dep_source="$(echo $1|cut -d : -f 2-)"
+ dep_root="${DEPSDIR}/$(basename $dep_source .git)"
+ dep_metadata="${dep_root}/metadata.yml"
+
+ [ -d /usr/share/salt-formulas/env/${dep_name} ] && log_info "Dependency $dep_name already present in system-wide salt env" && return 0
+ [ -d $dep_root ] && log_info "Dependency $dep_name already fetched" && return 0
+
+ log_info "Fetching dependency $dep_name"
+ [ ! -d ${DEPSDIR} ] && mkdir -p ${DEPSDIR}
+ git clone $dep_source ${DEPSDIR}/$(basename $dep_source .git)
+ ln -s ${dep_root}/${dep_name} ${SALT_FILE_DIR}/${dep_name}
+
+ METADATA="${dep_metadata}" install_dependencies
+}
+
+install_dependencies() {
+ grep -E "^dependencies:" ${METADATA} >/dev/null || return 0
+ (python - | while read dep; do fetch_dependency "$dep"; done) << EOF
+import sys,yaml
+for dep in yaml.load(open('${METADATA}', 'ro'))['dependencies']:
+ print '%s:%s' % (dep["name"], dep["source"])
+EOF
+}
+
+clean() {
+ log_info "Cleaning up ${BUILDDIR}"
+ [ -d ${BUILDDIR} ] && rm -rf ${BUILDDIR} || exit 0
+}
+
+salt_run() {
+    [ -e ${VENV_DIR}/bin/activate ] && source ${VENV_DIR}/bin/activate
+ salt-call ${SALT_OPTS} $*
+}
+
+prepare() {
+    [ ! -d ${BUILDDIR} ] && mkdir -p ${BUILDDIR}
+
+ which salt-call || setup_virtualenv
+ setup_pillar
+ setup_salt
+ install_dependencies
+}
+
+run() {
+ for pillar in ${PILLARDIR}/*.sls; do
+ state_name=$(basename ${pillar%.sls})
+ salt_run --id=${state_name} state.show_sls ${FORMULA_NAME} || (log_err "Execution of ${FORMULA_NAME}.${state_name} failed"; exit 1)
+ done
+}
+
+_atexit() {
+ RETVAL=$?
+ trap true INT TERM EXIT
+
+ if [ $RETVAL -ne 0 ]; then
+ log_err "Execution failed"
+ else
+ log_info "Execution successful"
+ fi
+ return $RETVAL
+}
+
+## Main
+trap _atexit INT TERM EXIT
+
+case $1 in
+ clean)
+ clean
+ ;;
+ prepare)
+ prepare
+ ;;
+ run)
+ run
+ ;;
+ *)
+ prepare
+ run
+ ;;
+esac
diff --git a/xtrabackup/client.sls b/xtrabackup/client.sls
new file mode 100644
index 0000000..800d941
--- /dev/null
+++ b/xtrabackup/client.sls
@@ -0,0 +1,71 @@
+{%- from "xtrabackup/map.jinja" import client with context %}
+{%- if client.enabled %}
+
+xtrabackup_client_packages:
+ pkg.installed:
+ - names: {{ client.pkgs }}
+
+xtrabackup_client_runner_script:
+ file.managed:
+ - name: /usr/local/bin/innobackupex-runner.sh
+ - source: salt://xtrabackup/files/innobackupex-client-runner.sh
+ - template: jinja
+ - mode: 655
+ - require:
+ - pkg: xtrabackup_client_packages
+
+xtrabackup_client_restore_script:
+ file.managed:
+ - name: /usr/local/bin/innobackupex-restore.sh
+ - source: salt://xtrabackup/files/innobackupex-client-restore.sh
+ - template: jinja
+ - mode: 655
+ - require:
+ - pkg: xtrabackup_client_packages
+
+xtrabackups_dir:
+ file.directory:
+ - name: {{ client.backup_dir }}
+ - user: root
+ - group: root
+ - makedirs: true
+
+xtrabackup_client_runner_cron:
+ cron.present:
+ - name: /usr/local/bin/innobackupex-runner.sh
+ - user: root
+ - minute: 0
+{%- if client.hours_before_incr is defined %}
+{%- if client.hours_before_incr <= 23 and client.hours_before_incr > 1 %}
+ - hour: '*/{{ client.hours_before_incr }}'
+{%- elif not client.hours_before_incr <= 1 %}
+ - hour: 2
+{%- endif %}
+{%- else %}
+ - hour: 2
+{%- endif %}
+ - require:
+ - file: xtrabackup_client_runner_script
+
+{%- if client.restore_full_latest is defined %}
+
+xtrabackup_client_call_restore_script:
+ file.managed:
+ - name: /usr/local/bin/innobackupex-restore-call.sh
+ - source: salt://xtrabackup/files/innobackupex-client-restore-call.sh
+ - template: jinja
+ - mode: 655
+ - require:
+ - file: xtrabackup_client_restore_script
+
+xtrabackup_run_restore:
+ cmd.run:
+ - name: /usr/local/bin/innobackupex-restore-call.sh
+ - unless: "[ -e {{ client.backup_dir }}/dbrestored ]"
+ - require:
+ - file: xtrabackup_client_call_restore_script
+
+{%- endif %}
+
+
+{%- endif %}
diff --git a/xtrabackup/files/innobackupex-client-restore-call.sh b/xtrabackup/files/innobackupex-client-restore-call.sh
new file mode 100644
index 0000000..9f99e25
--- /dev/null
+++ b/xtrabackup/files/innobackupex-client-restore-call.sh
@@ -0,0 +1,47 @@
+{%- from "xtrabackup/map.jinja" import client with context %}
+#!/bin/sh
+
+# The purpose of this script is to locally prepare the appropriate backup for restore, from a local or a remote location
+
+{%- if client.restore_from == 'remote' %}
+LOGDIR=/var/log/backups
+mkdir -p $LOGDIR
+scpLog=/var/log/backups/innobackupex-restore-scp.log
+echo "Adding ssh-key of remote host to known_hosts"
+ssh-keygen -R {{ client.target.host }} > $scpLog 2>&1
+ssh-keyscan {{ client.target.host }} >> ~/.ssh/known_hosts 2>> $scpLog
+REMOTEBACKUPPATH=`ssh xtrabackup@{{ client.target.host }} "/usr/local/bin/innobackupex-restore-call.sh {{ client.restore_full_latest }}"`
+echo "Calling /usr/local/bin/innobackupex-restore.sh $REMOTEBACKUPPATH and getting the backup files from remote host"
+/usr/local/bin/innobackupex-restore.sh $REMOTEBACKUPPATH
+
+{%- else %}
+
+BACKUPDIR={{ client.backup_dir }} # Backups base directory
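+# Select the N-th newest full backup and the matching incremental directories, where N = restore_full_latest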
+FULL=`find $BACKUPDIR/full -mindepth 1 -maxdepth 1 -type d -printf "%P\n" | sort -nr | head -{{ client.restore_full_latest }} | tail -1`
+FULL_INCR=`find $BACKUPDIR/incr -mindepth 1 -maxdepth 1 -type d -printf "%P\n" | sort -nr | head -{{ client.restore_full_latest }} | tail -1`
+BEFORE_NEXT_FULL_INCR=`find $BACKUPDIR/incr -mindepth 1 -maxdepth 1 -type d -printf "%P\n" | sort -nr | head -$(( {{ client.restore_full_latest }} - 1 )) | tail -1`
+
+if [ -z "$BEFORE_NEXT_FULL_INCR" ]; then
+ BEFORE_NEXT_FULL_INCR="Empty"
+fi
+
+if [ $FULL = $FULL_INCR ]; then
+ LATEST_FULL_INCR=`find $BACKUPDIR/incr/$FULL_INCR -mindepth 1 -maxdepth 1 -type d -printf "%P\n" | sort -nr | head -1 | tail -1`
+ echo "Restoring full backup $FULL starting from its latest incremental $LATEST_FULL_INCR"
+ echo "Calling /usr/local/bin/innobackupex-restore.sh $BACKUPDIR/incr/$FULL/$LATEST_FULL_INCR"
+ echo
+ /usr/local/bin/innobackupex-restore.sh $BACKUPDIR/incr/$FULL_INCR/$LATEST_FULL_INCR
+elif [ $FULL = $BEFORE_NEXT_FULL_INCR ]; then
+ LATEST_FULL_INCR=`find $BACKUPDIR/incr/$BEFORE_NEXT_FULL_INCR -mindepth 1 -maxdepth 1 -type d -printf "%P\n" | sort -nr | head -1 | tail -1`
+ echo "Restoring full backup $FULL starting from its latest incremental $LATEST_FULL_INCR"
+ echo "Calling /usr/local/bin/innobackupex-restore.sh $BACKUPDIR/incr/$FULL/$LATEST_FULL_INCR"
+ echo
+ /usr/local/bin/innobackupex-restore.sh $BACKUPDIR/incr/$FULL/$LATEST_FULL_INCR
+else
+ echo "Restoring full backup $FULL"
+ echo "Calling /usr/local/bin/innobackupex-restore.sh $BACKUPDIR/full/$FULL"
+ echo
+ /usr/local/bin/innobackupex-restore.sh $BACKUPDIR/full/$FULL
+fi
+
+{%- endif %}
diff --git a/xtrabackup/files/innobackupex-client-restore.sh b/xtrabackup/files/innobackupex-client-restore.sh
new file mode 100644
index 0000000..e8dd0f6
--- /dev/null
+++ b/xtrabackup/files/innobackupex-client-restore.sh
@@ -0,0 +1,225 @@
+{%- from "xtrabackup/map.jinja" import client with context %}
+#!/bin/sh
+#
+# Script to prepare and restore full and incremental backups created with innobackupex-runner.
+#
+# usage example for incr backup restore: ./restore.sh /var/backups/mysql/xtrabackup/incr/2017-05-24_19-48-10/2017-05-24_19-55-35/
+
+INNOBACKUPEX=innobackupex-1.5.1
+INNOBACKUPEXFULL=/usr/bin/$INNOBACKUPEX
+#TMPFILE="/var/log/backups/innobackupex-restore.$$.tmp"
+TMPFILE="/var/log/backups/innobackupex-restore.log"
+MYCNF=/etc/mysql/my.cnf
+BACKUPDIR={{ client.backup_dir }} # Backups base directory
+FULLBACKUPDIR=$BACKUPDIR/full # Full backups directory
+INCRBACKUPDIR=$BACKUPDIR/incr # Incremental backups directory
+MEMORY=1024M # Amount of memory to use when preparing the backup
+DBALREADYRESTORED=$BACKUPDIR/dbrestored
+scpLog=/var/log/backups/innobackupex-restore-scp.log
+decompressionLog=/var/log/backups/innobackupex-decompression.log
+compression=false
+LOGDIR=/var/log/backups
+
+mkdir -p $LOGDIR
+
+#############################################################################
+# Display error message and exit
+#############################################################################
+error()
+{
+ echo "$1" 1>&2
+ exit 1
+}
+
+#############################################################################
+# Check for errors in innobackupex output
+#############################################################################
+check_innobackupex_error()
+{
+ if [ -z "`tail -1 $TMPFILE | grep 'completed OK!'`" ] ; then
+ echo "$INNOBACKUPEX failed:"; echo
+ echo "---------- ERROR OUTPUT from $INNOBACKUPEX ----------"
+ cat $TMPFILE
+ #rm -f $TMPFILE
+ exit 1
+ fi
+}
+
+# Check options before proceeding
+if [ ! -x $INNOBACKUPEXFULL ]; then
+ error "$INNOBACKUPEXFULL does not exist."
+fi
+
+if [ -e $DBALREADYRESTORED ]; then
+ error "Databases already restored. If you want to restore again delete $DBALREADYRESTORED file and run the script again."
+fi
+
+if [ ! -d $BACKUPDIR ]; then
+ error "Backup destination folder: $BACKUPDIR does not exist."
+fi
+
+if [ $# != 1 ] ; then
+ error "Usage: $0 /absolute/path/to/backup/to/restore"
+fi
+
+{%- if client.restore_from != 'remote' %}
+
+if [ ! -d $1 ]; then
+ error "Backup to restore: $1 does not exist."
+fi
+
+{%- endif %}
+
+# Some info output
+echo "----------------------------"
+echo
+echo "$0: MySQL backup script"
+echo "started: `date`"
+echo
+
+{%- if client.restore_from == 'remote' %}
+#get files from remote and change variables to local restore dir
+
+LOCALRESTOREDIR=/var/backups/restoreMysql
+REMOTE_PARENT_DIR=`dirname $1`
+BACKUPPATH=$1
+FULLBACKUPDIR=$LOCALRESTOREDIR/full
+INCRBACKUPDIR=$LOCALRESTOREDIR/incr
+
+mkdir -p $LOCALRESTOREDIR
+rm -rf $LOCALRESTOREDIR/*
+
+echo "Getting files from remote host"
+
+case "$BACKUPPATH" in
+ *incr*) echo "SCP getting full and incr backup files";
+ FULL=`basename $REMOTE_PARENT_DIR`;
+ mkdir -p $FULLBACKUPDIR;
+ mkdir -p $INCRBACKUPDIR;
+ PARENT_DIR=$INCRBACKUPDIR/$FULL;
+ `scp -rp xtrabackup@{{ client.target.host }}:$REMOTE_PARENT_DIR/ $INCRBACKUPDIR/ >> $scpLog 2>&1`;
+ `scp -rp xtrabackup@{{ client.target.host }}:{{ client.backup_dir }}/full/$FULL/ $FULLBACKUPDIR/$FULL/ >> $scpLog 2>&1`;;
+ *full*) echo "SCP getting full backup files";
+ FULL=`basename $1`;
+ mkdir -p $FULLBACKUPDIR;
+ PARENT_DIR=$FULLBACKUPDIR;
+ `scp -rp xtrabackup@{{ client.target.host }}:{{ client.backup_dir }}/full/$FULL/ $FULLBACKUPDIR/$FULL/ >> $scpLog 2>&1`;;
+ *) echo "Unable to scp backup files from remote host"; exit 1 ;;
+esac
+
+# Check if the scp succeeded or failed
+if ! grep -q "No such file or directory" $scpLog; then
+ echo "SCP from remote host completed OK"
+else
+ echo "SCP from remote host FAILED"
+ exit 1
+fi
+
+{%- else %}
+
+PARENT_DIR=`dirname $1`
+
+{%- endif %}
+
+if [ $PARENT_DIR = $FULLBACKUPDIR ]; then
+{%- if client.restore_from == 'remote' %}
+ FULLBACKUP=$FULLBACKUPDIR/$FULL
+{%- else %}
+ FULLBACKUP=$1
+{%- endif %}
+
+  for bf in `find $FULLBACKUP -iname "*.qp"`; do compression=True; break; done
+
+ if [ "$compression" = True ]; then
+ if hash qpress 2>>$TMPFILE; then
+ echo "qpress already installed" >> $TMPFILE
+ else
+{%- if client.qpress.source == 'tar' %}
+ wget {{ client.qpress.name }} > $decompressionLog 2>&1
+ tar -xvf qpress-11-linux-x64.tar
+ cp qpress /usr/bin/qpress
+ chmod 755 /usr/bin/qpress
+ chown root:root /usr/bin/qpress
+{%- elif client.qpress.source == 'pkg' %}
+ apt install {{ client.qpress.name }} > $decompressionLog 2>&1
+{%- endif %}
+ fi
+ echo "Uncompressing $FULLBACKUP"
+    for bf in `find $FULLBACKUP -iname "*.qp"`; do qpress -d $bf $(dirname $bf) && rm $bf; done > $decompressionLog 2>&1
+ fi
+
+ echo "Restore `basename $FULLBACKUP`"
+ echo
+else
+ if [ `dirname $PARENT_DIR` = $INCRBACKUPDIR ]; then
+ INCR=`basename $1`
+ FULL=`basename $PARENT_DIR`
+ FULLBACKUP=$FULLBACKUPDIR/$FULL
+
+
+ if [ ! -d $FULLBACKUP ]; then
+ error "Full backup: $FULLBACKUP does not exist."
+ fi
+
+    for bf in `find $FULLBACKUP -iname "*.qp"`; do compression=True; break; done
+
+ if [ "$compression" = True ]; then
+ if hash qpress 2>>$decompressionLog; then
+ echo "qpress already installed" >> $decompressionLog
+ else
+{%- if client.qpress.source == 'tar' %}
+ wget {{ client.qpress.name }} > $decompressionLog 2>&1
+ tar -xvf qpress-11-linux-x64.tar
+ cp qpress /usr/bin/qpress
+ chmod 755 /usr/bin/qpress
+ chown root:root /usr/bin/qpress
+{%- elif client.qpress.source == 'pkg' %}
+ apt install {{ client.qpress.name }} > $decompressionLog 2>&1
+{%- endif %}
+ fi
+ echo "Uncompressing $FULLBACKUP"
+      for bf in `find $FULLBACKUP -iname "*.qp"`; do qpress -d $bf $(dirname $bf) && rm $bf; done > $decompressionLog 2>&1
+ echo "Uncompressing $PARENT_DIR"
+      for bf in `find $PARENT_DIR -iname "*.qp"`; do qpress -d $bf $(dirname $bf) && rm $bf; done >> $decompressionLog 2>&1
+ fi
+
+ echo
+ echo "Restore $FULL up to incremental $INCR"
+ echo
+
+ echo "Replay committed transactions on full backup"
+ $INNOBACKUPEXFULL --defaults-file=$MYCNF --apply-log --redo-only --use-memory=$MEMORY $FULLBACKUP > $TMPFILE 2>&1
+ check_innobackupex_error
+
+ # Apply incrementals to base backup
+ for i in `find $PARENT_DIR -mindepth 1 -maxdepth 1 -type d -printf "%P\n" | sort -n`; do
+ echo "Applying $i to full ..."
+ $INNOBACKUPEXFULL --defaults-file=$MYCNF --apply-log --redo-only --use-memory=$MEMORY $FULLBACKUP --incremental-dir=$PARENT_DIR/$i > $TMPFILE 2>&1
+ check_innobackupex_error
+
+ if [ $INCR = $i ]; then
+ break # break. we are restoring up to this incremental.
+ fi
+ done
+ else
+ error "unknown backup type"
+ fi
+fi
+
+echo "Preparing ..."
+$INNOBACKUPEXFULL --defaults-file=$MYCNF --apply-log --use-memory=$MEMORY $FULLBACKUP > $TMPFILE 2>&1
+check_innobackupex_error
+
+echo
+echo "Restoring ..."
+$INNOBACKUPEXFULL --defaults-file=$MYCNF --copy-back $FULLBACKUP > $TMPFILE 2>&1
+check_innobackupex_error
+chown -R mysql:mysql /var/lib/mysql
+#rm -f $TMPFILE
+touch $DBALREADYRESTORED
+echo "Backup restored successfully. You are able to start mysql now."
+echo "Verify files ownership in mysql data dir."
+#echo "Run 'chown -R mysql:mysql /path/to/data/dir' if necessary."
+echo
+echo "completed: `date`"
+exit 0
diff --git a/xtrabackup/files/innobackupex-client-runner.sh b/xtrabackup/files/innobackupex-client-runner.sh
new file mode 100644
index 0000000..cdb55c7
--- /dev/null
+++ b/xtrabackup/files/innobackupex-client-runner.sh
@@ -0,0 +1,160 @@
+{%- from "xtrabackup/map.jinja" import client with context %}
+#!/bin/sh
+#
+# Script to create full and incremental backups (for all databases on server) using innobackupex from Percona.
+# http://www.percona.com/doc/percona-xtrabackup/innobackupex/innobackupex_script.html
+#
+# Every time it runs it will generate an incremental backup, except for the first run (full backup).
+# The FULLBACKUPLIFE variable defines your full backup schedule.
+
+INNOBACKUPEX=innobackupex-1.5.1
+INNOBACKUPEXFULL=/usr/bin/$INNOBACKUPEX
+USEROPTIONS="--user={{ client.database.user }} --password={{ client.database.password }}"
+#TMPFILE="/var/log/backups/innobackupex-runner.$$.tmp"
+LOGDIR=/var/log/backups
+TMPFILE="/var/log/backups/innobackupex-runner.log"
+MYCNF=/etc/mysql/my.cnf
+MYSQL=/usr/bin/mysql
+MYSQLADMIN=/usr/bin/mysqladmin
+BACKUPDIR={{ client.backup_dir }} # Backups base directory
+FULLBACKUPDIR=$BACKUPDIR/full # Full backups directory
+INCRBACKUPDIR=$BACKUPDIR/incr # Incremental backups directory
+HOURSFULLBACKUPLIFE={{ client.hours_before_full }} # Lifetime of the latest full backup in hours
+FULLBACKUPLIFE=$(( $HOURSFULLBACKUPLIFE * 60 * 60 ))
+KEEP={{ client.full_backups_to_keep }} # Number of full backups (and its incrementals) to keep
+rsyncLog=/var/log/backups/innobackupex-rsync.log
+
+{%- if client.compression is defined %}
+compression={{ client.compression }}
+{%- else %}
+compression=false
+{%- endif %}
+
+{%- if client.compression_threads is defined %}
+compression_threads={{ client.compression_threads }}
+{%- else %}
+compression_threads=1
+{%- endif %}
+
+mkdir -p $LOGDIR
+
+# Grab start time
+STARTED_AT=`date +%s`
+
+#############################################################################
+# Display error message and exit
+#############################################################################
+error()
+{
+ echo "$1" 1>&2
+ exit 1
+}
+
+# Check options before proceeding
+if [ ! -x $INNOBACKUPEXFULL ]; then
+ error "$INNOBACKUPEXFULL does not exist."
+fi
+
+if [ ! -d $BACKUPDIR ]; then
+ error "Backup destination folder: $BACKUPDIR does not exist."
+fi
+
+if [ -z "`$MYSQLADMIN $USEROPTIONS status | grep 'Uptime'`" ] ; then
+ error "HALTED: MySQL does not appear to be running."
+fi
+
+if ! `echo 'exit' | $MYSQL -s $USEROPTIONS` ; then
+ error "HALTED: Supplied mysql username or password appears to be incorrect (not copied here for security, see script)."
+fi
+
+# Some info output
+echo "----------------------------"
+echo
+echo "$0: MySQL backup script"
+echo "started: `date`"
+echo
+
+# Create full and incr backup directories if they not exist.
+mkdir -p $FULLBACKUPDIR
+mkdir -p $INCRBACKUPDIR
+
+# Find latest full backup
+LATEST_FULL=`find $FULLBACKUPDIR -mindepth 1 -maxdepth 1 -type d -printf "%P\n" | sort -nr | head -1`
+
+# Get latest backup last modification time
+LATEST_FULL_CREATED_AT=`stat -c %Y $FULLBACKUPDIR/$LATEST_FULL`
+
+# If compression is enabled, pass it on to the backup command
+if [ "$compression" = True ]; then
+ compress="--compress"
+ compression_threads="--compress-threads=$compression_threads"
+ echo "Setting compression to True"
+ echo
+else
+ compress=
+ compression_threads=
+fi
+
+# Run an incremental backup if latest full is still valid. Otherwise, run a new full one.
+if [ "$LATEST_FULL" -a `expr $LATEST_FULL_CREATED_AT + $FULLBACKUPLIFE + 5` -ge $STARTED_AT ] ; then
+ # Create incremental backups dir if not exists.
+ TMPINCRDIR=$INCRBACKUPDIR/$LATEST_FULL
+ mkdir -p $TMPINCRDIR
+
+ # Find latest incremental backup.
+ LATEST_INCR=`find $TMPINCRDIR -mindepth 1 -maxdepth 1 -type d | sort -nr | head -1`
+
+ # If this is the first incremental, use the full as base. Otherwise, use the latest incremental as base.
+ if [ ! $LATEST_INCR ] ; then
+ INCRBASEDIR=$FULLBACKUPDIR/$LATEST_FULL
+ else
+ INCRBASEDIR=$LATEST_INCR
+ fi
+
+ echo "Running new incremental backup using $INCRBASEDIR as base."
+ $INNOBACKUPEXFULL --defaults-file=$MYCNF $USEROPTIONS $compress $compression_threads --incremental $TMPINCRDIR --incremental-basedir $INCRBASEDIR > $TMPFILE 2>&1
+else
+ echo "Running new full backup."
+ $INNOBACKUPEXFULL --defaults-file=$MYCNF $USEROPTIONS $compress $compression_threads $FULLBACKUPDIR > $TMPFILE 2>&1
+fi
+
+if [ -z "`tail -1 $TMPFILE | grep 'completed OK!'`" ] ; then
+ echo "$INNOBACKUPEX failed:"; echo
+ echo "---------- ERROR OUTPUT from $INNOBACKUPEX ----------"
+ cat $TMPFILE
+ #rm -f $TMPFILE
+ exit 1
+fi
+
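+# Extract the backup directory that innobackupex reports it created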
+THISBACKUP=`awk -- "/Backup created in directory/ { split( \\\$0, p, \"'\" ) ; print p[2] }" $TMPFILE`
+#rm -f $TMPFILE
+
+echo "Databases backed up successfully to: $THISBACKUP"
+echo
+
+# rsync just the new or modified backup files
+{%- if client.target.host is defined %}
+echo "Adding ssh-key of remote host to known_hosts"
+ssh-keygen -R {{ client.target.host }} > $rsyncLog 2>&1
+ssh-keyscan {{ client.target.host }} >> ~/.ssh/known_hosts 2>> $rsyncLog
+echo "Rsyncing files to remote host"
+/usr/bin/rsync -rhtPv --rsync-path=rsync --progress $BACKUPDIR/* -e ssh xtrabackup@{{ client.target.host }}:$BACKUPDIR >> $rsyncLog
+
+# Check if the rsync succeeded or failed
+if ! grep -q "rsync error: " $rsyncLog; then
+ echo "Rsync to remote host completed OK"
+else
+ echo "Rsync to remote host FAILED"
+ exit 1
+fi
+{%- endif %}
+
+
+# Cleanup
+echo "Cleanup. Keeping only $KEEP full backups and its incrementals."
+AGE=$(($FULLBACKUPLIFE * $KEEP / 60))
+find $FULLBACKUPDIR -maxdepth 1 -type d -mmin +$AGE -execdir echo "removing: "$FULLBACKUPDIR/{} \; -execdir rm -rf $FULLBACKUPDIR/{} \; -execdir echo "removing: "$INCRBACKUPDIR/{} \; -execdir rm -rf $INCRBACKUPDIR/{} \;
+
+echo
+echo "completed: `date`"
+exit 0
diff --git a/xtrabackup/files/innobackupex-server-restore-call.sh b/xtrabackup/files/innobackupex-server-restore-call.sh
new file mode 100644
index 0000000..c12fed5
--- /dev/null
+++ b/xtrabackup/files/innobackupex-server-restore-call.sh
@@ -0,0 +1,34 @@
+{%- from "xtrabackup/map.jinja" import server with context %}
+#!/bin/sh
+
+# This script prints the path of the appropriate backup for the client to restore from
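+# Usage: innobackupex-restore-call.sh <N>  -- N=1 prints the path for the newest full backup chain, N=2 for the second newest, and so on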
+
+if [ $# -eq 0 ]; then
+ echo "No arguments provided"
+ exit 1
+fi
+
+# if arg is not an integer
+case $1 in
+ ''|*[!0-9]*) echo "Argument must be integer"; exit 1 ;;
+ *) ;;
+esac
+
+BACKUPDIR={{ server.backup_dir }} # Backups base directory
+FULL=`find $BACKUPDIR/full -mindepth 1 -maxdepth 1 -type d -printf "%P\n" | sort -nr | head -$1 | tail -1`
+FULL_INCR=`find $BACKUPDIR/incr -mindepth 1 -maxdepth 1 -type d -printf "%P\n" | sort -nr | head -$1 | tail -1`
+BEFORE_NEXT_FULL_INCR=`find $BACKUPDIR/incr -mindepth 1 -maxdepth 1 -type d -printf "%P\n" | sort -nr | head -$(( $1 - 1 )) | tail -1`
+
+if [ -z "$BEFORE_NEXT_FULL_INCR" ]; then
+ BEFORE_NEXT_FULL_INCR="Empty"
+fi
+
+if [ $FULL = $FULL_INCR ]; then
+ LATEST_FULL_INCR=`find $BACKUPDIR/incr/$FULL_INCR -mindepth 1 -maxdepth 1 -type d -printf "%P\n" | sort -nr | head -1 | tail -1`
+ echo "$BACKUPDIR/incr/$FULL/$LATEST_FULL_INCR"
+elif [ $FULL = $BEFORE_NEXT_FULL_INCR ]; then
+ LATEST_FULL_INCR=`find $BACKUPDIR/incr/$BEFORE_NEXT_FULL_INCR -mindepth 1 -maxdepth 1 -type d -printf "%P\n" | sort -nr | head -1 | tail -1`
+ echo "$BACKUPDIR/incr/$FULL/$LATEST_FULL_INCR"
+else
+ echo "$BACKUPDIR/full/$FULL"
+fi
diff --git a/xtrabackup/files/innobackupex-server-runner.sh b/xtrabackup/files/innobackupex-server-runner.sh
new file mode 100644
index 0000000..9335618
--- /dev/null
+++ b/xtrabackup/files/innobackupex-server-runner.sh
@@ -0,0 +1,16 @@
+{%- from "xtrabackup/map.jinja" import server with context %}
+#!/bin/sh
+
+# The purpose of this script is to clean up unnecessary backups on the backup storage node (the xtrabackup server node)
+
+BACKUPDIR={{ server.backup_dir }} # Backups base directory
+FULLBACKUPDIR=$BACKUPDIR/full # Full backups directory
+INCRBACKUPDIR=$BACKUPDIR/incr # Incremental backups directory
+HOURSFULLBACKUPLIFE={{ server.hours_before_full }} # Lifetime of the latest full backup in hours
+FULLBACKUPLIFE=$(( $HOURSFULLBACKUPLIFE * 60 * 60 ))
+KEEP={{ server.full_backups_to_keep }} # Number of full backups (and its incrementals) to keep
+
+# Cleanup
+echo "Cleanup. Keeping only $KEEP full backups and its incrementals."
+AGE=$(($FULLBACKUPLIFE * $KEEP / 60))
+find $FULLBACKUPDIR -maxdepth 1 -type d -mmin +$AGE -execdir echo "removing: "$FULLBACKUPDIR/{} \; -execdir rm -rf $FULLBACKUPDIR/{} \; -execdir echo "removing: "$INCRBACKUPDIR/{} \; -execdir rm -rf $INCRBACKUPDIR/{} \;
diff --git a/xtrabackup/init.sls b/xtrabackup/init.sls
new file mode 100644
index 0000000..4ff31f8
--- /dev/null
+++ b/xtrabackup/init.sls
@@ -0,0 +1,9 @@
+{%- if pillar.xtrabackup is defined %}
+include:
+{%- if pillar.xtrabackup.client is defined %}
+- xtrabackup.client
+{%- endif %}
+{%- if pillar.xtrabackup.server is defined %}
+- xtrabackup.server
+{%- endif %}
+{%- endif %}
diff --git a/xtrabackup/map.jinja b/xtrabackup/map.jinja
new file mode 100644
index 0000000..dcd518e
--- /dev/null
+++ b/xtrabackup/map.jinja
@@ -0,0 +1,22 @@
+
+{% set client = salt['grains.filter_by']({
+ 'Debian': {
+ 'pkgs': ['percona-xtrabackup'],
+ 'backup_dir': '/var/backups/mysql/xtrabackup'
+ },
+ 'RedHat': {
+ 'pkgs': ['percona-xtrabackup'],
+ 'backup_dir': '/var/backups/mysql/xtrabackup'
+ },
+}, merge=salt['pillar.get']('xtrabackup:client')) %}
+
+{% set server = salt['grains.filter_by']({
+ 'Debian': {
+ 'pkgs': ['rsync'],
+ 'backup_dir': '/var/backups/mysql/xtrabackup'
+ },
+ 'RedHat': {
+ 'pkgs': ['rsync'],
+ 'backup_dir': '/var/backups/mysql/xtrabackup'
+ },
+}, merge=salt['pillar.get']('xtrabackup:server')) %}
diff --git a/xtrabackup/meta/sensu.yml b/xtrabackup/meta/sensu.yml
new file mode 100644
index 0000000..5124216
--- /dev/null
+++ b/xtrabackup/meta/sensu.yml
@@ -0,0 +1,8 @@
+check:
+ local_backup_{{ grains['fqdn'] }}:
+ command: "PATH=$PATH:/usr/local/sbin duplicity_salt.sh nagios 24"
+ interval: 3600
+ timeout: 300
+ occurrences: 3
+ subscribers:
+ - {{ grains['fqdn']|replace('.', '-') }}
diff --git a/xtrabackup/meta/sphinx.yml b/xtrabackup/meta/sphinx.yml
new file mode 100644
index 0000000..bd6d5c6
--- /dev/null
+++ b/xtrabackup/meta/sphinx.yml
@@ -0,0 +1,15 @@
+doc:
+ name: xtrabackup
+ description: xtrabackup allows you to backup database periodically in binary format. Including full and incremental backups.
+ role:
+ {%- if pillar.xtrabackup.client is defined %}
+ {%- from "xtrabackup/map.jinja" import client with context %}
+ client:
+ name: client
+ param: {}
+ {%- endif %}
+ {%- if pillar.xtrabackup.server is defined %}
+ server:
+ name: server
+ param: {}
+ {%- endif %}
\ No newline at end of file
diff --git a/xtrabackup/server.sls b/xtrabackup/server.sls
new file mode 100644
index 0000000..ad7658f
--- /dev/null
+++ b/xtrabackup/server.sls
@@ -0,0 +1,66 @@
+{%- from "xtrabackup/map.jinja" import server with context %}
+{%- if server.enabled %}
+
+xtrabackup_server_packages:
+ pkg.installed:
+ - names: {{ server.pkgs }}
+
+xtrabackup_user:
+ user.present:
+ - name: xtrabackup
+ - system: true
+ - home: {{ server.backup_dir }}
+
+{{ server.backup_dir }}:
+ file.directory:
+ - mode: 755
+ - user: xtrabackup
+ - group: xtrabackup
+ - makedirs: true
+ - require:
+ - user: xtrabackup_user
+ - pkg: xtrabackup_server_packages
+
+{%- for key_name, key in server.key.iteritems() %}
+
+{%- if key.get('enabled', False) %}
+
+xtrabackup_key_{{ key.key }}:
+ ssh_auth.present:
+ - user: xtrabackup
+ - name: {{ key.key }}
+ - require:
+ - file: {{ server.backup_dir }}
+
+{%- endif %}
+
+{%- endfor %}
+
+xtrabackup_server_script:
+ file.managed:
+ - name: /usr/local/bin/innobackupex-runner.sh
+ - source: salt://xtrabackup/files/innobackupex-server-runner.sh
+ - template: jinja
+ - mode: 655
+ - require:
+ - pkg: xtrabackup_server_packages
+
+xtrabackup_server_cron:
+ cron.present:
+ - name: /usr/local/bin/innobackupex-runner.sh
+ - user: xtrabackup
+ - minute: 0
+ - hour: 2
+ - require:
+ - file: xtrabackup_server_script
+
+xtrabackup_server_call_restore_script:
+ file.managed:
+ - name: /usr/local/bin/innobackupex-restore-call.sh
+ - source: salt://xtrabackup/files/innobackupex-server-restore-call.sh
+ - template: jinja
+ - mode: 655
+ - require:
+ - pkg: xtrabackup_server_packages
+
+{%- endif %}