Merge pull request #1 from avlasov-mos-de/master

Add possibility to turn off mysql backup for specific nodes
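
The new behaviour is pillar-driven. A minimal pillar sketch (key names taken
from the README and test pillars added below; values are illustrative) that
pins the backup cron entry to an exact time and comments it out on selected
nodes:

    backupninja:
      client:
        enabled: true
        auto_backup_disabled: true
        backup_times:
          day_of_week: 1
          hour: 4
          minute: 52
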
diff --git a/.kitchen.yml b/.kitchen.yml
new file mode 100644
index 0000000..050f800
--- /dev/null
+++ b/.kitchen.yml
@@ -0,0 +1,73 @@
+---
+driver:
+  name: docker
+  hostname: backupninja.ci.local
+  use_sudo: false
+
+provisioner:
+  name: salt_solo
+  salt_install: bootstrap
+  salt_bootstrap_url: https://bootstrap.saltstack.com
+  salt_version: latest
+  require_chef: false
+  log_level: error
+  formula: backupninja
+  grains:
+    noservices: True
+  dependencies:
+    - name: linux
+      repo: git
+      source: https://github.com/salt-formulas/salt-formula-linux
+  state_top:
+    base:
+      "*":
+        - backupninja
+  pillars:
+    top.sls:
+      base:
+        "*":
+          - backupninja
+
+verifier:
+  name: inspec
+  sudo: true
+
+
+platforms:
+  - name: <%=ENV['PLATFORM'] ||  'saltstack-ubuntu-xenial-salt-stable' %>
+    driver_config:
+      image: <%=ENV['PLATFORM'] || 'epcim/salt:saltstack-ubuntu-xenial-salt-stable'%>
+      platform: ubuntu
+
+suites:
+
+  - name: server_rdiff
+    provisioner:
+      pillars-from-files:
+        backupninja.sls: tests/pillar/server_rdiff.sls
+
+  - name: client_rsync
+    provisioner:
+      pillars-from-files:
+        backupninja.sls: tests/pillar/client_rsync.sls
+
+  - name: client_rsync_backup_times
+    provisioner:
+      pillars-from-files:
+        backupninja.sls: tests/pillar/client_rsync_backup_times.sls
+
+  - name: client_s3
+    provisioner:
+      pillars-from-files:
+        backupninja.sls: tests/pillar/client_s3.sls
+
+  - name: client_s3_disabled_auto
+    provisioner:
+      pillars-from-files:
+        backupninja.sls: tests/pillar/client_s3_disabled_auto.sls
+
+  - name: client_webdav
+    provisioner:
+      pillars-from-files:
+        backupninja.sls: tests/pillar/client_webdav.sls
+# vim: ft=yaml sw=2 ts=2 sts=2 tw=125
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..3456794
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,64 @@
+sudo: required
+services:
+  - docker
+
+install:
+  - pip install PyYAML
+  - pip install virtualenv
+  - |
+    test -e Gemfile || cat <<EOF > Gemfile
+    source 'https://rubygems.org'
+    gem 'rake'
+    gem 'test-kitchen'
+    gem 'kitchen-docker'
+    gem 'kitchen-inspec'
+    gem 'inspec'
+    gem 'kitchen-salt', :git => 'https://github.com/salt-formulas/kitchen-salt.git'
+    EOF
+  - bundle install
+
+env:
+    - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2016.3 SUITE=server-rdiff
+    - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2016.3 SUITE=client-rsync
+    - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2016.3 SUITE=client-s3
+    - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2016.3 SUITE=client-webdav
+    - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2017.7 SUITE=server-rdiff
+    - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2017.7 SUITE=client-rsync
+    - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2017.7 SUITE=client-rsync-backup-times
+    - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2017.7 SUITE=client-s3
+    - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2017.7 SUITE=client-s3-disabled-auto
+    - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2017.7 SUITE=client-webdav
+    - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2018.3 SUITE=server-rdiff
+    - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2018.3 SUITE=client-rsync
+    - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2018.3 SUITE=client-rsync-backup-times
+    - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2018.3 SUITE=client-s3
+    - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2018.3 SUITE=client-s3-disabled-auto
+    - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2018.3 SUITE=client-webdav
+#    - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2017.7 SUITE=server-rdiff
+#    - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2017.7 SUITE=client-rsync
+#    - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2017.7 SUITE=client-s3
+#    - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2017.7 SUITE=client-webdav
+#    - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2018.3 SUITE=server-rdiff
+#    - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2018.3 SUITE=client-rsync
+#    - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2018.3 SUITE=client-rsync-backup-times
+#    - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2018.3 SUITE=client-s3
+#    - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2018.3 SUITE=client-s3-disabled-auto
+#    - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2018.3 SUITE=client-webdav
+
+before_script:
+  - set -o pipefail
+  - make test | tail
+
+script:
+  - test ! -e .kitchen.yml || bundle exec kitchen converge ${SUITE} || true
+  - test ! -e .kitchen.yml || bundle exec kitchen verify ${SUITE} -t tests/integration
+
+notifications:
+  webhooks:
+    urls:
+      - https://webhooks.gitter.im/e/6123573504759330786b
+    on_success: change  # options: [always|never|change] default: always
+    on_failure: never  # options: [always|never|change] default: always
+    on_start: never     # options: [always|never|change] default: always
+    on_cancel: never    # options: [always|never|change] default: always
+    on_error: never    # options: [always|never|change] default: always
+  email: false
diff --git a/Makefile b/Makefile
index 1043fbe..d166862 100644
--- a/Makefile
+++ b/Makefile
@@ -27,6 +27,7 @@
 
 all:
 	@echo "make install - Install into DESTDIR"
+	@echo "make lint    - Run lint tests"
 	@echo "make test    - Run tests"
 	@echo "make kitchen - Run Kitchen CI tests (create, converge, verify)"
 	@echo "make clean   - Cleanup after tests run"
@@ -45,6 +46,9 @@
 	[ -d $(DESTDIR)/$(RECLASSDIR)/service/$(FORMULANAME) ] || mkdir -p $(DESTDIR)/$(RECLASSDIR)/service/$(FORMULANAME)
 	cp -a metadata/service/* $(DESTDIR)/$(RECLASSDIR)/service/$(FORMULANAME)
 
+lint:
+	[ ! -d tests ] || (cd tests; ./run_tests.sh lint)
+
 test:
 	[ ! -d tests ] || (cd tests; ./run_tests.sh)
 
@@ -65,7 +69,7 @@
 	[ ! -f debian/changelog ] || dch -v $(VERSION_MAJOR).$(NEW_MINOR_VERSION) -m --force-distribution -D `dpkg-parsechangelog -S Distribution` "New version"
 	make genchangelog-$(VERSION_MAJOR).$(NEW_MINOR_VERSION)
 	(git add -u; git commit -m "Version $(VERSION_MAJOR).$(NEW_MINOR_VERSION)")
-	git tag -s -m $(NEW_MAJOR_VERSION) $(VERSION_MAJOR).$(NEW_MINOR_VERSION)
+	git tag -s -m $(VERSION_MAJOR).$(NEW_MINOR_VERSION) $(VERSION_MAJOR).$(NEW_MINOR_VERSION)
 
 check-changes:
 	@git log --pretty=oneline --decorate $(VERSION)..HEAD | grep -Eqc '.*' || (echo "No new changes since version $(VERSION)"; exit 1)
diff --git a/README.rst b/README.rst
index 8a29afa..987e654 100644
--- a/README.rst
+++ b/README.rst
@@ -30,6 +30,20 @@
           host: 10.10.10.208
           user: backupninja
 
+Backup client with ssh/rsync remote target with specific rsync options
+
+.. code-block:: yaml
+
+    backupninja:
+      client:
+        enabled: true
+        target:
+          engine: rsync
+          engine_opts: "-av --delete --recursive --safe-links"
+          home_dir: /srv/volumes/backup/backupninja
+          host: 10.10.10.208
+          user: backupninja
+
 Backup client with s3 remote target
 
 .. code-block:: yaml
@@ -59,6 +73,43 @@
               principal: host/${linux:network:fqdn}
               keytab: /etc/krb5.keytab
 
+Backup client with exact backup times
+
+.. code-block:: yaml
+
+    backupninja:
+      client:
+        enabled: true
+        auto_backup_disabled: false
+        backup_times:
+          day_of_week: 1
+          hour: 2
+          minute: 32
+
+.. code-block:: yaml
+
+    backupninja:
+      client:
+        enabled: true
+        auto_backup_disabled: false
+        backup_times:
+          day_of_month: 24
+          hour: 14
+          minute: 12
+
+Backup client with defined number of backups to keep
+
+.. code-block:: yaml
+
+    backupninja:
+      client:
+        enabled: true
+        target:
+          engine: rsync
+          host: 10.10.10.208
+          user: backupninja
+          keep: 4
+
 Backup server rsync/rdiff
 
 .. code-block:: yaml
@@ -72,6 +123,60 @@
             enabled: true
             key: ssh-key
 
+Backup server without strict client policy restriction
+
+.. code-block:: yaml
+
+    backupninja:
+      server:
+        restrict_clients: false
+
+Backup client with local storage
+
+.. code-block:: yaml
+
+    backupninja:
+      client:
+        enabled: true
+        target:
+          engine: local
+
+
+Client scheduling in rsync engine
+=================================
+
+Client runs a backup at 5am every day.
+
+.. code-block:: yaml
+
+    backupninja:
+      client:
+        enabled: true
+        target:
+          engine: rsync
+        scheduling:
+          when:
+            - everyday at 05
+
+
+Client runs backups at multiple times.
+
+.. code-block:: yaml
+
+    backupninja:
+      client:
+        enabled: true
+        target:
+          engine: rsync
+        scheduling:
+          when:
+            - everyday at 5
+            - Tuesday at 05:30
+            - 25 at 23:45
+            - hourly
+
+If multiple "when" options are present, they all apply. If two configuration files are scheduled to run in the same hour, they fall back on the alphabetical ordering specified above.
+
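+For example, the multi-"when" pillar above renders into the handler
+configuration as follows (illustrative output of the scheduling loop in
+the rsync/dup/rdiff templates):
+
+.. code-block:: text
+
+    when = everyday at 5
+    when = Tuesday at 05:30
+    when = 25 at 23:45
+    when = hourly
+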
 
 More information
 ================
diff --git a/backupninja/client.sls b/backupninja/client.sls
index 98131f8..99ed1fb 100644
--- a/backupninja/client.sls
+++ b/backupninja/client.sls
@@ -20,8 +20,37 @@
   - user: root
   - group: root
 
-{%- if pillar.postgresql is defined %}
+{%- if client.backup_times is defined %}
 
+delete_cron_file:
+  file.absent:
+  - name: /etc/cron.d/backupninja
+  - require:
+    - pkg: backupninja_packages
+
+create_cron_job:
+  cron.present:
+  - name: if [ -x /usr/sbin/backupninja ]; then /usr/sbin/backupninja --run; fi
+  - user: root
+  {%- if client.backup_times.day_of_week is defined %}
+  - dayweek: {{ client.backup_times.day_of_week }}
+  {%- endif %}
+  {%- if client.backup_times.day_of_month is defined %}
+  - daymonth: {{ client.backup_times.day_of_month }}
+  {%- endif %}
+  {%- if client.backup_times.hour is defined %}
+  - hour: {{ client.backup_times.hour }}
+  {%- endif %}
+  {%- if client.backup_times.minute is defined %}
+  - minute: {{ client.backup_times.minute }}
+  {%- endif %}
+  {%- if client.get('auto_backup_disabled', False) %}
+  - commented: True
+  {%- endif %}
+
+{%- endif %}
+
+{%- if pillar.postgresql is defined or pillar.maas is defined %}
 backupninja_postgresql_handler:
   file.managed:
   - name: /etc/backup.d/102.pgsql
@@ -135,6 +164,8 @@
   - require:
     - pkg: backupninja_packages
 
+{%- if client.target.engine not in ["local"] %}
+
 backupninja_remote_handler_{{ client.target.engine }}:
   file.managed:
   - name: /etc/backup.d/200.backup.{{ client.target.engine }}
@@ -144,6 +175,8 @@
   - require:
     - pkg: backupninja_packages
 
+{%- endif %}
+
 {%- if client.target.auth is defined and client.target.auth.gss is defined %}
 
 backupninja_gss_helper_kinit:
diff --git a/backupninja/files/authorized_keys b/backupninja/files/authorized_keys
new file mode 100644
index 0000000..6f54c9b
--- /dev/null
+++ b/backupninja/files/authorized_keys
@@ -0,0 +1,19 @@
+{%- from "backupninja/map.jinja" import server with context %}
+{%- for key_name, key in server.key.iteritems() %}
+{%- if key.get('enabled', False) %}
+{%- set clients = [] %}
+{%- if server.restrict_clients %}
+  {%- for node_name, node_grains in salt['mine.get']('*', 'grains.items').iteritems() %}
+    {%- if node_grains.get('backupninja', {}).get('client') %}
+    {%- set client = node_grains.backupninja.get("client") %}
+      {%- if client.get('addresses') and client.get('addresses', []) is iterable %}
+        {%- for address in client.addresses %}
+          {%- do clients.append(address|string) %}
+        {%- endfor %}
+      {%- endif %}
+    {%- endif %}
+  {%- endfor %}
+{%- endif %}
+no-pty{%- if clients %},from="{{ clients|join(',') }}"{%- endif %} {{ key.key }}
+{%- endif %}
+{%- endfor %}
\ No newline at end of file
diff --git a/backupninja/files/dup.conf b/backupninja/files/dup.conf
index f7d2537..8ace4ec 100644
--- a/backupninja/files/dup.conf
+++ b/backupninja/files/dup.conf
@@ -7,6 +7,16 @@
 ## necessary. Options which are uncommented in this example do not have
 ## defaults, and the settings provided are recommended.
 
+#scheduling
+##by default it is
+##when = everyday at 01
+{%- if client.scheduling is defined %}
+{%- for time in client.scheduling.when %}
+when = {{ time }}
+{%- endfor %}
+{%- endif %}
+
+
 ## passed directly to duplicity, e.g. to increase verbosity set this to:
 ## options = --verbosity 8
 ## when using the Amazon S3 backend to create buckets in Europe:
diff --git a/backupninja/files/handler/mysql.conf b/backupninja/files/handler/mysql.conf
index 67ca78a..ae8da97 100644
--- a/backupninja/files/handler/mysql.conf
+++ b/backupninja/files/handler/mysql.conf
@@ -39,17 +39,17 @@
 # make a backup using mysqldump. this creates text files with sql commands
 # sufficient to recontruct the database.
 #
-# sqldumpoptions = <options>
+sqldumpoptions = --single-transaction --hex-blob --quick --quote-names --add-drop-table
 # (default = --lock-tables --complete-insert --add-drop-table --quick --quote-names)
 # arguments to pass to mysqldump
 #
 # compress = < yes | no > (default = yes)
-# if yes, compress the sqldump output. 
+# if yes, compress the sqldump output.
 #
 # vsname = <vserver> (no default)
-# what vserver to operate on (only used if vserver = yes 
-# in /etc/backupninja.conf), if you do not specify a vsname the 
+# what vserver to operate on (only used if vserver = yes
+# in /etc/backupninja.conf), if you do not specify a vsname the
 # host will be operated on
-# 
-# NB: databases = all doesn't seem to work with hotcopy = yes 
+#
+# NB: databases = all doesn't seem to work with hotcopy = yes
 # when vsname is specified, I would like to know how to fix this.
diff --git a/backupninja/files/handler/pgsql.conf b/backupninja/files/handler/pgsql.conf
index f934695..58dadb6 100644
--- a/backupninja/files/handler/pgsql.conf
+++ b/backupninja/files/handler/pgsql.conf
@@ -7,8 +7,13 @@
 
 backupdir = /var/backups/postgresql
 # where to dump the backups
+{%- if pillar.maas is defined %}
+{%- from "maas/map.jinja" import region with context %}
+databases = {{ region.database.name }}
+{%- else %}
+databases = {% for db_name, db in pillar.postgresql.server.get('database', {}).iteritems() %} {{ db_name }}{% endfor %}
+{%- endif %}
 
-databases = {% for db in pillar.postgresql.server.get('databases', []) %} {{ db.name }}{% endfor %} {% for db_name, db in pillar.postgresql.server.get('database', {}).iteritems() %} {{ db_name }}{% endfor %}
 # which databases to backup. should either be the word 'all' or a 
 # space separated list of database names.
 # Note: when using 'all', pg_dumpall is used instead of pg_dump, which means
diff --git a/backupninja/files/rdiff.conf b/backupninja/files/rdiff.conf
index 2e8bcb3..a2f49dd 100644
--- a/backupninja/files/rdiff.conf
+++ b/backupninja/files/rdiff.conf
@@ -13,12 +13,25 @@
 ## destination host and user.
 ##
 
+#scheduling
+##by default it is
+##when = everyday at 01
+{%- if client.scheduling is defined %}
+{%- for time in client.scheduling.when %}
+when = {{ time }}
+{%- endfor %}
+{%- endif %}
+
+
 ## passed directly to rdiff-backup
 ## an example setting would be:
 ## options = --force
 ## 
 ## Default:
 # options = 
+{% if client.target.engine_opts is defined %}
+options = {{ client.target.engine_opts }}
+{%- endif %}
 
 ## default is 0, but set to 19 if you want to lower the priority.
 ## an example setting would be:
diff --git a/backupninja/files/rsync.conf b/backupninja/files/rsync.conf
index 65e066d..c58e3d7 100644
--- a/backupninja/files/rsync.conf
+++ b/backupninja/files/rsync.conf
@@ -5,6 +5,16 @@
 # rsync handler file
 #
 
+#scheduling
+#by default it is
+#when = everyday at 01
+{%- if client.scheduling is defined %}
+{%- for time in client.scheduling.when %}
+when = {{ time }}
+{%- endfor %}
+{%- endif %}
+
+
 [general]
 
 # rsync log file
@@ -25,7 +35,7 @@
 # backup partition mountpoint or backup main folder
 # this doesn't need to be a real partition, but should be at least the
 # main folder where the backup is being stored
-mountpoint = /srv/backupninja
+mountpoint = {{ client.target.get('home_dir', '/srv/backupninja') }}
 
 # folder relative do mountpoint where the backup should be stored
 backupdir = {{ system.name }}.{{ system.domain }}
@@ -61,7 +71,7 @@
 # that with the days config your backup gets a .0 suffix at the destination
 # folder, making it easier to turn it later to an incremental backup.
 #
-days = 7
+days = {{ client.target.keep|default(7) }}
 
 # for long storage format, specify the number of daily backup increments
 #keepdaily = 7
@@ -129,7 +139,11 @@
 #rsync = $RSYNC
 
 # rsync command options
+{% if client.target.engine_opts is defined %}
+rsync_options = {{ client.target.engine_opts }}
+{%- else %}
 #rsync_options = "-av --delete --recursive"
+{%- endif %}
 
 # when set to 1, use numeric ids instead of user/group mappings on rsync
 #numericids =
diff --git a/backupninja/files/rsync.sh b/backupninja/files/rsync.sh
index 354e451..c3d8241 100644
--- a/backupninja/files/rsync.sh
+++ b/backupninja/files/rsync.sh
@@ -259,7 +259,7 @@
   fi
 
   ssh_cmd_base="ssh -T -o PasswordAuthentication=no -p $port -i $id_file"
-  ssh_cmd="$ssh_cmd_base $user@$host"
+  ssh_cmd="$ssh_cmd_base $user@$host bash"
 
   if [ "$from" == "remote" ] || [ "$dest" == "remote" ]; then
     if [ "$testconnect" == "yes" ] && [ "$protocol" == "ssh" ]; then
diff --git a/backupninja/map.jinja b/backupninja/map.jinja
index a95e808..7c6c1ba 100644
--- a/backupninja/map.jinja
+++ b/backupninja/map.jinja
@@ -19,11 +19,13 @@
   'Debian': {
     'pkgs': ['rsync'],
     'home_dir': '/srv/backupninja',
+    'restrict_clients': True,
     'keys': [],
   },
   'RedHat': {
     'pkgs': ['rsync'],
     'home_dir': '/srv/backupninja',
+    'restrict_clients': True,
     'keys': [],
   },
 }, merge=salt['pillar.get']('backupninja:server')) %}
diff --git a/backupninja/meta/backupninja.yml b/backupninja/meta/backupninja.yml
deleted file mode 100644
index ff227f6..0000000
--- a/backupninja/meta/backupninja.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-backup:
-  backupninja:
-    fs_includes:
-    - /var/backups
-    fs_excludes: []
diff --git a/backupninja/meta/salt.yml b/backupninja/meta/salt.yml
index 89db0df..d7f60d4 100644
--- a/backupninja/meta/salt.yml
+++ b/backupninja/meta/salt.yml
@@ -3,4 +3,22 @@
     {%- if pillar.get('backupninja', {}).get('client') %}
     {%- from "backupninja/map.jinja" import service_grains with context %}
     {{ service_grains|yaml(False)|indent(4) }}
+      {#- client addresses #}
+      {%- set addresses = [] %}
+      {%- set ips = salt['grains.get']("fqdn_ip4")|list %}
+      {%- if ips %}
+        {%- for ip in ips %}
+          {%- if not (ip|string).startswith('127.')  %}
+            {%- do addresses.append(ip) %}
+          {%- endif %}
+        {%- endfor %}
+      {%- endif %}
+      {%- if addresses %}
+      client:
+        addresses: {{ addresses|yaml }}
+      {%- endif %}
+    {%- else %}
+    backupninja:
+      backup: {}
     {%- endif %}
+
diff --git a/backupninja/server.sls b/backupninja/server.sls
index 6a7783a..58bf986 100644
--- a/backupninja/server.sls
+++ b/backupninja/server.sls
@@ -9,9 +9,9 @@
   user.present:
   - name: backupninja
   - system: true
-  - home: /srv/backupninja
+  - home: {{ server.home_dir }}
 
-/srv/backupninja:
+{{ server.home_dir }}:
   file.directory:
   - mode: 700
   - user: backupninja
@@ -21,48 +21,42 @@
     - user: backupninja_user
     - pkg: backupninja_server_packages
 
-{%- for key_name, key in server.key.iteritems() %}
-
-{%- if key.get('enabled', False) %}
-
-backupninja_key_{{ key.key }}:
-  ssh_auth.present:
+{{ server.home_dir }}/.ssh:
+  file.directory:
+  - mode: 700
   - user: backupninja
-  - name: {{ key.key }}
+  - group: backupninja
   - require:
-    - file: /srv/backupninja
+    - file: {{ server.home_dir }}
 
-{%- endif %}
-
-{%- endfor %}
+{{ server.home_dir }}/.ssh/authorized_keys:
+  file.managed:
+  - user: backupninja
+  - group: backupninja
+  - template: jinja
+  - source: salt://backupninja/files/authorized_keys
+  - require:
+    - file: {{ server.home_dir }}
+    - file: {{ server.home_dir }}/.ssh
 
 {%- for node_name, node_grains in salt['mine.get']('*', 'grains.items').iteritems() %}
 
-/srv/backupninja/{{ node_name }}:
-  file.directory:
-  - mode: 700
-  - user: backupninja
-  - group: backupninja
-  - makedirs: true
-  - require:
-    - user: backupninja_user
-    - pkg: backupninja_server_packages
-
 {%- for backup_name, backup in node_grains.get('backupninja', {}).get('backup', {}).iteritems() %}
 {%- for fs_include in backup.fs_includes %}
 
-/srv/backupninja/{{ node_name }}{{ fs_include }}:
+{{ server.home_dir }}/{{ node_name }}{{ fs_include }}:
   file.directory:
   - mode: 700
   - user: backupninja
   - group: backupninja
   - makedirs: true
   - require:
-    - file: /srv/backupninja/{{ node_name }}
+    - user: backupninja_user
+    - pkg: backupninja_server_packages
 
 {%- endfor %}
 {%- endfor %}
 
 {%- endfor %}
 
-{%- endif %}
\ No newline at end of file
+{%- endif %}
diff --git a/debian/control b/debian/control
index 40b8b18..d1fb233 100644
--- a/debian/control
+++ b/debian/control
@@ -10,6 +10,6 @@
 
 Package: salt-formula-backupninja
 Architecture: all
-Depends: ${misc:Depends}, salt-master, reclass
+Depends: ${misc:Depends}
 Description: Backupninja salt formula
  Install and configure Backupninja backup system.
diff --git a/metadata.yml b/metadata.yml
index 39bc145..d527801 100644
--- a/metadata.yml
+++ b/metadata.yml
@@ -1,3 +1,3 @@
 name: "backupninja"
 version: "0.2"
-source: "https://github.com/tcpcloud/salt-formula-backupninja"
+source: "https://github.com/salt-formulas/salt-formula-backupninja"
diff --git a/metadata/service/support.yml b/metadata/service/support.yml
index 5e12c47..8107660 100644
--- a/metadata/service/support.yml
+++ b/metadata/service/support.yml
@@ -2,12 +2,12 @@
   backupninja:
     _support:
       backupninja:
-        enabled: true
+        enabled: false
       collectd:
         enabled: false
       heka:
         enabled: true
       sensu:
-        enabled: true
+        enabled: false
       sphinx:
         enabled: true
diff --git a/tests/pillar/client_rsync.sls b/tests/pillar/client_rsync.sls
new file mode 100644
index 0000000..49206d7
--- /dev/null
+++ b/tests/pillar/client_rsync.sls
@@ -0,0 +1,11 @@
+backupninja:
+  client:
+    enabled: true
+    target:
+      engine: rsync
+      host: 10.10.10.208
+      user: backupninja
+linux:
+  system:
+    name: hostname
+    domain: domain
\ No newline at end of file
diff --git a/tests/pillar/client_rsync_backup_times.sls b/tests/pillar/client_rsync_backup_times.sls
new file mode 100644
index 0000000..adb5a78
--- /dev/null
+++ b/tests/pillar/client_rsync_backup_times.sls
@@ -0,0 +1,15 @@
+backupninja:
+  client:
+    enabled: true
+    backup_times:
+      day_of_week: 1
+      hour: 4
+      minute: 52
+    target:
+      engine: rsync
+      host: 10.10.10.208
+      user: backupninja
+linux:
+  system:
+    name: hostname
+    domain: domain
\ No newline at end of file
diff --git a/tests/pillar/client_s3.sls b/tests/pillar/client_s3.sls
new file mode 100644
index 0000000..b944923
--- /dev/null
+++ b/tests/pillar/client_s3.sls
@@ -0,0 +1,9 @@
+backupninja:
+  client:
+    enabled: true
+    target:
+      engine: dup
+      url: s3+http://bucket-name/folder-name
+      auth:
+        awsaccesskeyid: awsaccesskeyid
+        awssecretaccesskey: awssecretaccesskey
\ No newline at end of file
diff --git a/tests/pillar/client_s3_disabled_auto.sls b/tests/pillar/client_s3_disabled_auto.sls
new file mode 100644
index 0000000..8ee38bf
--- /dev/null
+++ b/tests/pillar/client_s3_disabled_auto.sls
@@ -0,0 +1,10 @@
+backupninja:
+  client:
+    enabled: true
+    auto_backup_disabled: true
+    target:
+      engine: dup
+      url: s3+http://bucket-name/folder-name
+      auth:
+        awsaccesskeyid: awsaccesskeyid
+        awssecretaccesskey: awssecretaccesskey
\ No newline at end of file
diff --git a/tests/pillar/client_webdav.sls b/tests/pillar/client_webdav.sls
new file mode 100644
index 0000000..56f6752
--- /dev/null
+++ b/tests/pillar/client_webdav.sls
@@ -0,0 +1,10 @@
+backupninja:
+  client:
+    enabled: true
+    target:
+      engine: dup
+      url: webdavs://backup.cloud.example.com/box.example.com/
+      auth:
+        gss:
+          principal: host/${linux:network:fqdn}
+          keytab: /etc/krb5.keytab
\ No newline at end of file
diff --git a/tests/pillar/server_rdiff.sls b/tests/pillar/server_rdiff.sls
new file mode 100644
index 0000000..7bd8a75
--- /dev/null
+++ b/tests/pillar/server_rdiff.sls
@@ -0,0 +1,8 @@
+backupninja:
+  server:
+    enabled: true
+    rdiff: true
+    key:
+      client1.domain.com:
+        enabled: true
+        key: MIICWwIBAAKBgHDOcqtOQ12iGujC5GFg1lrFauY2w578XdjynKcgRkYSQa8X4SYp3tRtkwYmQ3m0iSlO+eLhLVqIg3lSGuSlMYEpUM6iiuVfWQ6l2hqGymspabEalO1PgAEn8vrtOQ9cFDa6KD9J4UDulgRDJbWu02KD6EDR93lQo2vh/JnH7o2fAgMBAAECgYBVIxrcO0cQheX1QhIHhISDIXgao75kYWea8xBT+kHPZLv6H/WnTfaFMOXDwGEwm5bhfF+QYg5Ow7UmfVcatap0JY1WYYEyyi/o98l9M4SqF01yki/VB6dzsQEZyv/YT4PoeFbSONDIHMN5UwtDRYuYORaK1eGBjCDqtgh2WpDd0QJBALiP7RlY6zscOP3IicYZvIAUCag4tJe9mfcJULc8Pm0vAMF3XSuEQm95ABqTVtXQFPnR3O1gHxTfUcTxWojrzo0CQQCceFFzYG6+IMQaJhjzcZUQCxqmLVrEIxvsfxKVVgR/KVKvVZoV0XPk1qhO2R7clGOYO9zTb+1RcH1TlvKBiwfbAkEAkvA7k2+gsEny8dJEJkz9L3dEjQLAfQK93tkywv+ar/C5CE6RBgFhp0BNndrqSoZE/PRhkTdy53o4rrR9IP+kTQJAMKjmQ6hAqFYORkeEyOiQv82EOxutt4NnorOd9t0qRtV2BlrrsU9S6F6tNuAkzdpD4srzELolJoeCIiW2Qv0WswJAWiykhuRlPBW75UdnYIGrtTaDD0VmbD6zAeHyaoEBZJX48SR7v8szQzUph7pW3z770riY4xUVgbcBDH61w2q1Tg== email@domain.com
diff --git a/tests/run_tests.sh b/tests/run_tests.sh
new file mode 100755
index 0000000..a600206
--- /dev/null
+++ b/tests/run_tests.sh
@@ -0,0 +1,166 @@
+#!/usr/bin/env bash
+
+set -e
+[ -n "$DEBUG" ] && set -x
+
+CURDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+METADATA=${CURDIR}/../metadata.yml
+FORMULA_NAME=$(cat $METADATA | python -c "import sys,yaml; print yaml.load(sys.stdin)['name']")
+
+## Overrideable parameters
+PILLARDIR=${PILLARDIR:-${CURDIR}/pillar}
+BUILDDIR=${BUILDDIR:-${CURDIR}/build}
+VENV_DIR=${VENV_DIR:-${BUILDDIR}/virtualenv}
+DEPSDIR=${BUILDDIR}/deps
+
+SALT_FILE_DIR=${SALT_FILE_DIR:-${BUILDDIR}/file_root}
+SALT_PILLAR_DIR=${SALT_PILLAR_DIR:-${BUILDDIR}/pillar_root}
+SALT_CONFIG_DIR=${SALT_CONFIG_DIR:-${BUILDDIR}/salt}
+SALT_CACHE_DIR=${SALT_CACHE_DIR:-${SALT_CONFIG_DIR}/cache}
+
+SALT_OPTS="${SALT_OPTS} --retcode-passthrough --local -c ${SALT_CONFIG_DIR} --log-file=/dev/null"
+
+if [ "x${SALT_VERSION}" != "x" ]; then
+    PIP_SALT_VERSION="==${SALT_VERSION}"
+fi
+
+## Functions
+log_info() {
+    echo "[INFO] $*"
+}
+
+log_err() {
+    echo "[ERROR] $*" >&2
+}
+
+setup_virtualenv() {
+    log_info "Setting up Python virtualenv"
+    virtualenv $VENV_DIR
+    source ${VENV_DIR}/bin/activate
+    python -m pip install salt${PIP_SALT_VERSION}
+}
+
+setup_pillar() {
+    [ ! -d ${SALT_PILLAR_DIR} ] && mkdir -p ${SALT_PILLAR_DIR}
+    echo "base:" > ${SALT_PILLAR_DIR}/top.sls
+    for pillar in ${PILLARDIR}/*; do
+        grep ${FORMULA_NAME}: ${pillar} &>/dev/null || continue
+        state_name=$(basename ${pillar%.sls})
+        echo -e "  ${state_name}:\n    - ${state_name}" >> ${SALT_PILLAR_DIR}/top.sls
+    done
+}
+
+setup_salt() {
+    [ ! -d ${SALT_FILE_DIR} ] && mkdir -p ${SALT_FILE_DIR}
+    [ ! -d ${SALT_CONFIG_DIR} ] && mkdir -p ${SALT_CONFIG_DIR}
+    [ ! -d ${SALT_CACHE_DIR} ] && mkdir -p ${SALT_CACHE_DIR}
+
+    echo "base:" > ${SALT_FILE_DIR}/top.sls
+    for pillar in ${PILLARDIR}/*.sls; do
+        grep ${FORMULA_NAME}: ${pillar} &>/dev/null || continue
+        state_name=$(basename ${pillar%.sls})
+        echo -e "  ${state_name}:\n    - ${FORMULA_NAME}" >> ${SALT_FILE_DIR}/top.sls
+    done
+
+    cat << EOF > ${SALT_CONFIG_DIR}/minion
+file_client: local
+cachedir: ${SALT_CACHE_DIR}
+verify_env: False
+minion_id_caching: False
+
+file_roots:
+  base:
+  - ${SALT_FILE_DIR}
+  - ${CURDIR}/..
+  - /usr/share/salt-formulas/env
+
+pillar_roots:
+  base:
+  - ${SALT_PILLAR_DIR}
+  - ${PILLARDIR}
+EOF
+}
+
+fetch_dependency() {
+    dep_name="$(echo $1|cut -d : -f 1)"
+    dep_source="$(echo $1|cut -d : -f 2-)"
+    dep_root="${DEPSDIR}/$(basename $dep_source .git)"
+    dep_metadata="${dep_root}/metadata.yml"
+
+    [ -d /usr/share/salt-formulas/env/${dep_name} ] && log_info "Dependency $dep_name already present in system-wide salt env" && return 0
+    [ -d $dep_root ] && log_info "Dependency $dep_name already fetched" && return 0
+
+    log_info "Fetching dependency $dep_name"
+    [ ! -d ${DEPSDIR} ] && mkdir -p ${DEPSDIR}
+    git clone $dep_source ${DEPSDIR}/$(basename $dep_source .git)
+    ln -s ${dep_root}/${dep_name} ${SALT_FILE_DIR}/${dep_name}
+
+    METADATA="${dep_metadata}" install_dependencies
+}
+
+install_dependencies() {
+    grep -E "^dependencies:" ${METADATA} >/dev/null || return 0
+    (python - | while read dep; do fetch_dependency "$dep"; done) << EOF
+import sys,yaml
+for dep in yaml.load(open('${METADATA}', 'ro'))['dependencies']:
+    print '%s:%s' % (dep["name"], dep["source"])
+EOF
+}
+
+clean() {
+    log_info "Cleaning up ${BUILDDIR}"
+    [ -d ${BUILDDIR} ] && rm -rf ${BUILDDIR} || exit 0
+}
+
+salt_run() {
+    [ -e ${VENV_DIR}/bin/activate ] && source ${VENV_DIR}/bin/activate
+    python $(which salt-call) ${SALT_OPTS} $*
+}
+
+prepare() {
+    [ ! -d ${BUILDDIR} ] && mkdir -p ${BUILDDIR}
+
+    which salt-call || setup_virtualenv
+    setup_pillar
+    setup_salt
+    install_dependencies
+}
+
+run() {
+    for pillar in ${PILLARDIR}/*.sls; do
+        grep ${FORMULA_NAME}: ${pillar} &>/dev/null || continue
+        state_name=$(basename ${pillar%.sls})
+        salt_run --id=${state_name} state.show_sls ${FORMULA_NAME} || (log_err "Execution of ${FORMULA_NAME}.${state_name} failed"; exit 1)
+    done
+}
+
+_atexit() {
+    RETVAL=$?
+    trap true INT TERM EXIT
+
+    if [ $RETVAL -ne 0 ]; then
+        log_err "Execution failed"
+    else
+        log_info "Execution successful"
+    fi
+    return $RETVAL
+}
+
+## Main
+trap _atexit INT TERM EXIT
+
+case $1 in
+    clean)
+        clean
+        ;;
+    prepare)
+        prepare
+        ;;
+    run)
+        run
+        ;;
+    *)
+        prepare
+        run
+        ;;
+esac