Initial release
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..6f9f296
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,6 @@
+
+.kitchen
+
+tests/build/
+*.swp
+*.pyc
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
new file mode 100644
index 0000000..611ae93
--- /dev/null
+++ b/CHANGELOG.rst
@@ -0,0 +1,6 @@
+influxdb-formula
+================
+
+0.0.1 (2015-08-03)
+
+- Initial formula setup
\ No newline at end of file
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..6f2b42f
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,13 @@
+Copyright (c) 2014-2015 tcp cloud a.s.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
\ No newline at end of file
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..fc83783
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,26 @@
+DESTDIR=/
+SALTENVDIR=/usr/share/salt-formulas/env
+RECLASSDIR=/usr/share/salt-formulas/reclass
+FORMULANAME=$(shell grep name: metadata.yml|head -1|cut -d : -f 2|grep -Eo '[a-z0-9\-]*')
+
+all:
+	@echo "make install - Install into DESTDIR"
+	@echo "make test    - Run tests"
+	@echo "make clean   - Cleanup after tests run"
+
+install:
+	# Formula
+	[ -d $(DESTDIR)/$(SALTENVDIR) ] || mkdir -p $(DESTDIR)/$(SALTENVDIR)
+	cp -a $(FORMULANAME) $(DESTDIR)/$(SALTENVDIR)/
+	[ ! -d _modules ] || cp -a _modules $(DESTDIR)/$(SALTENVDIR)/
+	[ ! -d _states ] || cp -a _states $(DESTDIR)/$(SALTENVDIR)/
+	# Metadata
+	[ -d $(DESTDIR)/$(RECLASSDIR)/service/$(FORMULANAME) ] || mkdir -p $(DESTDIR)/$(RECLASSDIR)/service/$(FORMULANAME)
+	cp -a metadata/service/* $(DESTDIR)/$(RECLASSDIR)/service/$(FORMULANAME)
+
+test:
+	[ ! -d tests ] || (cd tests; ./run_tests.sh)
+
+clean:
+	[ ! -d tests/build ] || rm -rf tests/build
+	[ ! -d build ] || rm -rf build
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..c2f5587
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,84 @@
+
+========
+InfluxDB
+========
+
+InfluxDB is the time-series database of InfluxData's TICK stack, the first open source platform for managing IoT time-series data at scale.
+
+Sample pillars
+==============
+
+Single-node influxdb, enabled http frontend and admin web interface:
+
+.. code-block:: yaml
+
+    influxdb:
+      server:
+        enabled: true
+        http:
+          enabled: true
+          bind:
+            address: 0.0.0.0
+            port: 8086
+        admin:
+          enabled: true
+          bind:
+            address: 0.0.0.0
+            port: 8083
+
+Single-node influxdb, SSL for http frontend:
+
+.. code-block:: yaml
+
+    influxdb:
+      server:
+        enabled: true
+        http:
+          bind:
+            ssl:
+              enabled: true
+              key_file: /etc/influxdb/ssl/key.pem
+              cert_file: /etc/influxdb/ssl/cert.pem
+
+InfluxDB relay:
+
+.. code-block:: yaml
+
+    influxdb:
+      server:
+        enabled: true
+        http:
+          enabled: true
+          output:
+            idb01:
+              location: http://idb01.local:8086/write
+              timeout: 10
+            idb02:
+              location: http://idb02.local:8086/write
+              timeout: 10
+        udp:
+          enabled: true
+          output:
+            idb01:
+              location: idb01.local:9096
+            idb02:
+              location: idb02.local:9096
+
+Deploy influxdb apt repository (using linux formula):
+
+.. code-block:: yaml
+
+    linux:
+      system:
+        os: ubuntu
+        dist: xenial
+      repo:
+        influxdb:
+          enabled: true
+          source: 'deb https://repos.influxdata.com/${linux:system:os} ${linux:system:dist} stable'
+          key_url: 'https://repos.influxdata.com/influxdb.key'
+
+Read more
+=========
+
+* https://influxdata.com/time-series-platform/influxdb/
diff --git a/VERSION b/VERSION
new file mode 100644
index 0000000..49d5957
--- /dev/null
+++ b/VERSION
@@ -0,0 +1 @@
+0.1
diff --git a/debian/changelog b/debian/changelog
new file mode 100644
index 0000000..d53f96d
--- /dev/null
+++ b/debian/changelog
@@ -0,0 +1,5 @@
+salt-formula-influxdb (0.1) trusty; urgency=medium
+
+  * Initial release
+
+ -- Filip Pytloun <filip.pytloun@tcpcloud.eu>  Thu, 13 Aug 2015 23:23:41 +0200
diff --git a/debian/compat b/debian/compat
new file mode 100644
index 0000000..ec63514
--- /dev/null
+++ b/debian/compat
@@ -0,0 +1 @@
+9
diff --git a/debian/control b/debian/control
new file mode 100644
index 0000000..e12718a
--- /dev/null
+++ b/debian/control
@@ -0,0 +1,15 @@
+Source: salt-formula-influxdb
+Maintainer: Filip Pytloun <filip.pytloun@tcpcloud.eu>
+Section: admin
+Priority: optional
+Build-Depends: salt-master, python, python-yaml, debhelper (>= 9)
+Standards-Version: 3.9.6
+Homepage: http://www.tcpcloud.eu
+Vcs-Browser: https://github.com/tcpcloud/salt-formula-influxdb
+Vcs-Git: https://github.com/tcpcloud/salt-formula-influxdb.git
+
+Package: salt-formula-influxdb
+Architecture: all
+Depends: ${misc:Depends}, salt-master, reclass
+Description: InfluxDB Salt formula
+ Install and configure InfluxDB server.
diff --git a/debian/copyright b/debian/copyright
new file mode 100644
index 0000000..fdc7ec3
--- /dev/null
+++ b/debian/copyright
@@ -0,0 +1,15 @@
+Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: salt-formula-influxdb
+Upstream-Contact: Jakub Pavlik <jakub.pavlik@tcpcloud.eu>
+Source: https://github.com/tcpcloud/salt-formula-influxdb
+
+Files: *
+Copyright: 2014-2015 tcp cloud
+License: Apache-2.0
+  Copyright (C) 2014-2015 tcp cloud
+  .
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  .
+  On a Debian system you can find a copy of this license in
+  /usr/share/common-licenses/Apache-2.0.
diff --git a/debian/docs b/debian/docs
new file mode 100644
index 0000000..d585829
--- /dev/null
+++ b/debian/docs
@@ -0,0 +1,3 @@
+README.rst
+CHANGELOG.rst
+VERSION
diff --git a/debian/rules b/debian/rules
new file mode 100755
index 0000000..abde6ef
--- /dev/null
+++ b/debian/rules
@@ -0,0 +1,5 @@
+#!/usr/bin/make -f
+
+%:
+	dh $@
+
diff --git a/debian/source/format b/debian/source/format
new file mode 100644
index 0000000..89ae9db
--- /dev/null
+++ b/debian/source/format
@@ -0,0 +1 @@
+3.0 (native)
diff --git a/influxdb/files/influxdb.conf b/influxdb/files/influxdb.conf
new file mode 100644
index 0000000..605ed62
--- /dev/null
+++ b/influxdb/files/influxdb.conf
@@ -0,0 +1,333 @@
+{%- from "influxdb/map.jinja" import server with context -%}
+### Welcome to the InfluxDB configuration file.
+
+# Once every 24 hours InfluxDB will report anonymous data to m.influxdb.com
+# The data includes raft id (random 8 bytes), os, arch, version, and metadata.
+# We don't track ip addresses of servers reporting. This is only used
+# to track the number of instances running and the versions, which
+# is very helpful for us.
+# Change this option to true to disable reporting.
+reporting-disabled = false
+
+# we'll try to get the hostname automatically, but if the os returns something
+# that isn't resolvable by other servers in the cluster, use this option to
+# manually set the hostname
+# hostname = "localhost"
+
+###
+### [meta]
+###
+### Controls the parameters for the Raft consensus group that stores metadata
+### about the InfluxDB cluster.
+###
+
+[meta]
+  # Where the metadata/raft database is stored
+  dir = "/var/lib/influxdb/meta"
+
+  retention-autocreate = true
+
+  # If log messages are printed for the meta service
+  logging-enabled = true
+  pprof-enabled = false
+
+  # The default duration for leases.
+  lease-duration = "1m0s"
+
+###
+### [data]
+###
+### Controls where the actual shard data for InfluxDB lives and how it is
+### flushed from the WAL. "dir" may need to be changed to a suitable place
+### for your system, but the WAL settings are an advanced configuration. The
+### defaults should work for most systems.
+###
+
+[data]
+  # Controls if this node holds time series data shards in the cluster
+  enabled = true
+
+  dir = "/var/lib/influxdb/data"
+
+  # These are the WAL settings for the storage engine >= 0.9.3
+  wal-dir = "/var/lib/influxdb/wal"
+  wal-logging-enabled = true
+  data-logging-enabled = true
+
+  # Whether queries should be logged before execution. Very useful for troubleshooting, but will
+  # log any sensitive data contained within a query.
+  # query-log-enabled = true
+
+  # Settings for the TSM engine
+
+  # CacheMaxMemorySize is the maximum size a shard's cache can
+  # reach before it starts rejecting writes.
+  # cache-max-memory-size = 524288000
+
+  # CacheSnapshotMemorySize is the size at which the engine will
+  # snapshot the cache and write it to a TSM file, freeing up memory
+  # cache-snapshot-memory-size = 26214400
+
+  # CacheSnapshotWriteColdDuration is the length of time at
+  # which the engine will snapshot the cache and write it to
+  # a new TSM file if the shard hasn't received writes or deletes
+  # cache-snapshot-write-cold-duration = "1h"
+
+  # MinCompactionFileCount is the minimum number of TSM files
+  # that need to exist before a compaction cycle will run
+  # compact-min-file-count = 3
+
+  # CompactFullWriteColdDuration is the duration at which the engine
+  # will compact all TSM files in a shard if it hasn't received a
+  # write or delete
+  # compact-full-write-cold-duration = "24h"
+
+  # MaxPointsPerBlock is the maximum number of points in an encoded
+  # block in a TSM file. Larger numbers may yield better compression
+  # but could incur a performance penalty when querying
+  # max-points-per-block = 1000
+
+###
+### [cluster]
+###
+### Controls non-Raft cluster behavior, which generally includes how data is
+### shared across shards.
+###
+
+[cluster]
+  shard-writer-timeout = "5s" # The time within which a remote shard must respond to a write request.
+  write-timeout = "10s" # The time within which a write request must complete on the cluster.
+  max-concurrent-queries = 0 # The maximum number of concurrent queries that can run. 0 to disable.
+  query-timeout = "0s" # The time within a query must complete before being killed automatically. 0s to disable.
+  max-select-point = 0 # The maximum number of points to scan in a query. 0 to disable.
+  max-select-series = 0 # The maximum number of series to select in a query. 0 to disable.
+  max-select-buckets = 0 # The maximum number of buckets to select in an aggregate query. 0 to disable.
+
+###
+### [retention]
+###
+### Controls the enforcement of retention policies for evicting old data.
+###
+
+[retention]
+  enabled = true
+  check-interval = "30m"
+
+###
+### [shard-precreation]
+###
+### Controls the precreation of shards, so they are available before data arrives.
+### Only shards that, after creation, will have both a start- and end-time in the
+### future, will ever be created. Shards are never precreated that would be wholly
+### or partially in the past.
+
+[shard-precreation]
+  enabled = true
+  check-interval = "10m"
+  advance-period = "30m"
+
+###
+### Controls the system self-monitoring, statistics and diagnostics.
+###
+### The internal database for monitoring data is created automatically
+### if it does not already exist. The target retention within this database
+### is called 'monitor' and is also created with a retention period of 7 days
+### and a replication factor of 1, if it does not exist. In all cases
+### this retention policy is configured as the default for the database.
+
+[monitor]
+  store-enabled = true # Whether to record statistics internally.
+  store-database = "_internal" # The destination database for recorded statistics
+  store-interval = "10s" # The interval at which to record statistics
+
+###
+### [admin]
+###
+### Controls the availability of the built-in, web-based admin interface. If HTTPS is
+### enabled for the admin interface, HTTPS must also be enabled on the [http] service.
+###
+
+{%- if server.admin.enabled %}
+[admin]
+  enabled = true
+  bind-address = "{{ server.admin.bind.address }}:{{ server.admin.bind.port }}"
+  {%- if server.admin.get('ssl', {}).get('enabled', False) %}
+  https-enabled = true
+  admins-certificate = "{{ server.admin.ssl.cert_file }}"
+  admins-private-key = "{{ server.admin.ssl.key_file }}"
+  {%- else %}
+  https-enabled = false
+  {%- endif %}
+{%- else %}
+[admin]
+  enabled = false
+{%- endif %}
+
+###
+### [http]
+###
+### Controls how the HTTP endpoints are configured. These are the primary
+### mechanism for getting data into and out of InfluxDB.
+###
+
+{%- if server.http.enabled %}
+[http]
+  enabled = true
+  bind-address = "{{ server.http.bind.address }}:{{ server.http.bind.port }}"
+  auth-enabled = false
+  log-enabled = true
+  write-tracing = false
+  pprof-enabled = false
+  {%- if server.http.get('ssl', {}).get('enabled', False) %}
+  https-enabled = true
+  https-certificate = "{{ server.http.ssl.cert_file }}"
+  https-private-key = "{{ server.http.ssl.key_file }}"
+  {%- else %}
+  https-enabled = false
+  {%- endif %}
+  max-row-limit = 10000
+  {%- if server.http.output is defined %}
+  output = [
+    {%- for name, output in server.http.output.iteritems() %}
+      { name="{{ name }}", location="{{ output.location }}", timeout="{{ output.timeout|default('10') }}s" },
+    {%- endfor %}
+  ]
+  {%- endif %}
+{%- else %}
+[http]
+  enabled = false
+{%- endif %}
+
+###
+### [[graphite]]
+###
+### Controls one or many listeners for Graphite data.
+###
+
+[[graphite]]
+  enabled = false
+  # database = "graphite"
+  # bind-address = ":2003"
+  # protocol = "tcp"
+  # consistency-level = "one"
+
+  # These next lines control how batching works. You should have this enabled
+  # otherwise you could get dropped metrics or poor performance. Batching
+  # will buffer points in memory if you have many coming in.
+
+  # batch-size = 5000 # will flush if this many points get buffered
+  # batch-pending = 10 # number of batches that may be pending in memory
+  # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
+  # udp-read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
+
+  ### This string joins multiple matching 'measurement' values providing more control over the final measurement name.
+  # separator = "."
+
+  ### Default tags that will be added to all metrics.  These can be overridden at the template level
+  ### or by tags extracted from metric
+  # tags = ["region=us-east", "zone=1c"]
+
+  ### Each template line requires a template pattern.  It can have an optional
+  ### filter before the template and separated by spaces.  It can also have optional extra
+  ### tags following the template.  Multiple tags should be separated by commas and no spaces
+  ### similar to the line protocol format.  There can be only one default template.
+  # templates = [
+  #   "*.app env.service.resource.measurement",
+  #   # Default template
+  #   "server.*",
+  # ]
+
+###
+### [collectd]
+###
+### Controls one or many listeners for collectd data.
+###
+
+[[collectd]]
+  enabled = false
+  # bind-address = ""
+  # database = ""
+  # typesdb = ""
+
+  # These next lines control how batching works. You should have this enabled
+  # otherwise you could get dropped metrics or poor performance. Batching
+  # will buffer points in memory if you have many coming in.
+
+  # batch-size = 1000 # will flush if this many points get buffered
+  # batch-pending = 5 # number of batches that may be pending in memory
+  # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
+  # read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
+
+###
+### [opentsdb]
+###
+### Controls one or many listeners for OpenTSDB data.
+###
+
+[[opentsdb]]
+  enabled = false
+  # bind-address = ":4242"
+  # database = "opentsdb"
+  # retention-policy = ""
+  # consistency-level = "one"
+  # tls-enabled = false
+  # certificate= ""
+  # log-point-errors = true # Log an error for every malformed point.
+
+  # These next lines control how batching works. You should have this enabled
+  # otherwise you could get dropped metrics or poor performance. Only points
+  # metrics received over the telnet protocol undergo batching.
+
+  # batch-size = 1000 # will flush if this many points get buffered
+  # batch-pending = 5 # number of batches that may be pending in memory
+  # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
+
+###
+### [[udp]]
+###
+### Controls the listeners for InfluxDB line protocol data via UDP.
+###
+
+{%- if server.udp.enabled %}
+[udp]
+  enabled = true
+  bind-address = "{{ server.udp.bind.address }}:{{ server.udp.bind.port }}"
+  # database = "udp"
+  # retention-policy = ""
+
+  # These next lines control how batching works. You should have this enabled
+  # otherwise you could get dropped metrics or poor performance. Batching
+  # will buffer points in memory if you have many coming in.
+
+  # batch-size = 1000 # will flush if this many points get buffered
+  # batch-pending = 5 # number of batches that may be pending in memory
+  # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
+  # read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
+
+  # set the expected UDP payload size; lower values tend to yield better performance, default is max UDP size 65536
+  # udp-payload-size = 65536
+  {%- if server.udp.output is defined %}
+  output = [
+    {%- for name, output in server.udp.output.iteritems() %}
+      { name="{{ name }}", location="{{ output.location }}"{% if output.mtu is defined %}, mtu="{{ output.mtu }}"{% endif %} },
+    {%- endfor %}
+  ]
+  {%- endif %}
+{%- else %}
+[udp]
+  enabled = false
+{%- endif %}
+
+###
+### [continuous_queries]
+###
+### Controls how continuous queries are run within InfluxDB.
+###
+
+[continuous_queries]
+  log-enabled = true
+  enabled = true
+  # run-interval = "1s" # interval for how often continuous queries will be checked if they need to run
+{#-
+vim: syntax=jinja
+-#}
diff --git a/influxdb/init.sls b/influxdb/init.sls
new file mode 100644
index 0000000..c4499cc
--- /dev/null
+++ b/influxdb/init.sls
@@ -0,0 +1,5 @@
+
+{% if pillar.influxdb.server is defined %}
+include:
+- influxdb.server
+{% endif %}
diff --git a/influxdb/map.jinja b/influxdb/map.jinja
new file mode 100644
index 0000000..3c63303
--- /dev/null
+++ b/influxdb/map.jinja
@@ -0,0 +1,25 @@
+
+
+{%- load_yaml as base_defaults %}
+default:
+  pkgs:
+  - influxdb
+  service: influxdb
+  http:
+    enabled: true
+    bind:
+      address: 0.0.0.0
+      port: 8086
+  udp:
+    enabled: false
+    bind:
+      address: 0.0.0.0
+      port: 9096
+  admin:
+    enabled: true
+    bind:
+      address: 0.0.0.0
+      port: 8083
+{%- endload %}
+
+{%- set server = salt['grains.filter_by'](base_defaults, merge=salt['pillar.get']('influxdb:server')) %}
diff --git a/influxdb/meta/sensu.yml b/influxdb/meta/sensu.yml
new file mode 100644
index 0000000..96f9526
--- /dev/null
+++ b/influxdb/meta/sensu.yml
@@ -0,0 +1,8 @@
+{%- from "influxdb/map.jinja" import server with context -%}
+check:
+  local_influxdb_server_proc:
+    command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -C influxd -u influxdb -c 1:1"
+    interval: 30
+    occurrences: 3
+    subscribers:
+    - local-influxdb-server
diff --git a/influxdb/server.sls b/influxdb/server.sls
new file mode 100644
index 0000000..0c310f7
--- /dev/null
+++ b/influxdb/server.sls
@@ -0,0 +1,23 @@
+{%- from "influxdb/map.jinja" import server with context %}
+{%- if server.enabled %}
+
+influxdb_packages:
+  pkg.installed:
+  - names: {{ server.pkgs }}
+
+influxdb_config:
+  file.managed:
+  - name: /etc/influxdb/influxdb.conf
+  - source: salt://influxdb/files/influxdb.conf
+  - template: jinja
+  - require:
+    - pkg: influxdb_packages
+
+influxdb_service:
+  service.running:
+  - enable: true
+  - name: {{ server.service }}
+  - watch:
+    - file: influxdb_config
+
+{%- endif %}
diff --git a/metadata.yml b/metadata.yml
new file mode 100644
index 0000000..2f36681
--- /dev/null
+++ b/metadata.yml
@@ -0,0 +1,3 @@
+name: "influxdb"
+version: "0.1"
+source: "https://github.com/tcpcloud/salt-formula-influxdb"
diff --git a/metadata/service/server/local.yml b/metadata/service/server/local.yml
new file mode 100644
index 0000000..c9ac40a
--- /dev/null
+++ b/metadata/service/server/local.yml
@@ -0,0 +1,14 @@
+applications:
+- influxdb
+classes:
+- service.influxdb.support
+parameters:
+  influxdb:
+    server:
+      enabled: true
+      admin:
+        bind:
+          address: 127.0.0.1
+      http:
+        bind:
+          address: 127.0.0.1
diff --git a/metadata/service/server/single.yml b/metadata/service/server/single.yml
new file mode 100644
index 0000000..ec8a7cd
--- /dev/null
+++ b/metadata/service/server/single.yml
@@ -0,0 +1,14 @@
+applications:
+- influxdb
+classes:
+- service.influxdb.support
+parameters:
+  influxdb:
+    server:
+      enabled: true
+      admin:
+        bind:
+          address: 0.0.0.0
+      http:
+        bind:
+          address: 0.0.0.0
diff --git a/metadata/service/server/vendor_repo_debian.yml b/metadata/service/server/vendor_repo_debian.yml
new file mode 100644
index 0000000..6ce13bc
--- /dev/null
+++ b/metadata/service/server/vendor_repo_debian.yml
@@ -0,0 +1,13 @@
+parameters:
+  linux:
+    system:
+      repo:
+        influxdb:
+          # To have this working, set following structure:
+          # linux:
+          #   system:
+          #     os: ubuntu
+          #     dist: xenial
+          enabled: true
+          source: 'deb https://repos.influxdata.com/${linux:system:os} ${linux:system:dist} stable'
+          key_url: 'https://repos.influxdata.com/influxdb.key'
diff --git a/metadata/service/support.yml b/metadata/service/support.yml
new file mode 100644
index 0000000..9b52193
--- /dev/null
+++ b/metadata/service/support.yml
@@ -0,0 +1,11 @@
+parameters:
+  influxdb:
+    _support:
+      collectd:
+        enabled: false
+      heka:
+        enabled: false
+      sensu:
+        enabled: true
+      sphinx:
+        enabled: false
diff --git a/tests/pillar/server.sls b/tests/pillar/server.sls
new file mode 100644
index 0000000..21c12b0
--- /dev/null
+++ b/tests/pillar/server.sls
@@ -0,0 +1,3 @@
+influxdb:
+  server:
+    enabled: true
diff --git a/tests/run_tests.sh b/tests/run_tests.sh
new file mode 100755
index 0000000..3f42101
--- /dev/null
+++ b/tests/run_tests.sh
@@ -0,0 +1,162 @@
+#!/usr/bin/env bash
+
+set -e
+[ -n "$DEBUG" ] && set -x
+
+CURDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+METADATA=${CURDIR}/../metadata.yml
+FORMULA_NAME=$(cat $METADATA | python -c "import sys,yaml; print yaml.load(sys.stdin)['name']")
+
+## Overrideable parameters
+PILLARDIR=${PILLARDIR:-${CURDIR}/pillar}
+BUILDDIR=${BUILDDIR:-${CURDIR}/build}
+VENV_DIR=${VENV_DIR:-${BUILDDIR}/virtualenv}
+DEPSDIR=${BUILDDIR}/deps
+
+SALT_FILE_DIR=${SALT_FILE_DIR:-${BUILDDIR}/file_root}
+SALT_PILLAR_DIR=${SALT_PILLAR_DIR:-${BUILDDIR}/pillar_root}
+SALT_CONFIG_DIR=${SALT_CONFIG_DIR:-${BUILDDIR}/salt}
+SALT_CACHE_DIR=${SALT_CACHE_DIR:-${SALT_CONFIG_DIR}/cache}
+
+SALT_OPTS="${SALT_OPTS} --retcode-passthrough --local -c ${SALT_CONFIG_DIR}"
+
+if [ "x${SALT_VERSION}" != "x" ]; then
+    PIP_SALT_VERSION="==${SALT_VERSION}"
+fi
+
+## Functions
+log_info() {
+    echo "[INFO] $*"
+}
+
+log_err() {
+    echo "[ERROR] $*" >&2
+}
+
+setup_virtualenv() {
+    log_info "Setting up Python virtualenv"
+    virtualenv $VENV_DIR
+    source ${VENV_DIR}/bin/activate
+    pip install salt${PIP_SALT_VERSION}
+}
+
+setup_pillar() {
+    [ ! -d ${SALT_PILLAR_DIR} ] && mkdir -p ${SALT_PILLAR_DIR}
+    echo "base:" > ${SALT_PILLAR_DIR}/top.sls
+    for pillar in ${PILLARDIR}/*; do
+        state_name=$(basename ${pillar%.sls})
+        echo -e "  ${state_name}:\n    - ${state_name}" >> ${SALT_PILLAR_DIR}/top.sls
+    done
+}
+
+setup_salt() {
+    [ ! -d ${SALT_FILE_DIR} ] && mkdir -p ${SALT_FILE_DIR}
+    [ ! -d ${SALT_CONFIG_DIR} ] && mkdir -p ${SALT_CONFIG_DIR}
+    [ ! -d ${SALT_CACHE_DIR} ] && mkdir -p ${SALT_CACHE_DIR}
+
+    echo "base:" > ${SALT_FILE_DIR}/top.sls
+    for pillar in ${PILLARDIR}/*.sls; do
+        state_name=$(basename ${pillar%.sls})
+        echo -e "  ${state_name}:\n    - ${FORMULA_NAME}" >> ${SALT_FILE_DIR}/top.sls
+    done
+
+    cat << EOF > ${SALT_CONFIG_DIR}/minion
+file_client: local
+cachedir: ${SALT_CACHE_DIR}
+verify_env: False
+
+file_roots:
+  base:
+  - ${SALT_FILE_DIR}
+  - ${CURDIR}/..
+  - /usr/share/salt-formulas/env
+
+pillar_roots:
+  base:
+  - ${SALT_PILLAR_DIR}
+  - ${PILLARDIR}
+EOF
+}
+
+fetch_dependency() {
+    dep_name="$(echo $1|cut -d : -f 1)"
+    dep_source="$(echo $1|cut -d : -f 2-)"
+    dep_root="${DEPSDIR}/$(basename $dep_source .git)"
+    dep_metadata="${dep_root}/metadata.yml"
+
+    [ -d /usr/share/salt-formulas/env/${dep_name} ] && log_info "Dependency $dep_name already present in system-wide salt env" && return 0
+    [ -d $dep_root ] && log_info "Dependency $dep_name already fetched" && return 0
+
+    log_info "Fetching dependency $dep_name"
+    [ ! -d ${DEPSDIR} ] && mkdir -p ${DEPSDIR}
+    git clone $dep_source ${DEPSDIR}/$(basename $dep_source .git)
+    ln -s ${dep_root}/${dep_name} ${SALT_FILE_DIR}/${dep_name}
+
+    METADATA="${dep_metadata}" install_dependencies
+}
+
+install_dependencies() {
+    grep -E "^dependencies:" ${METADATA} >/dev/null || return 0
+    (python - | while read dep; do fetch_dependency "$dep"; done) << EOF
+import sys,yaml
+for dep in yaml.load(open('${METADATA}', 'ro'))['dependencies']:
+    print '%s:%s' % (dep["name"], dep["source"])
+EOF
+}
+
+clean() {
+    log_info "Cleaning up ${BUILDDIR}"
+    [ -d ${BUILDDIR} ] && rm -rf ${BUILDDIR} || exit 0
+}
+
+salt_run() {
+    [ -e ${VENV_DIR}/bin/activate ] && source ${VENV_DIR}/bin/activate
+    salt-call ${SALT_OPTS} $*
+}
+
+prepare() {
+    [ -d ${BUILDDIR} ] || mkdir -p ${BUILDDIR}
+
+    which salt-call || setup_virtualenv
+    setup_pillar
+    setup_salt
+    install_dependencies
+}
+
+run() {
+    for pillar in ${PILLARDIR}/*.sls; do
+        state_name=$(basename ${pillar%.sls})
+        salt_run --id=${state_name} state.show_sls ${FORMULA_NAME} || (log_err "Execution of ${FORMULA_NAME}.${state_name} failed"; exit 1)
+    done
+}
+
+_atexit() {
+    RETVAL=$?
+    trap true INT TERM EXIT
+
+    if [ $RETVAL -ne 0 ]; then
+        log_err "Execution failed"
+    else
+        log_info "Execution successful"
+    fi
+    return $RETVAL
+}
+
+## Main
+trap _atexit INT TERM EXIT
+
+case $1 in
+    clean)
+        clean
+        ;;
+    prepare)
+        prepare
+        ;;
+    run)
+        run
+        ;;
+    *)
+        prepare
+        run
+        ;;
+esac