Merge pull request #28 from Martin819/master

Added tests script and sample pillars, Kitchen tests and Travis
diff --git a/.kitchen.yml b/.kitchen.yml
new file mode 100644
index 0000000..576130e
--- /dev/null
+++ b/.kitchen.yml
@@ -0,0 +1,50 @@
+---
+driver:
+  name: docker
+  hostname: elasticsearch.ci.local
+  use_sudo: false
+
+provisioner:
+  name: salt_solo
+  salt_install: bootstrap
+  salt_bootstrap_url: https://bootstrap.saltstack.com
+  salt_version: latest
+  require_chef: false
+  log_level: error
+  formula: elasticsearch
+  grains:
+    noservices: true
+  state_top:
+    base:
+      "*":
+        - linux
+        - elasticsearch
+  pillars:
+    top.sls:
+      base:
+        "*":
+          - elasticsearch
+          - elasticsearch_debian_repo
+  pillars-from-files:
+    elasticsearch_debian_repo.sls: tests/pillar/repo_elasticsearch.sls
+  dependencies:
+    - name: linux
+      repo: git
+      source: https://github.com/salt-formulas/salt-formula-linux.git
+
+verifier:
+  name: inspec
+  sudo: true
+
+platforms:
+  - name: ubuntu-xenial
+    driver_config:
+      image: trevorj/salty-whales:xenial
+      platform: ubuntu
+
+suites:
+  - name: <%=ENV['SUITE'] || 'single'%>
+    provisioner:
+      pillars-from-files:
+        elasticsearch.sls: tests/pillar/<%=ENV['SUITE'] || 'single'%>.sls
+# vim: ft=yaml sw=2 ts=2 sts=2 tw=125
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..6385840
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,42 @@
+sudo: required
+services:
+  - docker
+
+install:
+  - pip install PyYAML
+  - pip install virtualenv
+  - |
+    test -e Gemfile || cat <<EOF > Gemfile
+    source 'https://rubygems.org'
+    gem 'rake'
+    gem 'test-kitchen'
+    gem 'kitchen-docker'
+    gem 'kitchen-inspec'
+    gem 'inspec'
+    gem 'kitchen-salt', :git => 'https://github.com/salt-formulas/kitchen-salt.git'
+    EOF
+  - bundle install
+
+env:
+  - SUITE='single'
+  - SUITE='cluster'
+  - SUITE='curator'
+  - SUITE='client'
+
+before_script:
+  - set -o pipefail
+  - make test | tail
+
+script:
+  - test ! -e .kitchen.yml || bundle exec kitchen test -t tests/integration
+
+notifications:
+  webhooks:
+    urls:
+      - https://webhooks.gitter.im/e/6123573504759330786b
+    on_success: change  # options: [always|never|change] default: always
+    on_failure: never  # options: [always|never|change] default: always
+    on_start: never     # options: [always|never|change] default: always
+    on_cancel: never    # options: [always|never|change] default: always
+    on_error: never    # options: [always|never|change] default: always
+  email: false
diff --git a/elasticsearch/map.jinja b/elasticsearch/map.jinja
index 36dd674..ad70680 100644
--- a/elasticsearch/map.jinja
+++ b/elasticsearch/map.jinja
@@ -1,11 +1,10 @@
-
-
 {%- load_yaml as base_defaults %}
 Debian:
   pkgs:
   - elasticsearch
   curator_pkgs:
   - python-elasticsearch-curator
+  - cron
   service: elasticsearch
   version: '1.4.4'
 RedHat:
diff --git a/elasticsearch/server/init.sls b/elasticsearch/server/init.sls
index 14cc9f8..8b2ec70 100644
--- a/elasticsearch/server/init.sls
+++ b/elasticsearch/server/init.sls
@@ -1,11 +1,10 @@
 {%- from "elasticsearch/map.jinja" import server with context %}
 {%- if server.enabled %}
 
+{%- if server.curator is defined %}
 include:
-  - java
-  {%- if server.curator is defined %}
   - elasticsearch.server.curator
-  {%- endif %}
+{%- endif %}
 
 elasticsearch_packages:
   pkg.installed:
@@ -48,6 +47,8 @@
   - template: jinja
 {%- endif %}
 
+{%- if not grains.get('noservices', False) %}
+
 elasticsearch_service:
   service.running:
   - enable: true
@@ -58,3 +59,5 @@
     - file: elasticsearch_default
 
 {%- endif %}
+
+{%- endif %}
diff --git a/tests/pillar/client.sls b/tests/pillar/client.sls
new file mode 100644
index 0000000..053d17a
--- /dev/null
+++ b/tests/pillar/client.sls
@@ -0,0 +1,6 @@
+elasticsearch:
+  client:
+    enabled: true
+    server:
+      host: elasticsearch.host
+      port: 9200
diff --git a/tests/pillar/cluster.sls b/tests/pillar/cluster.sls
new file mode 100644
index 0000000..316438e
--- /dev/null
+++ b/tests/pillar/cluster.sls
@@ -0,0 +1,18 @@
+elasticsearch:
+  server:
+    enabled: true
+    bind:
+      address: 0.0.0.0
+      port: 9200
+    cluster:
+      multicast: false
+      members:
+        - host: elastic01
+          port: 9300
+        - host: elastic02
+          port: 9300
+        - host: elastic03
+          port: 9300
+    index:
+      shards: 5
+      replicas: 1
diff --git a/tests/pillar/curator.sls b/tests/pillar/curator.sls
new file mode 100644
index 0000000..f4b8158
--- /dev/null
+++ b/tests/pillar/curator.sls
@@ -0,0 +1,72 @@
+elasticsearch:
+  server:
+    enabled: true
+    bind:
+      address: 0.0.0.0
+      port: 9200
+    curator:
+      timeout: 900
+      logfile: /var/log/elasticsearch/curator.log
+      logformat: json
+      master_only: true
+      actions:
+        - action: delete_indices
+          description: >-
+            Delete indices older than 45 days (based on index name).
+            Ignore the error if the filter does not result in an actionable
+            list of indices (ignore_empty_list) and exit cleanly.
+          options:
+            ignore_empty_list: True
+            continue_if_exception: False
+            disable_action: False
+          filters:
+            - filtertype: pattern
+              kind: regex
+              value: '.*\-\d\d\d\d\.\d\d\.\d\d$'
+            - filtertype: age
+              source: name
+              direction: older
+              timestring: '%Y.%m.%d'
+              unit: days
+              unit_count: 90
+        - action: replicas
+          description: >-
+            Reduce the replica count to 0 for indices older than 30 days
+            (based on index creation_date)
+          options:
+            count: 0
+            wait_for_completion: False
+            continue_if_exception: False
+            disable_action: False
+          filters:
+            - filtertype: pattern
+              kind: regex
+              value: '.*\-\d\d\d\d\.\d\d\.\d\d$'
+            - filtertype: age
+              source: creation_date
+              direction: older
+              unit: days
+              unit_count: 30
+        - action: forcemerge
+          description: >-
+            forceMerge indices older than 2 days (based on index
+            creation_date) to 2 segments per shard.  Delay 120 seconds
+            between each forceMerge operation to allow the cluster to
+            quiesce.
+            This action will ignore indices already forceMerged to the same
+            or fewer number of segments per shard, so the 'forcemerged'
+            filter is unneeded.
+          options:
+            max_num_segments: 2
+            delay: 120
+            continue_if_exception: False
+            disable_action: True
+          filters:
+            - filtertype: pattern
+              kind: regex
+              value: '.*\-\d\d\d\d\.\d\d\.\d\d$'
+            - filtertype: age
+              source: creation_date
+              direction: older
+              unit: days
+              unit_count: 2
diff --git a/tests/pillar/repo_elasticsearch.sls b/tests/pillar/repo_elasticsearch.sls
new file mode 100644
index 0000000..06d43aa
--- /dev/null
+++ b/tests/pillar/repo_elasticsearch.sls
@@ -0,0 +1,12 @@
+linux:
+  system:
+    enabled: true
+    repo:
+      elasticsearch_repo:
+        source: "deb http://packages.elastic.co/elasticsearch/2.x/debian stable main"
+        architectures: amd64
+        key_url: "https://packages.elastic.co/GPG-KEY-elasticsearch"
+      mirantis_openstack_repo:
+        source: "deb http://mirror.fuel-infra.org/mcp-repos/1.0/{{ grains.get('oscodename') }} mitaka main"
+        architectures: amd64
+        key_url: "http://mirror.fuel-infra.org/mcp-repos/1.0/{{ grains.get('oscodename') }}/archive-mcp1.0.key"
diff --git a/tests/pillar/single.sls b/tests/pillar/single.sls
new file mode 100644
index 0000000..c4f54e0
--- /dev/null
+++ b/tests/pillar/single.sls
@@ -0,0 +1,11 @@
+elasticsearch:
+  server:
+    enabled: true
+    bind:
+      address: 0.0.0.0
+      port: 9200
+    cluster:
+      multicast: false
+    index:
+      shards: 1
+      replicas: 0
diff --git a/tests/run_tests.sh b/tests/run_tests.sh
new file mode 100755
index 0000000..a4cac88
--- /dev/null
+++ b/tests/run_tests.sh
@@ -0,0 +1,166 @@
+#!/usr/bin/env bash
+
+set -e
+[ -n "$DEBUG" ] && set -x
+
+CURDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+METADATA=${CURDIR}/../metadata.yml
+FORMULA_NAME=$(cat $METADATA | python -c "import sys,yaml; print yaml.safe_load(sys.stdin)['name']")
+
+## Overrideable parameters
+PILLARDIR=${PILLARDIR:-${CURDIR}/pillar}
+BUILDDIR=${BUILDDIR:-${CURDIR}/build}
+VENV_DIR=${VENV_DIR:-${BUILDDIR}/virtualenv}
+DEPSDIR=${BUILDDIR}/deps
+
+SALT_FILE_DIR=${SALT_FILE_DIR:-${BUILDDIR}/file_root}
+SALT_PILLAR_DIR=${SALT_PILLAR_DIR:-${BUILDDIR}/pillar_root}
+SALT_CONFIG_DIR=${SALT_CONFIG_DIR:-${BUILDDIR}/salt}
+SALT_CACHE_DIR=${SALT_CACHE_DIR:-${SALT_CONFIG_DIR}/cache}
+
+SALT_OPTS="${SALT_OPTS} --retcode-passthrough --local -c ${SALT_CONFIG_DIR} --log-file=/dev/null"
+
+if [ "x${SALT_VERSION}" != "x" ]; then
+    PIP_SALT_VERSION="==${SALT_VERSION}"
+fi
+
+## Functions
+log_info() {
+    echo "[INFO] $*"
+}
+
+log_err() {
+    echo "[ERROR] $*" >&2
+}
+
+setup_virtualenv() {
+    log_info "Setting up Python virtualenv"
+    virtualenv $VENV_DIR
+    source ${VENV_DIR}/bin/activate
+    pip install salt${PIP_SALT_VERSION}
+}
+
+setup_pillar() {
+    [ ! -d ${SALT_PILLAR_DIR} ] && mkdir -p ${SALT_PILLAR_DIR}
+    echo "base:" > ${SALT_PILLAR_DIR}/top.sls
+    for pillar in ${PILLARDIR}/*; do
+        grep ${FORMULA_NAME}: ${pillar} &>/dev/null || continue
+        state_name=$(basename ${pillar%.sls})
+        echo -e "  ${state_name}:\n    - ${state_name}" >> ${SALT_PILLAR_DIR}/top.sls
+    done
+}
+
+setup_salt() {
+    [ ! -d ${SALT_FILE_DIR} ] && mkdir -p ${SALT_FILE_DIR}
+    [ ! -d ${SALT_CONFIG_DIR} ] && mkdir -p ${SALT_CONFIG_DIR}
+    [ ! -d ${SALT_CACHE_DIR} ] && mkdir -p ${SALT_CACHE_DIR}
+
+    echo "base:" > ${SALT_FILE_DIR}/top.sls
+    for pillar in ${PILLARDIR}/*.sls; do
+        grep ${FORMULA_NAME}: ${pillar} &>/dev/null || continue
+        state_name=$(basename ${pillar%.sls})
+        echo -e "  ${state_name}:\n    - ${FORMULA_NAME}" >> ${SALT_FILE_DIR}/top.sls
+    done
+
+    cat << EOF > ${SALT_CONFIG_DIR}/minion
+file_client: local
+cachedir: ${SALT_CACHE_DIR}
+verify_env: False
+minion_id_caching: False
+
+file_roots:
+  base:
+  - ${SALT_FILE_DIR}
+  - ${CURDIR}/..
+  - /usr/share/salt-formulas/env
+
+pillar_roots:
+  base:
+  - ${SALT_PILLAR_DIR}
+  - ${PILLARDIR}
+EOF
+}
+
+fetch_dependency() {
+    dep_name="$(echo $1|cut -d : -f 1)"
+    dep_source="$(echo $1|cut -d : -f 2-)"
+    dep_root="${DEPSDIR}/$(basename $dep_source .git)"
+    dep_metadata="${dep_root}/metadata.yml"
+
+    [ -d /usr/share/salt-formulas/env/${dep_name} ] && log_info "Dependency $dep_name already present in system-wide salt env" && return 0
+    [ -d $dep_root ] && log_info "Dependency $dep_name already fetched" && return 0
+
+    log_info "Fetching dependency $dep_name"
+    [ ! -d ${DEPSDIR} ] && mkdir -p ${DEPSDIR}
+    git clone $dep_source ${DEPSDIR}/$(basename $dep_source .git)
+    ln -s ${dep_root}/${dep_name} ${SALT_FILE_DIR}/${dep_name}
+
+    METADATA="${dep_metadata}" install_dependencies
+}
+
+install_dependencies() {
+    grep -E "^dependencies:" ${METADATA} >/dev/null || return 0
+    (python - | while read dep; do fetch_dependency "$dep"; done) << EOF
+import sys,yaml
+for dep in yaml.safe_load(open('${METADATA}', 'r'))['dependencies']:
+    print '%s:%s' % (dep["name"], dep["source"])
+EOF
+}
+
+clean() {
+    log_info "Cleaning up ${BUILDDIR}"
+    [ -d ${BUILDDIR} ] && rm -rf ${BUILDDIR} || exit 0
+}
+
+salt_run() {
+    [ -e ${VENV_DIR}/bin/activate ] && source ${VENV_DIR}/bin/activate
+    salt-call ${SALT_OPTS} $*
+}
+
+prepare() {
+    [ ! -d ${BUILDDIR} ] && mkdir -p ${BUILDDIR}
+
+    which salt-call || setup_virtualenv
+    setup_pillar
+    setup_salt
+    install_dependencies
+}
+
+run() {
+    for pillar in ${PILLARDIR}/*.sls; do
+        grep ${FORMULA_NAME}: ${pillar} &>/dev/null || continue
+        state_name=$(basename ${pillar%.sls})
+        salt_run --id=${state_name} state.show_sls ${FORMULA_NAME} || (log_err "Execution of ${FORMULA_NAME}.${state_name} failed"; exit 1)
+    done
+}
+
+_atexit() {
+    RETVAL=$?
+    trap true INT TERM EXIT
+
+    if [ $RETVAL -ne 0 ]; then
+        log_err "Execution failed"
+    else
+        log_info "Execution successful"
+    fi
+    return $RETVAL
+}
+
+## Main
+trap _atexit INT TERM EXIT
+
+case $1 in
+    clean)
+        clean
+        ;;
+    prepare)
+        prepare
+        ;;
+    run)
+        run
+        ;;
+    *)
+        prepare
+        run
+        ;;
+esac