Initial commit
Change-Id: I3db2409bab10e8005a8c99c3899d19ced495469e
Co-Authored-By: Michael Polenchuk <mpolenchuk@mirantis.com>
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..aa8e42a
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,5 @@
+.kitchen
+tests/build/
+*.swp
+*.pyc
+.ropeproject
diff --git a/.gitreview b/.gitreview
new file mode 100644
index 0000000..0675d17
--- /dev/null
+++ b/.gitreview
@@ -0,0 +1,4 @@
+[gerrit]
+host=gerrit.mcp.mirantis.net
+port=29418
+project=salt-formulas/panko.git
diff --git a/.kitchen.docker.yml b/.kitchen.docker.yml
new file mode 100644
index 0000000..348675b
--- /dev/null
+++ b/.kitchen.docker.yml
@@ -0,0 +1,43 @@
+---
+driver:
+ name: docker
+ hostname: panko.ci.local
+ use_sudo: false
+
+provisioner:
+ name: salt_solo
+ salt_install: bootstrap
+ salt_bootstrap_url: https://bootstrap.saltstack.com
+ salt_version: latest
+ require_chef: false
+ log_level: error
+ formula: panko
+ grains:
+ noservices: True
+ state_top:
+ base:
+ "*":
+ - panko
+ pillars:
+ top.sls:
+ base:
+ "*":
+ - panko
+
+verifier:
+ name: inspec
+ sudo: true
+
+
+platforms:
+ - name: <%=ENV['PLATFORM'] || 'ubuntu-xenial'%>
+ driver_config:
+ image: <%=ENV['PLATFORM'] || 'trevorj/salty-whales:xenial'%>
+ platform: ubuntu
+
+suites:
+ - name: default
+ # provisioner:
+ # pillars-from-files:
+ # panko.sls: tests/pillar/default.sls
+# vim: ft=yaml sw=2 ts=2 sts=2 tw=125
diff --git a/.kitchen.openstack.yml b/.kitchen.openstack.yml
new file mode 100644
index 0000000..6dbf7b2
--- /dev/null
+++ b/.kitchen.openstack.yml
@@ -0,0 +1,41 @@
+
+# usage: `KITCHEN_LOCAL_YAML=.kitchen.openstack.yml kitchen test`
+
+# https://docs.chef.io/config_yml_kitchen.html
+# https://github.com/test-kitchen/kitchen-openstack
+
+---
+driver:
+ name: openstack
+ openstack_auth_url: <%= ENV['OS_AUTH_URL'] %>/tokens
+ openstack_username: <%= ENV['OS_USERNAME'] || 'ci' %>
+ openstack_api_key: <%= ENV['OS_PASSWORD'] || 'ci' %>
+ openstack_tenant: <%= ENV['OS_TENANT_NAME'] || 'ci_jenkins' %>
+
+ #floating_ip_pool: <%= ENV['OS_FLOATING_IP_POOL'] || 'nova' %>
+ key_name: <%= ENV['BOOTSTRAP_SSH_KEY_NAME'] || 'bootstrap_insecure' %>
+ private_key_path: <%= ENV['BOOTSTRAP_SSH_KEY_PATH'] || "#{ENV['HOME']}/.ssh/id_rsa_bootstrap_insecure" %>
+
+
+platforms:
+ - name: ubuntu-14.04
+ driver:
+ username: <%= ENV['OS_UBUNTU_IMAGE_USER'] || 'root' %>
+ image_ref: <%= ENV['OS_UBUNTU_IMAGE_REF'] || 'ubuntu-14-04-x64-1455869035' %>
+ flavor_ref: m1.medium
+ network_ref:
+ <% if ENV['OS_NETWORK_REF'] -%>
+ - <%= ENV['OS_NETWORK_REF'] %>
+ <% else -%>
+ - ci-net
+ <% end -%>
+ # force update apt cache on the image
+ run_list:
+ - recipe[apt]
+ attributes:
+ apt:
+ compile_time_update: true
+transport:
+ username: <%= ENV['OS_UBUNTU_IMAGE_USER'] || 'root' %>
+
+# vim: ft=yaml sw=2 ts=2 sts=2 tw=125
diff --git a/.kitchen.vagrant.yml b/.kitchen.vagrant.yml
new file mode 100644
index 0000000..bf2dec2
--- /dev/null
+++ b/.kitchen.vagrant.yml
@@ -0,0 +1,30 @@
+---
+driver:
+ name: vagrant
+ vm_hostname: panko.ci.local
+ use_sudo: false
+ customize:
+ memory: 512
+
+
+provisioner:
+ name: salt_solo
+ salt_install: bootstrap
+ salt_bootstrap_url: https://bootstrap.saltstack.com
+ salt_version: latest
+ require_chef: false
+ formula: panko
+ log_level: info
+ state_top:
+ base:
+ "*":
+ - panko
+ pillars:
+ top.sls:
+ base:
+ "*":
+ - panko
+ grains:
+ noservices: True
+
+# vim: ft=yaml sw=2 ts=2 sts=2 tw=125
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..7a77247
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,39 @@
+sudo: required
+services:
+ - docker
+
+install:
+ - pip install PyYAML
+ - pip install virtualenv
+ - |
+ test -e Gemfile || cat <<EOF > Gemfile
+ source 'https://rubygems.org'
+ gem 'rake'
+ gem 'test-kitchen'
+ gem 'kitchen-docker'
+ gem 'kitchen-inspec'
+ gem 'inspec'
+ gem 'kitchen-salt', :git => 'https://github.com/salt-formulas/kitchen-salt.git'
+ EOF
+ - bundle install
+
+env:
+ - PLATFORM=trevorj/salty-whales:trusty
+ - PLATFORM=trevorj/salty-whales:xenial
+
+before_script:
+ - set -o pipefail
+ - make test | tail
+
+script:
+ - test ! -e .kitchen.yml || bundle exec kitchen test -t tests/integration
+
+notifications:
+ webhooks:
+ urls:
+ - https://webhooks.gitter.im/e/6123573504759330786b
+ on_success: change # options: [always|never|change] default: always
+ on_failure: never # options: [always|never|change] default: always
+ on_start: never # options: [always|never|change] default: always
+ on_cancel: never # options: [always|never|change] default: always
+ on_error: never # options: [always|never|change] default: always
+ email: false
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
new file mode 100644
index 0000000..e060c65
--- /dev/null
+++ b/CHANGELOG.rst
@@ -0,0 +1,6 @@
+panko formula
+=====================================
+
+2017.12 (2017-12-07)
+
+- Initial formula setup
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..7862bbb
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,13 @@
+Copyright (c) 2017 Mirantis Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
\ No newline at end of file
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..1043fbe
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,118 @@
+DESTDIR=/
+SALTENVDIR=/usr/share/salt-formulas/env
+RECLASSDIR=/usr/share/salt-formulas/reclass
+FORMULANAME=$(shell grep name: metadata.yml|head -1|cut -d : -f 2|grep -Eo '[a-z0-9\-\_]*')
+VERSION=$(shell grep version: metadata.yml|head -1|cut -d : -f 2|grep -Eo '[a-z0-9\.\-\_]*')
+VERSION_MAJOR := $(shell echo $(VERSION)|cut -d . -f 1-2)
+VERSION_MINOR := $(shell echo $(VERSION)|cut -d . -f 3)
+
+NEW_MAJOR_VERSION ?= $(shell date +%Y.%m|sed 's,\.0,\.,g')
+NEW_MINOR_VERSION ?= $(shell /bin/bash -c 'echo $$[ $(VERSION_MINOR) + 1 ]')
+
+MAKE_PID := $(shell echo $$PPID)
+JOB_FLAG := $(filter -j%, $(subst -j ,-j,$(shell ps T | grep "^\s*$(MAKE_PID).*$(MAKE)")))
+
+ifneq ($(subst -j,,$(JOB_FLAG)),)
+JOBS := $(subst -j,,$(JOB_FLAG))
+else
+JOBS := 1
+endif
+
+KITCHEN_LOCAL_YAML?=.kitchen.yml
+KITCHEN_OPTS?="--concurrency=$(JOBS)"
+KITCHEN_OPTS_CREATE?=""
+KITCHEN_OPTS_CONVERGE?=""
+KITCHEN_OPTS_VERIFY?=""
+KITCHEN_OPTS_TEST?=""
+
+all:
+ @echo "make install - Install into DESTDIR"
+ @echo "make test - Run tests"
+ @echo "make kitchen - Run Kitchen CI tests (create, converge, verify)"
+ @echo "make clean - Cleanup after tests run"
+ @echo "make release-major - Generate new major release"
+ @echo "make release-minor - Generate new minor release"
+ @echo "make changelog - Show changes since last release"
+
+install:
+ # Formula
+ [ -d $(DESTDIR)/$(SALTENVDIR) ] || mkdir -p $(DESTDIR)/$(SALTENVDIR)
+ cp -a $(FORMULANAME) $(DESTDIR)/$(SALTENVDIR)/
+ [ ! -d _modules ] || cp -a _modules $(DESTDIR)/$(SALTENVDIR)/
+ [ ! -d _states ] || cp -a _states $(DESTDIR)/$(SALTENVDIR)/ || true
+ [ ! -d _grains ] || cp -a _grains $(DESTDIR)/$(SALTENVDIR)/ || true
+ # Metadata
+ [ -d $(DESTDIR)/$(RECLASSDIR)/service/$(FORMULANAME) ] || mkdir -p $(DESTDIR)/$(RECLASSDIR)/service/$(FORMULANAME)
+ cp -a metadata/service/* $(DESTDIR)/$(RECLASSDIR)/service/$(FORMULANAME)
+
+test:
+ [ ! -d tests ] || (cd tests; ./run_tests.sh)
+
+release-major: check-changes
+ @echo "Current version is $(VERSION), new version is $(NEW_MAJOR_VERSION)"
+ @[ $(VERSION_MAJOR) != $(NEW_MAJOR_VERSION) ] || (echo "Major version $(NEW_MAJOR_VERSION) already released, nothing to do. Do you want release-minor?" && exit 1)
+ echo "$(NEW_MAJOR_VERSION)" > VERSION
+ sed -i 's,version: .*,version: "$(NEW_MAJOR_VERSION)",g' metadata.yml
+ [ ! -f debian/changelog ] || dch -v $(NEW_MAJOR_VERSION) -m --force-distribution -D `dpkg-parsechangelog -S Distribution` "New version"
+ make genchangelog-$(NEW_MAJOR_VERSION)
+ (git add -u; git commit -m "Version $(NEW_MAJOR_VERSION)")
+ git tag -s -m $(NEW_MAJOR_VERSION) $(NEW_MAJOR_VERSION)
+
+release-minor: check-changes
+ @echo "Current version is $(VERSION), new version is $(VERSION_MAJOR).$(NEW_MINOR_VERSION)"
+ echo "$(VERSION_MAJOR).$(NEW_MINOR_VERSION)" > VERSION
+ sed -i 's,version: .*,version: "$(VERSION_MAJOR).$(NEW_MINOR_VERSION)",g' metadata.yml
+ [ ! -f debian/changelog ] || dch -v $(VERSION_MAJOR).$(NEW_MINOR_VERSION) -m --force-distribution -D `dpkg-parsechangelog -S Distribution` "New version"
+ make genchangelog-$(VERSION_MAJOR).$(NEW_MINOR_VERSION)
+ (git add -u; git commit -m "Version $(VERSION_MAJOR).$(NEW_MINOR_VERSION)")
+ git tag -s -m $(VERSION_MAJOR).$(NEW_MINOR_VERSION) $(VERSION_MAJOR).$(NEW_MINOR_VERSION)
+
+check-changes:
+ @git log --pretty=oneline --decorate $(VERSION)..HEAD | grep -Eqc '.*' || (echo "No new changes since version $(VERSION)"; exit 1)
+
+changelog:
+ git log --pretty=short --invert-grep --grep="Merge pull request" --decorate $(VERSION)..HEAD
+
+genchangelog: genchangelog-$(VERSION_MAJOR).$(NEW_MINOR_VERSION)
+
+genchangelog-%:
+ $(eval NEW_VERSION := $(patsubst genchangelog-%,%,$@))
+ (echo "=========\nChangelog\n=========\n"; \
+ (echo $(NEW_VERSION);git tag) | sort -r | grep -E '^[0-9\.]+' | while read i; do \
+ cur=$$i; \
+ test $$i = $(NEW_VERSION) && i=HEAD; \
+ prev=`(echo $(NEW_VERSION);git tag)|sort|grep -E '^[0-9\.]+'|grep -B1 "$$cur\$$"|head -1`; \
+ echo "Version $$cur\n=============================\n"; \
+ git log --pretty=short --invert-grep --grep="Merge pull request" --decorate $$prev..$$i; \
+ echo; \
+ done) > CHANGELOG.rst
+
+kitchen-check:
+ @[ -e $(KITCHEN_LOCAL_YAML) ] || (echo "Kitchen tests not available, there's no $(KITCHEN_LOCAL_YAML)." && exit 1)
+
+kitchen: kitchen-check kitchen-create kitchen-converge kitchen-verify kitchen-list
+
+kitchen-create: kitchen-check
+ kitchen create ${KITCHEN_OPTS} ${KITCHEN_OPTS_CREATE}
+ [ "$(shell echo $(KITCHEN_LOCAL_YAML)|grep -Eo docker)" = "docker" ] || sleep 120
+
+kitchen-converge: kitchen-check
+ kitchen converge ${KITCHEN_OPTS} ${KITCHEN_OPTS_CONVERGE} &&\
+ kitchen converge ${KITCHEN_OPTS} ${KITCHEN_OPTS_CONVERGE}
+
+kitchen-verify: kitchen-check
+ [ ! -d tests/integration ] || kitchen verify -t tests/integration ${KITCHEN_OPTS} ${KITCHEN_OPTS_VERIFY}
+ [ -d tests/integration ] || kitchen verify ${KITCHEN_OPTS} ${KITCHEN_OPTS_VERIFY}
+
+kitchen-test: kitchen-check
+ [ ! -d tests/integration ] || kitchen test -t tests/integration ${KITCHEN_OPTS} ${KITCHEN_OPTS_TEST}
+ [ -d tests/integration ] || kitchen test ${KITCHEN_OPTS} ${KITCHEN_OPTS_TEST}
+
+kitchen-list: kitchen-check
+ kitchen list
+
+clean:
+ [ ! -x "$(shell which kitchen)" ] || kitchen destroy
+ [ ! -d .kitchen ] || rm -rf .kitchen
+ [ ! -d tests/build ] || rm -rf tests/build
+ [ ! -d build ] || rm -rf build
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..bada6d1
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,90 @@
+=============
+Panko Formula
+=============
+
+Panko is a component of the OpenStack Telemetry project. It provides a
+metadata indexing and event storage service that enables users to capture
+the state of OpenStack resources at a given point in time. Its aim is to
+offer a scalable means of storing both short-term and long-term data for
+use cases such as auditing and system debugging.
+
+Sample pillars
+==============
+
+Single panko service
+
+.. code-block:: yaml
+
+ panko:
+ server:
+ enabled: true
+ version: pike
+ database:
+ engine: mysql
+ host: 10.20.0.101
+ port: 3306
+ name: panko
+ user: panko
+ password: segreto
+ bind:
+ host: 10.20.0.102
+ port: 8977
+ identity:
+ engine: keystone
+ host: 10.20.0.101
+ port: 35357
+ user: panko
+ password: segreto
+ region: RegionOne
+ tenant: service
+ endpoint_type: internalURL
+ cache:
+ engine: memcached
+ members:
+ - host: 10.20.0.102
+ port: 11211
+ - host: 10.20.0.103
+ port: 11211
+ - host: 10.20.0.104
+ port: 11211
+
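+The sample pillar can be exercised locally before it is rolled out, for
+example with a masterless ``salt-call`` run. The snippet below is a minimal
+sketch: it assumes this formula and a matching pillar are already available
+in the local Salt file and pillar roots (see ``tests/run_tests.sh`` for the
+complete test harness used by this repository).
+
+.. code-block:: bash
+
+ # masterless run against the locally configured file and pillar roots
+ salt-call --local state.apply panko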
+
+More information
+================
+
+* https://wiki.openstack.org/wiki/Telemetry
+* https://docs.openstack.org/developer/panko/
+* https://github.com/openstack/panko
+* https://bugs.launchpad.net/panko
+
+
+Documentation and Bugs
+======================
+
+To learn how to install and update salt-formulas, consult the documentation
+available online at:
+
+ http://salt-formulas.readthedocs.io/
+
+In the unfortunate event that bugs are discovered, they should be reported to
+the appropriate issue tracker. Use the GitHub issue tracker for this
+specific salt formula:
+
+ https://github.com/salt-formulas/salt-formula-panko/issues
+
+For feature requests, bug reports, or blueprints affecting the entire
+ecosystem, use the Launchpad salt-formulas project:
+
+ https://launchpad.net/salt-formulas
+
+Developers wishing to work on the salt-formulas projects should always base
+their work on the master branch and submit pull requests against the
+specific formula.
+
+You should also subscribe to the mailing list (salt-formulas@freelists.org):
+
+ https://www.freelists.org/list/salt-formulas
+
+Questions and feedback are always welcome, so feel free to join our IRC
+channel:
+
+ #salt-formulas @ irc.freenode.net
diff --git a/VERSION b/VERSION
new file mode 100644
index 0000000..565d2be
--- /dev/null
+++ b/VERSION
@@ -0,0 +1 @@
+2017.6
diff --git a/debian/changelog b/debian/changelog
new file mode 100644
index 0000000..fa6a3a5
--- /dev/null
+++ b/debian/changelog
@@ -0,0 +1,5 @@
+salt-formula-panko (2017.6) xenial; urgency=medium
+
+ * Initial release
+
+ -- Petr Jediný <petr.jediny@gmail.com>  Mon, 12 Jun 2017 09:59:23 +0100
diff --git a/debian/compat b/debian/compat
new file mode 100644
index 0000000..ec63514
--- /dev/null
+++ b/debian/compat
@@ -0,0 +1 @@
+9
diff --git a/debian/control b/debian/control
new file mode 100644
index 0000000..31fcc99
--- /dev/null
+++ b/debian/control
@@ -0,0 +1,15 @@
+Source: salt-formula-panko
+Maintainer: Petr Jediný <petr.jediny@gmail.com>
+Section: admin
+Priority: optional
+Build-Depends: debhelper (>= 9), salt-master, python, python-yaml
+Standards-Version: 3.9.6
+Homepage: https://github.com/salt-formulas
+Vcs-Browser: https://github.com/salt-formulas/salt-formula-panko
+Vcs-Git: https://github.com/salt-formulas/salt-formula-panko.git
+
+Package: salt-formula-panko
+Architecture: all
+Depends: ${misc:Depends}, salt-master, reclass
+Description: panko salt formula
+ Install and configure panko system.
diff --git a/debian/copyright b/debian/copyright
new file mode 100644
index 0000000..e0a07eb
--- /dev/null
+++ b/debian/copyright
@@ -0,0 +1,15 @@
+Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: salt-formula-panko
+Upstream-Contact: salt-formulas@freelists.org
+Source: https://github.com/salt-formulas/salt-formula-panko
+
+Files: *
+Copyright: 2017 Mirantis Inc.
+License: Apache-2.0
+ Copyright (C) 2017 Mirantis Inc.
+ .
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ .
+ On a Debian system you can find a copy of this license in
+ /usr/share/common-licenses/Apache-2.0.
diff --git a/debian/docs b/debian/docs
new file mode 100644
index 0000000..d585829
--- /dev/null
+++ b/debian/docs
@@ -0,0 +1,3 @@
+README.rst
+CHANGELOG.rst
+VERSION
diff --git a/debian/rules b/debian/rules
new file mode 100755
index 0000000..abde6ef
--- /dev/null
+++ b/debian/rules
@@ -0,0 +1,5 @@
+#!/usr/bin/make -f
+
+%:
+ dh $@
+
diff --git a/debian/source/format b/debian/source/format
new file mode 100644
index 0000000..89ae9db
--- /dev/null
+++ b/debian/source/format
@@ -0,0 +1 @@
+3.0 (native)
diff --git a/doc/source/conf.py b/doc/source/conf.py
new file mode 100644
index 0000000..c65c092
--- /dev/null
+++ b/doc/source/conf.py
@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+
+sys.path.insert(0, os.path.abspath('../..'))
+# -- General configuration ----------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = [
+ 'sphinx.ext.autodoc',
+]
+
+# autodoc generation is a bit aggressive and a nuisance when doing heavy
+# text edit cycles.
+# execute "export SPHINX_DEBUG=1" in your terminal to disable
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'salt-formula-panko'
+copyright = u'2018, Mirantis Inc.'
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+add_module_names = True
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# -- Options for HTML output --------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. Major themes that come with
+# Sphinx are currently 'default' and 'sphinxdoc'.
+# html_theme_path = ["."]
+# html_theme = '_theme'
+# html_static_path = ['static']
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = '%sdoc' % project
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass
+# [howto/manual]).
+latex_documents = [
+ ('index',
+ '%s.tex' % project,
+ u'%s Documentation' % project,
+ u'OpenStack Foundation', 'manual'),
+]
+
+# Example configuration for intersphinx: refer to the Python standard library.
+# intersphinx_mapping = {'http://docs.python.org/': None}
diff --git a/doc/source/index.rst b/doc/source/index.rst
new file mode 100644
index 0000000..a6210d3
--- /dev/null
+++ b/doc/source/index.rst
@@ -0,0 +1 @@
+.. include:: ../../README.rst
diff --git a/metadata.yml b/metadata.yml
new file mode 100644
index 0000000..4b7a302
--- /dev/null
+++ b/metadata.yml
@@ -0,0 +1,3 @@
+name: "panko"
+version: "2017.6"
+source: "https://github.com/salt-formulas/salt-formula-panko"
diff --git a/metadata/service/server/cluster.yml b/metadata/service/server/cluster.yml
new file mode 100644
index 0000000..329a5cf
--- /dev/null
+++ b/metadata/service/server/cluster.yml
@@ -0,0 +1,39 @@
+applications:
+- panko
+classes:
+- service.panko.support
+parameters:
+ _param:
+ keystone_panko_endpoint_type: internalURL
+ panko:
+ server:
+ enabled: true
+ version: ${_param:panko_version}
+ database:
+ engine: mysql
+ host: ${_param:cluster_vip_address}
+ port: 3306
+ name: panko
+ user: panko
+ password: ${_param:mysql_panko_password}
+ bind:
+ host: ${_param:cluster_local_address}
+ port: 8977
+ identity:
+ engine: keystone
+ host: ${_param:cluster_vip_address}
+ port: 35357
+ user: panko
+ password: ${_param:keystone_panko_password}
+ region: RegionOne
+ tenant: service
+ endpoint_type: ${_param:keystone_panko_endpoint_type}
+ cache:
+ engine: memcached
+ members:
+ - host: ${_param:cluster_node01_address}
+ port: 11211
+ - host: ${_param:cluster_node02_address}
+ port: 11211
+ - host: ${_param:cluster_node03_address}
+ port: 11211
diff --git a/metadata/service/server/single.yml b/metadata/service/server/single.yml
new file mode 100644
index 0000000..988bd3e
--- /dev/null
+++ b/metadata/service/server/single.yml
@@ -0,0 +1,35 @@
+applications:
+- panko
+classes:
+- service.panko.support
+parameters:
+ _param:
+ keystone_panko_endpoint_type: internalURL
+ panko:
+ server:
+ enabled: true
+ version: ${_param:panko_version}
+ database:
+ engine: mysql
+ host: ${_param:single_address}
+ port: 3306
+ name: panko
+ user: panko
+ password: ${_param:mysql_panko_password}
+ bind:
+ host: ${_param:single_address}
+ port: 8977
+ identity:
+ engine: keystone
+ host: ${_param:single_address}
+ port: 35357
+ user: panko
+ password: ${_param:keystone_panko_password}
+ region: RegionOne
+ tenant: service
+ endpoint_type: ${_param:keystone_panko_endpoint_type}
+ cache:
+ engine: memcached
+ members:
+ - host: ${_param:single_address}
+ port: 11211
diff --git a/metadata/service/support.yml b/metadata/service/support.yml
new file mode 100644
index 0000000..c9bb4c8
--- /dev/null
+++ b/metadata/service/support.yml
@@ -0,0 +1,11 @@
+parameters:
+ panko:
+ _support:
+ collectd:
+ enabled: false
+ heka:
+ enabled: false
+ sensu:
+ enabled: false
+ sphinx:
+ enabled: true
diff --git a/panko/files/pike/panko-api.apache2.conf.Debian b/panko/files/pike/panko-api.apache2.conf.Debian
new file mode 100644
index 0000000..b5b7f31
--- /dev/null
+++ b/panko/files/pike/panko-api.apache2.conf.Debian
@@ -0,0 +1,14 @@
+{%- from "panko/map.jinja" import server with context %}
+Listen {{ server.bind.host }}:{{ server.bind.port }}
+
+<VirtualHost *:{{ server.bind.port }}>
+ WSGIDaemonProcess panko-api processes={{ (grains.num_cpus/2+1)|int }} threads=1 user=panko display-name=%{GROUP}
+ WSGIProcessGroup panko-api
+ WSGIScriptAlias / /usr/bin/panko-api
+ WSGIApplicationGroup %{GLOBAL}
+ <IfVersion >= 2.4>
+ ErrorLogFormat "%{cu}t %M"
+ </IfVersion>
+ ErrorLog /var/log/apache2/panko_error.log
+ CustomLog /var/log/apache2/panko_access.log combined
+</VirtualHost>
diff --git a/panko/files/pike/panko.conf.Debian b/panko/files/pike/panko.conf.Debian
new file mode 100644
index 0000000..0c4a4b7
--- /dev/null
+++ b/panko/files/pike/panko.conf.Debian
@@ -0,0 +1,558 @@
+{%- from "panko/map.jinja" import server with context -%}
+[DEFAULT]
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of the default
+# INFO level. (boolean value)
+# Note: This option can be changed without restarting.
+#debug = false
+debug = {{ server.get('debug', False) }}
+
+# The name of a logging configuration file. This file is appended to any
+# existing logging configuration files. For details about logging configuration
+# files, see the Python logging module documentation. Note that when logging
+# configuration files are used then all logging configuration is set in the
+# configuration file and other logging configuration options are ignored (for
+# example, logging_context_format_string). (string value)
+# Note: This option can be changed without restarting.
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set. (string
+# value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default is set,
+# logging will go to stderr as defined by use_stderr. This option is ignored if
+# log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = <None>
+
+# (Optional) The base directory used for relative log_file paths. This option
+# is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Uses logging handler designed to watch file system. When log file is moved or
+# removed this handler will open a new log file with specified path
+# instantaneously. It makes sense only if log_file option is specified and
+# Linux platform is used. This option is ignored if log_config_append is set.
+# (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and will be
+# changed later to honor RFC5424. This option is ignored if log_config_append
+# is set. (boolean value)
+#use_syslog = false
+
+# Enable journald for logging. If running in a systemd environment you may wish
+# to enable journal support. Doing so will use the journal native protocol
+# which includes structured metadata in addition to log messages.This option is
+# ignored if log_config_append is set. (boolean value)
+#use_journal = false
+
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. This option is ignored if log_config_append is
+# set. (boolean value)
+#use_stderr = false
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined. (string
+# value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the message
+# is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is ignored
+# if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message. (string
+# value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message. (string
+# value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Interval, number of seconds, of log rate limiting. (integer value)
+#rate_limit_interval = 0
+
+# Maximum number of logged messages per rate_limit_interval. (integer value)
+#rate_limit_burst = 0
+
+# Log level name used by rate limiting: CRITICAL, ERROR, INFO, WARNING, DEBUG
+# or empty string. Logs with level greater or equal to rate_limit_except_level
+# are not filtered. An empty string means that all levels are filtered. (string
+# value)
+#rate_limit_except_level = CRITICAL
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+#
+# From panko
+#
+
+# Configuration file for WSGI definition of API. (string value)
+#api_paste_config = api_paste.ini
+
+
+[api]
+
+#
+# From panko
+#
+
+# Default maximum number of items returned by API request. (integer value)
+# Minimum value: 1
+#default_api_return_limit = 100
+
+
+[cors]
+
+#
+# From oslo.middleware.cors
+#
+
+# Indicate whether this resource may be shared with the domain received in the
+# requests "origin" header. Format: "<protocol>://<host>[:<port>]", no trailing
+# slash. Example: https://horizon.example.com (list value)
+#allowed_origin = <None>
+
+# Indicate that the actual request can include user credentials (boolean value)
+#allow_credentials = true
+
+# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
+# Headers. (list value)
+#expose_headers = X-Auth-Token,X-Subject-Token,X-Service-Token,X-Openstack-Request-Id
+
+# Maximum cache age of CORS preflight requests. (integer value)
+#max_age = 3600
+
+# Indicate which methods can be used during the actual request. (list value)
+#allow_methods = GET,PUT,POST,DELETE,PATCH
+
+# Indicate which header field names may be used during the actual request.
+# (list value)
+#allow_headers = X-Auth-Token,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id,X-Openstack-Request-Id
+
+
+[database]
+
+#
+# From oslo.db
+#
+
+# If True, SQLite uses synchronous mode. (boolean value)
+#sqlite_synchronous = true
+
+# The back end to use for the database. (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend = sqlalchemy
+
+# The SQLAlchemy connection string to use to connect to the database. (string
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+connection = {{ server.database.engine }}+pymysql://{{ server.database.user }}:{{ server.database.password }}@{{ server.database.host }}/{{ server.database.name }}{%- if server.database.get('ssl',{}).get('enabled', False) %}?ssl_ca={{ server.database.ssl.get('cacert_file', server.cacert_file) }}{% endif %}
+
+# The SQLAlchemy connection string to use to connect to the slave database.
+# (string value)
+#slave_connection = <None>
+
+# The SQL mode to be used for MySQL sessions. This option, including the
+# default, overrides any server-set SQL mode. To use whatever SQL mode is set
+# by the server configuration, set this to no value. Example: mysql_sql_mode=
+# (string value)
+#mysql_sql_mode = TRADITIONAL
+
+# If True, transparently enables support for handling MySQL Cluster (NDB).
+# (boolean value)
+#mysql_enable_ndb = false
+
+# Timeout before idle SQL connections are reaped. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout = 3600
+
+# Minimum number of SQL connections to keep open in a pool. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool. Setting a value of
+# 0 indicates no limit. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size = 5
+
+# Maximum number of database connection retries during startup. Set to -1 to
+# specify an infinite retry count. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries = 10
+max_retries = {{ server.database.get('max_retries', -1) }}
+
+# Interval between retries of opening a SQL connection. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval = 10
+
+# If set, use this value for max_overflow with SQLAlchemy. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow = 50
+max_overflow = {{ server.database.get('max_overflow', 30) }}
+
+# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer
+# value)
+# Minimum value: 0
+# Maximum value: 100
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug = 0
+
+# Add Python stack traces to SQL as comment strings. (boolean value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace = false
+
+# If set, use this value for pool_timeout with SQLAlchemy. (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout = <None>
+
+# Enable the experimental use of database reconnect on connection lost.
+# (boolean value)
+#use_db_reconnect = false
+
+# Seconds between retries of a database transaction. (integer value)
+#db_retry_interval = 1
+
+# If True, increases the interval between retries of a database operation up to
+# db_max_retry_interval. (boolean value)
+#db_inc_retry_interval = true
+
+# If db_inc_retry_interval is set, the maximum seconds between retries of a
+# database operation. (integer value)
+#db_max_retry_interval = 10
+
+# Maximum retries in case of connection error or deadlock error before error is
+# raised. Set to -1 to specify an infinite retry count. (integer value)
+#db_max_retries = 20
+
+#
+# From panko
+#
+
+# Number of seconds that events are kept in the database for (<= 0 means
+# forever). (integer value)
+#event_time_to_live = -1
+
+# DEPRECATED: The connection string used to connect to the event database -
+# rather use ${database.connection} (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#event_connection = <None>
+
+
+[keystone_authtoken]
+
+#
+# From keystonemiddleware.auth_token
+#
+
+# Complete "public" Identity API endpoint. This endpoint should not be an
+# "admin" endpoint, as it should be accessible by all end users.
+# Unauthenticated clients are redirected to this endpoint to authenticate.
+# Although this endpoint should ideally be unversioned, client support in the
+# wild varies. If you're using a versioned v2 endpoint here, then this should
+# *not* be the same endpoint the service user utilizes for validating tokens,
+# because normal end users may not be able to reach that endpoint. (string
+# value)
+#auth_uri = <None>
+auth_uri = {{ server.identity.get('protocol', 'http') }}://{{ server.identity.host }}:5000
+
+# API version of the admin Identity API endpoint. (string value)
+#auth_version = <None>
+
+# Do not handle authorization requests within the middleware, but delegate the
+# authorization decision to downstream WSGI components. (boolean value)
+#delay_auth_decision = false
+
+# Request timeout value for communicating with Identity API server. (integer
+# value)
+#http_connect_timeout = <None>
+
+# How many times are we trying to reconnect when communicating with Identity
+# API Server. (integer value)
+#http_request_max_retries = 3
+
+# Request environment key where the Swift cache object is stored. When
+# auth_token middleware is deployed with a Swift cache, use this option to have
+# the middleware share a caching backend with swift. Otherwise, use the
+# ``memcached_servers`` option instead. (string value)
+#cache = <None>
+
+# Required if identity server requires client certificate (string value)
+#certfile = <None>
+
+# Required if identity server requires client certificate (string value)
+#keyfile = <None>
+
+# A PEM encoded Certificate Authority to use when verifying HTTPs connections.
+# Defaults to system CAs. (string value)
+#cafile = <None>
+{%- if server.identity.get('protocol', 'http') == 'https' %}
+cafile={{ server.identity.get('cacert_file', server.cacert_file) }}
+{%- endif %}
+
+# Verify HTTPS connections. (boolean value)
+#insecure = false
+
+# The region in which the identity server can be found. (string value)
+#region_name = <None>
+region_name = {{ server.identity.region }}
+
+# DEPRECATED: Directory used to cache files related to PKI tokens. This option
+# has been deprecated in the Ocata release and will be removed in the P
+# release. (string value)
+# This option is deprecated for removal since Ocata.
+# Its value may be silently ignored in the future.
+# Reason: PKI token format is no longer supported.
+#signing_dir = <None>
+
+# Optionally specify a list of memcached server(s) to use for caching. If left
+# undefined, tokens will instead be cached in-process. (list value)
+# Deprecated group/name - [keystone_authtoken]/memcache_servers
+#memcached_servers = <None>
+{%- if server.cache is defined %}
+memcached_servers = {%- for member in server.cache.members %}{{ member.host }}:{{ member.get('port', '11211') }}{% if not loop.last %},{% endif %}{%- endfor %}
+{%- else %}
+token_cache_time = -1
+{%- endif %}
+
+# In order to prevent excessive effort spent validating tokens, the middleware
+# caches previously-seen tokens for a configurable duration (in seconds). Set
+# to -1 to disable caching completely. (integer value)
+#token_cache_time = 300
+
+# DEPRECATED: Determines the frequency at which the list of revoked tokens is
+# retrieved from the Identity service (in seconds). A high number of revocation
+# events combined with a low cache duration may significantly reduce
+# performance. Only valid for PKI tokens. This option has been deprecated in
+# the Ocata release and will be removed in the P release. (integer value)
+# This option is deprecated for removal since Ocata.
+# Its value may be silently ignored in the future.
+# Reason: PKI token format is no longer supported.
+#revocation_cache_time = 10
+
+# (Optional) If defined, indicate whether token data should be authenticated or
+# authenticated and encrypted. If MAC, token data is authenticated (with HMAC)
+# in the cache. If ENCRYPT, token data is encrypted and authenticated in the
+# cache. If the value is not one of these options or empty, auth_token will
+# raise an exception on initialization. (string value)
+# Allowed values: None, MAC, ENCRYPT
+#memcache_security_strategy = None
+
+# (Optional, mandatory if memcache_security_strategy is defined) This string is
+# used for key derivation. (string value)
+#memcache_secret_key = <None>
+
+# (Optional) Number of seconds memcached server is considered dead before it is
+# tried again. (integer value)
+#memcache_pool_dead_retry = 300
+
+# (Optional) Maximum total number of open connections to every memcached
+# server. (integer value)
+#memcache_pool_maxsize = 10
+
+# (Optional) Socket timeout in seconds for communicating with a memcached
+# server. (integer value)
+#memcache_pool_socket_timeout = 3
+
+# (Optional) Number of seconds a connection to memcached is held unused in the
+# pool before it is closed. (integer value)
+#memcache_pool_unused_timeout = 60
+
+# (Optional) Number of seconds that an operation will wait to get a memcached
+# client connection from the pool. (integer value)
+#memcache_pool_conn_get_timeout = 10
+
+# (Optional) Use the advanced (eventlet safe) memcached client pool. The
+# advanced pool will only work under python 2.x. (boolean value)
+#memcache_use_advanced_pool = false
+
+# (Optional) Indicate whether to set the X-Service-Catalog header. If False,
+# middleware will not ask for service catalog on token validation and will not
+# set the X-Service-Catalog header. (boolean value)
+#include_service_catalog = true
+
+# Used to control the use and type of token binding. Can be set to: "disabled"
+# to not check token binding. "permissive" (default) to validate binding
+# information if the bind type is of a form known to the server and ignore it
+# if not. "strict" like "permissive" but if the bind type is unknown the token
+# will be rejected. "required" any form of token binding is needed to be
+# allowed. Finally the name of a binding method that must be present in tokens.
+# (string value)
+#enforce_token_bind = permissive
+
+# DEPRECATED: If true, the revocation list will be checked for cached tokens.
+# This requires that PKI tokens are configured on the identity server. (boolean
+# value)
+# This option is deprecated for removal since Ocata.
+# Its value may be silently ignored in the future.
+# Reason: PKI token format is no longer supported.
+#check_revocations_for_cached = false
+
+# DEPRECATED: Hash algorithms to use for hashing PKI tokens. This may be a
+# single algorithm or multiple. The algorithms are those supported by Python
+# standard hashlib.new(). The hashes will be tried in the order given, so put
+# the preferred one first for performance. The result of the first hash will be
+# stored in the cache. This will typically be set to multiple values only while
+# migrating from a less secure algorithm to a more secure one. Once all the old
+# tokens are expired this option should be set to a single value for better
+# performance. (list value)
+# This option is deprecated for removal since Ocata.
+# Its value may be silently ignored in the future.
+# Reason: PKI token format is no longer supported.
+#hash_algorithms = md5
+
+# A choice of roles that must be present in a service token. Service tokens are
+# allowed to request that an expired token can be used and so this check should
+# tightly control that only actual services should be sending this token. Roles
+# here are applied as an ANY check so any role in this list must be present.
+# For backwards compatibility reasons this currently only affects the
+# allow_expired check. (list value)
+#service_token_roles = service
+
+# For backwards compatibility reasons we must let valid service tokens pass
+# that don't pass the service_token_roles check as valid. Setting this true
+# will become the default in a future release and should be enabled if
+# possible. (boolean value)
+#service_token_roles_required = false
+
+# Prefix to prepend at the beginning of the path. Deprecated, use identity_uri.
+# (string value)
+#auth_admin_prefix =
+
+# Host providing the admin Identity API endpoint. Deprecated, use identity_uri.
+# (string value)
+#auth_host = 127.0.0.1
+
+# Port of the admin Identity API endpoint. Deprecated, use identity_uri.
+# (integer value)
+#auth_port = 35357
+
+# Protocol of the admin Identity API endpoint. Deprecated, use identity_uri.
+# (string value)
+# Allowed values: http, https
+#auth_protocol = https
+
+# Complete admin Identity API endpoint. This should specify the unversioned
+# root endpoint e.g. https://localhost:35357/ (string value)
+#identity_uri = <None>
+identity_uri = {{ server.identity.get('protocol', 'http') }}://{{ server.identity.host }}:35357
+
+# This option is deprecated and may be removed in a future release. Single
+# shared secret with the Keystone configuration used for bootstrapping a
+# Keystone installation, or otherwise bypassing the normal authentication
+# process. This option should not be used, use `admin_user` and
+# `admin_password` instead. (string value)
+#admin_token = <None>
+
+# Service username. (string value)
+#admin_user = <None>
+
+# Service user password. (string value)
+#admin_password = <None>
+
+# Service tenant name. (string value)
+#admin_tenant_name = admin
+
+# Authentication type to load (string value)
+# Deprecated group/name - [keystone_authtoken]/auth_plugin
+#auth_type = <None>
+auth_type = password
+
+# Config Section from which to load plugin specific options (string value)
+#auth_section = <None>
+
+user_domain_id = {{ server.identity.get('domain', 'default') }}
+project_domain_id = {{ server.identity.get('domain', 'default') }}
+project_name = {{ server.identity.tenant }}
+username = {{ server.identity.user }}
+password = {{ server.identity.password }}
+auth_url = {{ server.identity.get('protocol', 'http') }}://{{ server.identity.host }}:35357
+interface = internal
+
+[oslo_middleware]
+
+#
+# From oslo.middleware.http_proxy_to_wsgi
+#
+
+# Whether the application is behind a proxy or not. This determines if the
+# middleware should parse the headers or not. (boolean value)
+#enable_proxy_headers_parsing = false
+
+
+[oslo_policy]
+
+#
+# From oslo.policy
+#
+
+# The file that defines policies. (string value)
+#policy_file = policy.json
+
+# Default rule. Enforced when a requested rule is not found. (string value)
+#policy_default_rule = default
+
+# Directories where policy configuration files are stored. They can be relative
+# to any directory in the search path defined by the config_dir option, or
+# absolute paths. The file defined by policy_file must exist for these
+# directories to be searched. Missing or empty directories are ignored. (multi
+# valued)
+#policy_dirs = policy.d
+
+
+[storage]
+
+#
+# From panko
+#
+
+# Maximum number of connection retries during startup. Set to -1 to specify an
+# infinite retry count. (integer value)
+#max_retries = 10
+
+# Interval (in seconds) between retries of connection. (integer value)
+#retry_interval = 10
+
+# Enable HTTPS connection in the Elasticsearch connection (boolean value)
+#es_ssl_enabled = false
+
+# The name of the index in Elasticsearch (string value)
+#es_index_name = events
diff --git a/panko/init.sls b/panko/init.sls
new file mode 100644
index 0000000..bfdf23f
--- /dev/null
+++ b/panko/init.sls
@@ -0,0 +1,5 @@
+
+include:
+{%- if pillar.panko.server is defined %}
+- panko.server
+{%- endif %}
diff --git a/panko/map.jinja b/panko/map.jinja
new file mode 100644
index 0000000..755f31d
--- /dev/null
+++ b/panko/map.jinja
@@ -0,0 +1,10 @@
+{%- load_yaml as base_defaults %}
+
+Debian:
+ pkgs:
+ - panko-api
+ cacert_file: '/etc/ssl/certs/ca-certificates.crt'
+
+{%- endload %}
+
+{%- set server = salt['grains.filter_by'](base_defaults, merge=salt['pillar.get']('panko:server')) %}
diff --git a/panko/meta/sensu.yml b/panko/meta/sensu.yml
new file mode 100644
index 0000000..a1adedc
--- /dev/null
+++ b/panko/meta/sensu.yml
@@ -0,0 +1,8 @@
+# Sample check
+check:
+ local_panko_proc:
+ command: "PATH=$PATH:/usr/lib64/nagios/plugins:/usr/lib/nagios/plugins check_procs -C panko-api -u panko -c 1:1024"
+ interval: 60
+ occurrences: 1
+ subscribers:
+ - local-panko-server
diff --git a/panko/meta/sphinx.yml b/panko/meta/sphinx.yml
new file mode 100644
index 0000000..d784985
--- /dev/null
+++ b/panko/meta/sphinx.yml
@@ -0,0 +1,12 @@
+{%- from "panko/map.jinja" import server with context %}
+# Fill in documentation details
+doc:
+ name: panko
+ description: OpenStack Panko event and metadata storage service
+ role:
+ server:
+ name: server
+ param:
+ some_param:
+ name: "Some name"
+ value: "some value"
diff --git a/panko/server.sls b/panko/server.sls
new file mode 100644
index 0000000..944c82e
--- /dev/null
+++ b/panko/server.sls
@@ -0,0 +1,43 @@
+{%- from "panko/map.jinja" import server with context %}
+{%- if server.get('enabled', False) %}
+
+panko_server_packages:
+ pkg.installed:
+ - names: {{ server.pkgs }}
+
+/etc/panko/panko.conf:
+ file.managed:
+ - source: salt://panko/files/{{ server.version }}/panko.conf.{{ grains.os_family }}
+ - template: jinja
+ - require:
+ - pkg: panko_server_packages
+
+{%- if not grains.get('noservices', False) %}
+panko_syncdb:
+ cmd.run:
+ - name: panko-dbsync
+ - require:
+ - file: /etc/panko/panko.conf
+{%- endif %}
+
+/etc/apache2/sites-available/panko-api.conf:
+ file.managed:
+ - source: salt://panko/files/{{ server.version }}/panko-api.apache2.conf.Debian
+ - template: jinja
+ - require:
+ - pkg: panko_server_packages
+
+panko_api_config:
+ file.symlink:
+ - name: /etc/apache2/sites-enabled/panko-api.conf
+ - target: /etc/apache2/sites-available/panko-api.conf
+
+panko_apache_restart:
+ service.running:
+ - enable: true
+ - name: apache2
+ - watch:
+ - file: /etc/panko/panko.conf
+ - file: /etc/apache2/sites-available/panko-api.conf
+
+{%- endif %}
diff --git a/tests/pillar/server_cluster.sls b/tests/pillar/server_cluster.sls
new file mode 100644
index 0000000..ac67542
--- /dev/null
+++ b/tests/pillar/server_cluster.sls
@@ -0,0 +1,30 @@
+panko:
+ server:
+ region: RegionOne
+ enabled: true
+ version: pike
+ bind:
+ host: 127.0.0.1
+ port: 8977
+ identity:
+ engine: keystone
+ host: 127.0.0.1
+ port: 35357
+ tenant: service
+ user: panko
+ password: misterio
+ endpoint_type: internalURL
+ database:
+ engine: mysql
+ host: 127.0.0.1
+ port: 3306
+ name: panko
+ user: panko
+ password: misterio
+ cache:
+ engine: memcached
+ members:
+ - host: 127.0.0.1
+ - host: 127.0.0.1
+ - host: 127.0.0.1
+
diff --git a/tests/pillar/server_single.sls b/tests/pillar/server_single.sls
new file mode 100644
index 0000000..a85faa0
--- /dev/null
+++ b/tests/pillar/server_single.sls
@@ -0,0 +1,23 @@
+panko:
+ server:
+ region: RegionOne
+ enabled: true
+ version: pike
+ bind:
+ host: 127.0.0.1
+ port: 8977
+ identity:
+ engine: keystone
+ host: 127.0.0.1
+ port: 35357
+ tenant: service
+ user: panko
+ password: misterio
+ endpoint_type: internalURL
+ database:
+ engine: mysql
+ host: 127.0.0.1
+ port: 3306
+ name: panko
+ user: panko
+ password: misterio
diff --git a/tests/run_tests.sh b/tests/run_tests.sh
new file mode 100755
index 0000000..312b7e4
--- /dev/null
+++ b/tests/run_tests.sh
@@ -0,0 +1,167 @@
+#!/usr/bin/env bash
+
+set -e
+[ -n "$DEBUG" ] && set -x
+
+CURDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+METADATA=${CURDIR}/../metadata.yml
+FORMULA_NAME=$(cat $METADATA | python -c "import sys,yaml; print yaml.load(sys.stdin)['name']")
+
+## Overrideable parameters
+PILLARDIR=${PILLARDIR:-${CURDIR}/pillar}
+BUILDDIR=${BUILDDIR:-${CURDIR}/build}
+VENV_DIR=${VENV_DIR:-${BUILDDIR}/virtualenv}
+DEPSDIR=${BUILDDIR}/deps
+
+SALT_FILE_DIR=${SALT_FILE_DIR:-${BUILDDIR}/file_root}
+SALT_PILLAR_DIR=${SALT_PILLAR_DIR:-${BUILDDIR}/pillar_root}
+SALT_CONFIG_DIR=${SALT_CONFIG_DIR:-${BUILDDIR}/salt}
+SALT_CACHE_DIR=${SALT_CACHE_DIR:-${SALT_CONFIG_DIR}/cache}
+
+SALT_OPTS="${SALT_OPTS} --retcode-passthrough --local -c ${SALT_CONFIG_DIR}"
+
+if [ "x${SALT_VERSION}" != "x" ]; then
+ PIP_SALT_VERSION="==${SALT_VERSION}"
+fi
+
+## Functions
+log_info() {
+ echo "[INFO] $*"
+}
+
+log_err() {
+ echo "[ERROR] $*" >&2
+}
+
+setup_virtualenv() {
+ log_info "Setting up Python virtualenv"
+ virtualenv $VENV_DIR
+ source ${VENV_DIR}/bin/activate
+ pip install salt${PIP_SALT_VERSION}
+}
+
+setup_pillar() {
+ [ ! -d ${SALT_PILLAR_DIR} ] && mkdir -p ${SALT_PILLAR_DIR}
+ echo "base:" > ${SALT_PILLAR_DIR}/top.sls
+ for pillar in ${PILLARDIR}/*; do
+ grep ${FORMULA_NAME}: ${pillar} &>/dev/null || continue
+ state_name=$(basename ${pillar%.sls})
+ echo -e " ${state_name}:\n - ${state_name}" >> ${SALT_PILLAR_DIR}/top.sls
+ done
+}
+
+setup_salt() {
+ [ ! -d ${SALT_FILE_DIR} ] && mkdir -p ${SALT_FILE_DIR}
+ [ ! -d ${SALT_CONFIG_DIR} ] && mkdir -p ${SALT_CONFIG_DIR}
+ [ ! -d ${SALT_CACHE_DIR} ] && mkdir -p ${SALT_CACHE_DIR}
+
+ echo "base:" > ${SALT_FILE_DIR}/top.sls
+ for pillar in ${PILLARDIR}/*.sls; do
+ grep ${FORMULA_NAME}: ${pillar} &>/dev/null || continue
+ state_name=$(basename ${pillar%.sls})
+ echo -e " ${state_name}:\n - ${FORMULA_NAME}" >> ${SALT_FILE_DIR}/top.sls
+ done
+
+ cat << EOF > ${SALT_CONFIG_DIR}/minion
+file_client: local
+cachedir: ${SALT_CACHE_DIR}
+verify_env: False
+minion_id_caching: False
+
+file_roots:
+ base:
+ - ${SALT_FILE_DIR}
+ - ${CURDIR}/..
+ - /usr/share/salt-formulas/env
+
+pillar_roots:
+ base:
+ - ${SALT_PILLAR_DIR}
+ - ${PILLARDIR}
+EOF
+}
+
+fetch_dependency() {
+ dep_name="$(echo $1|cut -d : -f 1)"
+ dep_source="$(echo $1|cut -d : -f 2-)"
+ dep_root="${DEPSDIR}/$(basename $dep_source .git)"
+ dep_metadata="${dep_root}/metadata.yml"
+
+ [ -d /usr/share/salt-formulas/env/${dep_name} ] && log_info "Dependency $dep_name already present in system-wide salt env" && return 0
+ [ -d $dep_root ] && log_info "Dependency $dep_name already fetched" && return 0
+
+ log_info "Fetching dependency $dep_name"
+ [ ! -d ${DEPSDIR} ] && mkdir -p ${DEPSDIR}
+ git clone $dep_source ${DEPSDIR}/$(basename $dep_source .git)
+ ln -s ${dep_root}/${dep_name} ${SALT_FILE_DIR}/${dep_name}
+
+ METADATA="${dep_metadata}" install_dependencies
+}
+
+install_dependencies() {
+ grep -E "^dependencies:" ${METADATA} >/dev/null || return 0
+ (python - | while read dep; do fetch_dependency "$dep"; done) << EOF
+import sys,yaml
+for dep in yaml.load(open('${METADATA}', 'ro'))['dependencies']:
+ print '%s:%s' % (dep["name"], dep["source"])
+EOF
+}
+
+clean() {
+ log_info "Cleaning up ${BUILDDIR}"
+ [ -d ${BUILDDIR} ] && rm -rf ${BUILDDIR} || exit 0
+}
+
+salt_run() {
+ [ -e ${VENV_DIR}/bin/activate ] && source ${VENV_DIR}/bin/activate
+ salt-call ${SALT_OPTS} $*
+}
+
+prepare() {
+ [ ! -d ${BUILDDIR} ] && mkdir -p ${BUILDDIR}
+
+ which salt-call || setup_virtualenv
+ setup_pillar
+ setup_salt
+ install_dependencies
+}
+
+run() {
+ for pillar in ${PILLARDIR}/*.sls; do
+ grep ${FORMULA_NAME}: ${pillar} &>/dev/null || continue
+ state_name=$(basename ${pillar%.sls})
+ salt_run grains.set 'noservices' False force=True
+ salt_run --id=${state_name} state.show_sls ${FORMULA_NAME} || (log_err "Execution of ${FORMULA_NAME}.${state_name} failed"; exit 1)
+ done
+}
+
+_atexit() {
+ RETVAL=$?
+ trap true INT TERM EXIT
+
+ if [ $RETVAL -ne 0 ]; then
+ log_err "Execution failed"
+ else
+ log_info "Execution successful"
+ fi
+ return $RETVAL
+}
+
+## Main
+trap _atexit INT TERM EXIT
+
+case $1 in
+ clean)
+ clean
+ ;;
+ prepare)
+ prepare
+ ;;
+ run)
+ run
+ ;;
+ *)
+ prepare
+ run
+ ;;
+esac