Merge "README updates"
diff --git a/.kitchen.yml b/.kitchen.yml
index 3a181b6..362bd1d 100644
--- a/.kitchen.yml
+++ b/.kitchen.yml
@@ -36,7 +36,7 @@
platforms:
- name: <%=ENV['PLATFORM'] || 'saltstack-ubuntu-xenial-salt-stable' %>
driver_config:
- image: <%=ENV['PLATFORM'] || 'epcim/salt-formulas:saltstack-ubuntu-xenial-salt-stable'%>
+ image: <%=ENV['PLATFORM'] || 'epcim/salt:saltstack-ubuntu-xenial-salt-stable'%>
platform: ubuntu
suites:
diff --git a/.travis.yml b/.travis.yml
index 7f03f12..82dd0bf 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,3 +1,6 @@
+language: python
+python:
+- "2.7.13"
sudo: required
services:
- docker
@@ -17,15 +20,21 @@
- bundle install
env:
- - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2016.3 SUITE=server-cluster
- - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2016.3 SUITE=server-plugin-dirs
- - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2016.3 SUITE=server-single
- - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2017.7 SUITE=server-cluster
- - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2017.7 SUITE=server-plugin-dirs
- - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2017.7 SUITE=server-single
-# - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-bionic-salt-2017.7 SUITE=server-cluster
-# - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-bionic-salt-2017.7 SUITE=server-plugin-dirs
-# - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-bionic-salt-2017.7 SUITE=server-single
+ - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2016.3 SUITE=server-cluster
+ - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2016.3 SUITE=server-plugin-dirs
+ - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2016.3 SUITE=server-single
+ - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2017.7 SUITE=server-cluster
+ - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2017.7 SUITE=server-plugin-dirs
+ - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2017.7 SUITE=server-single
+ - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2018.3 SUITE=server-cluster
+ - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2018.3 SUITE=server-plugin-dirs
+ - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2018.3 SUITE=server-single
+# - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2017.7 SUITE=server-cluster
+# - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2017.7 SUITE=server-plugin-dirs
+# - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2017.7 SUITE=server-single
+# - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2018.3 SUITE=server-cluster
+# - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2018.3 SUITE=server-plugin-dirs
+# - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2018.3 SUITE=server-single
before_script:
diff --git a/_modules/heatv1/__init__.py b/_modules/heatv1/__init__.py
new file mode 100644
index 0000000..51d90b4
--- /dev/null
+++ b/_modules/heatv1/__init__.py
@@ -0,0 +1,38 @@
+"""
+Module for handling Heat stacks.
+
+:depends: - os_client_config
+:configuration: This module is not usable until the following are specified
+"""
+
+try:
+ import os_client_config
+ REQUIREMENTS_MET = True
+except ImportError:
+ REQUIREMENTS_MET = False
+
+import os
+import sys
+
+# The package fails to load without this sys.path tweak;
+# possibly a bug in salt's module loader — TODO confirm
+sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
+
+import stack
+
+stack_create = stack.stack_create
+stack_delete = stack.stack_delete
+stack_list = stack.stack_list
+stack_show = stack.stack_show
+stack_update = stack.stack_update
+
+__all__ = ('stack_create', 'stack_list', 'stack_delete', 'stack_show',
+ 'stack_update')
+
+
+def __virtual__():
+ if REQUIREMENTS_MET:
+ return 'heatv1'
+ else:
+ return False, ("The heat execution module cannot be loaded: "
+ "os_client_config is not available.")
diff --git a/_modules/heatv1/common.py b/_modules/heatv1/common.py
new file mode 100644
index 0000000..06eabff
--- /dev/null
+++ b/_modules/heatv1/common.py
@@ -0,0 +1,109 @@
+import logging
+import six
+import uuid
+
+import os_client_config
+from salt import exceptions
+
+
+log = logging.getLogger(__name__)
+
+SERVICE_KEY = 'orchestration'
+
+
+def get_raw_client(cloud_name):
+ config = os_client_config.OpenStackConfig()
+ cloud = config.get_one_cloud(cloud_name)
+ adapter = cloud.get_session_client(SERVICE_KEY)
+ adapter.version = '1'
+ try:
+ access_info = adapter.session.auth.get_access(adapter.session)
+ endpoints = access_info.service_catalog.get_endpoints()
+ except (AttributeError, ValueError) as exc:
+ six.raise_from(exc, exceptions.SaltInvocationError(
+ "Cannot load keystoneauth plugin. Please check your environment "
+ "configuration."))
+ if SERVICE_KEY not in endpoints:
+ raise exceptions.SaltInvocationError("Cannot find heat endpoint in "
+ "environment endpoint list.")
+ return adapter
+
+
+def send(method):
+ def wrap(func):
+ @six.wraps(func)
+ def wrapped_f(*args, **kwargs):
+ cloud_name = kwargs.get('cloud_name', None)
+ if not cloud_name:
+ raise exceptions.SaltInvocationError(
+ "No cloud_name specified. Please provide cloud_name "
+ "parameter")
+ adapter = get_raw_client(cloud_name)
+ # Remove salt internal kwargs
+ kwarg_keys = list(kwargs.keys())
+ for k in kwarg_keys:
+ if k.startswith('__'):
+ kwargs.pop(k)
+ url, request_kwargs = func(*args, **kwargs)
+ try:
+ response = getattr(adapter, method.lower())(url,
+ **request_kwargs)
+ except Exception as e:
+ log.exception("Error occured when executing request")
+ return {"result": False,
+ "comment": str(e),
+ "status_code": getattr(e, "http_status", 500)}
+ try:
+ resp_body = response.json() if response.content else {}
+ except:
+ resp_body = str(response.content)
+ return {"result": True,
+ "body": resp_body,
+ "status_code": response.status_code}
+ return wrapped_f
+ return wrap
+
+
+def _check_uuid(val):
+ try:
+ return str(uuid.UUID(val)) == val
+ except (TypeError, ValueError, AttributeError):
+ return False
+
+
+def get_by_name_or_uuid(resource_list, resp_key):
+ def wrap(func):
+ @six.wraps(func)
+ def wrapped_f(*args, **kwargs):
+ if 'name' in kwargs:
+ ref = kwargs.get('name', None)
+ start_arg = 0
+ else:
+ start_arg = 1
+ ref = args[0]
+ kwargs["name"] = ref
+ if _check_uuid(ref):
+ uuid = ref
+ else:
+                # Then we have a name, not a UUID
+ cloud_name = kwargs['cloud_name']
+ resp = resource_list(
+ name=ref, cloud_name=cloud_name)["body"][resp_key]
+ if len(resp) == 0:
+ msg = ("Uniq {resource} resource "
+ "with name={name} not found.").format(
+ resource=resp_key, name=ref)
+ return {"result": False,
+ "body": msg,
+ "status_code": 404}
+ elif len(resp) > 1:
+ msg = ("Multiple resource: {resource} "
+ "with name: {name} found ").format(
+ resource=resp_key, name=ref)
+ return {"result": False,
+ "body": msg,
+ "status_code": 400}
+ uuid = resp[0]['id']
+ return func(uuid, *args[start_arg:], **kwargs)
+ return wrapped_f
+ return wrap
\ No newline at end of file
diff --git a/_modules/heatv1/stack.py b/_modules/heatv1/stack.py
new file mode 100644
index 0000000..ace3db0
--- /dev/null
+++ b/_modules/heatv1/stack.py
@@ -0,0 +1,147 @@
+from yaml import safe_load
+import json
+import common
+try:
+ from urllib.parse import urlencode
+except ImportError:
+ from urllib import urlencode
+
+HEAT_ROOT = "/srv/heat/env"
+
+TEMPLATE_PATH = "template"
+ENV_PATH = "env"
+
+
+def _read_env_file(name):
+ path = "/".join([
+ HEAT_ROOT,
+ ENV_PATH,
+ name])
+
+ return _read_file(path)
+
+
+def _read_template_file(name):
+ path = "/".join([
+ HEAT_ROOT,
+ TEMPLATE_PATH,
+ name])
+
+ return _read_file(path)
+
+
+def _read_file(full_path):
+ with open(full_path, 'r') as f:
+ data = safe_load(f)
+ return json.dumps(data, default=str)
+
+
+def _read_additional_file(path):
+ full_path = "/".join([
+ HEAT_ROOT,
+ path])
+ with open(full_path) as f:
+ return str(f.read())
+
+
+@common.send("get")
+def stack_list(**kwargs):
+ url = "/stacks?{}".format(urlencode(kwargs))
+ return url, {}
+
+
+@common.get_by_name_or_uuid(stack_list, 'stacks')
+@common.send("get")
+def stack_show(stack_id, **kwargs):
+ stack_name = kwargs.get("name")
+ url = "/stacks/{stack_name}/{stack_id}".format(
+ stack_name=stack_name, stack_id=stack_id)
+ return url, {}
+
+
+@common.get_by_name_or_uuid(stack_list, 'stacks')
+@common.send("delete")
+def stack_delete(stack_id, **kwargs):
+ stack_name = kwargs.get("name")
+ url = "/stacks/{stack_name}/{stack_id}".format(stack_name=stack_name,
+ stack_id=stack_id)
+ return url, {}
+
+
+@common.send("post")
+def stack_create(name, template=None, environment=None, environment_files=None,
+ files=None, parameters=None, template_url=None,
+ timeout_mins=5, disable_rollback=True, **kwargs):
+ url = "/stacks"
+ request = {'stack_name': name,
+ 'timeout_mins': timeout_mins,
+ 'disable_rollback': disable_rollback}
+ if environment:
+ request["environment"] = environment
+ file_items = {}
+ if environment_files:
+ env_names = []
+ env_files = {}
+ for f_name in environment_files:
+ data = _read_env_file(f_name)
+ env_files[f_name] = data
+ env_names.append(f_name)
+ file_items.update(env_files)
+ request["environment_files"] = env_names
+ if files:
+ for f_name, path in files.items():
+ file_items.update((f_name, _read_additional_file(path)))
+ if file_items:
+ request["files"] = file_items
+ if parameters:
+ request["parameters"] = parameters
+ if template:
+ template_file = _read_template_file(template)
+ request["template"] = template_file
+ if template_url:
+ request["template_url"] = template_url
+    # TODO: validate the template and get back the params.
+
+ return url, {"json": request}
+
+
+@common.get_by_name_or_uuid(stack_list, 'stacks')
+@common.send("put")
+def stack_update(stack_id, template=None, environment=None,
+ environment_files=None, files=None, parameters=None,
+ template_url=None, timeout_mins=5, disable_rollback=True,
+ clear_parameters=None, **kwargs):
+ stack_name = kwargs.get("name")
+ url = "/stacks/{stack_name}/{stack_id}".format(
+ stack_name=stack_name, stack_id=stack_id
+ )
+ request = {'stack_name': stack_name,
+ 'timeout_mins': timeout_mins,
+ 'disable_rollback': disable_rollback}
+ if environment:
+ request["environment"] = environment
+ file_items = {}
+ if environment_files:
+ env_names = []
+ env_files = {}
+ for f_name in environment_files:
+ data = _read_env_file(f_name)
+ env_files[f_name] = data
+ env_names.append(f_name)
+ file_items.update(env_files)
+ request["environment_files"] = env_names
+ if files:
+ for f_name, path in files.items():
+ file_items.update((f_name, _read_additional_file(path)))
+ if file_items:
+ request["files"] = file_items
+ if parameters:
+ request["parameters"] = parameters
+ if template:
+ template_file = _read_template_file(template)
+ request["template"] = template_file
+ if template_url:
+ request["template_url"] = template_url
+ if clear_parameters:
+ request["clear_parameters"] = clear_parameters
+ return url, {"json": request}
diff --git a/_states/heatv1.py b/_states/heatv1.py
new file mode 100644
index 0000000..4ee8cf5
--- /dev/null
+++ b/_states/heatv1.py
@@ -0,0 +1,131 @@
+# Import Python libs
+from __future__ import absolute_import, print_function, unicode_literals
+import logging
+import time
+
+LOG = logging.getLogger(__name__)
+
+
+def __virtual__():
+ return 'heatv1'
+
+
+def _heat_call(fname, *args, **kwargs):
+ return __salt__['heatv1.{}'.format(fname)](*args, **kwargs)
+
+
+def _poll_for_complete(stack_name, cloud_name=None, action=None,
+ poll_period=5, timeout=60):
+ if action:
+ stop_status = ('{0}_FAILED'.format(action), '{0}_COMPLETE'.format(action))
+ stop_check = lambda a: a in stop_status
+ else:
+ stop_check = lambda a: a.endswith('_COMPLETE') or a.endswith('_FAILED')
+ timeout_sec = timeout * 60
+ msg_template = '\n Stack %(name)s %(status)s \n'
+ while True:
+ stack = _heat_call('stack_show',
+ name=stack_name,
+ cloud_name=cloud_name)
+ if not stack["result"]:
+ raise Exception("request for stack failed")
+
+ stack = stack["body"]["stack"]
+ stack_status = stack["stack_status"]
+ msg = msg_template % dict(
+ name=stack_name, status=stack_status)
+ if stop_check(stack_status):
+ return stack_status, msg
+
+ time.sleep(poll_period)
+ timeout_sec -= poll_period
+ if timeout_sec <= 0:
+ stack_status = '{0}_FAILED'.format(action)
+ msg = 'Timeout expired'
+ return stack_status, msg
+
+
+def stack_present(name, cloud_name, template=None,
+ environment=None, params=None, poll=5, rollback=False,
+ timeout=60, profile=None, **connection_args):
+ LOG.debug('Deployed with(' +
+ '{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8} {9})'
+ .format(name, cloud_name, template, environment, params,
+ poll, rollback, timeout, profile, connection_args))
+ ret = {'name': None,
+ 'comment': '',
+ 'changes': {},
+ 'result': True}
+
+ if not name:
+ ret['result'] = False
+ ret['comment'] = 'Name is not valid'
+ return ret
+
+ ret['name'] = name,
+
+ existing_stack = _heat_call('stack_show', name=name,
+ cloud_name=cloud_name)
+
+ if existing_stack['result']:
+ _heat_call('stack_update', name=name,
+ template=template,
+ cloud_name=cloud_name,
+ environment=environment,
+ parameters=params,
+ disable_rollback=not rollback,
+ timeout=timeout)
+ ret['changes']['comment'] = 'Updated stack'
+ status, res = _poll_for_complete(stack_name=name,
+ cloud_name=cloud_name,
+ action="UPDATE", timeout=timeout)
+ ret["result"] = status == "UPDATE_COMPLETE"
+ ret['comment'] = res
+ else:
+ _heat_call('stack_create',
+ name=name,
+ template=template,
+ cloud_name=cloud_name,
+ environment=environment,
+ parameters=params,
+ disable_rollback=not rollback,
+ timeout=timeout)
+ status, res = _poll_for_complete(stack_name=name,
+ cloud_name=cloud_name,
+ action="CREATE", timeout=timeout)
+ ret["result"] = status == "CREATE_COMPLETE"
+ ret['comment'] = res
+ ret['changes']['stack_name'] = name
+ return ret
+
+
+def stack_absent(name, cloud_name, poll=5, timeout=60):
+ LOG.debug('Absent with(' +
+ '{0}, {1}, {2})'.format(name, poll, cloud_name))
+ ret = {'name': None,
+ 'comment': '',
+ 'changes': {},
+ 'result': True}
+ if not name:
+ ret['result'] = False
+ ret['comment'] = 'Name is not valid'
+ return ret
+
+ ret['name'] = name,
+
+ existing_stack = _heat_call('stack_show',
+ name=name, cloud_name=cloud_name)
+
+ if not existing_stack['result']:
+ ret['result'] = True
+ ret['comment'] = 'Stack does not exist'
+ return ret
+
+ _heat_call('stack_delete', name=name, cloud_name=cloud_name)
+ status, comment = _poll_for_complete(stack_name=name,
+ cloud_name=cloud_name,
+ action="DELETE", timeout=timeout)
+ ret['result'] = status == "DELETE_COMPLETE"
+ ret['comment'] = comment
+ ret['changes']['stack_name'] = name
+ return ret
diff --git a/heat/files/logging.conf b/heat/files/logging.conf
deleted file mode 100644
index f8a7f50..0000000
--- a/heat/files/logging.conf
+++ /dev/null
@@ -1,85 +0,0 @@
-{%- set log_handlers = [] -%}
-{%- for log_handler_name, log_handler_attrs in values.logging.log_handlers.items() %}
- {%- if log_handler_attrs.get('enabled', False) %}
- {%- do log_handlers.append(log_handler_name) -%}
- {%- endif %}
-{%- endfor %}
-[loggers]
-keys = root, heat, eventletwsgi
-
-[handlers]
-keys = {{ log_handlers | join(", ") }}
-
-[formatters]
-keys = context, default{% if values.logging.log_handlers.get('fluentd', {}).get('enabled', False) %}, fluentd{% endif %}
-
-[logger_root]
-level = WARNING
-handlers = {{ log_handlers | join(", ") }}
-
-[logger_heat]
-level = INFO
-handlers = {{ log_handlers | join(", ") }}
-qualname = heat
-propagate = 0
-
-[logger_amqplib]
-level = WARNING
-handlers = {{ log_handlers | join(", ") }}
-qualname = amqplib
-
-[logger_sqlalchemy]
-level = WARNING
-handlers = {{ log_handlers | join(", ") }}
-qualname = sqlalchemy
-# "level = INFO" logs SQL queries.
-# "level = DEBUG" logs SQL queries and results.
-# "level = WARNING" logs neither. (Recommended for production systems.)
-
-[logger_boto]
-level = WARNING
-handlers = {{ log_handlers | join(", ") }}
-qualname = boto
-
-[logger_suds]
-level = INFO
-handlers = {{ log_handlers | join(", ") }}
-qualname = suds
-
-[logger_eventletwsgi]
-level = INFO
-handlers = {{ log_handlers | join(", ") }}
-qualname = eventlet.wsgi.server
-
-{%- if values.logging.log_handlers.get('fluentd', {}).get('enabled', False) %}
-[handler_fluentd]
-class = fluent.handler.FluentHandler
-args = ('openstack.{{ service_name | replace("-", ".", 1) }}', 'localhost', 24224)
-formatter = fluentd
-{%- endif %}
-
-{%- if values.logging.log_handlers.watchedfile.enabled %}
-[handler_watchedfile]
-class = handlers.WatchedFileHandler
-args = ('/var/log/heat/{{ service_name }}.log',)
-formatter = context
-{%- endif %}
-
-{% if values.logging.log_handlers.get('ossyslog', {}).get('enabled', False) -%}
-{%- set ossyslog_args = values.logging.log_handlers.ossyslog.get('args', {}) -%}
-[handler_ossyslog]
-class = oslo_log.handlers.OSSysLogHandler
-args = ( handlers.SysLogHandler.{{ ossyslog_args.get('facility', 'LOG_USER') }}, )
-formatter = context
-{%- endif %}
-
-[formatter_context]
-class = oslo_log.formatters.ContextFormatter
-
-[formatter_default]
-format = %(message)s
-
-{%- if values.logging.log_handlers.get('fluentd', {}).get('enabled', False) %}
-[formatter_fluentd]
-class = oslo_log.formatters.FluentFormatter
-{%- endif %}
diff --git a/heat/files/ocata/heat.conf.Debian b/heat/files/ocata/heat.conf.Debian
index 74f8fbf..d949481 100644
--- a/heat/files/ocata/heat.conf.Debian
+++ b/heat/files/ocata/heat.conf.Debian
@@ -1327,6 +1327,12 @@
[clients_heat]
endpoint_type = {{ server.identity.get('endpoint_type_heat',
server.identity.get('endpoint_type_default', 'publicURL')) }}
+{%- if server.clients is defined %}
+{%- if server.clients.heat is defined %}
+insecure = {{ server.clients.heat.get('insecure', False) }}
+{%- endif %}
+{%- endif %}
+
[clients_keystone]
{%- if server.clients is defined %}
{%- if server.clients.keystone is defined %}
diff --git a/heat/files/pike/heat.conf.Debian b/heat/files/pike/heat.conf.Debian
index 8132afa..95db173 100644
--- a/heat/files/pike/heat.conf.Debian
+++ b/heat/files/pike/heat.conf.Debian
@@ -1299,7 +1299,8 @@
#
# Authentication Endpoint URI. (string value)
-auth_uri={{ server.identity.get('protocol', 'http') }}://{{ server.identity.host }}:5000
+# NOTE(vsaienko) autodiscovery doesn't work here. Set version explicitly
+auth_uri={{ server.identity.get('protocol', 'http') }}://{{ server.identity.host }}:5000/v3
# Allow orchestration of multiple clouds. (boolean value)
#multi_cloud=false
@@ -1327,6 +1328,12 @@
[clients_heat]
endpoint_type = {{ server.identity.get('endpoint_type_heat',
server.identity.get('endpoint_type_default', 'publicURL')) }}
+{%- if server.clients is defined %}
+{%- if server.clients.heat is defined %}
+insecure = {{ server.clients.heat.get('insecure', False) }}
+{%- endif %}
+{%- endif %}
+
[clients_keystone]
{%- if server.clients is defined %}
{%- if server.clients.keystone is defined %}
diff --git a/heat/files/queens/heat.conf.Debian b/heat/files/queens/heat.conf.Debian
index aec1427..a392c59 100644
--- a/heat/files/queens/heat.conf.Debian
+++ b/heat/files/queens/heat.conf.Debian
@@ -524,8 +524,11 @@
# Optional PEM-formatted file that contains the private key. (string value)
#key_file = <None>
-# If set, then the server's certificate will not be verified. (boolean value)
-#insecure = <None>
+{%- if server.clients is defined %}
+{%- if server.clients.heat is defined %}
+insecure = {{ server.clients.heat.get('insecure', False) }}
+{%- endif %}
+{%- endif %}
# Optional heat url in format like http://0.0.0.0:8004/v1/%(tenant_id)s.
# (string value)
@@ -865,7 +868,8 @@
# Authentication Endpoint URI. (string value)
#auth_uri = <None>
-auth_uri = {{ server.identity.get('protocol', 'http') }}://{{ server.identity.host }}:5000
+# NOTE(vsaienko) autodiscovery doesn't work here. Set version explicitly
+auth_uri = {{ server.identity.get('protocol', 'http') }}://{{ server.identity.host }}:5000/v3
# Allow orchestration of multiple clouds. (boolean value)
#multi_cloud = false
diff --git a/heat/map.jinja b/heat/map.jinja
index d87cc97..fe301a4 100644
--- a/heat/map.jinja
+++ b/heat/map.jinja
@@ -18,6 +18,7 @@
},
'enable_proxy_headers_parsing': true,
'logging': {
+ 'app_name': 'heat',
'log_appender': false,
'log_file': '/var/log/heat/heat.log',
'log_handlers': {
@@ -38,6 +39,7 @@
},
'enable_proxy_headers_parsing': true,
'logging': {
+ 'app_name': 'heat',
'log_appender': false,
'log_file': '/var/log/heat/heat.log',
'log_handlers': {
diff --git a/heat/meta/sphinx.yml b/heat/meta/sphinx.yml
index 3732e63..67b36af 100644
--- a/heat/meta/sphinx.yml
+++ b/heat/meta/sphinx.yml
@@ -31,9 +31,18 @@
database_host:
name: "Database"
value: {{ server.database.user }}@{{ server.database.host }}:{{ server.database.port }}//{{ server.database.name }}
+ {%- set rabbit_port = server.message_queue.get('port', 5671 if server.message_queue.get('ssl',{}).get('enabled', False) else 5672) %}
message_queue_ip:
name: "Message queue"
- value: {{ server.message_queue.user }}@{{ server.message_queue.host }}:{{ server.message_queue.port }}{{ server.message_queue.virtual_host }}
+ {%- if server.message_queue.members is defined %}
+ value: {% for member in server.message_queue.members -%}
+ {{ server.message_queue.user }}:{{ server.message_queue.password }}@{{ member.host }}:{{ member.get('port',rabbit_port) }}
+ {%- if not loop.last -%},{%- endif -%}
+ {%- endfor -%}
+ /{{ server.message_queue.virtual_host }}
+ {%- else %}
+ value: {{ server.message_queue.user }}:{{ server.message_queue.password }}@{{ server.message_queue.host }}:{{ rabbit_port }}/{{ server.message_queue.virtual_host }}
+ {%- endif %}
identity_host:
name: "Identity service"
value: {{ server.identity.user }}@{{ server.identity.host }}:{{ server.identity.port }}
diff --git a/heat/server.sls b/heat/server.sls
index 8d0c483..c06ac26 100644
--- a/heat/server.sls
+++ b/heat/server.sls
@@ -46,13 +46,13 @@
heat_general_logging_conf:
file.managed:
- name: /etc/heat/logging.conf
- - source: salt://heat/files/logging.conf
+ - source: salt://oslo_templates/files/logging/_logging.conf
- template: jinja
- user: heat
- group: heat
- defaults:
service_name: heat
- values: {{ server }}
+ _data: {{ server.logging }}
- require:
- pkg: heat_server_packages
{%- if server.logging.log_handlers.get('fluentd', {}).get('enabled', False) %}
@@ -72,14 +72,14 @@
{{ service_name }}_logging_conf:
file.managed:
- name: /etc/heat/logging/logging-{{ service_name }}.conf
- - source: salt://heat/files/logging.conf
+ - source: salt://oslo_templates/files/logging/_logging.conf
- template: jinja
- makedirs: True
- user: heat
- group: heat
- defaults:
service_name: {{ service_name }}
- values: {{ server }}
+ _data: {{ server.logging }}
- require:
- pkg: heat_server_packages
{%- if server.logging.log_handlers.get('fluentd', {}).get('enabled', False) %}
diff --git a/tests/run_tests.sh b/tests/run_tests.sh
index 7da2a75..9761585 100755
--- a/tests/run_tests.sh
+++ b/tests/run_tests.sh
@@ -6,11 +6,13 @@
CURDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
METADATA=${CURDIR}/../metadata.yml
FORMULA_NAME=$(cat $METADATA | python -c "import sys,yaml; print yaml.load(sys.stdin)['name']")
+FORMULA_META_DIR=${CURDIR}/../${FORMULA_NAME}/meta
## Overrideable parameters
PILLARDIR=${PILLARDIR:-${CURDIR}/pillar}
BUILDDIR=${BUILDDIR:-${CURDIR}/build}
VENV_DIR=${VENV_DIR:-${BUILDDIR}/virtualenv}
+MOCK_BIN_DIR=${MOCK_BIN_DIR:-${CURDIR}/mock_bin}
DEPSDIR=${BUILDDIR}/deps
SALT_FILE_DIR=${SALT_FILE_DIR:-${BUILDDIR}/file_root}
@@ -40,10 +42,20 @@
python -m pip install salt${PIP_SALT_VERSION}
}
+setup_mock_bin() {
+ # If some state requires a binary, a lightweight replacement for
+ # such binary can be put into MOCK_BIN_DIR for test purposes
+ if [ -d "${MOCK_BIN_DIR}" ]; then
+ PATH="${MOCK_BIN_DIR}:$PATH"
+ export PATH
+ fi
+}
+
setup_pillar() {
[ ! -d ${SALT_PILLAR_DIR} ] && mkdir -p ${SALT_PILLAR_DIR}
echo "base:" > ${SALT_PILLAR_DIR}/top.sls
for pillar in ${PILLARDIR}/*; do
+ grep ${FORMULA_NAME}: ${pillar} &>/dev/null || continue
state_name=$(basename ${pillar%.sls})
echo -e " ${state_name}:\n - ${state_name}" >> ${SALT_PILLAR_DIR}/top.sls
done
@@ -56,6 +68,7 @@
echo "base:" > ${SALT_FILE_DIR}/top.sls
for pillar in ${PILLARDIR}/*.sls; do
+ grep ${FORMULA_NAME}: ${pillar} &>/dev/null || continue
state_name=$(basename ${pillar%.sls})
echo -e " ${state_name}:\n - ${FORMULA_NAME}" >> ${SALT_FILE_DIR}/top.sls
done
@@ -119,6 +132,7 @@
[ -d ${BUILDDIR} ] && mkdir -p ${BUILDDIR}
which salt-call || setup_virtualenv
+ setup_mock_bin
setup_pillar
setup_salt
install_dependencies
@@ -126,8 +140,28 @@
run() {
for pillar in ${PILLARDIR}/*.sls; do
+ grep ${FORMULA_NAME}: ${pillar} &>/dev/null || continue
state_name=$(basename ${pillar%.sls})
+ salt_run grains.set 'noservices' False force=True
+
+ echo "Checking state ${FORMULA_NAME}.${state_name} ..."
salt_run --id=${state_name} state.show_sls ${FORMULA_NAME} || (log_err "Execution of ${FORMULA_NAME}.${state_name} failed"; exit 1)
+
+ # Check that all files in 'meta' folder can be rendered using any valid pillar
+ for meta in `find ${FORMULA_META_DIR} -type f`; do
+ meta_name=$(basename ${meta})
+ echo "Checking meta ${meta_name} ..."
+ salt_run --out=quiet --id=${state_name} cp.get_template ${meta} ${SALT_CACHE_DIR}/${meta_name} \
+ || (log_err "Failed to render meta ${meta} using pillar ${FORMULA_NAME}.${state_name}"; exit 1)
+ cat ${SALT_CACHE_DIR}/${meta_name}
+ done
+ done
+}
+
+real_run() {
+ for pillar in ${PILLARDIR}/*.sls; do
+ state_name=$(basename ${pillar%.sls})
+ salt_run --id=${state_name} state.sls ${FORMULA_NAME} || (log_err "Execution of ${FORMULA_NAME}.${state_name} failed"; exit 1)
done
}
@@ -156,6 +190,9 @@
run)
run
;;
+ real-run)
+ real_run
+ ;;
*)
prepare
run