Merge "README clean up"
diff --git a/.travis.yml b/.travis.yml
index 90fe8b5..cec7bcd 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,3 +1,6 @@
+language: python
+python:
+- "2.7.13"
 sudo: required
 services:
   - docker
diff --git a/README.rst b/README.rst
index 5e1f645..7d604d6 100644
--- a/README.rst
+++ b/README.rst
@@ -80,7 +80,8 @@
         cinder_internal_tenant_user_id: f46924c112a14c80ab0a24a613d95eef
         cinder_internal_tenant_project_id: b7455b8974bb4064ad247c8f375eae6c
         default_volume_type: 7k2SaS
-        nable_force_upload: true
+        enable_force_upload: true
+        my_ip: 192.168.0.254
         database:
           engine: mysql
           host: 127.0.0.1
@@ -119,6 +120,20 @@
         barbican:
           enabled: true
 
+
+Volume vmware related options:
+
+.. code-block:: yaml
+
+    cinder:
+      volume:
+        backend:
+          vmware:
+            engine: vmware
+            host_username: vmware
+            host_password: vmware
+            cluster_names: vmware_cluster01,vmware_cluster02
+
 * The CORS parameters enablement:
 
   .. code-block:: yaml
@@ -542,6 +557,8 @@
             sf_emulate_512: false
             sf_api_port: 14443
             host: ctl01
+            # For compatibility with old versions
+            sf_account_prefix: PREFIX
 
 * Cinder setup with Block Device driver:
 
diff --git a/_modules/cinderv3/__init__.py b/_modules/cinderv3/__init__.py
new file mode 100644
index 0000000..650a6a2
--- /dev/null
+++ b/_modules/cinderv3/__init__.py
@@ -0,0 +1,33 @@
+try:
+    import os_client_config
+    REQUIREMENTS_MET = True
+except ImportError:
+    REQUIREMENTS_MET = False
+import os
+import sys
+
+# I failed to load this module without the path insertion below;
+# this seems to be a bug in Salt (or possibly just my environment).
+sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
+
+import volume
+
+volume_list = volume.volume_list
+volume_type_list = volume.volume_type_list
+volume_type_get = volume.volume_type_get
+volume_type_create = volume.volume_type_create
+volume_type_delete = volume.volume_type_delete
+keys_volume_type_get = volume.keys_volume_type_get
+keys_volume_type_set = volume.keys_volume_type_set
+
+__all__ = ('volume_list', 'volume_type_list', 'volume_type_get',
+           'volume_type_create', 'keys_volume_type_get',
+           'keys_volume_type_set', 'volume_type_delete')
+
+
+def __virtual__():
+    if REQUIREMENTS_MET:
+        return 'cinderv3'
+    else:
+        return False, ("The cinderv3 execution module cannot be loaded: "
+                       "os_client_config are unavailable.")
diff --git a/_modules/cinderv3/common.py b/_modules/cinderv3/common.py
new file mode 100644
index 0000000..a7e6220
--- /dev/null
+++ b/_modules/cinderv3/common.py
@@ -0,0 +1,105 @@
+import six
+import logging
+import uuid
+
+import os_client_config
+from salt import exceptions
+
+
+log = logging.getLogger(__name__)
+
+SERVICE_KEY = 'volumev3'
+
+
+def get_raw_client(cloud_name):
+    config = os_client_config.OpenStackConfig()
+    cloud = config.get_one_cloud(cloud_name)
+    adapter = cloud.get_session_client(SERVICE_KEY)
+    try:
+        access_info = adapter.session.auth.get_access(adapter.session)
+        endpoints = access_info.service_catalog.get_endpoints()
+    except (AttributeError, ValueError) as exc:
+        six.raise_from(exc, exceptions.SaltInvocationError(
+            "Cannot load keystoneauth plugin. Please check your environment "
+            "configuration."))
+    if SERVICE_KEY not in endpoints:
+        raise exceptions.SaltInvocationError("Cannot find cinder endpoint in "
+                                             "environment endpoint list.")
+    return adapter
+
+
+def send(method):
+    def wrap(func):
+        @six.wraps(func)
+        def wrapped_f(*args, **kwargs):
+            cloud_name = kwargs.pop('cloud_name', None)
+            if not cloud_name:
+                raise exceptions.SaltInvocationError(
+                    "No cloud_name specified. Please provide cloud_name "
+                    "parameter")
+            adapter = get_raw_client(cloud_name)
+            kwarg_keys = list(kwargs.keys())
+            for k in kwarg_keys:
+                if k.startswith('__'):
+                    kwargs.pop(k)
+            url, request_kwargs = func(*args, **kwargs)
+            try:
+                response = getattr(adapter, method.lower())(url,
+                                                            **request_kwargs)
+            except Exception as e:
+                log.exception("Error occured when executing request")
+                return {"result": False,
+                        "comment": str(e),
+                        "status_code": getattr(e, "http_status", 500)}
+            return {"result": True,
+                    "body": response.json() if response.content else {},
+                    "status_code": response.status_code}
+        return wrapped_f
+    return wrap
+
+
+def _check_uuid(val):
+    try:
+        return str(uuid.UUID(val)) == val
+    except (TypeError, ValueError, AttributeError):
+        return False
+
+
+def get_by_name_or_uuid(resource_list, resp_key):
+    def wrap(func):
+        @six.wraps(func)
+        def wrapped_f(*args, **kwargs):
+            if 'name' in kwargs:
+                ref = kwargs.pop('name', None)
+                start_arg = 0
+            else:
+                start_arg = 1
+                ref = args[0]
+            item_id = None
+            if _check_uuid(ref):
+                item_id = ref
+            else:
+                cloud_name = kwargs['cloud_name']
+                # Cinder does not appear to support filtering volume types by name,
+                resp = resource_list(cloud_name=cloud_name)["body"][resp_key]
+                # so we have to search the returned list directly.
+                for item in resp:
+                    if item["name"] == ref:
+                        if item_id is not None:
+                            msg = ("Multiple resource: {resource} " 
+                                   "with name: {name} found ").format(
+                                    resource=resp_key, name=ref)
+                            return {"result": False,
+                                    "body": msg,
+                                    "status_code": 400}
+                        item_id = item["id"]
+                if not item_id:
+                    msg = ("Uniq {resource} resource "
+                           "with name={name} not found.").format(
+                            resource=resp_key, name=ref)
+                    return {"result": False,
+                            "body": msg,
+                            "status_code": 404}
+            return func(item_id, *args[start_arg:], **kwargs)
+        return wrapped_f
+    return wrap
diff --git a/_modules/cinderv3/volume.py b/_modules/cinderv3/volume.py
new file mode 100644
index 0000000..deaaf6d
--- /dev/null
+++ b/_modules/cinderv3/volume.py
@@ -0,0 +1,95 @@
+from __future__ import absolute_import
+
+import common
+
+try:
+    from urllib.parse import urlencode
+except ImportError:
+    from urllib import urlencode
+
+
+@common.send("get")
+def volume_list(**kwargs):
+    """
+    Return list of cinder volumes.
+    """
+    url = '/volumes?{}'.format(urlencode(kwargs))
+    return url, {}
+
+
+@common.send("get")
+def volume_type_list(**kwargs):
+    """
+    Return list of volume types
+    """
+    url = '/types?{}'.format(urlencode(kwargs))
+    return url, {}
+
+
+@common.get_by_name_or_uuid(volume_type_list, 'volume_types')
+@common.send("get")
+def volume_type_get(volume_type_id, **kwargs):
+    """
+    Returns id of the specified volume type name
+    """
+    url = "/types/{volume_type_id}".format(volume_type_id=volume_type_id)
+    return url, {}
+
+
+@common.get_by_name_or_uuid(volume_type_list, 'volume_types')
+@common.send("delete")
+def volume_type_delete(volume_type_id, **kwargs):
+    """
+    delete the specified volume type
+    """
+    url = "/types/{volume_type_id}".format(volume_type_id=volume_type_id)
+    return url, {}
+
+
+@common.send("post")
+def volume_type_create(name, **kwargs):
+    """
+    Create cinder volume type
+    """
+    url = "/types"
+    req = {"volume_type": {"name": name}}
+    return url, {'json': req}
+
+
+@common.get_by_name_or_uuid(volume_type_list, 'volume_types')
+@common.send("get")
+def keys_volume_type_get(volume_type_id, **kwargs):
+    """
+    Return extra specs of the specified volume type.
+    """
+    url = "/types/{volume_type_id}/extra_specs".format(
+        volume_type_id=volume_type_id)
+    return url, {}
+
+
+@common.send("put")
+def _key_volume_type_set(type_id, key, value, **kwargs):
+    url = "/types/{volume_type_id}/extra_specs/{key}".format(
+        volume_type_id=type_id, key=key)
+    return url, {'json': {str(key): str(value)}}
+
+
+@common.get_by_name_or_uuid(volume_type_list, 'volume_types')
+def keys_volume_type_set(volume_type_id, keys=None, **kwargs):
+    """
+    Set extra specs of the specified volume type.
+    """
+    if keys is None:
+        keys = {}
+    cloud_name = kwargs["cloud_name"]
+    cur_keys = keys_volume_type_get(
+        volume_type_id, cloud_name=cloud_name)["body"]["extra_specs"]
+
+    for k, v in keys.items():
+        if (k, v) in cur_keys.items():
+            continue
+        resp = _key_volume_type_set(volume_type_id, k, v, cloud_name=cloud_name)
+        if resp.get("result") is False:
+            return resp
+
+    return {"result": True}
diff --git a/_states/cinderv3.py b/_states/cinderv3.py
new file mode 100644
index 0000000..c45e970
--- /dev/null
+++ b/_states/cinderv3.py
@@ -0,0 +1,99 @@
+"""
+Management of Cinder resources
+"""
+
+import ast
+import logging
+
+LOG = logging.getLogger(__name__)
+
+
+def __virtual__():
+    return 'cinderv3'
+
+
+def _cinder_call(fname, *args, **kwargs):
+    return __salt__['cinderv3.{}'.format(fname)](*args, **kwargs)
+
+
+def volume_type_present(name=None, cloud_name=None):
+    """
+    Ensures that the specified volume type is present.
+    """
+    ret = {
+        'name': name,
+        'changes': {},
+        'result': True,
+        'comment': 'Volume type "{0}" already exists'.format(name)
+    }
+    type_req = _cinder_call('volume_type_get', name=name, cloud_name=cloud_name)
+    if type_req.get("result"):
+        return ret
+    else:
+        create_req = _cinder_call('volume_type_create', name=name,
+                                  cloud_name=cloud_name)
+        if create_req.get("result") is False:
+            ret = {
+                'name': name,
+                'changes': {},
+                'result': False,
+                'comment': 'Volume type "{0}" failed to create'.format(name)
+            }
+        else:
+            ret['comment'] = 'Volume type {0} has been created'.format(name)
+            ret['changes']['Volume type'] = 'Created'
+        return ret
+
+
+def volume_type_absent(name=None, cloud_name=None):
+    """
+    Ensures that the specified volume type is absent.
+    """
+    ret = {
+        'name': name,
+        'changes': {},
+        'result': True,
+        'comment': 'Volume type "{0}" not found'.format(name)
+    }
+    type_req = _cinder_call('volume_type_get', name=name, cloud_name=cloud_name)
+    if not type_req.get("result"):
+        return ret
+    else:
+        delete_req = _cinder_call('volume_type_delete', name=name,
+                                  cloud_name=cloud_name)
+        if delete_req.get("result") is False:
+            ret = {
+                'name': name,
+                'changes': {},
+                'result': False,
+                'comment': 'Volume type "{0}" failed to delete'.format(name)
+            }
+        else:
+            ret['comment'] = 'Volume type {0} has been deleted'.format(name)
+            ret['changes']['Volume type'] = 'Deleted'
+        return ret
+
+
+def volume_type_key_present(name=None, key=None, value=None, cloud_name=None):
+    """
+    Ensures that the extra specs are present on a volume type.
+    """
+    keys = "{u'" + key + "': u'" + value + "'}"
+    keys = ast.literal_eval(keys)
+    signal_create = _cinder_call('keys_volume_type_set',
+                                 name=name, keys=keys, cloud_name=cloud_name)
+    if signal_create["result"] is True:
+        ret = {
+            'name': name,
+            'changes': keys,
+            'result': True,
+            'comment': 'Volume type "{0}" was updated'.format(name)
+        }
+    else:
+        ret = {
+            'name': name,
+            'changes': {},
+            'result': False,
+            'comment': signal_create.get("comment")
+        }
+    return ret
diff --git a/cinder/client.sls b/cinder/client.sls
index c104985..edfec18 100644
--- a/cinder/client.sls
+++ b/cinder/client.sls
@@ -29,7 +29,7 @@
                        'project_id': identity.project,
                        'port': identity.get('port', 35357),
                        'protocol': identity.get('protocol', 'http'),
-                       'region_name': identity.get('region_name', 'RegionOne'),
+                       'region_name': identity.get('region', 'RegionOne'),
                        'endpoint_type': identity.get('endpoint_type', 'internalURL'),
                        'certificate': identity.get('certificate', client.cacert_file),
                        'api_version': keystone_api_version} %}
diff --git a/cinder/controller.sls b/cinder/controller.sls
index a30bd66..6ee10e1 100644
--- a/cinder/controller.sls
+++ b/cinder/controller.sls
@@ -1,6 +1,12 @@
 {%- from "cinder/map.jinja" import controller with context %}
 {%- if controller.get('enabled', False) %}
 
+include:
+  {%- if controller.version not in ['mitaka','newton'] %}
+ - apache
+  {%- endif %}
+ - cinder.db.offline_sync
+
 {%- set user = controller %}
 {%- include "cinder/user.sls" %}
 
@@ -11,6 +17,8 @@
 cinder_controller_packages:
   pkg.installed:
   - names: {{ controller.pkgs }}
+  - require_in:
+    - sls: cinder.db.offline_sync
 
 /etc/cinder/cinder.conf:
   file.managed:
@@ -18,6 +26,8 @@
   - template: jinja
   - require:
     - pkg: cinder_controller_packages
+  - require_in:
+    - sls: cinder.db.offline_sync
 
 /etc/cinder/api-paste.ini:
   file.managed:
@@ -25,6 +35,8 @@
   - template: jinja
   - require:
     - pkg: cinder_controller_packages
+  - require_in:
+    - sls: cinder.db.offline_sync
 
 {%- if controller.backup.engine != None %}
   {%- set cinder_log_services = controller.services + controller.backup.services %}
@@ -68,13 +80,13 @@
 cinder_general_logging_conf:
   file.managed:
     - name: /etc/cinder/logging.conf
-    - source: salt://cinder/files/logging.conf
+    - source: salt://oslo_templates/files/logging/_logging.conf
     - template: jinja
     - user: cinder
     - group: cinder
     - defaults:
         service_name: cinder
-        values: {{ controller }}
+        _data: {{ controller.logging }}
     - require:
       - pkg: cinder_controller_packages
 {%- if controller.logging.log_handlers.get('fluentd', {}).get('enabled', False) %}
@@ -96,14 +108,14 @@
 {{ service_name }}_logging_conf:
   file.managed:
     - name: /etc/cinder/logging/logging-{{ service_name }}.conf
-    - source: salt://cinder/files/logging.conf
+    - source: salt://oslo_templates/files/logging/_logging.conf
     - template: jinja
     - makedirs: True
     - user: cinder
     - group: cinder
     - defaults:
         service_name: {{ service_name }}
-        values: {{ controller }}
+        _data: {{ controller.logging }}
     - require:
       - pkg: cinder_controller_packages
 {%- if controller.logging.log_handlers.get('fluentd', {}).get('enabled', False) %}
@@ -145,20 +157,7 @@
 
 {%- endfor %}
 
-cinder_syncdb:
-  cmd.run:
-  - name: 'cinder-manage db sync; sleep 5;'
-  {%- if grains.get('noservices') %}
-  - onlyif: /bin/false
-  {%- endif %}
-  - require:
-    - pkg: cinder_controller_packages
-  - require_in:
-    - service: cinder_controller_services
-
 {%- if controller.version not in ['mitaka','newton'] %}
-include:
-  - apache
 {#- Creation of sites using templates is deprecated, sites should be generated by apache pillar, and enabled by cinder formula #}
 {%- if pillar.get('apache', {}).get('server', {}).get('site', {}).cinder is not defined %}
 
@@ -214,6 +213,7 @@
   - require:
     - pkg: cinder_controller_packages
     - service: cinder_api_service_dead
+    - sls: cinder.db.offline_sync
   - watch:
     {%- if controller.message_queue.get('ssl',{}).get('enabled', False) %}
     - file: rabbitmq_ca_cinder_controller
@@ -237,6 +237,7 @@
   {%- endif %}
   - require:
     - pkg: cinder_controller_packages
+    - sls: cinder.db.offline_sync
   - watch:
     {%- if controller.message_queue.get('ssl',{}).get('enabled', False) %}
     - file: rabbitmq_ca_cinder_controller
@@ -270,6 +271,7 @@
   {%- endif %}
   - require:
     - pkg: cinder_controller_packages
+    - sls: cinder.db.offline_sync
   - watch:
     {%- if controller.message_queue.get('ssl',{}).get('enabled', False) %}
     - file: rabbitmq_ca_cinder_controller
@@ -302,7 +304,7 @@
                        'project_id': identity.tenant,
                        'port': identity.get('port', 35357),
                        'protocol': identity.get('protocol', 'http'),
-                       'region_name': identity.get('region_name', 'RegionOne'),
+                       'region_name': identity.get('region', 'RegionOne'),
                        'endpoint_type': identity.get('endpoint_type', 'internalURL'),
                        'certificate': identity.get('certificate', controller.cacert_file),
                        'api_version': keystone_api_version} %}
diff --git a/cinder/db/offline_sync.sls b/cinder/db/offline_sync.sls
new file mode 100644
index 0000000..33da95e
--- /dev/null
+++ b/cinder/db/offline_sync.sls
@@ -0,0 +1,8 @@
+{%- from "cinder/map.jinja" import controller with context %}
+
+cinder_syncdb:
+  cmd.run:
+  - name: 'cinder-manage db sync; sleep 5;'
+  {%- if grains.get('noservices') or controller.get('role', 'primary') == 'secondary' %}
+  - onlyif: /bin/false
+  {%- endif %}
diff --git a/cinder/db/online_sync.sls b/cinder/db/online_sync.sls
new file mode 100644
index 0000000..bdc1082
--- /dev/null
+++ b/cinder/db/online_sync.sls
@@ -0,0 +1,8 @@
+{% from "cinder/map.jinja" import controller with context %}
+
+cinder_controller_online_data_migrations:
+  cmd.run:
+  - name: cinder-manage db online_data_migrations
+  {%- if grains.get('noservices') or controller.get('role', 'primary') == 'secondary' %}
+  - onlyif: /bin/false
+  {%- endif %}
diff --git a/cinder/files/backend/_solidfire.conf b/cinder/files/backend/_solidfire.conf
index 5a76c70..d954121 100644
--- a/cinder/files/backend/_solidfire.conf
+++ b/cinder/files/backend/_solidfire.conf
@@ -14,3 +14,6 @@
 sf_api_port = {{ backend.get('sf_api_port', 443) }}
 {%- endif %}
 volume_driver=cinder.volume.drivers.solidfire.SolidFireDriver
+{%- if backend.sf_account_prefix is defined %}
+sf_account_prefix = {{ backend.sf_account_prefix }}
+{%- endif %}
diff --git a/cinder/files/backend/_vmware.conf b/cinder/files/backend/_vmware.conf
new file mode 100644
index 0000000..c1b8b72
--- /dev/null
+++ b/cinder/files/backend/_vmware.conf
@@ -0,0 +1,124 @@
+
+[{{ backend_name }}]
+
+volume_driver = cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver
+volume_backend_name={{ backend_name }}
+
+# IP address for connecting to VMware vCenter server. (string value)
+#vmware_host_ip = <None>
+{%- if backend.host_ip is defined %}
+vmware_host_ip = {{ backend.host_ip }}
+{%- endif %}
+
+
+# Port number for connecting to VMware vCenter server. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#vmware_host_port = 443
+{%- if backend.host_port is defined %}
+vmware_host_port = {{ backend.host_port }}
+{%- endif %}
+
+# Username for authenticating with VMware vCenter server. (string value)
+#vmware_host_username = <None>
+vmware_host_username = {{ backend.host_username }}
+
+# Password for authenticating with VMware vCenter server. (string value)
+#vmware_host_password = <None>
+vmware_host_password = {{ backend.host_password }}
+
+
+# Optional VIM service WSDL Location e.g http://<server>/vimService.wsdl.
+# Optional over-ride to default location for bug work-arounds. (string value)
+#vmware_wsdl_location = <None>
+{%- if backend.wsdl_location is defined %}
+vmware_wsdl_location = {{ backend.wsdl_location }}
+{%- endif %}
+
+# Number of times VMware vCenter server API must be retried upon connection
+# related issues. (integer value)
+#vmware_api_retry_count = 10
+{%- if backend.api_retry_count is defined %}
+vmware_api_retry_count = {{ backend.api_retry_count }}
+{%- endif %}
+
+# The interval (in seconds) for polling remote tasks invoked on VMware vCenter
+# server. (floating point value)
+#vmware_task_poll_interval = 2.0
+{%- if backend.task_poll_interval is defined %}
+vmware_task_poll_interval = {{ backend.task_poll_interval }}
+{%- endif %}
+
+# Name of the vCenter inventory folder that will contain Cinder volumes. This
+# folder will be created under "OpenStack/<project_folder>", where
+# project_folder is of format "Project (<volume_project_id>)". (string value)
+#vmware_volume_folder = Volumes
+{%- if backend.volume_folder is defined %}
+vmware_volume_folder = {{ backend.volume_folder }}
+{%- endif %}
+
+# Timeout in seconds for VMDK volume transfer between Cinder and Glance.
+# (integer value)
+#vmware_image_transfer_timeout_secs = 7200
+{%- if backend.image_transfer_timeout_secs is defined %}
+vmware_image_transfer_timeout_secs = {{ backend.image_transfer_timeout_secs }}
+{%- endif %}
+
+# Max number of objects to be retrieved per batch. Query results will be
+# obtained in batches from the server and not in one shot. Server may still
+# limit the count to something less than the configured value. (integer value)
+#vmware_max_objects_retrieval = 100
+{%- if backend.max_objects_retrieval is defined %}
+vmware_max_objects_retrieval = {{ backend.max_objects_retrieval }}
+{%- endif %}
+
+# Optional string specifying the VMware vCenter server version. The driver
+# attempts to retrieve the version from VMware vCenter server. Set this
+# configuration only if you want to override the vCenter server version.
+# (string value)
+#vmware_host_version = <None>
+{%- if backend.host_version is defined %}
+vmware_host_version = {{ backend.host_version }}
+{%- endif %}
+
+# Directory where virtual disks are stored during volume backup and restore.
+# (string value)
+#vmware_tmp_dir = /tmp
+{%- if backend.tmp_dir is defined %}
+vmware_tmp_dir = {{ backend.tmp_dir }}
+{%- endif %}
+
+# CA bundle file to use in verifying the vCenter server certificate. (string
+# value)
+#vmware_ca_file = <None>
+{%- if backend.cacert_file is defined %}
+vmware_ca_file = {{ backend.cacert_file }}
+{%- endif %}
+
+# If true, the vCenter server certificate is not verified. If false, then the
+# default CA truststore is used for verification. This option is ignored if
+# "vmware_ca_file" is set. (boolean value)
+#vmware_insecure = false
+{%- if backend.insecure is defined %}
+vmware_insecure = {{ backend.insecure }}
+{%- endif %}
+
+# Name of a vCenter compute cluster where volumes should be created. (multi
+# valued)
+#vmware_cluster_name =
+{%- for cluster_name in backend.cluster_names.split(',') %}
+vmware_cluster_name = {{ cluster_name }}
+{%- endfor %}
+
+# Maximum number of connections in http connection pool. (integer value)
+#vmware_connection_pool_size = 10
+{%- if backend.connection_pool_size is defined %}
+vmware_connection_pool_size = {{ backend.connection_pool_size }}
+{%- endif %}
+
+# Default adapter type to be used for attaching volumes. (string value)
+# Allowed values: lsiLogic, busLogic, lsiLogicsas, paraVirtual, ide
+#vmware_adapter_type = lsiLogic
+{%- if backend.adapter_type is defined %}
+vmware_adapter_type = {{ backend.adapter_type }}
+{%- endif %}
diff --git a/cinder/files/logging.conf b/cinder/files/logging.conf
deleted file mode 100644
index 779ae4d..0000000
--- a/cinder/files/logging.conf
+++ /dev/null
@@ -1,85 +0,0 @@
-{%- set log_handlers = [] -%}
-{%- for log_handler_name, log_handler_attrs in values.logging.log_handlers.items() %}
-  {%- if log_handler_attrs.get('enabled', False) %}
-    {%- do log_handlers.append(log_handler_name) -%}
-  {%- endif %}
-{%- endfor %}
-[loggers]
-keys = root, cinder, eventletwsgi
-
-[handlers]
-keys = {{ log_handlers | join(", ") }}
-
-[formatters]
-keys = context, default{% if values.logging.log_handlers.get('fluentd', {}).get('enabled', False) %}, fluentd{% endif %}
-
-[logger_root]
-level = WARNING
-handlers = {{ log_handlers | join(", ") }}
-
-[logger_cinder]
-level = INFO
-handlers = {{ log_handlers | join(", ") }}
-qualname = cinder
-propagate = 0
-
-[logger_amqplib]
-level = WARNING
-handlers = {{ log_handlers | join(", ") }}
-qualname = amqplib
-
-[logger_sqlalchemy]
-level = WARNING
-handlers = {{ log_handlers | join(", ") }}
-qualname = sqlalchemy
-# "level = INFO" logs SQL queries.
-# "level = DEBUG" logs SQL queries and results.
-# "level = WARNING" logs neither.  (Recommended for production systems.)
-
-[logger_boto]
-level = WARNING
-handlers = {{ log_handlers | join(", ") }}
-qualname = boto
-
-[logger_suds]
-level = INFO
-handlers = {{ log_handlers | join(", ") }}
-qualname = suds
-
-[logger_eventletwsgi]
-level = INFO
-handlers = {{ log_handlers | join(", ") }}
-qualname = eventlet.wsgi.server
-
-{%- if values.logging.log_handlers.get('fluentd', {}).get('enabled', False) %}
-[handler_fluentd]
-class = fluent.handler.FluentHandler
-args = ('openstack.{{ service_name | replace("-", ".") }}', 'localhost', 24224)
-formatter = fluentd
-{%- endif %}
-
-{%- if values.logging.log_handlers.watchedfile.enabled %}
-[handler_watchedfile]
-class = handlers.WatchedFileHandler
-args = ('/var/log/cinder/{{ service_name }}.log',)
-formatter = context
-{%- endif %}
-
-{% if values.logging.log_handlers.get('ossyslog', {}).get('enabled', False) -%}
-{%- set ossyslog_args = values.logging.log_handlers.ossyslog.get('args', {}) -%}
-[handler_ossyslog]
-class = oslo_log.handlers.OSSysLogHandler
-args = ( handlers.SysLogHandler.{{ ossyslog_args.get('facility', 'LOG_USER') }}, )
-formatter = context
-{%- endif %}
-
-[formatter_context]
-class = oslo_log.formatters.ContextFormatter
-
-[formatter_default]
-format = %(message)s
-
-{%- if values.logging.log_handlers.get('fluentd', {}).get('enabled', False) %}
-[formatter_fluentd]
-class = oslo_log.formatters.FluentFormatter
-{%- endif %}
diff --git a/cinder/files/queens/cinder.conf.controller.Debian b/cinder/files/queens/cinder.conf.controller.Debian
index c3750a7..6d89d7f 100644
--- a/cinder/files/queens/cinder.conf.controller.Debian
+++ b/cinder/files/queens/cinder.conf.controller.Debian
@@ -3260,6 +3260,7 @@
 {%- set _data = {} %}
 {%- do _data.update(controller.identity) %}
 {%- do _data.update(controller.get('barbican', {})) %}
+{%- if 'cacert_file' not in _data.keys() %}{% do _data.update({'cacert_file': controller.cacert_file}) %}{% endif %}
 #
 # From castellan.config
 #
diff --git a/cinder/files/queens/cinder.conf.volume.Debian b/cinder/files/queens/cinder.conf.volume.Debian
index 6c4e023..f8a34bd 100644
--- a/cinder/files/queens/cinder.conf.volume.Debian
+++ b/cinder/files/queens/cinder.conf.volume.Debian
@@ -310,6 +310,9 @@
 
 # IP address of this host (host address value)
 #my_ip = <HOST_IP_ADDRESS>
+{%- if volume.my_ip is defined %}
+my_ip = {{ volume.my_ip }}
+{%- endif %}
 
 # A list of the URLs of glance API servers available to cinder
 # ([http[s]://][hostname|ip]:port). If protocol is not specified it defaults to
@@ -3260,6 +3263,7 @@
 {%- set _data = {} %}
 {%- do _data.update(volume.identity) %}
 {%- do _data.update(volume.get('barbican', {})) %}
+{%- if 'cacert_file' not in _data.keys() %}{% do _data.update({'cacert_file': volume.cacert_file}) %}{% endif %}
 #
 # From castellan.config
 #
diff --git a/cinder/map.jinja b/cinder/map.jinja
index 1b3cb32..22fd95a 100644
--- a/cinder/map.jinja
+++ b/cinder/map.jinja
@@ -29,6 +29,7 @@
            'engine': None
         },
         'logging': {
+          'app_name': 'cinder',
           'log_appender': false,
           'log_handlers': {
             'watchedfile': {
@@ -59,6 +60,7 @@
           'engine': None
         },
         'logging': {
+          'app_name': 'cinder',
           'log_appender': false,
           'log_handlers': {
             'watchedfile': {
@@ -67,7 +69,7 @@
           },
         },
     },
-}, merge=pillar.cinder.get('controller', {}), base='BaseDefaults') %}
+}, merge=pillar.get('cinder', {}).get('controller', {}), base='BaseDefaults') %}
 
 {% set volume = salt['grains.filter_by']({
     'BaseDefaults': default_params,
@@ -91,6 +93,7 @@
           'control_exchange': 'cinder'
         },
         'logging': {
+          'app_name': 'cinder',
           'log_appender': false,
           'log_handlers': {
             'watchedfile': {
@@ -119,6 +122,7 @@
           'engine': None
         },
         'logging': {
+          'app_name': 'cinder',
           'log_appender': false,
           'log_handlers': {
             'watchedfile': {
@@ -127,7 +131,7 @@
           },
         },
     },
-}, merge=pillar.cinder.get('volume', {}), base='BaseDefaults') %}
+}, merge=pillar.get('cinder', {}).get('volume', {}), base='BaseDefaults') %}
 
 {% set client = salt['grains.filter_by']({
     'BaseDefaults': default_params,
@@ -137,7 +141,7 @@
     'RedHat': {
         'pkgs': ['python-cinderclient']
     },
-}, merge=pillar.cinder.get('client', {}), base='BaseDefaults') %}
+}, merge=pillar.get('cinder', {}).get('client', {}), base='BaseDefaults') %}
 
 {% set monitoring = salt['grains.filter_by']({
     'default': {
diff --git a/cinder/meta/sphinx.yml b/cinder/meta/sphinx.yml
index bfdf85b..10e50e7 100644
--- a/cinder/meta/sphinx.yml
+++ b/cinder/meta/sphinx.yml
@@ -24,9 +24,18 @@
         database_host:
           name: "Database"
           value: {{ controller.database.user }}@{{ controller.database.host }}:{{ controller.database.port }}//{{ controller.database.name }}
+        {%- set rabbit_port = controller.message_queue.get('port', 5671 if controller.message_queue.get('ssl',{}).get('enabled', False)  else 5672) %}
         message_queue_ip:
           name: "Message queue"
-          value: {{ controller.message_queue.user }}@{{ controller.message_queue.host }}:{{ controller.message_queue.port }}{{ controller.message_queue.virtual_host }}
+          {%- if controller.message_queue.members is defined %}
+          value: {% for member in controller.message_queue.members -%}
+                 {{ controller.message_queue.user }}:{{ controller.message_queue.password }}@{{ member.host }}:{{ member.get('port',rabbit_port) }}
+                 {%- if not loop.last -%},{%- endif -%}
+                 {%- endfor -%}
+                 /{{ controller.message_queue.virtual_host }}
+          {%- else %}
+          value:  {{ controller.message_queue.user }}:{{ controller.message_queue.password }}@{{ controller.message_queue.host }}:{{ rabbit_port }}/{{ controller.message_queue.virtual_host }}
+          {%- endif %}
         identity_host:
           name: "Identity service"
           value: {{ controller.identity.user }}@{{ controller.identity.host }}:{{ controller.identity.port }}
diff --git a/cinder/volume.sls b/cinder/volume.sls
index c7e8276..cde53fd 100644
--- a/cinder/volume.sls
+++ b/cinder/volume.sls
@@ -130,14 +130,14 @@
 {{ service_name }}_logging_conf:
   file.managed:
     - name: /etc/cinder/logging/logging-{{ service_name }}.conf
-    - source: salt://cinder/files/logging.conf
+    - source: salt://oslo_templates/files/logging/_logging.conf
     - template: jinja
     - makedirs: True
     - user: cinder
     - group: cinder
     - defaults:
         service_name: {{ service_name }}
-        values: {{ volume }}
+        _data: {{ volume.logging }}
     - require:
       - pkg: cinder_volume_packages
 {%- if volume.logging.log_handlers.get('fluentd', {}).get('enabled', False) %}
diff --git a/tests/pillar/lefthand_single.sls b/tests/pillar/lefthand_single.sls
index 024f2bc..cc23584 100644
--- a/tests/pillar/lefthand_single.sls
+++ b/tests/pillar/lefthand_single.sls
@@ -20,10 +20,10 @@
       password: pwd
       region: regionOne
     osapi:
-        host: 127.0.0.1
+      host: 127.0.0.1
     glance:
-        host: 127.0.0.1
-        port: 9292
+      host: 127.0.0.1
+      port: 9292
     logging:
       log_appender: false
       log_handlers:
@@ -72,8 +72,8 @@
       host: 127.0.0.1
     osapi_max_limit: 500
     glance:
-        host: 127.0.0.1
-        port: 9292
+      host: 127.0.0.1
+      port: 9292
     logging:
       log_appender: false
       log_handlers:
diff --git a/tests/pillar/netapp.sls b/tests/pillar/netapp.sls
index 4a81fee..3acc024 100644
--- a/tests/pillar/netapp.sls
+++ b/tests/pillar/netapp.sls
@@ -1,5 +1,10 @@
 cinder:
   controller:
+    osapi:
+      host: 127.0.0.1
+    glance:
+      host: 127.0.0.1
+      port: 9292
     enabled: true
     version: mitaka
     message_queue:
diff --git a/tests/pillar/nfs.sls b/tests/pillar/nfs.sls
index c0edf25..09689ef 100644
--- a/tests/pillar/nfs.sls
+++ b/tests/pillar/nfs.sls
@@ -1,5 +1,10 @@
 cinder:
   controller:
+    osapi:
+      host: 127.0.0.1
+    glance:
+      host: 127.0.0.1
+      port: 9292
     enabled: true
     version: liberty
     default_volume_type: nfs-driver
diff --git a/tests/pillar/volume_single_vmware.sls b/tests/pillar/volume_single_vmware.sls
new file mode 100644
index 0000000..1d3bcbf
--- /dev/null
+++ b/tests/pillar/volume_single_vmware.sls
@@ -0,0 +1,96 @@
+cinder:
+  volume:
+    enabled: true
+    version: pike
+    osapi:
+      host: 127.0.0.1
+    database:
+      engine: mysql
+      host: 127.0.0.1
+      port: 3306
+      name: cinder
+      user: cinder
+      password: password
+    identity:
+      engine: keystone
+      host: 127.0.0.1
+      port: 35357
+      tenant: service
+      user: cinder
+      password: password
+      endpoint_type: internalURL
+      region: regionOne
+    glance:
+      host: 127.0.0.1
+      port: 9292
+    logging:
+      log_appender: false
+      log_handlers:
+        watchedfile:
+          enabled: true
+        fluentd:
+          enabled: false
+        ossyslog:
+          enabled: false
+    message_queue:
+      engine: rabbitmq
+      host: 127.0.0.1
+      port: 5672
+      user: openstack
+      password: password
+      virtual_host: '/openstack'
+    storage:
+      engine: storwize
+      host: 192.168.0.1
+      port: 22
+      user: username
+      password: pass
+    backend:
+      vmware:
+        engine: vmware
+        type_name: vmware-driver
+        host_username: vmware
+        host_password: vmware
+        cluster_names: vmware_cluster01
+apache:
+  server:
+    enabled: true
+    default_mpm: event
+    mpm:
+      prefork:
+        enabled: true
+        servers:
+          start: 5
+          spare:
+            min: 2
+            max: 10
+        max_requests: 0
+        max_clients: 20
+        limit: 20
+    site:
+      cinder:
+        enabled: false
+        available: true
+        type: wsgi
+        name: cinder
+        wsgi:
+          daemon_process: cinder-wsgi
+          processes: 5
+          threads: 1
+          user: cinder
+          group: cinder
+          display_name: '%{GROUP}'
+          script_alias: '/ /usr/bin/cinder-wsgi'
+          application_group: '%{GLOBAL}'
+          authorization: 'On'
+        host:
+          address: 127.0.0.1
+          name: 127.0.0.1
+          port: 8776
+        log:
+          custom:
+            format: >-
+              %v:%p %{X-Forwarded-For}i %h %l %u %t \"%r\" %>s %D %O \"%{Referer}i\" \"%{User-Agent}i\"
+          error:
+            enabled: true
+            format: '%M'
diff --git a/tests/run_tests.sh b/tests/run_tests.sh
index ae7d68f..9761585 100755
--- a/tests/run_tests.sh
+++ b/tests/run_tests.sh
@@ -6,11 +6,13 @@
 CURDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
 METADATA=${CURDIR}/../metadata.yml
 FORMULA_NAME=$(cat $METADATA | python -c "import sys,yaml; print yaml.load(sys.stdin)['name']")
+FORMULA_META_DIR=${CURDIR}/../${FORMULA_NAME}/meta
 
 ## Overrideable parameters
 PILLARDIR=${PILLARDIR:-${CURDIR}/pillar}
 BUILDDIR=${BUILDDIR:-${CURDIR}/build}
 VENV_DIR=${VENV_DIR:-${BUILDDIR}/virtualenv}
+MOCK_BIN_DIR=${MOCK_BIN_DIR:-${CURDIR}/mock_bin}
 DEPSDIR=${BUILDDIR}/deps
 
 SALT_FILE_DIR=${SALT_FILE_DIR:-${BUILDDIR}/file_root}
@@ -18,7 +20,7 @@
 SALT_CONFIG_DIR=${SALT_CONFIG_DIR:-${BUILDDIR}/salt}
 SALT_CACHE_DIR=${SALT_CACHE_DIR:-${SALT_CONFIG_DIR}/cache}
 
-SALT_OPTS="${SALT_OPTS} --retcode-passthrough --local -c ${SALT_CONFIG_DIR}"
+SALT_OPTS="${SALT_OPTS} --retcode-passthrough --local -c ${SALT_CONFIG_DIR} --log-file=/dev/null"
 
 if [ "x${SALT_VERSION}" != "x" ]; then
     PIP_SALT_VERSION="==${SALT_VERSION}"
@@ -40,10 +42,20 @@
     python -m pip install salt${PIP_SALT_VERSION}
 }
 
+setup_mock_bin() {
+    # If some state requires a binary, a lightweight replacement for
+    # such binary can be put into MOCK_BIN_DIR for test purposes
+    if [ -d "${MOCK_BIN_DIR}" ]; then
+        PATH="${MOCK_BIN_DIR}:$PATH"
+        export PATH
+    fi
+}
+
 setup_pillar() {
     [ ! -d ${SALT_PILLAR_DIR} ] && mkdir -p ${SALT_PILLAR_DIR}
     echo "base:" > ${SALT_PILLAR_DIR}/top.sls
     for pillar in ${PILLARDIR}/*; do
+        grep ${FORMULA_NAME}: ${pillar} &>/dev/null || continue
         state_name=$(basename ${pillar%.sls})
         echo -e "  ${state_name}:\n    - ${state_name}" >> ${SALT_PILLAR_DIR}/top.sls
     done
@@ -56,6 +68,7 @@
 
     echo "base:" > ${SALT_FILE_DIR}/top.sls
     for pillar in ${PILLARDIR}/*.sls; do
+        grep ${FORMULA_NAME}: ${pillar} &>/dev/null || continue
         state_name=$(basename ${pillar%.sls})
         echo -e "  ${state_name}:\n    - ${FORMULA_NAME}" >> ${SALT_FILE_DIR}/top.sls
     done
@@ -119,6 +132,7 @@
     [ -d ${BUILDDIR} ] && mkdir -p ${BUILDDIR}
 
     which salt-call || setup_virtualenv
+    setup_mock_bin
     setup_pillar
     setup_salt
     install_dependencies
@@ -128,7 +142,26 @@
     for pillar in ${PILLARDIR}/*.sls; do
         grep ${FORMULA_NAME}: ${pillar} &>/dev/null || continue
         state_name=$(basename ${pillar%.sls})
+        salt_run grains.set 'noservices' False force=True
+
+        echo "Checking state ${FORMULA_NAME}.${state_name} ..."
         salt_run --id=${state_name} state.show_sls ${FORMULA_NAME} || (log_err "Execution of ${FORMULA_NAME}.${state_name} failed"; exit 1)
+
+        # Check that all files in 'meta' folder can be rendered using any valid pillar
+        for meta in `find ${FORMULA_META_DIR} -type f`; do
+            meta_name=$(basename ${meta})
+            echo "Checking meta ${meta_name} ..."
+            salt_run --out=quiet --id=${state_name} cp.get_template ${meta} ${SALT_CACHE_DIR}/${meta_name} \
+              || (log_err "Failed to render meta ${meta} using pillar ${FORMULA_NAME}.${state_name}"; exit 1)
+            cat ${SALT_CACHE_DIR}/${meta_name}
+        done
+    done
+}
+
+real_run() {
+    for pillar in ${PILLARDIR}/*.sls; do
+        state_name=$(basename ${pillar%.sls})
+        salt_run --id=${state_name} state.sls ${FORMULA_NAME} || (log_err "Execution of ${FORMULA_NAME}.${state_name} failed"; exit 1)
     done
 }
 
@@ -157,6 +190,9 @@
     run)
         run
         ;;
+    real-run)
+        real_run
+        ;;
     *)
         prepare
         run