Add cinder client role for managing volume types

Change-Id: I10cf4eaee94154a4acdcf6931d19cbabd3fc1b04
diff --git a/README.rst b/README.rst
index 5bc4018..7a19ad0 100644
--- a/README.rst
+++ b/README.rst
@@ -581,6 +581,31 @@
 The storage availability zone is the actual zone where the node belongs to. Make sure to specify this per node.
 Check the documentation of OpenStack for more information
 
+
+Client role
+
+Ensures that the configured volume types exist and that their extra specs
+(the ``volume_backend_name`` of each backend plus any additional keys)
+are set:
+
+.. code-block:: yaml
+
+    cinder:
+      client:
+        enabled: true
+        identity:
+          host: 127.0.0.1
+          port: 35357
+          project: service
+          user: cinder
+          password: pwd
+          protocol: http
+          endpoint_type: internalURL
+          region_name: RegionOne
+        backend:
+          ceph:
+            type_name: standard-iops
+            engine: ceph
+            key:
+              conn_speed: fibre-10G
+
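+The ``identity`` section can also be given as the name of an identity
+definition from the keystone formula pillar instead of inline credentials.
+A minimal sketch, assuming a ``keystone:client:server:admin_identity``
+entry exists in pillar:
+
+.. code-block:: yaml
+
+    cinder:
+      client:
+        enabled: true
+        identity: admin_identity  # hypothetical keystone:client:server entry
+        backend:
+          ceph:
+            type_name: standard-iops
+            engine: ceph
+            key:
+              conn_speed: fibre-10G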
+
 Documentation and Bugs
 ============================
 
diff --git a/_modules/cinderng.py b/_modules/cinderng.py
new file mode 100644
index 0000000..cd71348
--- /dev/null
+++ b/_modules/cinderng.py
@@ -0,0 +1,144 @@
+# -*- coding: utf-8 -*-
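+'''
+Execution module for managing cinder volumes and volume types
+via python-cinderclient.
+'''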
+import logging
+from functools import wraps
+LOG = logging.getLogger(__name__)
+
+# Import third party libs
+HAS_CINDER = False
+try:
+    from cinderclient.v3 import client
+    HAS_CINDER = True
+except ImportError:
+    pass
+
+__opts__ = {}
+
+
+def __virtual__():
+    '''
+    Only load this module if cinder
+    is installed on this minion.
+    '''
+    if HAS_CINDER:
+        return 'cinderng'
+    return False
+
+def _authng(profile=None):
+    '''
+    Build cinderclient credentials from the given connection profile.
+    '''
+    credentials = {
+        'username': profile['user'],
+        'password': profile['password'],
+        'project_id': profile['project_id'],
+        'auth_url': '{0}://{1}:{2}/v3'.format(
+            profile['protocol'], profile['host'], profile['port']),
+        'endpoint_type': profile['endpoint_type'],
+        'certificate': profile['certificate'],
+        'region_name': profile['region_name']
+    }
+    # client.sls passes the string 'None' when no certificate is configured;
+    # normalise it so cacert is not treated as a file path.
+    if credentials['certificate'] == 'None':
+        credentials['certificate'] = None
+    return credentials
+
+def create_conn(cred=None):
+    '''
+    Create a cinderclient connection from the given credentials.
+    '''
+    # cinderclient expects the password in the api_key argument
+    return client.Client(
+        username=cred['username'], api_key=cred['password'],
+        project_id=cred['project_id'], auth_url=cred['auth_url'],
+        endpoint_type=cred['endpoint_type'], cacert=cred['certificate'],
+        region_name=cred['region_name'])
+
+def list_volumes(profile=None, **kwargs):
+    '''
+    Return list of cinder volumes.
+    '''
+    cred = _authng(profile)
+    nt = create_conn(cred)
+    return nt.volumes.list()
+
+def list_volume_type(profile=None, **kwargs):
+    '''
+    Return list of volume types
+    '''
+    cred = _authng(profile)
+    nt = create_conn(cred)
+    return nt.volume_types.list()
+
+def get_volume_type(type_name, profile=None, **kwargs):
+    '''
+    Return the volume type with the specified name, or None if not found.
+    '''
+    vt_id = None
+    for vt in list_volume_type(profile):
+        if vt.name == type_name:
+            vt_id = vt.id
+            break
+
+    if not vt_id:
+        return None
+    cred = _authng(profile)
+    nt = create_conn(cred)
+    try:
+        return nt.volume_types.get(vt_id)
+    except Exception:
+        LOG.exception('Failed to get volume type %s', type_name)
+        return None
+
+def create_volume_type(type_name, profile=None, **kwargs):
+    '''
+    Create cinder volume type
+    '''
+    vt = get_volume_type(type_name, profile)
+    if not vt:
+        cred = _authng(profile)
+        nt = create_conn(cred)
+        try:
+            nt.volume_types.create(type_name)
+            return 'created'
+        except Exception:
+            LOG.exception('Failed to create volume type %s', type_name)
+            return 'failed'
+    else:
+        return 'exists'
+
+
+def get_keys_volume_type(type_name, profile=None, **kwargs):
+    '''
+    Return extra specs of the specified volume type.
+    '''
+
+    vt = get_volume_type(type_name, profile)
+    if vt:
+        try:
+            return vt.get_keys()
+        except Exception:
+            LOG.exception('Failed to get extra specs of volume type %s',
+                          type_name)
+            return 'failed'
+    else:
+        return
+
+def set_keys_volume_type(type_name, keys=None, profile=None, **kwargs):
+    '''
+    Set extra specs on the specified volume type.
+    '''
+    keys = keys or {}
+    set_keys = False
+    vt = get_volume_type(type_name, profile)
+    if vt:
+        current = get_keys_volume_type(type_name, profile)
+        if not isinstance(current, dict) or not current:
+            set_keys = True
+        else:
+            # Update only if a requested key is missing or has a different
+            # value than the one currently set on the volume type.
+            for key in keys:
+                if current.get(key) != keys[key]:
+                    set_keys = True
+
+        if set_keys:
+            try:
+                vt.set_keys(keys)
+                return 'updated'
+            except Exception:
+                LOG.exception('Failed to set extra specs on volume type %s',
+                              type_name)
+                return 'failed'
+        else:
+            return 'exist'
+    else:
+        return 'not found'
diff --git a/_states/cinderng.py b/_states/cinderng.py
new file mode 100644
index 0000000..e39a4d0
--- /dev/null
+++ b/_states/cinderng.py
@@ -0,0 +1,109 @@
+# -*- coding: utf-8 -*-
+'''
+Management of Cinder resources
+===============================
+:depends:   - cinderclient Python module
+'''
+import logging
+from functools import wraps
+LOG = logging.getLogger(__name__)
+
+
+def __virtual__():
+    '''
+    Only load if the cinderng execution module is available in __salt__.
+    '''
+    if 'cinderng.create_volume_type' in __salt__:
+        return 'cinderng'
+    return False
+
+def volume_type_present(name=None, profile=None):
+    '''
+    Ensures that the specified volume type is present.
+    '''
+    ret = {'name': name,
+           'changes': {},
+           'result': True,
+           'comment': 'Volume type "{0}" already exists'.format(name)}
+    signal = __salt__['cinderng.create_volume_type'](name, profile)
+    if 'exists' in signal:
+        pass
+    elif 'created' in signal:
+        ret['comment'] = 'Volume type {0} has been created'.format(name)
+        ret['changes']['Volume type'] = 'Created'
+    elif 'failed' in signal:
+        ret = {'name': name,
+               'changes': {},
+               'result': False,
+               'comment': 'Volume type "{0}" failed to create'.format(name)}
+    return ret
+
+def volume_type_key_present(name=None, key=None, value=None, profile=None):
+    '''
+    Ensures that the extra specs are present on a volume type.
+    '''
+    keys = "{u'" + key + "': u'" + value + "'}"
+    keys = ast.literal_eval(keys)
+    ret = {'name': name,
+           'changes': {},
+           'result': True,
+           'comment': 'Volume type keys "{0}" in volume type "{1}" already exist'.format(keys, name)}
+    signal = __salt__['cinderng.set_keys_volume_type'](name, keys, profile)
+    if 'exist' in signal:
+        pass
+    elif 'updated' in signal:
+        ret['comment'] = 'Volume type keys "{0}" in volume type "{1}" have been updated'.format(keys, name)
+        ret['changes']['Volume type keys'] = 'Updated'
+    elif 'failed' in signal:
+        ret = {'name': name,
+               'changes': {},
+               'result': False,
+               'comment': 'Volume type keys "{0}" in volume type "{1}" failed to update'.format(keys, name)}
+    elif 'not found' in signal:
+        ret = {'name': name,
+               'changes': {},
+               'result': False,
+               'comment': 'Volume type "{0}" was not found'.format(name)}
+    return ret
+
+
+def _already_exists(name, resource):
+    changes_dict = {'name': name,
+                    'changes': {},
+                    'result': True}
+    changes_dict['comment'] = \
+        '{0} {1} already exists'.format(resource, name)
+    return changes_dict
+
+
+def _created(name, resource, resource_definition):
+    changes_dict = {'name': name,
+                    'changes': resource_definition,
+                    'result': True,
+                    'comment': '{0} {1} created'.format(resource, name)}
+    return changes_dict
+
+def _updated(name, resource, resource_definition):
+    changes_dict = {'name': name,
+                    'changes': resource_definition,
+                    'result': True,
+                    'comment': '{0} {1} was updated'.format(resource, name)}
+    return changes_dict
+
+def _update_failed(name, resource):
+    changes_dict = {'name': name,
+                    'changes': {},
+                    'comment': '{0} {1} failed to update'.format(resource, name),
+                    'result': False}
+    return changes_dict
+
+def _no_change(name, resource, test=False):
+    changes_dict = {'name': name,
+                    'changes': {},
+                    'result': True}
+    if test:
+        changes_dict['comment'] = \
+            '{0} {1} will be {2}'.format(resource, name, test)
+    else:
+        changes_dict['comment'] = \
+            '{0} {1} is in correct state'.format(resource, name)
+    return changes_dict
diff --git a/cinder/client.sls b/cinder/client.sls
new file mode 100644
index 0000000..bdb3765
--- /dev/null
+++ b/cinder/client.sls
@@ -0,0 +1,57 @@
+{%- from "cinder/map.jinja" import client with context %}
+{%- if client.get('enabled', False) %}
+
+cinder_client_packages:
+  pkg.installed:
+  - names: {{ client.pkgs }}
+
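+{#- identity can be an inline mapping of credentials or the name of a
+    keystone:client:server entry to read from pillar -#}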
+{% if client.identity is mapping %}
+{%- set identity = client.identity %}
+{%- else %}
+{%- set identity = salt['pillar.get']('keystone:client:server:'+client.identity) %}
+{%- endif %}
+
+{%- set credentials = {'host': identity.host,
+                       'user': identity.user,
+                       'password': identity.password,
+                       'project_id': identity.project,
+                       'port': identity.get('port', 35357),
+                       'protocol': identity.get('protocol', 'http'),
+                       'region_name': identity.get('region_name', 'RegionOne'),
+                       'endpoint_type': identity.get('endpoint_type', 'internalURL'),
+                       'certificate': identity.get('certificate', 'None')} %}
+
+{%- for backend_name, backend in client.get('backend', {}).items() %}
+
+cinder_type_create_{{ backend_name }}:
+  cinderng.volume_type_present:
+  - name: {{ backend.type_name }}
+  - profile: {{ credentials }}
+  - require:
+    - pkg: cinder_client_packages
+
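+{#- volume_backend_name ties the volume type to this backend so the cinder
+    scheduler places volumes of this type on it -#}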
+cinder_type_update_{{ backend_name }}:
+  cinderng.volume_type_key_present:
+  - name: {{ backend.type_name }}
+  - key: volume_backend_name
+  - value: {{ backend_name }}
+  - profile: {{ credentials }}
+  - require:
+    - cinderng: cinder_type_create_{{ backend_name }}
+
+{%- for key_name, key_value in backend.get('key', {}).items() %}
+
+cinder_type_update_{{ backend_name }}_{{ key_name }}:
+  cinderng.volume_type_key_present:
+  - name: {{ backend.type_name }}
+  - key: {{ key_name }}
+  - value: {{ key_value }}
+  - profile: {{ credentials }}
+  - require:
+    - cinderng: cinder_type_create_{{ backend_name }}
+
+{%- endfor %}
+
+{%- endfor %}
+
+{%- endif %}
diff --git a/cinder/controller.sls b/cinder/controller.sls
index af5194d..8a810de 100644
--- a/cinder/controller.sls
+++ b/cinder/controller.sls
@@ -107,7 +107,7 @@
 
 cinder_syncdb:
   cmd.run:
-  - name: cinder-manage db sync
+  - name: 'cinder-manage db sync; sleep 5;'
   {%- if grains.get('noservices') %}
   - onlyif: /bin/false
   {%- endif %}
diff --git a/cinder/init.sls b/cinder/init.sls
index 6f16b57..43133e5 100644
--- a/cinder/init.sls
+++ b/cinder/init.sls
@@ -6,3 +6,6 @@
 {% if pillar.cinder.volume is defined %}
 - cinder.volume
 {% endif %}
+{% if pillar.cinder.client is defined %}
+- cinder.client
+{% endif %}
diff --git a/cinder/map.jinja b/cinder/map.jinja
index 6808dae..a19420a 100644
--- a/cinder/map.jinja
+++ b/cinder/map.jinja
@@ -73,3 +73,11 @@
     },
 }, merge=pillar.cinder.get('volume', {})) %}
 
+{% set client = salt['grains.filter_by']({
+    'Debian': {
+        'pkgs': ['python-cinderclient']
+    },
+    'RedHat': {
+        'pkgs': ['python-cinderclient']
+    },
+}, merge=pillar.cinder.get('client', {})) %}
diff --git a/tests/pillar/client.sls b/tests/pillar/client.sls
new file mode 100644
index 0000000..4dfd0bd
--- /dev/null
+++ b/tests/pillar/client.sls
@@ -0,0 +1,18 @@
+cinder:
+  client:
+    enabled: true
+    identity:
+      host: 127.0.0.1
+      port: 35357
+      project: service
+      user: cinder
+      password: pwd
+      protocol: http
+      endpoint_type: internalURL
+      region_name: RegionOne
+    backend:
+      ceph:
+        type_name: standard-iops
+        engine: ceph
+        key:
+          conn_speed: fibre-10G