Merge pull request #4 from salt-formulas/pr_add_novcproxy_host
add novncproxy_host
diff --git a/README.rst b/README.rst
index c5080b7..f358685 100644
--- a/README.rst
+++ b/README.rst
@@ -130,6 +130,7 @@
version: juno
enabled: true
virtualization: kvm
+ availability_zone: availability_zone_01
security_group: true
bind:
vnc_address: 172.20.0.100
@@ -251,16 +252,73 @@
server:
identity:
flavor:
- jirka-flavor1:
+ flavor1:
flavor_id: 10
ram: 4096
disk: 10
vcpus: 1
+ flavor2:
+ flavor_id: auto
+ ram: 4096
+ disk: 20
+ vcpus: 2
identity1:
flavor:
...
+Availability zones
+------------------
+
+Define the availability zones that the client state should create:
+
+.. code-block:: yaml
+
+ nova:
+ client:
+ enabled: true
+ server:
+ identity:
+ availability_zones:
+ - availability_zone_01
+ - availability_zone_02
+
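+The configured zones can then be verified through the ``novang`` execution
+module shipped with this formula, once it has been synced to the minion (a
+quick manual check, not part of the state run):
+
+.. code-block:: bash
+
+    salt '*' novang.availability_zone_list
+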
+SRIOV
+------
+
+Add the PciPassthroughFilter to the scheduler filters and whitelist the SR-IOV capable NICs on specific compute nodes.
+
+.. code-block:: yaml
+
+ nova:
+ controller:
+ sriov: true
+ scheduler_default_filters: "DifferentHostFilter,RetryFilter,AvailabilityZoneFilter,RamFilter,CoreFilter,DiskFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,PciPassthroughFilter"
+
+ nova:
+ compute:
+ sriov:
+ nic_one:
+ devname: eth1
+ physical_network: physnet1
+
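+Each ``sriov`` entry is rendered into a ``pci_passthrough_whitelist`` line of
+the compute configuration. A quick check on a compute node (assuming the
+Debian default path used by this formula's templates):
+
+.. code-block:: bash
+
+    grep pci_passthrough_whitelist /etc/nova/nova-compute.conf
+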
+CPU pinning & Hugepages
+-----------------------
+
+Pin virtual machine instances to dedicated physical CPU cores and configure
+the hugepages mount points for libvirt.
+
+.. code-block:: yaml
+
+ nova:
+ controller:
+ scheduler_default_filters: "DifferentHostFilter,RetryFilter,AvailabilityZoneFilter,RamFilter,CoreFilter,DiskFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,NUMATopologyFilter,AggregateInstanceExtraSpecsFilter"
+
+ nova:
+ compute:
+ vcpu_pin_set: 2,3,4,5
+ hugepages:
+ mount_points:
+ - path: /mnt/hugepages_1GB
+ - path: /mnt/hugepages_2MB
+
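+Both settings can be verified in the rendered files on the compute node
+(paths assumed from this formula's templates):
+
+.. code-block:: bash
+
+    grep vcpu_pin_set /etc/nova/nova-compute.conf
+    grep hugetlbfs_mount /etc/libvirt/qemu.conf
+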
Documentation and Bugs
============================
diff --git a/_modules/novang.py b/_modules/novang.py
new file mode 100644
index 0000000..d2009ae
--- /dev/null
+++ b/_modules/novang.py
@@ -0,0 +1,245 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+# Import python libs
+import logging
+
+# Import salt libs
+import salt.utils.openstack.nova as suon
+
+# Get logging started
+log = logging.getLogger(__name__)
+
+# Function alias to not shadow built-ins
+__func_alias__ = {
+ 'list_': 'list'
+}
+
+# Define the module's virtual name
+__virtualname__ = 'novang'
+
+
+def __virtual__():
+ '''
+ Only load this module if nova
+ is installed on this minion.
+ '''
+ if suon.check_nova():
+ return __virtualname__
+ return (False, 'The nova execution module failed to load: '
+ 'only available if nova is installed.')
+
+
+__opts__ = {}
+
+
+def _auth(profile=None, tenant_name=None):
+ '''
+ Set up nova credentials
+ '''
+ if profile:
+ credentials = __salt__['config.option'](profile)
+ user = credentials['keystone.user']
+ password = credentials['keystone.password']
+ if tenant_name:
+ tenant = tenant_name
+ else:
+ tenant = credentials['keystone.tenant']
+ auth_url = credentials['keystone.auth_url']
+ region_name = credentials.get('keystone.region_name', None)
+ api_key = credentials.get('keystone.api_key', None)
+ os_auth_system = credentials.get('keystone.os_auth_system', None)
+ else:
+ user = __salt__['config.option']('keystone.user')
+ password = __salt__['config.option']('keystone.password')
+ tenant = __salt__['config.option']('keystone.tenant')
+ auth_url = __salt__['config.option']('keystone.auth_url')
+ region_name = __salt__['config.option']('keystone.region_name')
+ api_key = __salt__['config.option']('keystone.api_key')
+ os_auth_system = __salt__['config.option']('keystone.os_auth_system')
+ kwargs = {
+ 'username': user,
+ 'password': password,
+ 'api_key': api_key,
+ 'project_id': tenant,
+ 'auth_url': auth_url,
+ 'region_name': region_name,
+ 'os_auth_plugin': os_auth_system
+ }
+ return suon.SaltNova(**kwargs)
+
+
+def server_list(profile=None, tenant_name=None):
+ '''
+ Return list of active servers
+ CLI Example:
+ .. code-block:: bash
+        salt '*' novang.server_list
+ '''
+ conn = _auth(profile, tenant_name)
+ return conn.server_list()
+
+
+def server_get(name, tenant_name=None, profile=None):
+ '''
+    Return the instance ID of the named server
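+    CLI Example:
+    .. code-block:: bash
+        salt '*' novang.server_get myinstance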
+ '''
+ items = server_list(profile, tenant_name)
+ instance_id = None
+ for key, value in items.iteritems():
+ if key == name:
+ instance_id = value['id']
+ return instance_id
+
+
+def get_connection_args(profile=None):
+ '''
+ Set up profile credentials
+ '''
+ if profile:
+ credentials = __salt__['config.option'](profile)
+ user = credentials['keystone.user']
+ password = credentials['keystone.password']
+ tenant = credentials['keystone.tenant']
+        auth_url = credentials['keystone.auth_url']
+    else:
+        user = __salt__['config.option']('keystone.user')
+        password = __salt__['config.option']('keystone.password')
+        tenant = __salt__['config.option']('keystone.tenant')
+        auth_url = __salt__['config.option']('keystone.auth_url')
+
+ kwargs = {
+ 'username': user,
+ 'password': password,
+ 'tenant': tenant,
+ 'auth_url': auth_url
+ }
+ return kwargs
+
+
+def quota_list(tenant_name, profile=None):
+ '''
+ list quotas of a tenant
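+    CLI Example:
+    .. code-block:: bash
+        salt '*' novang.quota_list tenant01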
+ '''
+ connection_args = get_connection_args(profile)
+ tenant = __salt__['keystone.tenant_get'](name=tenant_name, profile=profile, **connection_args)
+ tenant_id = tenant[tenant_name]['id']
+ conn = _auth(profile)
+ nt_ks = conn.compute_conn
+ item = nt_ks.quotas.get(tenant_id).__dict__
+ return item
+
+
+def quota_get(name, tenant_name, profile=None):
+ '''
+ get specific quota value of a tenant
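+    CLI Example:
+    .. code-block:: bash
+        salt '*' novang.quota_get cores tenant01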
+ '''
+ item = quota_list(tenant_name, profile)
+ quota_value = item[name]
+ return quota_value
+
+
+def quota_update(tenant_name, profile=None, **quota_argument):
+ '''
+ update quota of specified tenant
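+    CLI Example:
+    .. code-block:: bash
+        salt '*' novang.quota_update tenant01 instances=10 cores=20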
+ '''
+ connection_args = get_connection_args(profile)
+ tenant = __salt__['keystone.tenant_get'](name=tenant_name, profile=profile, **connection_args)
+ tenant_id = tenant[tenant_name]['id']
+ conn = _auth(profile)
+ nt_ks = conn.compute_conn
+ item = nt_ks.quotas.update(tenant_id, **quota_argument)
+ return item
+
+
+def secgroup_list(profile=None, tenant_name=None):
+ '''
+ Return a list of available security groups (nova items-list)
+ CLI Example:
+ .. code-block:: bash
+        salt '*' novang.secgroup_list
+ '''
+ conn = _auth(profile, tenant_name)
+ return conn.secgroup_list()
+
+
+def boot(name, flavor_id=0, image_id=0, profile=None, tenant_name=None, timeout=300, **kwargs):
+ '''
+ Boot (create) a new instance
+ name
+ Name of the new instance (must be first)
+ flavor_id
+ Unique integer ID for the flavor
+ image_id
+ Unique integer ID for the image
+ timeout
+ How long to wait, after creating the instance, for the provider to
+ return information about it (default 300 seconds).
+ CLI Example:
+ .. code-block:: bash
+        salt '*' novang.boot myinstance flavor_id=4596 image_id=2
+ The flavor_id and image_id are obtained from nova.flavor_list and
+ nova.image_list
+ .. code-block:: bash
+ salt '*' nova.flavor_list
+ salt '*' nova.image_list
+ '''
+ conn = _auth(profile, tenant_name)
+ return conn.boot(name, flavor_id, image_id, timeout, **kwargs)
+
+
+def network_show(name, profile=None):
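+    '''
+    Return information about the named network
+    CLI Example:
+    .. code-block:: bash
+        salt '*' novang.network_show network01
+    '''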
+ conn = _auth(profile)
+ return conn.network_show(name)
+
+
+def availability_zone_list(profile=None):
+ '''
+    list existing availability zones (host aggregates)
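+    CLI Example:
+    .. code-block:: bash
+        salt '*' novang.availability_zone_list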
+ '''
+ conn = _auth(profile)
+ nt_ks = conn.compute_conn
+ ret = nt_ks.aggregates.list()
+ return ret
+
+
+def availability_zone_get(name, profile=None):
+ '''
+    check whether the named availability zone exists
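+    CLI Example:
+    .. code-block:: bash
+        salt '*' novang.availability_zone_get zone01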
+ '''
+ conn = _auth(profile)
+ nt_ks = conn.compute_conn
+    zone_exists = False
+ items = availability_zone_list(profile)
+ for p in items:
+ item = nt_ks.aggregates.get(p).__getattr__('name')
+ if item == name:
+ zone_exists = True
+ return zone_exists
+
+
+def availability_zone_create(name, availability_zone, profile=None):
+ '''
+ create availability zone
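+    CLI Example:
+    .. code-block:: bash
+        salt '*' novang.availability_zone_create aggregate01 zone01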
+ '''
+ conn = _auth(profile)
+ nt_ks = conn.compute_conn
+ item = nt_ks.aggregates.create(name, availability_zone)
+ ret = {
+ 'Id': item.__getattr__('id'),
+ 'Aggregate Name': item.__getattr__('name'),
+ 'Availability Zone': item.__getattr__('availability_zone'),
+ }
+ return ret
diff --git a/_states/novang.py b/_states/novang.py
new file mode 100644
index 0000000..46fae9c
--- /dev/null
+++ b/_states/novang.py
@@ -0,0 +1,169 @@
+# -*- coding: utf-8 -*-
+'''
+Nova state that ensures that defined flavor is present
+'''
+import logging
+LOG = logging.getLogger(__name__)
+
+
+def __virtual__():
+ '''
+ Only load if the nova module is in __salt__
+ '''
+ return 'novang' if 'nova.flavor_list' in __salt__ else False
+
+
+def flavor_present(name, flavor_id=0, ram=0, disk=0, vcpus=1, profile=None):
+ '''
+ Ensures that the nova flavor exists
+ '''
+ ret = {'name': name,
+ 'changes': {},
+ 'result': True,
+ 'comment': 'Flavor "{0}" already exists'.format(name)}
+ project = __salt__['nova.flavor_list'](profile)
+ if 'Error' in project:
+ pass
+ elif name in project:
+ pass
+ else:
+ __salt__['nova.flavor_create'](name, flavor_id, ram, disk, vcpus, profile)
+ ret['comment'] = 'Flavor {0} has been created'.format(name)
+ ret['changes']['Flavor'] = 'Created'
+ return ret
+
+
+def quota_present(tenant_name, profile, name=None, **kwargs):
+ '''
+ Ensures that the nova quota exists
+ '''
+ changes = {}
+ for key, value in kwargs.items():
+ quota = __salt__['novang.quota_get'](key, tenant_name, profile)
+ if quota != value:
+ arg = {}
+ arg[key] = value
+ changes[key] = value
+ __salt__['novang.quota_update'](tenant_name, profile, **arg)
+ if bool(changes):
+ return _updated(tenant_name, 'tenant', changes)
+ else:
+ return _no_change(tenant_name, 'tenant')
+
+
+def availability_zone_present(name=None, availability_zone=None, profile=None):
+ '''
+ Ensures that the nova availability zone exists
+ '''
+    name = availability_zone
+    zone_exists = __salt__['novang.availability_zone_get'](name, profile)
+    if zone_exists:
+        return _already_exists(availability_zone, 'availability zone')
+    item_created = __salt__['novang.availability_zone_create'](name, availability_zone, profile)
+    return _created(availability_zone, 'availability zone', item_created)
+
+
+def instance_present(name, flavor, image, networks, security_groups=None, profile=None, tenant_name=None):
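+    '''
+    Ensures that the nova instance exists
+    '''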
+ ret = {'name': name,
+ 'changes': {},
+ 'result': True,
+ 'comment': 'Instance "{0}" already exists'.format(name)}
+ kwargs = {}
+ nics = []
+ existing_instances = __salt__['novang.server_list'](profile, tenant_name)
+ if name in existing_instances:
+ return ret
+ existing_flavors = __salt__['nova.flavor_list'](profile)
+ if flavor in existing_flavors:
+ flavor_id = existing_flavors[flavor]['id']
+ else:
+ return {'name': name,
+ 'changes': {},
+ 'result': False,
+                'comment': 'Flavor "{0}" doesn\'t exist'.format(flavor)}
+
+ existing_image = __salt__['nova.image_list'](image, profile)
+ if not existing_image:
+ return {'name': name,
+ 'changes': {},
+ 'result': False,
+                'comment': 'Image "{0}" doesn\'t exist'.format(image)}
+ else:
+ image_id = existing_image.get(image).get('id')
+ if security_groups is not None:
+ kwargs['security_groups'] = []
+ for secgroup in security_groups:
+ existing_secgroups = __salt__['novang.secgroup_list'](profile, tenant_name)
+            if secgroup not in existing_secgroups:
+ return {'name': name,
+ 'changes': {},
+ 'result': False,
+                        'comment': 'Security group "{0}" doesn\'t exist'.format(secgroup)}
+ else:
+ kwargs['security_groups'].append(secgroup)
+ for net in networks:
+ existing_network = __salt__['novang.network_show'](net.get('name'), profile)
+ if not existing_network:
+ return {'name': name,
+ 'changes': {},
+ 'result': False,
+                    'comment': 'Network "{0}" doesn\'t exist'.format(net.get('name'))}
+ else:
+ network_id = existing_network.get('id')
+ if net.get('v4_fixed_ip') is not None:
+ nics.append({'net-id': network_id, 'v4-fixed-ip': net.get('v4_fixed_ip')})
+ else:
+ nics.append({'net-id': network_id})
+ kwargs['nics'] = nics
+    __salt__['novang.boot'](name, flavor_id, image_id, profile, tenant_name, **kwargs)
+ return {'name': name,
+ 'changes': {},
+ 'result': True,
+            'comment': 'Instance "{0}" was successfully created'.format(name)}
+
+def _already_exists(name, resource):
+ changes_dict = {'name': name,
+ 'changes': {},
+ 'result': True}
+ changes_dict['comment'] = \
+ '{0} {1} already exists'.format(resource, name)
+ return changes_dict
+
+
+def _created(name, resource, resource_definition):
+ changes_dict = {'name': name,
+ 'changes': resource_definition,
+ 'result': True,
+ 'comment': '{0} {1} created'.format(resource, name)}
+ return changes_dict
+
+def _updated(name, resource, resource_definition):
+ changes_dict = {'name': name,
+ 'changes': resource_definition,
+ 'result': True,
+                    'comment': '{0} {1} was updated'.format(resource, name)}
+ return changes_dict
+
+def _update_failed(name, resource):
+ changes_dict = {'name': name,
+ 'changes': {},
+ 'comment': '{0} {1} failed to update'.format(resource, name),
+ 'result': False}
+ return changes_dict
+
+def _no_change(name, resource, test=False):
+ changes_dict = {'name': name,
+ 'changes': {},
+ 'result': True}
+ if test:
+ changes_dict['comment'] = \
+ '{0} {1} will be {2}'.format(resource, name, test)
+ else:
+ changes_dict['comment'] = \
+ '{0} {1} is in correct state'.format(resource, name)
+ return changes_dict
\ No newline at end of file
diff --git a/metadata/service/control/cluster.yml b/metadata/service/control/cluster.yml
index 3828de2..69dad66 100644
--- a/metadata/service/control/cluster.yml
+++ b/metadata/service/control/cluster.yml
@@ -14,7 +14,7 @@
vncproxy_url: ${_param:nova_vncproxy_url}
security_group: false
dhcp_domain: novalocal
- scheduler_default_filters: "DifferentHostFilter,RetryFilter,AvailabilityZoneFilter,RamFilter,CoreFilter,DiskFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter"
+ scheduler_default_filters: "DifferentHostFilter,RetryFilter,AvailabilityZoneFilter,RamFilter,CoreFilter,DiskFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,PciPassthroughFilter,NUMATopologyFilter,AggregateInstanceExtraSpecsFilter"
cpu_allocation_ratio: 16.0
ram_allocation_ratio: 1.5
disk_allocation_ratio: 1.0
diff --git a/nova/_modules/novang.py b/nova/_modules/novang.py
deleted file mode 100644
index 018e41e..0000000
--- a/nova/_modules/novang.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# -*- coding: utf-8 -*-
-from __future__ import absolute_import
-from pprint import pprint
-
-# Import python libs
-import logging
-
-# Import salt libs
-import salt.utils.openstack.nova as suon
-
-# Get logging started
-log = logging.getLogger(__name__)
-
-# Function alias to not shadow built-ins
-__func_alias__ = {
- 'list_': 'list'
-}
-
-# Define the module's virtual name
-__virtualname__ = 'novang'
-
-
-def __virtual__():
- '''
- Only load this module if nova
- is installed on this minion.
- '''
- if suon.check_nova():
- return __virtualname__
- return (False, 'The nova execution module failed to load: '
- 'only available if nova is installed.')
-
-
-__opts__ = {}
-
-
-def _auth(profile=None):
- '''
- Set up nova credentials
- '''
- if profile:
- credentials = __salt__['config.option'](profile)
- user = credentials['keystone.user']
- password = credentials['keystone.password']
- tenant = credentials['keystone.tenant']
- auth_url = credentials['keystone.auth_url']
- region_name = credentials.get('keystone.region_name', None)
- api_key = credentials.get('keystone.api_key', None)
- os_auth_system = credentials.get('keystone.os_auth_system', None)
- else:
- user = __salt__['config.option']('keystone.user')
- password = __salt__['config.option']('keystone.password')
- tenant = __salt__['config.option']('keystone.tenant')
- auth_url = __salt__['config.option']('keystone.auth_url')
- region_name = __salt__['config.option']('keystone.region_name')
- api_key = __salt__['config.option']('keystone.api_key')
- os_auth_system = __salt__['config.option']('keystone.os_auth_system')
- kwargs = {
- 'username': user,
- 'password': password,
- 'api_key': api_key,
- 'project_id': tenant,
- 'auth_url': auth_url,
- 'region_name': region_name,
- 'os_auth_plugin': os_auth_system
- }
-
- return suon.SaltNova(**kwargs)
-
-
-def get_connection_args(profile=None):
- '''
- Set up profile credentials
- '''
- if profile:
- credentials = __salt__['config.option'](profile)
- user = credentials['keystone.user']
- password = credentials['keystone.password']
- tenant = credentials['keystone.tenant']
- auth_url = credentials['keystone.auth_url']
-
- kwargs = {
- 'username': user,
- 'password': password,
- 'tenant': tenant,
- 'auth_url': auth_url
- }
- return kwargs
-
-
-def quota_list(tenant_name, profile=None):
- '''
- list quotas of a tenant
- '''
- connection_args = get_connection_args(profile)
- tenant = __salt__['keystone.tenant_get'](name=tenant_name, profile=profile, **connection_args)
- tenant_id = tenant[tenant_name]['id']
- conn = _auth(profile)
- nt_ks = conn.compute_conn
- item = nt_ks.quotas.get(tenant_id).__dict__
- return item
-
-
-def quota_get(name, tenant_name, profile=None, quota_value=None):
- '''
- get specific quota value of a tenant
- '''
- item = quota_list(tenant_name, profile)
- quota_value = item[name]
- return quota_value
-
-
-def quota_update(tenant_name, profile=None, **quota_argument):
- '''
- update quota of specified tenant
- '''
- connection_args = get_connection_args(profile)
- tenant = __salt__['keystone.tenant_get'](name=tenant_name, profile=profile, **connection_args)
- tenant_id = tenant[tenant_name]['id']
- conn = _auth(profile)
- nt_ks = conn.compute_conn
- item = nt_ks.quotas.update(tenant_id, **quota_argument)
- return item
-
-
-
-
diff --git a/nova/_states/novang.py b/nova/_states/novang.py
deleted file mode 100644
index 124faf4..0000000
--- a/nova/_states/novang.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-Nova state that ensures that defined flavor is present
-'''
-import logging
-from functools import wraps
-LOG = logging.getLogger(__name__)
-
-
-def __virtual__():
- '''
- Only load if the nova module is in __salt__
- '''
- return 'novang' if 'nova.flavor_list' in __salt__ else False
-
-
-def flavor_present(name, flavor_id=0, ram=0, disk=0, vcpus=1, profile=None):
- '''
- Ensures that the nova flavor exists
- '''
- ret = {'name': name,
- 'changes': {},
- 'result': True,
- 'comment': 'Flavor "{0}" already exists'.format(name)}
- project = __salt__['nova.flavor_list'](profile)
- if 'Error' in project:
- pass
- elif name in project:
- pass
- else:
- __salt__['nova.flavor_create'](name, flavor_id, ram, disk, vcpus, profile)
- ret['comment'] = 'Flavor {0} has been created'.format(name)
- ret['changes']['Flavor'] = 'Created'
- return ret
-
-
-def quota_present(tenant_name, profile, name=None, **kwargs):
- '''
- Ensures that the nova quota exists
- '''
- changes = {}
- for key, value in kwargs.items():
- quota = __salt__['novang.quota_get'](key, tenant_name, profile)
- if quota != value:
- arg = {}
- arg[key] = value
- changes[key] = value
- __salt__['novang.quota_update'](tenant_name, profile, **arg)
- if bool(changes):
- return _updated(tenant_name, 'tenant', changes)
- else:
- return _no_change(tenant_name, 'tenant')
-
-def _updated(name, resource, resource_definition):
- changes_dict = {'name': name,
- 'changes': resource_definition,
- 'result': True,
- 'comment': '{0} {1} tenant was updated'.format(resource, name)}
- return changes_dict
-
-def _update_failed(name, resource):
- changes_dict = {'name': name,
- 'changes': {},
- 'comment': '{0} {1} failed to update'.format(resource, name),
- 'result': False}
- return changes_dict
-
-def _no_change(name, resource, test=False):
- changes_dict = {'name': name,
- 'changes': {},
- 'result': True}
- if test:
- changes_dict['comment'] = \
- '{0} {1} will be {2}'.format(resource, name, test)
- else:
- changes_dict['comment'] = \
- '{0} {1} is in correct state'.format(resource, name)
- return changes_dict
diff --git a/nova/client.sls b/nova/client.sls
index 355559f..2d20a8f 100644
--- a/nova/client.sls
+++ b/nova/client.sls
@@ -7,6 +7,8 @@
{%- for identity_name, identity in client.server.iteritems() %}
+{%- if identity.flavor is defined %}
+
{%- for flavor_name, flavor in identity.flavor.iteritems() %}
nova_openstack_flavor_{{ flavor_name }}:
@@ -29,6 +31,19 @@
{%- endfor %}
+{%- endif %}
+
+{%- if identity.availability_zones is defined %}
+
+{%- for availability_zone_name in identity.availability_zones %}
+nova_availability_zone_{{ availability_zone_name }}:
+ novang.availability_zone_present:
+ - availability_zone: {{ availability_zone_name }}
+ - profile: {{ identity_name }}
+{%- endfor %}
+
+{%- endif %}
+
{%- endfor %}
{%- endif %}
diff --git a/nova/compute.sls b/nova/compute.sls
index ef0c60a..7839551 100644
--- a/nova/compute.sls
+++ b/nova/compute.sls
@@ -120,6 +120,31 @@
- watch:
- file: /etc/nova/nova.conf
+{%- if compute.availability_zone != None %}
+
+{%- set ident = compute.identity %}
+
+{%- if ident.get('api_version', '2') == '3' %}
+{%- set version = "v3" %}
+{%- else %}
+{%- set version = "v2.0" %}
+{%- endif %}
+
+{%- if ident.get('protocol', 'http') == 'http' %}
+{%- set protocol = 'http' %}
+{%- else %}
+{%- set protocol = 'https' %}
+{%- endif %}
+
+{%- set identity_params = " --os-username="+ident.user+" --os-password="+ident.password+" --os-project-name="+ident.tenant+" --os-auth-url="+protocol+"://"+ident.host+":"+ident.port|string+"/"+version %}
+
+Add_compute_to_availability_zone_{{ compute.availability_zone }}:
+ cmd.run:
+ - name: "nova {{ identity_params }} aggregate-add-host {{ compute.availability_zone }} {{ pillar.linux.system.name }}"
+ - unless: "nova {{ identity_params }} service-list | grep {{ compute.availability_zone }} | grep {{ pillar.linux.system.name }}"
+
+{%- endif %}
+
{%- if compute.virtualization == 'kvm' %}
{% if compute.ceph is defined %}
@@ -181,6 +206,18 @@
- pkg: nova_compute_packages
- onlyif: "virsh net-list | grep default"
+{%- if compute.hugepages is defined %}
+
+/etc/default/qemu-kvm:
+ file.managed:
+ - contents: KVM_HUGEPAGES=1
+ - require:
+ - pkg: nova_compute_packages
+ - require_in:
+ - service: {{ compute.libvirt_service }}
+
+{%- endif %}
+
{{ compute.libvirt_service }}:
service.running:
- enable: true
@@ -189,6 +226,7 @@
- cmd: virsh net-undefine default
- watch:
- file: /etc/libvirt/{{ compute.libvirt_config }}
+ - file: /etc/libvirt/qemu.conf
{%- if grains.get('init', None) == "upstart" %}
# MOS9 libvirt fix for upstart
diff --git a/nova/files/liberty/nova-compute.conf.Debian b/nova/files/liberty/nova-compute.conf.Debian
index a9dc806..dcd7f88 100644
--- a/nova/files/liberty/nova-compute.conf.Debian
+++ b/nova/files/liberty/nova-compute.conf.Debian
@@ -16,13 +16,21 @@
api_paste_config=/etc/nova/api-paste.ini
volumes_path=/var/lib/nova/volumes
enabled_apis=ec2,osapi_compute,metadata
-
+{%- if compute.get('sriov', false) %}
+{%- for nic_name, sriov in compute.sriov.iteritems() %}
+pci_passthrough_whitelist = {"devname":"{{ sriov.devname }}","physical_network":"{{ sriov.physical_network }}"}
+{%- endfor %}
+{%- endif %}
{%- if compute.image.use_cow is defined %}
use_cow_images = {{ compute.image.use_cow }}
{%- endif %}
reserved_host_memory_mb = {{ compute.get('reserved_host_memory_mb', '512') }}
+{%- if compute.vcpu_pin_set is defined %}
+vcpu_pin_set={{ compute.vcpu_pin_set }}
+{%- endif %}
+
allow_resize_to_same_host=True
{%- if compute.get('ceph', {}).ephemeral is defined %}
diff --git a/nova/files/liberty/nova-controller.conf.Debian b/nova/files/liberty/nova-controller.conf.Debian
index 6c8a7db..bf7d9dd 100644
--- a/nova/files/liberty/nova-controller.conf.Debian
+++ b/nova/files/liberty/nova-controller.conf.Debian
@@ -23,6 +23,8 @@
ram_allocation_ratio = {{ controller.ram_allocation_ratio }}
disk_allocation_ratio = {{ controller.disk_allocation_ratio }}
scheduler_default_filters = {{ controller.scheduler_default_filters }}
+scheduler_available_filters = nova.scheduler.filters.all_filters
+scheduler_available_filters = nova.scheduler.filters.pci_passthrough_filter.PciPassthroughFilter
osapi_max_limit = {{ controller.osapi_max_limit|default('1000') }}
allow_resize_to_same_host = True
diff --git a/nova/files/liberty/qemu.conf.Debian b/nova/files/liberty/qemu.conf.Debian
index 5c5722b..f3a8997 100644
--- a/nova/files/liberty/qemu.conf.Debian
+++ b/nova/files/liberty/qemu.conf.Debian
@@ -337,7 +337,10 @@
# in a location of $MOUNTPOINT/libvirt/qemu
#
#hugetlbfs_mount = "/dev/hugepages"
-
+#hugetlbfs_mount = ["/run/hugepages/kvm", "/mnt/hugepages_1GB"]
+{%- if compute.hugepages is defined %}
+hugetlbfs_mount = [{%- for mount in compute.hugepages.mount_points %}"{{ mount.path }}"{% if not loop.last %}, {% endif %}{%- endfor %}]
+{%- endif %}
# Path to the setuid helper for creating tap devices. This executable
# is used to create <source type='bridge'> interfaces when libvirtd is
diff --git a/nova/files/mitaka/nova-compute.conf.Debian b/nova/files/mitaka/nova-compute.conf.Debian
index af03d27..c4e55fe 100644
--- a/nova/files/mitaka/nova-compute.conf.Debian
+++ b/nova/files/mitaka/nova-compute.conf.Debian
@@ -18,7 +18,11 @@
vif_plugging_is_fatal=True
vif_plugging_timeout=300
dhcp_domain={{ compute.get('dhcp_domain', 'novalocal') }}
-
+{%- if compute.get('sriov', false) %}
+{%- for nic_name, sriov in compute.sriov.iteritems() %}
+pci_passthrough_whitelist = {"devname":"{{ sriov.devname }}","physical_network":"{{ sriov.physical_network }}"}
+{%- endfor %}
+{%- endif %}
{%- if compute.image.use_cow is defined %}
use_cow_images = {{ compute.image.use_cow }}
{%- endif %}
@@ -28,6 +32,10 @@
reserved_host_memory_mb = {{ compute.get('reserved_host_memory_mb', '512') }}
+{%- if compute.vcpu_pin_set is defined %}
+vcpu_pin_set={{ compute.vcpu_pin_set }}
+{%- endif %}
+
allow_resize_to_same_host=True
auth_strategy = keystone
diff --git a/nova/files/mitaka/nova-controller.conf.Debian b/nova/files/mitaka/nova-controller.conf.Debian
index 2a79ddb..2711cb8 100644
--- a/nova/files/mitaka/nova-controller.conf.Debian
+++ b/nova/files/mitaka/nova-controller.conf.Debian
@@ -20,6 +20,8 @@
ram_allocation_ratio = {{ controller.ram_allocation_ratio }}
disk_allocation_ratio = {{ controller.disk_allocation_ratio }}
scheduler_default_filters = {{ controller.scheduler_default_filters }}
+scheduler_available_filters = nova.scheduler.filters.all_filters
+scheduler_available_filters = nova.scheduler.filters.pci_passthrough_filter.PciPassthroughFilter
scheduler_driver = filter_scheduler
allow_resize_to_same_host = True
osapi_max_limit = {{ controller.osapi_max_limit|default('1000') }}
diff --git a/nova/files/mitaka/qemu.conf.Debian b/nova/files/mitaka/qemu.conf.Debian
index 5c5722b..f3a8997 100644
--- a/nova/files/mitaka/qemu.conf.Debian
+++ b/nova/files/mitaka/qemu.conf.Debian
@@ -337,7 +337,10 @@
# in a location of $MOUNTPOINT/libvirt/qemu
#
#hugetlbfs_mount = "/dev/hugepages"
-
+#hugetlbfs_mount = ["/run/hugepages/kvm", "/mnt/hugepages_1GB"]
+{%- if compute.hugepages is defined %}
+hugetlbfs_mount = [{%- for mount in compute.hugepages.mount_points %}"{{ mount.path }}"{% if not loop.last %}, {% endif %}{%- endfor %}]
+{%- endif %}
# Path to the setuid helper for creating tap devices. This executable
# is used to create <source type='bridge'> interfaces when libvirtd is
diff --git a/nova/map.jinja b/nova/map.jinja
index 0a8cf24..5566170 100644
--- a/nova/map.jinja
+++ b/nova/map.jinja
@@ -45,6 +45,7 @@
'bind': compute_bind_defaults,
'debug': false,
'notification': false,
+ 'availability_zone': None,
'identity': {
'region': 'RegionOne'
},
@@ -62,6 +63,7 @@
'bind': compute_bind_defaults,
'debug': false,
'notification': false,
+ 'availability_zone': None,
'identity': {
'region': 'RegionOne'
},
diff --git a/tests/pillar/compute_cluster.sls b/tests/pillar/compute_cluster.sls
index e1d3541..3c4b6bc 100644
--- a/tests/pillar/compute_cluster.sls
+++ b/tests/pillar/compute_cluster.sls
@@ -2,6 +2,10 @@
compute:
version: liberty
enabled: true
+ vcpu_pin_set: 1,2,3
+ hugepages:
+ mount_points:
+ - path: /mnt/hugepages_1GB
virtualization: kvm
heal_instance_info_cache_interval: 60
vncproxy_url: openstack:6080