Add multi-node model with Pike

The patch adds a multi-node model that deploys the OpenStack Pike
release with SSL enabled and is ready for CI, including passing
smoke tests.
Change-Id: I3c0f4f8a18833dd7fcf7974848a9b70292a8c960
Related-PROD: PROD-18350
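
For context, a controller node would consume the new role through its
reclass node definition, roughly as in the sketch below; the codename and
address values are illustrative assumptions, not part of this patch:

    classes:
    - cluster.os-ha-ovs-pike.openstack.control
    parameters:
      _param:
        linux_system_codename: xenial   # assumed; matches the saltstack.xenial repo class in control.yml
        single_address: 172.16.10.101   # illustrative controller address
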
diff --git a/classes/cluster/os-ha-ovs-pike/openstack/control.yml b/classes/cluster/os-ha-ovs-pike/openstack/control.yml
new file mode 100644
index 0000000..20d2497
--- /dev/null
+++ b/classes/cluster/os-ha-ovs-pike/openstack/control.yml
@@ -0,0 +1,284 @@
+classes:
+- system.salt.minion.cert.proxy
+- system.linux.system.lowmem
+- system.linux.system.repo.glusterfs
+- system.linux.system.repo.mcp.openstack
+- system.linux.system.repo.mcp.extra
+- system.linux.system.repo.saltstack.xenial
+- system.linux.storage.loopback
+- system.memcached.server.single
+- system.rabbitmq.server.cluster
+- system.rabbitmq.server.vhost.openstack
+- system.nginx.server.single
+- system.nginx.server.proxy.openstack_api
+- system.nginx.server.proxy.openstack.designate
+- system.nginx.server.proxy.openstack.placement
+- system.keystone.server.wsgi
+- system.keystone.server.cluster
+- system.glusterfs.client.cluster
+- system.glusterfs.client.volume.glance
+- system.glusterfs.client.volume.keystone
+- system.glusterfs.server.volume.glance
+- system.glusterfs.server.volume.keystone
+- system.glusterfs.server.cluster
+- system.glance.control.cluster
+- system.nova.control.cluster
+- system.neutron.control.openvswitch.cluster
+- system.cinder.control.cluster
+- system.cinder.control.backend.lvm
+- system.cinder.volume.single
+- system.cinder.volume.backend.lvm
+- system.heat.server.cluster
+- system.designate.server.cluster
+- system.designate.server.backend.bind
+- system.galera.server.cluster
+- system.galera.server.database.cinder
+- system.galera.server.database.designate
+- system.galera.server.database.glance
+- system.galera.server.database.heat
+- system.galera.server.database.keystone
+- system.galera.server.database.nova
+- cluster.os-ha-ovs-pike.openstack.dashboard
+- cluster.os-ha-ovs-pike
+parameters:
+  _param:
+    salt_minion_ca_authority: salt_master_ca
+    keepalived_vip_interface: ens4
+    ### nginx ssl sites settings
+    nginx_proxy_ssl:
+      enabled: true
+      engine: salt
+      authority: "${_param:salt_minion_ca_authority}"
+      key_file: "/etc/ssl/private/internal_proxy.key"
+      cert_file: "/etc/ssl/certs/internal_proxy.crt"
+      chain_file: "/etc/ssl/certs/internal_proxy-with-chain.crt"
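+    # Backend API endpoints are addressed on the loopback interface, so API
+    # traffic reaches them only through the TLS-terminating nginx proxy.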
+    nginx_proxy_openstack_api_address: ${_param:cluster_local_address}
+    nginx_proxy_openstack_keystone_host: 127.0.0.1
+    nginx_proxy_openstack_nova_host: 127.0.0.1
+    nginx_proxy_openstack_cinder_host: 127.0.0.1
+    nginx_proxy_openstack_glance_host: 127.0.0.1
+    nginx_proxy_openstack_neutron_host: 127.0.0.1
+    nginx_proxy_openstack_heat_host: 127.0.0.1
+    nginx_proxy_openstack_designate_host: 127.0.0.1
+    nginx_proxy_openstack_placement_host: 127.0.0.1
+    apache_keystone_api_host: ${_param:single_address}
+    apache_keystone_ssl: ${_param:nginx_proxy_ssl}
+    designate_api_address: 127.0.0.1
+    designate_api_base_uri: ${_param:cluster_vip_address}
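+  # Disable the dedicated nginx keystone proxy sites; Keystone is fronted by
+  # Apache (system.keystone.server.wsgi) using the same certificate parameters.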
+  nginx:
+    server:
+      site:
+        nginx_proxy_openstack_api_keystone:
+          enabled: false
+        nginx_proxy_openstack_api_keystone_private:
+          enabled: false
+  linux:
+    system:
+      package:
+        python-msgpack:
+          version: latest
+    network:
+      interface:
+        ens4:
+          enabled: true
+          type: eth
+          proto: static
+          address: ${_param:single_address}
+          netmask: 255.255.255.0
+  keepalived:
+    cluster:
+      instance:
+        VIP:
+          virtual_router_id: 150
+  keystone:
+    server:
+      admin_email: ${_param:admin_email}
+      notification:
+        driver: messagingv2
+        topics: "notifications"
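+  # Designate uses the BIND9 backend; the pool targets point at the BIND
+  # instances on the second and third controllers and are managed over rndc.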
+  designate:
+    worker:
+      enabled: ${_param:designate_worker_enabled}
+    server:
+      pools:
+        default:
+          description: 'test pool'
+          targets:
+            default:
+              description: 'test target1'
+            default1:
+              type: ${_param:designate_pool_target_type}
+              description: 'test target2'
+              masters: ${_param:designate_pool_target_masters}
+              options:
+                host: ${_param:openstack_control_node02_address}
+                port: 53
+                rndc_host: ${_param:openstack_control_node02_address}
+                rndc_port: 953
+                rndc_key_file: /etc/designate/rndc.key
+            default2:
+              type: ${_param:designate_pool_target_type}
+              description: 'test target3'
+              masters: ${_param:designate_pool_target_masters}
+              options:
+                host: ${_param:openstack_control_node03_address}
+                port: 53
+                rndc_host: ${_param:openstack_control_node03_address}
+                rndc_port: 953
+                rndc_key_file: /etc/designate/rndc.key
+      bind:
+        api:
+          address: ${_param:designate_api_address}
+      api:
+        base_uri: ${_param:designate_service_protocol}://${_param:designate_api_base_uri}:9001/
+      identity:
+        protocol: https
+  glance:
+    server:
+      storage:
+        engine: file
+      images: []
+      workers: 1
+      notification:
+        driver: messagingv2
+        topics: "notifications"
+      bind:
+        address: 127.0.0.1
+      identity:
+        protocol: https
+      registry:
+        protocol: https
+  heat:
+    server:
+      notification:
+        driver: messagingv2
+        topics: "notifications"
+      bind:
+        api:
+          address: 127.0.0.1
+        api_cfn:
+          address: 127.0.0.1
+        api_cloudwatch:
+          address: 127.0.0.1
+      identity:
+        protocol: https
+  neutron:
+    server:
+      notification:
+        driver: messagingv2
+        topics: "notifications"
+      bind:
+        address: 127.0.0.1
+      identity:
+        protocol: https
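+  # rndc control channel for the local BIND server, restricted to the three
+  # controllers and authenticated with the "designate" key.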
+  bind:
+    server:
+      control:
+        mgmt:
+          enabled: true
+          bind:
+            address: ${_param:single_address}
+            port: 953
+          allow:
+            - ${_param:openstack_control_node01_address}
+            - ${_param:openstack_control_node02_address}
+            - ${_param:openstack_control_node03_address}
+          keys:
+            - designate
+  nova:
+    controller:
+      networking: dvr
+      cpu_allocation: 54
+      metadata:
+        password: ${_param:metadata_password}
+      bind:
+        private_address: 127.0.0.1
+        public_address: ${_param:cluster_vip_address}
+        novncproxy_port: 6080
+      identity:
+        protocol: https
+      network:
+        protocol: https
+      glance:
+        protocol: https
+      vncproxy_url: http://${_param:cluster_vip_address}:6080
+      workers: 1
+      notification:
+        driver: messagingv2
+        topics: "notifications"
+      metadata:
+        bind:
+          address: ${_param:single_address}
+  cinder:
+    volume:
+      notification:
+        driver: messagingv2
+        topics: "notifications"
+    controller:
+      notification:
+        driver: messagingv2
+        topics: "notifications"
+      identity:
+        protocol: https
+      osapi:
+        host: 127.0.0.1
+      glance:
+        protocol: https
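+  # Certificate used by the nginx/Apache proxies above, issued by the Salt
+  # master CA with SANs for the loopback, local and public addresses.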
+  salt:
+    minion:
+      cert:
+        internal_proxy:
+          host: ${_param:salt_minion_ca_host}
+          authority: ${_param:salt_minion_ca_authority}
+          common_name: internal_proxy
+          signing_policy: cert_open
+          alternative_names: IP:127.0.0.1,IP:${_param:cluster_local_address},IP:${_param:cluster_public_host},DNS:${linux:system:name},DNS:${linux:network:fqdn},DNS:${_param:cluster_local_address},DNS:${_param:cluster_public_host}
+          key_file: "/etc/ssl/private/internal_proxy.key"
+          cert_file: "/etc/ssl/certs/internal_proxy.crt"
+          all_file: "/etc/ssl/certs/internal_proxy-with-chain.crt"
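+  # The 'type' of the API listeners inherited from the system haproxy classes
+  # is reset here (type: ~ overrides the value defined in those classes).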
+  haproxy:
+    proxy:
+      listen:
+        keystone_public_api:
+          type: ~
+        designate_api:
+          type: ~
+        keystone_admin_api:
+          type: ~
+        nova_api:
+          type: ~
+        nova_metadata_api:
+          type: ~
+        cinder_api:
+          type: ~
+        glance_api:
+          type: ~
+        glance_registry_api:
+          type: ~
+        heat_cloudwatch_api:
+          type: ~
+        heat_api:
+          type: ~
+        heat_cfn_api:
+          type: ~
+        neutron_api:
+          type: ~
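+        # Placement API frontend on the cluster VIP, port 8778, balanced in
+        # TCP mode across the three controller nodes.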
+        nova_placement_api:
+          mode: tcp
+          service_name: nova_placement
+          binds:
+            - address: ${_param:cluster_vip_address}
+              port: 8778
+          servers:
+            - name: ${_param:cluster_node01_hostname}
+              host: ${_param:cluster_node01_address}
+              port: 8778
+              params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
+            - name: ${_param:cluster_node02_hostname}
+              host: ${_param:cluster_node02_address}
+              port: 8778
+              params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3
+            - name: ${_param:cluster_node03_hostname}
+              host: ${_param:cluster_node03_address}
+              port: 8778
+              params: check inter 10s fastinter 2s downinter 3s rise 3 fall 3