Add virtual-mcp-pike-dvr-ssl-barbican model

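The model adds the shared openstack init parameters plus compute,
control, dashboard, dns, dogtag, gateway, proxy, share and telemetry
role definitions under
classes/cluster/virtual-mcp-pike-dvr-ssl-barbican/openstack/. Internal
API endpoints are served over HTTPS behind nginx/apache using
certificates issued by the Salt master CA, and Barbican uses the Dogtag
plugin running on the control nodes (reached through haproxy on port
8444 to avoid the 8443 binding conflict).
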
Change-Id: I665aad8777be8ceebffc2a39309bbc453c674af2
diff --git a/classes/cluster/virtual-mcp-pike-dvr-ssl-barbican/openstack/compute.yml b/classes/cluster/virtual-mcp-pike-dvr-ssl-barbican/openstack/compute.yml
new file mode 100644
index 0000000..65bcbeb
--- /dev/null
+++ b/classes/cluster/virtual-mcp-pike-dvr-ssl-barbican/openstack/compute.yml
@@ -0,0 +1,54 @@
+classes:
+- system.linux.system.repo.mcp.apt_mirantis.openstack
+- system.linux.system.repo.mcp.extra
+- system.linux.system.repo.mcp.apt_mirantis.saltstack_2016_3
+- system.linux.network.hosts
+- system.linux.storage.loopback
+- system.nova.compute.cluster
+- system.neutron.compute.cluster
+- system.ceilometer.client.nova_compute
+- system.ceilometer.agent.cluster
+- system.ceilometer.agent.polling.default
+- system.linux.network.interface.single_ovs_dvr
+- service.cinder.volume.single
+- system.cinder.volume.backend.lvm
+- cluster.virtual-mcp-pike-dvr-ssl-barbican
+parameters:
+  _param:
+    primary_interface: ens4
+    tenant_interface: ens5
+    external_interface: ens6
+    interface_mtu: 1500
+    linux_system_codename: xenial
+    loopback_device_size: 20
+  nova:
+    compute:
+      vncproxy_url: http://${_param:cluster_vip_address}:6080
+      barbican:
+        enabled: ${_param:barbican_integration_enabled}
+      identity:
+        protocol: https
+      network:
+        protocol: https
+      glance:
+        protocol: https
+      image:
+        protocol: https
+  neutron:
+    compute:
+      notification:
+        driver: messagingv2
+        topics: "notifications"
+  cinder:
+    volume:
+      barbican:
+        enabled: ${_param:barbican_integration_enabled}
+  linux:
+    network:
+      interface:
+        primary_interface:
+          ipflush_onchange: true
+        tenant_interface:
+          ipflush_onchange: true
+        external_interface:
+          ipflush_onchange: true
diff --git a/classes/cluster/virtual-mcp-pike-dvr-ssl-barbican/openstack/control.yml b/classes/cluster/virtual-mcp-pike-dvr-ssl-barbican/openstack/control.yml
new file mode 100644
index 0000000..5d20cee
--- /dev/null
+++ b/classes/cluster/virtual-mcp-pike-dvr-ssl-barbican/openstack/control.yml
@@ -0,0 +1,306 @@
+classes:
+- system.salt.minion.cert.proxy
+- system.linux.system.lowmem
+- system.linux.system.repo.mcp.apt_mirantis.glusterfs
+- system.linux.system.repo.mcp.apt_mirantis.openstack
+- system.linux.system.repo.mcp.extra
+- system.linux.system.repo.mcp.apt_mirantis.saltstack_2016_3
+- system.memcached.server.single
+- system.rabbitmq.server.cluster
+- system.rabbitmq.server.vhost.openstack
+- system.apache.server.site.manila
+- system.apache.server.site.barbican
+- system.nginx.server.single
+- system.nginx.server.proxy.openstack_api
+- system.nginx.server.proxy.openstack.designate
+- system.nginx.server.proxy.openstack.placement
+- system.keystone.server.wsgi
+- system.keystone.server.cluster
+- system.glusterfs.client.cluster
+- system.glusterfs.client.volume.glance
+- system.glusterfs.client.volume.keystone
+- system.glusterfs.server.volume.glance
+- system.glusterfs.server.volume.keystone
+- system.glusterfs.server.cluster
+- system.glance.control.cluster
+- system.nova.control.cluster
+- system.neutron.control.openvswitch.cluster
+- system.cinder.control.cluster
+- system.heat.server.cluster
+- system.designate.server.cluster
+- system.galera.server.cluster
+- system.galera.server.database.cinder
+- system.galera.server.database.glance
+- system.galera.server.database.heat
+- system.galera.server.database.keystone
+- system.galera.server.database.nova
+- system.galera.server.database.designate
+- system.galera.server.database.manila
+- system.galera.server.database.aodh
+- system.galera.server.database.panko
+- system.galera.server.database.gnocchi
+- system.galera.server.database.barbican
+- system.dogtag.server.cluster
+- system.barbican.server.cluster
+- service.barbican.server.plugin.dogtag
+- system.ceilometer.client
+- system.ceilometer.client.cinder_volume
+- system.ceilometer.client.neutron
+- cluster.virtual-mcp-pike-dvr-ssl-barbican.openstack.dns
+- system.haproxy.proxy.listen.openstack.placement
+- system.haproxy.proxy.listen.openstack.manila
+- system.manila.control.cluster
+- cluster.virtual-mcp-pike-dvr-ssl-barbican
+parameters:
+  _param:
+    keepalived_vip_interface: ens4
+    salt_minion_ca_authority: salt_master_ca
+    ### nginx ssl sites settings
+    nginx_proxy_ssl:
+      enabled: true
+      engine: salt
+      authority: "${_param:salt_minion_ca_authority}"
+      key_file: "/etc/ssl/private/internal_proxy.key"
+      cert_file: "/etc/ssl/certs/internal_proxy.crt"
+      chain_file: "/etc/ssl/certs/internal_proxy-with-chain.crt"
+    nginx_proxy_openstack_api_address: ${_param:cluster_local_address}
+    nginx_proxy_openstack_keystone_host: 127.0.0.1
+    nginx_proxy_openstack_nova_host: 127.0.0.1
+    nginx_proxy_openstack_cinder_host: 127.0.0.1
+    nginx_proxy_openstack_glance_host: 127.0.0.1
+    nginx_proxy_openstack_neutron_host: 127.0.0.1
+    nginx_proxy_openstack_heat_host: 127.0.0.1
+    nginx_proxy_openstack_designate_host: 127.0.0.1
+    nginx_proxy_openstack_placement_host: 127.0.0.1
+    apache_manila_api_address: ${_param:single_address}
+    apache_manila_ssl: ${_param:nginx_proxy_ssl}
+    apache_keystone_api_host: ${_param:single_address}
+    apache_keystone_ssl: ${_param:nginx_proxy_ssl}
+    apache_barbican_api_address: ${_param:cluster_local_address}
+    apache_barbican_api_host: ${_param:single_address}
+    apache_barbican_ssl: ${_param:nginx_proxy_ssl}
+    barbican_dogtag_nss_password: workshop
+    barbican_dogtag_host: ${_param:cluster_vip_address}
+    # Dogtag listens on 8443, but there is no way to bind it to a
+    # specific IP since Dogtag is installed on the ctl nodes in this setup.
+    # Change the port on the HAProxy side to avoid a binding conflict.
+    haproxy_dogtag_bind_port: 8444
+    cluster_dogtag_port: 8443
+    dogtag_master_host: ctl01.${linux:system:domain}
+    dogtag_pki_admin_password: workshop
+    dogtag_pki_client_database_password: workshop
+    dogtag_pki_client_pkcs12_password: workshop
+    dogtag_pki_ds_password: workshop
+    dogtag_pki_token_password: workshop
+    dogtag_pki_security_domain_password: workshop
+    dogtag_pki_clone_pkcs12_password: workshop
+  nginx:
+    server:
+      site:
+        nginx_proxy_openstack_api_keystone:
+          enabled: false
+        nginx_proxy_openstack_api_keystone_private:
+          enabled: false
+  linux:
+    system:
+      package:
+        python-msgpack:
+          version: latest
+    network:
+      interface:
+        ens4:
+          enabled: true
+          type: eth
+          proto: static
+          address: ${_param:single_address}
+          netmask: 255.255.255.0
+  keepalived:
+    cluster:
+      instance:
+        VIP:
+          virtual_router_id: 150
+  dogtag:
+    server:
+      ldap_hostname: ${linux:network:fqdn}
+      ldap_dn_password: workshop
+      ldap_admin_password: workshop
+      export_pem_file_path: /etc/dogtag/kra_admin_cert.pem
+  # TODO: drop this once reclass is bumped; this part is missing in the current version
+  apache:
+    server:
+      site:
+        barbican_admin:
+          host:
+            address: ${_param:apache_barbican_api_address}
+            name: ${_param:apache_barbican_api_host}
+            port: 9312
+          log:
+            custom:
+              format: 'combined'
+              file: '/var/log/barbican/barbican-api.log'
+            error:
+              enabled: true
+              file: '/var/log/barbican/barbican-api.log'
+  barbican:
+    server:
+      enabled: true
+      dogtag_admin_cert:
+        engine: mine
+        minion: ${_param:dogtag_master_host}
+      ks_notifications_enable: True
+      store:
+        software:
+          store_plugin: dogtag_crypto
+          global_default: True
+      plugin:
+        dogtag:
+          port: ${_param:haproxy_dogtag_bind_port}
+  keystone:
+    server:
+      admin_email: ${_param:admin_email}
+  designate:
+    pool_manager:
+      enabled: ${_param:designate_pool_manager_enabled}
+      periodic_sync_interval: ${_param:designate_pool_manager_periodic_sync_interval}
+    server:
+      identity:
+        protocol: https
+      bind:
+        api:
+          address: 127.0.0.1
+      backend:
+        pdns4:
+          api_token: ${_param:designate_pdns_api_key}
+          api_endpoint: ${_param:designate_pdns_api_endpoint}
+      mdns:
+        address: ${_param:designate_mdns_address}
+        port: ${_param:designate_mdns_port}
+      pools:
+        default:
+          description: 'test pool'
+          targets:
+            default:
+              description: 'test target1'
+            default1:
+              type: ${_param:designate_pool_target_type}
+              description: 'test target2'
+              masters: ${_param:designate_pool_target_masters}
+              options:
+                host: ${_param:openstack_dns_node02_address}
+                port: 53
+                api_endpoint: "http://${_param:openstack_dns_node02_address}:${_param:powerdns_webserver_port}"
+                api_token: ${_param:designate_pdns_api_key}
+      quota:
+        zones: ${_param:designate_quota_zones}
+  glance:
+    server:
+      barbican:
+        enabled: ${_param:barbican_integration_enabled}
+      storage:
+        engine: file
+      images: []
+      workers: 1
+      bind:
+        address: 127.0.0.1
+      identity:
+        protocol: https
+      registry:
+        protocol: https
+  heat:
+    server:
+      bind:
+        api:
+          address: 127.0.0.1
+        api_cfn:
+          address: 127.0.0.1
+        api_cloudwatch:
+          address: 127.0.0.1
+      identity:
+        protocol: https
+  neutron:
+    server:
+      bind:
+        address: 127.0.0.1
+      identity:
+        protocol: https
+  nova:
+    controller:
+      networking: dvr
+      cpu_allocation: 54
+      barbican:
+        enabled: ${_param:barbican_integration_enabled}
+      metadata:
+        password: ${_param:metadata_password}
+      bind:
+        public_address: ${_param:cluster_vip_address}
+        novncproxy_port: 6080
+        private_address: 127.0.0.1
+      identity:
+        protocol: https
+      network:
+        protocol: https
+      glance:
+        protocol: https
+      vncproxy_url: http://${_param:cluster_vip_address}:6080
+      workers: 1
+  cinder:
+    controller:
+      barbican:
+        enabled: ${_param:barbican_integration_enabled}
+      identity:
+        protocol: https
+      osapi:
+        host: 127.0.0.1
+      glance:
+        protocol: https
+  manila:
+    common:
+      identity:
+        protocol: https
+  salt:
+    minion:
+      cert:
+        internal_proxy:
+          host: ${_param:salt_minion_ca_host}
+          authority: ${_param:salt_minion_ca_authority}
+          common_name: internal_proxy
+          signing_policy: cert_open
+          alternative_names: IP:127.0.0.1,IP:${_param:cluster_local_address},IP:${_param:cluster_public_host},DNS:${linux:system:name},DNS:${linux:network:fqdn},DNS:${_param:cluster_local_address},DNS:${_param:cluster_public_host}
+          key_file: "/etc/ssl/private/internal_proxy.key"
+          cert_file: "/etc/ssl/certs/internal_proxy.crt"
+          all_file: "/etc/ssl/certs/internal_proxy-with-chain.crt"
+  haproxy:
+    proxy:
+      listen:
+        barbican-api:
+          type: ~
+        barbican-admin-api:
+          type: ~
+        designate_api:
+          type: ~
+        keystone_public_api:
+          type: ~
+        keystone_admin_api:
+          type: ~
+        manila_api:
+          type: ~
+        nova_api:
+          type: ~
+        nova_metadata_api:
+          type: ~
+        cinder_api:
+          type: ~
+        glance_api:
+          type: ~
+        glance_registry_api:
+          type: ~
+        heat_cloudwatch_api:
+          type: ~
+        heat_api:
+          type: ~
+        heat_cfn_api:
+          type: ~
+        neutron_api:
+          type: ~
+        placement_api:
+          type: ~
diff --git a/classes/cluster/virtual-mcp-pike-dvr-ssl-barbican/openstack/dashboard.yml b/classes/cluster/virtual-mcp-pike-dvr-ssl-barbican/openstack/dashboard.yml
new file mode 100644
index 0000000..c1f597b
--- /dev/null
+++ b/classes/cluster/virtual-mcp-pike-dvr-ssl-barbican/openstack/dashboard.yml
@@ -0,0 +1,19 @@
+classes:
+- system.linux.system.repo.mcp.apt_mirantis.ubuntu
+- system.linux.system.repo.mcp.apt_mirantis.openstack
+- system.linux.system.repo.mcp.extra
+- system.linux.system.repo.mcp.apt_mirantis.saltstack_2016_3
+- system.horizon.server.single
+- cluster.virtual-mcp-pike-dvr-ssl-barbican
+parameters:
+  _param:
+    horizon_site_branding: OpenStack Dashboard
+  linux:
+    network:
+      interface:
+        ens4:
+          enabled: true
+          type: eth
+          proto: static
+          address: ${_param:single_address}
+          netmask: 255.255.255.0
diff --git a/classes/cluster/virtual-mcp-pike-dvr-ssl-barbican/openstack/dns.yml b/classes/cluster/virtual-mcp-pike-dvr-ssl-barbican/openstack/dns.yml
new file mode 100644
index 0000000..a16c8d6
--- /dev/null
+++ b/classes/cluster/virtual-mcp-pike-dvr-ssl-barbican/openstack/dns.yml
@@ -0,0 +1,35 @@
+classes:
+- system.powerdns.server.single
+- cluster.virtual-mcp-pike-dvr-ssl-barbican
+
+parameters:
+  powerdns:
+    server:
+      api:
+          enabled: True
+          key: ${_param:designate_pdns_api_key}
+      axfr_ips:
+          ${_param:powerdns_axfr_ips}
+      bind:
+          address: ${_param:single_address}
+      overwrite_supermasters: True
+      supermasters:
+          ${_param:powerdns_supermasters}
+      webserver:
+          enabled: True
+          address: ${_param:single_address}
+          password: ${_param:powerdns_webserver_password}
+          port: ${_param:powerdns_webserver_port}
+  linux:
+    system:
+      package:
+        python-msgpack:
+          version: latest
+    network:
+      interface:
+        ens4:
+          enabled: true
+          type: eth
+          proto: static
+          address: ${_param:single_address}
+          netmask: 255.255.255.0
diff --git a/classes/cluster/virtual-mcp-pike-dvr-ssl-barbican/openstack/dogtag.yml b/classes/cluster/virtual-mcp-pike-dvr-ssl-barbican/openstack/dogtag.yml
new file mode 100644
index 0000000..c9d90c0
--- /dev/null
+++ b/classes/cluster/virtual-mcp-pike-dvr-ssl-barbican/openstack/dogtag.yml
@@ -0,0 +1,67 @@
+classes:
+#- system.apache.server.site.barbican
+- system.dogtag.server.cluster
+- system.barbican.server.cluster
+- cluster.virtual-mcp-pike-dvr-ssl-barbican
+parameters:
+  _param:
+    apache_barbican_api_address: ${_param:cluster_local_address}
+    apache_barbican_ssl: ${_param:nginx_proxy_ssl}
+    apache_manila_ssl: ${_param:nginx_proxy_ssl}
+    barbican_dogtag_nss_password: workshop
+    barbican_dogtag_host: ${_param:cluster_vip_address}
+    # Dogtag listens on 8443, but there is no way to bind it to a
+    # specific IP since Dogtag is installed on the ctl nodes in this setup.
+    # Change the port on the HAProxy side to avoid a binding conflict.
+    haproxy_dogtag_bind_port: 8444
+    cluster_dogtag_port: 8443
+    dogtag_master_host: ctl01.${linux:system:domain}
+  dogtag:
+    server:
+      ldap_hostname: ${linux:network:fqdn}
+      ldap_dn_password: workshop
+      export_pem_file_path: /etc/dogtag/kra_admin_cert.pem
+      default_config_options:
+        pki_ds_hostname: ${linux:network:fqdn}
+        pki_admin_password: workshop
+        pki_backup_password: workshop
+        pki_client_database_password: workshop
+        pki_client_pkcs12_password: workshop
+        pki_clone_pkcs12_password: workshop
+        pki_ds_password: workshop
+        pki_token_password: workshop
+        pki_security_domain_name: EXAMPLE
+        pki_security_domain_password: workshop
+  barbican:
+    server:
+      enabled: true
+      dogtag_admin_cert:
+        engine: mine
+        minion: ${_param:dogtag_master_host}
+      ks_notifications_enable: True
+      store:
+        software:
+          store_plugin: dogtag_crypto
+          global_default: True
+      plugin:
+        dogtag:
+          port: ${_param:haproxy_dogtag_bind_port}
+  glance:
+    server:
+      barbican:
+        enabled: ${_param:barbican_integration_enabled}
+  nova:
+    controller:
+      barbican:
+        enabled: ${_param:barbican_integration_enabled}
+  cinder:
+    controller:
+      barbican:
+        enabled: ${_param:barbican_integration_enabled}
+  haproxy:
+    proxy:
+      listen:
+        barbican-api:
+          type: ~
+        barbican-admin-api:
+          type: ~
diff --git a/classes/cluster/virtual-mcp-pike-dvr-ssl-barbican/openstack/gateway.yml b/classes/cluster/virtual-mcp-pike-dvr-ssl-barbican/openstack/gateway.yml
new file mode 100644
index 0000000..81e8754
--- /dev/null
+++ b/classes/cluster/virtual-mcp-pike-dvr-ssl-barbican/openstack/gateway.yml
@@ -0,0 +1,30 @@
+classes:
+- system.linux.system.repo.mcp.apt_mirantis.openstack
+- system.linux.system.repo.mcp.extra
+- system.linux.system.repo.mcp.apt_mirantis.saltstack_2016_3
+- system.linux.network.interface.single_ovs_dvr
+- system.neutron.gateway.cluster
+- cluster.virtual-mcp-pike-dvr-ssl-barbican
+parameters:
+  _param:
+    primary_interface: ens4
+    tenant_interface: ens5
+    external_interface: ens6
+    interface_mtu: 9000
+    neutron_gateway_dvr: True
+    neutron_gateway_agent_mode: dvr_snat
+    linux_system_codename: xenial
+  neutron:
+    gateway:
+      notification:
+        driver: messagingv2
+        topics: "notifications"
+  linux:
+    network:
+      interface:
+        primary_interface:
+          ipflush_onchange: true
+        tenant_interface:
+          ipflush_onchange: true
+        external_interface:
+          ipflush_onchange: true
diff --git a/classes/cluster/virtual-mcp-pike-dvr-ssl-barbican/openstack/init.yml b/classes/cluster/virtual-mcp-pike-dvr-ssl-barbican/openstack/init.yml
new file mode 100644
index 0000000..18ab132
--- /dev/null
+++ b/classes/cluster/virtual-mcp-pike-dvr-ssl-barbican/openstack/init.yml
@@ -0,0 +1,280 @@
+parameters:
+  _param:
+    # openstack service addresses
+    openstack_proxy_address: 172.16.10.121
+    openstack_proxy_node01_address: 172.16.10.121
+    openstack_control_address: 172.16.10.254
+    openstack_control_node01_hostname: ctl01
+    openstack_control_node02_hostname: ctl02
+    openstack_control_node03_hostname: ctl03
+    openstack_share_node01_hostname: share01
+    openstack_control_node01_address: 172.16.10.101
+    openstack_control_node02_address: 172.16.10.102
+    openstack_control_node03_address: 172.16.10.103
+    openstack_database_address: ${_param:openstack_control_address}
+    openstack_database_node01_address: ${_param:openstack_control_node01_address}
+    openstack_database_node02_address: ${_param:openstack_control_node02_address}
+    openstack_database_node03_address: ${_param:openstack_control_node03_address}
+    openstack_message_queue_address: ${_param:openstack_control_address}
+    openstack_message_queue_node01_address: ${_param:openstack_control_node01_address}
+    openstack_message_queue_node02_address: ${_param:openstack_control_node02_address}
+    openstack_message_queue_node03_address: ${_param:openstack_control_node03_address}
+    openstack_gateway_address: 172.16.10.110
+    openstack_gateway_node01_address: ${_param:openstack_gateway_address}
+    openstack_dns_node01_address: 172.16.10.111
+    openstack_dns_node02_address: 172.16.10.112
+    openstack_share_node01_address: 172.16.10.97
+    openstack_share_node01_share_address: ${_param:openstack_share_node01_address}
+    openstack_telemetry_address: 172.16.10.250
+    openstack_telemetry_node01_address: 172.16.10.45
+    openstack_telemetry_node02_address: 172.16.10.46
+    openstack_telemetry_node03_address: 172.16.10.47
+    openstack_telemetry_system_codename: xenial
+    openstack_telemetry_hostname: mdb
+    openstack_telemetry_node01_hostname: mdb01
+    openstack_telemetry_node02_hostname: mdb02
+    openstack_telemetry_node03_hostname: mdb03
+    openstack_telemetry_keepalived_password: hf9GQOjG2uPhkJ2Q
+
+    openstack_version: pike
+    apt_mk_version: nightly
+    mcp_repo_version: 1.1
+    openstack_region: RegionOne
+    admin_email: root@localhost
+    cluster_public_host: 172.16.10.254
+    telemetry_public_host: ${_param:openstack_telemetry_address}
+    cluster_public_protocol: https
+    cluster_internal_protocol: https
+    keystone_service_protocol: ${_param:cluster_internal_protocol}
+    glance_service_protocol: ${_param:cluster_internal_protocol}
+    nova_service_protocol: ${_param:cluster_internal_protocol}
+    neutron_service_protocol: ${_param:cluster_internal_protocol}
+    heat_service_protocol: ${_param:cluster_internal_protocol}
+    cinder_service_protocol: ${_param:cluster_internal_protocol}
+    designate_service_protocol: ${_param:cluster_internal_protocol}
+    manila_service_protocol: ${_param:cluster_internal_protocol}
+    ceilometer_service_protocol: ${_param:cluster_internal_protocol}
+    panko_service_protocol: ${_param:cluster_internal_protocol}
+    aodh_service_protocol: ${_param:cluster_internal_protocol}
+    gnocchi_service_protocol: ${_param:cluster_internal_protocol}
+    barbican_service_protocol: ${_param:cluster_internal_protocol}
+    neutron_control_dvr: True
+    neutron_tenant_network_types: "flat,vxlan"
+    neutron_l3_ha: False
+    neutron_global_physnet_mtu: 1500
+    neutron_external_mtu: 1500
+    neutron_gateway_dvr: True
+    neutron_gateway_agent_mode: dvr_snat
+    neutron_compute_dvr: True
+    neutron_compute_agent_mode: dvr
+    neutron_compute_external_access: True
+    galera_server_cluster_name: openstack_cluster
+    galera_server_maintenance_password: workshop
+    galera_server_admin_password: workshop
+    galera_innodb_read_io_threads: 2
+    galera_innodb_write_io_threads: 2
+    galera_wsrep_slave_threads: 2
+    galera_xtrabackup_parallel: 1
+    galera_error_log_enabled: true
+    cluster_vip_address: ${_param:openstack_control_address}
+    cluster_local_address: ${_param:single_address}
+    cluster_node01_hostname: ${_param:openstack_control_node01_hostname}
+    cluster_node01_address: ${_param:openstack_control_node01_address}
+    cluster_node02_hostname: ${_param:openstack_control_node02_hostname}
+    cluster_node02_address: ${_param:openstack_control_node02_address}
+    cluster_node03_hostname: ${_param:openstack_control_node03_hostname}
+    cluster_node03_address: ${_param:openstack_control_node03_address}
+    rabbitmq_secret_key: workshop
+    rabbitmq_admin_password: workshop
+    rabbitmq_openstack_password: workshop
+    rabbitmq_cold_password: workshop
+    powerdns_axfr_ips:
+      - ${_param:openstack_control_node01_address}
+      - ${_param:openstack_control_node02_address}
+      - ${_param:openstack_control_node03_address}
+      - 127.0.0.1
+    powerdns_supermasters:
+      - ip: ${_param:openstack_control_node01_address}
+        nameserver: ns1.example.org
+        account: master
+      - ip: ${_param:openstack_control_node02_address}
+        nameserver: ns2.example.org
+        account: master
+      - ip: ${_param:openstack_control_node03_address}
+        nameserver: ns3.example.org
+        account: master
+    powerdns_overwrite_supermasters: True
+    powerdns_webserver_password: gJ6n3gVaYP8eS
+    powerdns_webserver_port: 8081
+    designate_admin_api_enabled: true
+    designate_domain_id: 5186883b-91fb-4891-bd49-e6769234a8fc
+    designate_mdns_address: 0.0.0.0
+    designate_mdns_port: 53
+    designate_pdns_api_key: VxK9cMlFL5Ae
+    designate_pdns_api_endpoint: "http://${_param:openstack_dns_node01_address}:${_param:powerdns_webserver_port}"
+    designate_pool_manager_enabled: True
+    designate_pool_manager_periodic_sync_interval: '120'
+    designate_pool_ns_records:
+      - hostname: 'ns1.example.org.'
+        priority: 10
+      - hostname: 'ns2.example.org.'
+        priority: 20
+      - hostname: 'ns3.example.org.'
+        priority: 30
+    designate_pool_nameservers:
+      - host: ${_param:openstack_dns_node01_address}
+        port: 53
+      - host: ${_param:openstack_dns_node02_address}
+        port: 53
+    designate_pool_target_type: pdns4
+    designate_pool_target_masters:
+      - host: ${_param:openstack_control_node01_address}
+        port: 53
+      - host: ${_param:openstack_control_node02_address}
+        port: 53
+      - host: ${_param:openstack_control_node03_address}
+        port: 53
+    designate_pool_target_options:
+      host: ${_param:openstack_dns_node01_address}
+      port: 53
+      api_token: ${_param:designate_pdns_api_key}
+      api_endpoint: ${_param:designate_pdns_api_endpoint}
+    designate_quota_zones: 40
+    designate_version: ${_param:openstack_version}
+    glance_version: ${_param:openstack_version}
+    glance_service_host: ${_param:cluster_vip_address}
+    keystone_version: ${_param:openstack_version}
+    keystone_service_host: ${_param:cluster_vip_address}
+    heat_version: ${_param:openstack_version}
+    heat_service_host: ${_param:cluster_vip_address}
+    heat_domain_admin_password: workshop
+    cinder_version: ${_param:openstack_version}
+    cinder_service_host: ${_param:cluster_vip_address}
+    manila_version: ${_param:openstack_version}
+    manila_service_host: ${_param:cluster_vip_address}
+    panko_version: ${_param:openstack_version}
+    panko_service_host: ${_param:openstack_telemetry_address}
+    ceilometer_version: ${_param:openstack_version}
+    ceilometer_service_host: ${_param:openstack_telemetry_address}
+    barbican_service_host: ${_param:cluster_vip_address}
+    barbican_version: ${_param:openstack_version}
+    aodh_version: ${_param:openstack_version}
+    aodh_service_host: ${_param:openstack_telemetry_address}
+    gnocchi_version: 4.0
+    gnocchi_service_host: ${_param:openstack_telemetry_address}
+    gnocchi_public_host: ${_param:telemetry_public_host}
+    aodh_public_host: ${_param:telemetry_public_host}
+    ceilometer_public_host: ${_param:telemetry_public_host}
+    panko_public_host: ${_param:telemetry_public_host}
+    nova_version: ${_param:openstack_version}
+    nova_service_host: ${_param:cluster_vip_address}
+    nova_vncproxy_url: http://${_param:cluster_vip_address}:8060
+    neutron_version: ${_param:openstack_version}
+    neutron_service_host: ${_param:cluster_vip_address}
+    glusterfs_service_host: ${_param:cluster_vip_address}
+    designate_service_host: ${_param:cluster_vip_address}
+    metadata_password: password
+    mysql_admin_user: root
+    mysql_admin_password: workshop
+    mysql_cinder_password: workshop
+    mysql_glance_password: workshop
+    mysql_heat_password: workshop
+    mysql_keystone_password: workshop
+    mysql_neutron_password: workshop
+    mysql_nova_password: workshop
+    mysql_designate_password: workshop
+    mysql_manila_password: workshop
+    mysql_ceilometer_password: workshop
+    mysql_panko_password: workshop
+    mysql_aodh_password: workshop
+    mysql_gnocchi_password: workshop
+    mysql_barbican_password: workshop
+    keystone_service_token: workshop
+    keystone_admin_password: workshop
+    keystone_cinder_password: workshop
+    keystone_glance_password: workshop
+    keystone_heat_password: workshop
+    keystone_keystone_password: workshop
+    keystone_neutron_password: workshop
+    keystone_nova_password: workshop
+    keystone_designate_password: workshop
+    keystone_manila_password: workshop
+    keystone_aodh_password: workshop
+    keystone_panko_password: workshop
+    keystone_ceilometer_password: workshop
+    keystone_gnocchi_password: workshop
+    keystone_barbican_password: workshop
+    horizon_version: ${_param:openstack_version}
+    horizon_secret_key: opaesee8Que2yahJoh9fo0eefo1Aeyo6ahyei8zeiboh3aeth5loth7ieNa5xi5e
+    horizon_identity_host: ${_param:cluster_vip_address}
+    horizon_identity_encryption: none
+    horizon_identity_version: 2
+    ceilometer_secret_key: workshop
+    ceilometer_agent_default_polling_interval: 15
+    ceilometer_agent_default_polling_meters:
+      - "*"
+    barbican_integration_enabled: true
+  linux:
+    network:
+      purge_hosts: true
+      host:
+        prx:
+          address: ${_param:openstack_proxy_address}
+          names:
+          - prx
+          - prx.${_param:cluster_domain}
+        prx01:
+          address: ${_param:openstack_proxy_node01_address}
+          names:
+          - prx01
+          - prx01.${_param:cluster_domain}
+        ctl:
+          address: ${_param:openstack_control_address}
+          names:
+          - ctl
+          - ctl.${_param:cluster_domain}
+        ctl01:
+          address: ${_param:openstack_control_node01_address}
+          names:
+          - ctl01
+          - ctl01.${_param:cluster_domain}
+        ctl02:
+          address: ${_param:openstack_control_node02_address}
+          names:
+          - ctl02
+          - ctl02.${_param:cluster_domain}
+        ctl03:
+          address: ${_param:openstack_control_node03_address}
+          names:
+          - ctl03
+          - ctl03.${_param:cluster_domain}
+        gtw01:
+          address: ${_param:openstack_gateway_address}
+          names:
+          - gtw01
+          - gtw01.${_param:cluster_domain}
+        cmp01:
+          address: 172.16.10.105
+          names:
+          - cmp01
+          - cmp01.${_param:cluster_domain}
+        cmp02:
+          address: 172.16.10.106
+          names:
+          - cmp02
+          - cmp02.${_param:cluster_domain}
+        dns01:
+          address: ${_param:openstack_dns_node01_address}
+          names:
+          - dns01
+          - dns01.${_param:cluster_domain}
+        dns02:
+          address: ${_param:openstack_dns_node02_address}
+          names:
+          - dns02
+          - dns02.${_param:cluster_domain}
+    system:
+      user:
+        root:
+          name: root
+          password: false
diff --git a/classes/cluster/virtual-mcp-pike-dvr-ssl-barbican/openstack/proxy.yml b/classes/cluster/virtual-mcp-pike-dvr-ssl-barbican/openstack/proxy.yml
new file mode 100644
index 0000000..074dd4f
--- /dev/null
+++ b/classes/cluster/virtual-mcp-pike-dvr-ssl-barbican/openstack/proxy.yml
@@ -0,0 +1,15 @@
+classes:
+- system.nginx.server.single
+- system.nginx.server.proxy.openstack_api
+- system.nginx.server.proxy.openstack_vnc
+- system.nginx.server.proxy.openstack_web
+- system.salt.minion.cert.proxy
+- cluster.virtual-mcp-pike-dvr-ssl-barbican
+parameters:
+  _param:
+    nginx_proxy_ssl:
+      enabled: true
+      authority: ${_param:salt_minion_ca_authority}
+      engine: salt
+      mode: secure
+    salt_minion_ca_host: cfg01.${linux:system:domain}
diff --git a/classes/cluster/virtual-mcp-pike-dvr-ssl-barbican/openstack/share.yml b/classes/cluster/virtual-mcp-pike-dvr-ssl-barbican/openstack/share.yml
new file mode 100644
index 0000000..93a106e
--- /dev/null
+++ b/classes/cluster/virtual-mcp-pike-dvr-ssl-barbican/openstack/share.yml
@@ -0,0 +1,16 @@
+classes:
+- system.linux.system.lowmem
+- system.linux.system.repo.mcp.apt_mirantis.openstack
+- system.linux.storage.loopback_manila
+- system.manila.share
+- system.manila.share.backend.lvm
+- cluster.virtual-mcp-pike-dvr-ssl-barbican
+parameters:
+  _param:
+    manila_lvm_devices:
+      - /dev/loop0
+    loopback_device1_size: 20
+  manila:
+    common:
+      identity:
+        protocol: https
\ No newline at end of file
diff --git a/classes/cluster/virtual-mcp-pike-dvr-ssl-barbican/openstack/telemetry.yml b/classes/cluster/virtual-mcp-pike-dvr-ssl-barbican/openstack/telemetry.yml
new file mode 100644
index 0000000..eee65de
--- /dev/null
+++ b/classes/cluster/virtual-mcp-pike-dvr-ssl-barbican/openstack/telemetry.yml
@@ -0,0 +1,158 @@
+classes:
+- system.salt.minion.cert.proxy
+- system.linux.system.repo.ubuntu
+- system.linux.system.repo.glusterfs
+- system.linux.system.repo.mcp.openstack
+- system.memcached.server.single
+- system.apache.server.single
+- system.apache.server.site.gnocchi
+- system.apache.server.site.panko
+- system.glusterfs.server.cluster
+- system.glusterfs.client.cluster
+- system.glusterfs.client.volume.gnocchi
+- system.glusterfs.server.volume.gnocchi
+- service.redis.server.single
+- system.nginx.server.single
+- system.nginx.server.proxy.openstack.aodh
+- system.nginx.server.proxy.openstack.ceilometer
+- system.gnocchi.server.cluster
+- system.gnocchi.common.storage.incoming.redis
+- system.gnocchi.common.storage.file
+- system.gnocchi.common.coordination.redis
+- system.ceilometer.server.cluster
+- system.ceilometer.server.coordination.redis
+- system.aodh.server.cluster
+- system.aodh.server.coordination.redis
+- system.panko.server.cluster
+- system.ceilometer.server.backend.default
+- cluster.virtual-mcp-pike-dvr-ssl-barbican
+parameters:
+  _param:
+    salt_minion_ca_authority: salt_master_ca
+    keepalived_openstack_telemetry_vip_address: ${_param:openstack_telemetry_address}
+    keepalived_openstack_telemetry_vip_password: ${_param:openstack_telemetry_keepalived_password}
+    keepalived_openstack_telemetry_vip_interface: ens3
+    cluster_vip_address: ${_param:openstack_telemetry_address}
+    cluster_local_address: ${_param:single_address}
+    nginx_proxy_openstack_api_host: ${_param:openstack_telemetry_address}
+    nginx_proxy_openstack_api_address: ${_param:cluster_local_address}
+    nginx_proxy_openstack_ceilometer_host: 127.0.0.1
+    nginx_proxy_openstack_aodh_host: 127.0.0.1
+    nginx_proxy_ssl:
+      enabled: true
+      engine: salt
+      authority: "${_param:salt_minion_ca_authority}"
+      key_file: "/etc/ssl/private/internal_proxy.key"
+      cert_file: "/etc/ssl/certs/internal_proxy.crt"
+      chain_file: "/etc/ssl/certs/internal_proxy-with-chain.crt"
+    apache_gnocchi_api_address: ${_param:single_address}
+    apache_panko_api_address: ${_param:single_address}
+    apache_gnocchi_ssl: ${_param:nginx_proxy_ssl}
+    apache_panko_ssl: ${_param:nginx_proxy_ssl}
+    cluster_node01_hostname: ${_param:openstack_telemetry_node01_hostname}
+    cluster_node01_address: ${_param:openstack_telemetry_node01_address}
+    cluster_node02_hostname: ${_param:openstack_telemetry_node02_hostname}
+    cluster_node02_address: ${_param:openstack_telemetry_node02_address}
+    cluster_node03_hostname: ${_param:openstack_telemetry_node03_hostname}
+    cluster_node03_address: ${_param:openstack_telemetry_node03_address}
+    glusterfs_service_host: ${_param:openstack_telemetry_address}
+    gnocchi_glusterfs_service_host: ${_param:glusterfs_service_host}
+    redis_sentinel_node01_address: ${_param:openstack_telemetry_node01_address}
+    redis_sentinel_node02_address: ${_param:openstack_telemetry_node02_address}
+    redis_sentinel_node03_address: ${_param:openstack_telemetry_node03_address}
+    openstack_telemetry_redis_url: redis://${_param:redis_sentinel_node01_address}:26379?sentinel=master_1&sentinel_fallback=${_param:redis_sentinel_node02_address}:26379&sentinel_fallback=${_param:redis_sentinel_node03_address}:26379
+    gnocchi_coordination_url: ${_param:openstack_telemetry_redis_url}
+    gnocchi_storage_incoming_redis_url: ${_param:openstack_telemetry_redis_url}
+    glusterfs_node01_address: ${_param:cluster_node01_address}
+    glusterfs_node02_address: ${_param:cluster_node02_address}
+    glusterfs_node03_address: ${_param:cluster_node03_address}
+  linux:
+    network:
+      interface:
+        ens4:
+          enabled: true
+          type: eth
+          proto: static
+          address: ${_param:single_address}
+          netmask: 255.255.255.0
+  salt:
+    minion:
+      cert:
+        internal_proxy:
+          host: ${_param:salt_minion_ca_host}
+          authority: ${_param:salt_minion_ca_authority}
+          common_name: internal_proxy
+          signing_policy: cert_open
+          alternative_names: IP:127.0.0.1,IP:${_param:cluster_local_address},IP:${_param:openstack_telemetry_address},DNS:${linux:system:name},DNS:${linux:network:fqdn},DNS:${_param:cluster_local_address},DNS:${_param:openstack_telemetry_address}
+          key_file: "/etc/ssl/private/internal_proxy.key"
+          cert_file: "/etc/ssl/certs/internal_proxy.crt"
+          all_file: "/etc/ssl/certs/internal_proxy-with-chain.crt"
+  keepalived:
+    cluster:
+      instance:
+        VIP:
+          virtual_router_id: 160
+  redis:
+    server:
+      version: 3.0
+      bind:
+        address: ${_param:single_address}
+    cluster:
+      enabled: True
+      mode: sentinel
+      role: ${_param:redis_cluster_role}
+      quorum: 2
+      master:
+        host: ${_param:cluster_node01_address}
+        port: 6379
+      sentinel:
+        address: ${_param:single_address}
+  apache:
+    server:
+      modules:
+        - wsgi
+  gnocchi:
+    common:
+      database:
+        host: ${_param:openstack_database_address}
+    server:
+      identity:
+        protocol: https
+      pkgs:
+      # TODO: move python-memcache installation to formula
+      - gnocchi-api
+      - gnocchi-metricd
+      - python-memcache
+  panko:
+    server:
+      identity:
+        protocol: https
+  aodh:
+    server:
+      bind:
+        host: 127.0.0.1
+      coordination_backend:
+        url: ${_param:openstack_telemetry_redis_url}
+      identity:
+        protocol: https
+        host: ${_param:openstack_control_address}
+  ceilometer:
+    server:
+      bind:
+        host: 127.0.0.1
+      coordination_backend:
+        url: ${_param:openstack_telemetry_redis_url}
+      identity:
+        protocol: https
+        host: ${_param:openstack_control_address}
+  haproxy:
+    proxy:
+      listen:
+        panko_api:
+          type: ~
+        ceilometer_api:
+          type: ~
+        gnocchi_api:
+          type: ~
+        aodh-api:
+          type: ~