Add virtual-mcp-pike-dvr-ssl model

The model terminates SSL for the internal/public API endpoints on an nginx
proxy, with certificates issued through the Salt master CA. The API services
bind to 127.0.0.1 behind the proxy and reach each other over https; Keystone
is served by Apache with the same SSL settings, and the default HAProxy
listeners for the proxied APIs are unset.

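The per-service pattern, condensed from the pillar data added below
(illustrative excerpt only; the inline comments are editorial, and the full
set of services and SSL parameters is in control.yml):

    parameters:
      _param:
        nginx_proxy_ssl:
          enabled: true
          engine: salt                        # certificate issued via the Salt master CA
          authority: "${_param:salt_minion_ca_authority}"
      nova:
        controller:
          bind:
            private_address: 127.0.0.1        # the service itself listens on loopback only
          identity:
            protocol: https                   # service-to-service calls go through the TLS proxy
      haproxy:
        proxy:
          listen:
            nova_api:
              type: ~                         # default plain-HTTP listener is unset; nginx fronts this API
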
Change-Id: I2a38ffdc93c9658bd1fb99c5fca29a0436362e04
Related-Prod: PROD-17943
diff --git a/classes/cluster/virtual-mcp-pike-dvr-ssl/openstack/control.yml b/classes/cluster/virtual-mcp-pike-dvr-ssl/openstack/control.yml
new file mode 100644
index 0000000..b370655
--- /dev/null
+++ b/classes/cluster/virtual-mcp-pike-dvr-ssl/openstack/control.yml
@@ -0,0 +1,254 @@
+classes:
+#- system.salt.minion.ca.salt_master
+- system.salt.minion.cert.proxy
+- system.linux.system.lowmem
+- system.linux.system.repo.mcp.apt_mirantis.glusterfs
+- system.linux.system.repo.mcp.apt_mirantis.openstack
+- system.linux.system.repo.mcp.extra
+- system.linux.system.repo.mcp.apt_mirantis.saltstack_2016_3
+- system.memcached.server.single
+- system.rabbitmq.server.cluster
+- system.rabbitmq.server.vhost.openstack
+- system.nginx.server.single
+- system.nginx.server.proxy.openstack_api
+- system.nginx.server.proxy.openstack.designate
+- system.nginx.server.proxy.openstack.placement
+- system.keystone.server.wsgi
+- system.keystone.server.cluster
+- system.glusterfs.client.cluster
+- system.glusterfs.client.volume.glance
+- system.glusterfs.client.volume.keystone
+- system.glusterfs.server.volume.glance
+- system.glusterfs.server.volume.keystone
+- system.glusterfs.server.cluster
+- system.glance.control.cluster
+- system.nova.control.cluster
+- system.neutron.control.openvswitch.cluster
+- system.cinder.control.cluster
+- system.heat.server.cluster
+- system.ceilometer.server.cluster
+- system.ceilometer.server.backend.influxdb
+- system.aodh.server.cluster
+- system.heka.ceilometer_collector.single
+- system.designate.server.cluster
+- system.galera.server.cluster
+- system.galera.server.database.aodh
+- system.galera.server.database.ceilometer
+- system.galera.server.database.cinder
+- system.galera.server.database.glance
+- system.galera.server.database.grafana
+- system.galera.server.database.heat
+- system.galera.server.database.keystone
+- system.galera.server.database.nova
+- system.galera.server.database.designate
+- system.haproxy.proxy.listen.openstack.nova-placement
+- cluster.virtual-mcp-pike-dvr-ssl.openstack.dns
+- cluster.virtual-mcp-pike-dvr-ssl
+
+parameters:
+  _param:
+    keepalived_vip_interface: ens4
+    salt_minion_ca_authority: salt_master_ca
+    ### nginx ssl sites settings
+    nginx_proxy_ssl:
+      enabled: true
+      engine: salt
+      authority: "${_param:salt_minion_ca_authority}"
+      key_file: "/etc/ssl/private/internal_proxy.key"
+      cert_file: "/etc/ssl/certs/internal_proxy.crt"
+      chain_file: "/etc/ssl/certs/internal_proxy-with-chain.crt"
+    nginx_proxy_openstack_api_address: ${_param:cluster_local_address}
+    nginx_proxy_openstack_keystone_host: 127.0.0.1
+    nginx_proxy_openstack_nova_host: 127.0.0.1
+    nginx_proxy_openstack_cinder_host: 127.0.0.1
+    nginx_proxy_openstack_glance_host: 127.0.0.1
+    nginx_proxy_openstack_neutron_host: 127.0.0.1
+    nginx_proxy_openstack_heat_host: 127.0.0.1
+    nginx_proxy_openstack_designate_host: 127.0.0.1
+    nginx_proxy_openstack_placement_host: 127.0.0.1
+    apache_keystone_api_host: ${_param:single_address}
+    apache_keystone_ssl: ${_param:nginx_proxy_ssl}
+  nginx:
+    server:
+      site:
+        nginx_proxy_openstack_api_keystone:
+          enabled: false
+        nginx_proxy_openstack_api_keystone_private:
+          enabled: false
+  linux:
+    system:
+      package:
+        python-msgpack:
+          version: latest
+    network:
+      interface:
+        ens4:
+          enabled: true
+          type: eth
+          proto: static
+          address: ${_param:single_address}
+          netmask: 255.255.255.0
+  keepalived:
+    cluster:
+      instance:
+        VIP:
+          virtual_router_id: 150
+  keystone:
+    server:
+      admin_email: ${_param:admin_email}
+      notification:
+        driver: messagingv2
+        topics: "notifications,${_param:stacklight_notification_topic}"
+  designate:
+    pool_manager:
+      enabled: ${_param:designate_pool_manager_enabled}
+      periodic_sync_interval: ${_param:designate_pool_manager_periodic_sync_interval}
+    server:
+      identity:
+        protocol: https
+      bind:
+        api:
+          address: 127.0.0.1
+      backend:
+        pdns4:
+          api_token: ${_param:designate_pdns_api_key}
+          api_endpoint: ${_param:designate_pdns_api_endpoint}
+      mdns:
+        address: ${_param:designate_mdns_address}
+        port: ${_param:designate_mdns_port}
+      pools:
+        default:
+          description: 'test pool'
+          targets:
+            default:
+              description: 'test target1'
+            default1:
+              type: ${_param:designate_pool_target_type}
+              description: 'test target2'
+              masters: ${_param:designate_pool_target_masters}
+              options:
+                host: ${_param:openstack_dns_node02_address}
+                port: 53
+                api_endpoint: "http://${_param:openstack_dns_node02_address}:${_param:powerdns_webserver_port}"
+                api_token: ${_param:designate_pdns_api_key}
+      quota:
+        zones: ${_param:designate_quota_zones}
+  glance:
+    server:
+      storage:
+        engine: file
+      images: []
+      workers: 1
+      notification:
+        driver: messagingv2
+        topics: "notifications,${_param:stacklight_notification_topic}"
+      bind:
+        address: 127.0.0.1
+      identity:
+        protocol: https
+      registry:
+        protocol: https
+  heat:
+    server:
+      notification:
+        driver: messagingv2
+        topics: "notifications,${_param:stacklight_notification_topic}"
+      bind:
+        api:
+          address: 127.0.0.1
+        api_cfn:
+          address: 127.0.0.1
+        api_cloudwatch:
+          address: 127.0.0.1
+      identity:
+        protocol: https
+  neutron:
+    server:
+      notification:
+        driver: messagingv2
+        topics: "notifications,${_param:stacklight_notification_topic}"
+      bind:
+        address: 127.0.0.1
+      identity:
+        protocol: https
+  nova:
+    controller:
+      networking: dvr
+      cpu_allocation: 54
+      metadata:
+        password: ${_param:metadata_password}
+      bind:
+        public_address: ${_param:cluster_vip_address}
+        novncproxy_port: 6080
+        private_address: 127.0.0.1
+      identity:
+        protocol: https
+      network:
+        protocol: https
+      glance:
+        protocol: https
+      vncproxy_url: http://${_param:cluster_vip_address}:6080
+      workers: 1
+      notification:
+        driver: messagingv2
+        topics: "notifications,${_param:stacklight_notification_topic}"
+  cinder:
+    volume:
+      notification:
+        driver: messagingv2
+        topics: "notifications,${_param:stacklight_notification_topic}"
+    controller:
+      notification:
+        driver: messagingv2
+        topics: "notifications,${_param:stacklight_notification_topic}"
+      identity:
+        protocol: https
+      osapi:
+        host: 127.0.0.1
+      glance:
+        protocol: https
+  salt:
+    minion:
+      cert:
+        internal_proxy:
+          host: ${_param:salt_minion_ca_host}
+          authority: ${_param:salt_minion_ca_authority}
+          common_name: internal_proxy
+          signing_policy: cert_open
+          alternative_names: IP:127.0.0.1,IP:${_param:cluster_local_address},IP:${_param:cluster_public_host},DNS:${linux:system:name},DNS:${linux:network:fqdn},DNS:${_param:cluster_local_address},DNS:${_param:cluster_public_host}
+          key_file: "/etc/ssl/private/internal_proxy.key"
+          cert_file: "/etc/ssl/certs/internal_proxy.crt"
+          all_file: "/etc/ssl/certs/internal_proxy-with-chain.crt"
+  haproxy:
+    proxy:
+      listen:
+        # barbican-api:
+        #   type: ~
+        # barbican-admin-api:
+        #   type: ~
+        designate_api:
+          type: ~
+        keystone_public_api:
+          type: ~
+        keystone_admin_api:
+          type: ~
+        nova_api:
+          type: ~
+        nova_metadata_api:
+          type: ~
+        cinder_api:
+          type: ~
+        glance_api:
+          type: ~
+        glance_registry_api:
+          type: ~
+        heat_cloudwatch_api:
+          type: ~
+        heat_api:
+          type: ~
+        heat_cfn_api:
+          type: ~
+        neutron_api:
+          type: ~
+        nova_placement_api:
+          type: ~