Add OVS Pike models

* Add ceph ovs pike
* Rename ceph ocata rgw to ceph ocata pike, as there is Pike inside
* Add pike ovs, dvr, dpdk

PROD-16951

Change-Id: Ib8c17eb0909991b70f18ac916ba503e6301f6eb7
diff --git a/classes/cluster/virtual-mcp-pike-ovs-ceph/openstack/control.yml b/classes/cluster/virtual-mcp-pike-ovs-ceph/openstack/control.yml
new file mode 100644
index 0000000..d0a95c8
--- /dev/null
+++ b/classes/cluster/virtual-mcp-pike-ovs-ceph/openstack/control.yml
@@ -0,0 +1,108 @@
+classes:
+- system.linux.system.lowmem
+- system.linux.system.repo.mcp.apt_mirantis.glusterfs
+- system.linux.system.repo.mcp.apt_mirantis.openstack
+- system.linux.system.repo.mcp.extra
+- system.linux.system.repo.mcp.apt_mirantis.saltstack_2016_3
+- system.memcached.server.single
+- system.rabbitmq.server.cluster
+- system.rabbitmq.server.vhost.openstack
+- system.keystone.server.wsgi
+- system.keystone.server.cluster
+- system.glusterfs.client.cluster
+- system.glusterfs.client.volume.glance
+- system.glusterfs.client.volume.keystone
+- system.glusterfs.server.volume.glance
+- system.glusterfs.server.volume.keystone
+- system.glusterfs.server.cluster
+- system.glance.control.cluster
+- system.nova.control.cluster
+- system.neutron.control.openvswitch.cluster
+- system.heat.server.cluster
+- system.ceilometer.server.cluster
+- system.ceilometer.server.backend.influxdb
+- system.aodh.server.cluster
+- system.heka.ceilometer_collector.single
+- system.galera.server.cluster
+- system.galera.server.database.aodh
+- system.galera.server.database.ceilometer
+- system.galera.server.database.cinder
+- system.galera.server.database.glance
+- system.galera.server.database.grafana
+- system.galera.server.database.heat
+- system.galera.server.database.keystone
+- system.galera.server.database.nova
+- system.haproxy.proxy.listen.openstack.nova-placement
+- system.glance.control.storage.ceph
+- system.ceph.common.cluster
+- cluster.virtual-mcp-pike-ovs-ceph.ceph.common
+- system.cinder.control.backend.ceph
+- system.cinder.control.cluster
+- system.cinder.control.notification.messagingv2
+- system.cinder.volume.single
+- system.cinder.volume.notification.messagingv2
+- cluster.virtual-mcp-pike-ovs-ceph.infra
+parameters:
+  _param:
+    keepalived_vip_interface: ens4
+  linux:
+    system:
+      package:
+        python-msgpack:
+          version: latest
+    network:
+      interface:
+        ens4:
+          enabled: true
+          type: eth
+          proto: static
+          address: ${_param:single_address}
+          netmask: 255.255.255.0
+  keepalived:
+    cluster:
+      instance:
+        VIP:
+          virtual_router_id: 150
+  keystone:
+    server:
+      admin_email: ${_param:admin_email}
+      notification:
+        driver: messagingv2
+        topics: "notifications,${_param:stacklight_notification_topic}"
+  glance:
+    server:
+      images: []
+      workers: 1
+      notification:
+        driver: messagingv2
+        topics: "notifications,${_param:stacklight_notification_topic}"
+  heat:
+    server:
+      notification:
+        driver: messagingv2
+        topics: "notifications,${_param:stacklight_notification_topic}"
+  neutron:
+    server:
+      notification:
+        driver: messagingv2
+        topics: "notifications,${_param:stacklight_notification_topic}"
+  nova:
+    controller:
+      networking: dvr
+      cpu_allocation: 54
+      metadata:
+        password: ${_param:metadata_password}
+      vncproxy_url: http://${_param:cluster_vip_address}:6080
+      workers: 1
+      notification:
+        driver: messagingv2
+        topics: "notifications,${_param:stacklight_notification_topic}"
+  cinder:
+    volume:
+      notification:
+        driver: messagingv2
+        topics: "notifications,${_param:stacklight_notification_topic}"
+    controller:
+      notification:
+        driver: messagingv2
+        topics: "notifications,${_param:stacklight_notification_topic}"