Pike OVS with Ceph RGW

In this change:
* model is aligned to use Pike
* added Cinder version param
* changed includes of linux.repo to match
  the introduced build_id structure (sketched below)

Related: PROD-16862, PROD-15289

Change-Id: I2cd5de82f6a1ef3105bc14ce4625123662b868ef
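
For reference, a minimal sketch of the Cinder version param and the
build_id-based repo include mentioned above, as they would typically
appear in the cluster model (the exact param names, class path, and
build_id value below are assumptions for illustration, not part of
this diff):

    # openstack/init.yml (hypothetical excerpt)
    parameters:
      _param:
        openstack_version: pike
        # version pin consumed by the cinder classes; name assumed
        cinder_version: ${_param:openstack_version}

    # infra/init.yml (hypothetical excerpt)
    classes:
    # repo include resolved against the build_id-based mirror layout;
    # class path assumed
    - system.linux.system.repo.mcp.openstack
    parameters:
      _param:
        # snapshot identifier selecting the repo build; name and
        # value assumed
        linux_repo_build_id: "2018.1"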
diff --git a/classes/cluster/virtual-mcp-ocata-dvr-ceph-rgw/init.yml b/classes/cluster/virtual-mcp-ocata-dvr-ceph-rgw/init.yml
new file mode 100644
index 0000000..83d78df
--- /dev/null
+++ b/classes/cluster/virtual-mcp-ocata-dvr-ceph-rgw/init.yml
@@ -0,0 +1,61 @@
+classes:
+- system.linux.network.dynamic_hosts
+- service.grafana.collector
+- system.heka.log_collector.single
+- system.heka.log_collector.output.telegraf
+- system.linux.system.repo.mcp.extra
+- system.linux.system.single
+- system.rsyslog.client.single
+- system.prometheus.collector
+- system.telegraf.agent
+- system.openssh.server.team.lab
+- system.openssh.server.team.tcpcloud
+- system.openssh.server.team.mcp_qa
+- cluster.virtual-mcp-ocata-dvr-ceph-rgw.ceph
+- cluster.virtual-mcp-ocata-dvr-ceph-rgw.infra
+- cluster.virtual-mcp-ocata-dvr-ceph-rgw.openstack
+- cluster.virtual-mcp-ocata-dvr-ceph-rgw.stacklight
+parameters:
+  _param:
+    cluster_domain: virtual-mcp-ocata-dvr-ceph-rgw.local
+    cluster_name: virtual-mcp-ocata-dvr-ceph-rgw
+    # infra service addresses
+    infra_config_hostname: cfg01
+    infra_config_address: 172.16.10.100
+    # openstack service addresses
+    openstack_proxy_address: 172.16.10.121
+    openstack_proxy_node01_address: 172.16.10.121
+    openstack_control_address: 172.16.10.254
+    openstack_control_node01_address: 172.16.10.101
+    openstack_control_node02_address: 172.16.10.102
+    openstack_control_node03_address: 172.16.10.103
+    openstack_database_address: ${_param:openstack_control_address}
+    openstack_database_node01_address: ${_param:openstack_control_node01_address}
+    openstack_database_node02_address: ${_param:openstack_control_node02_address}
+    openstack_database_node03_address: ${_param:openstack_control_node03_address}
+    openstack_message_queue_address: ${_param:openstack_control_address}
+    openstack_message_queue_node01_address: ${_param:openstack_control_node01_address}
+    openstack_message_queue_node02_address: ${_param:openstack_control_node02_address}
+    openstack_message_queue_node03_address: ${_param:openstack_control_node03_address}
+    openstack_gateway_address: 172.16.10.110
+    openstack_gateway_node01_address: ${_param:openstack_gateway_address}
+    openstack_dns_node01_address: 172.16.10.111
+    openstack_dns_node02_address: 172.16.10.112
+    # stacklight service addresses
+    stacklight_monitor_address: 172.16.10.253
+    stacklight_monitor_node01_address: 172.16.10.107
+    stacklight_monitor_node02_address: 172.16.10.108
+    stacklight_monitor_node03_address: 172.16.10.109
+    stacklight_telemetry_address: ${_param:stacklight_monitor_address}
+    stacklight_log_address: ${_param:stacklight_monitor_address}
+    stacklight_telemetry_node01_address: ${_param:stacklight_monitor_node01_address}
+    # ceph addresses
+    ceph_osd_node01_address: 172.16.10.94
+    ceph_osd_node02_address: 172.16.10.95
+    ceph_mon_node01_address: 172.16.10.96
+    ceph_mon_node02_address: 172.16.10.97
+    ceph_mon_node03_address: 172.16.10.98
+    ceph_rgw_address: 10.60.0.75
+    ceph_rgw_node01_address: 10.60.0.76
+    ceph_rgw_node02_address: 10.60.0.77
+    ceph_rgw_node03_address: 10.60.0.78
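
A note on the pattern used throughout this file: reclass resolves
${_param:...} references when the model is compiled, so node-level
addresses can inherit a single cluster-level value and only the shared
address needs to change in one place. A minimal standalone
illustration (generic names, not from this model):

    parameters:
      _param:
        cluster_vip_address: 172.16.10.254
        # resolves to 172.16.10.254 at compile time; changing the
        # VIP above updates every node that references it
        node01_address: ${_param:cluster_vip_address}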