Pike OVS with Ceph RGW
In this change:
* model is aligned to use Pike
* added Cinder version param
* changed includes of linux.repo to match the
  introduced build_id structure (see the sketch below)
Related: PROD-16862, PROD-15289
Change-Id: I2cd5de82f6a1ef3105bc14ce4625123662b868ef
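Note: the linux.repo include change referenced above is not part of the hunk
below. As a purely hypothetical sketch (the class and parameter names here are
assumptions, not the actual model contents), a build_id-pinned repo definition
would look roughly like:

    parameters:
      _param:
        # hypothetical pin; the real parameter name may differ
        linux_repo_build_id: "2018.4.0"
      linux:
        system:
          repo:
            mcp_openstack:
              # example mirror URL only, not the real one
              source: "deb http://mirror.example.net/${_param:linux_repo_build_id}/openstack-pike/xenial xenial main"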
diff --git a/classes/cluster/virtual-mcp-ocata-dvr-ceph-rgw/infra/config.yml b/classes/cluster/virtual-mcp-ocata-dvr-ceph-rgw/infra/config.yml
new file mode 100644
index 0000000..a7c9444
--- /dev/null
+++ b/classes/cluster/virtual-mcp-ocata-dvr-ceph-rgw/infra/config.yml
@@ -0,0 +1,252 @@
+classes:
+- service.git.client
+- system.linux.system.single
+- system.linux.system.repo.mcp.salt
+- system.linux.system.repo.mcp.apt_mirantis.ubuntu
+- system.linux.system.repo.mcp.apt_mirantis.openstack
+- system.openssh.client.lab
+- system.salt.master.api
+- system.salt.master.pkg
+- system.reclass.storage.salt
+- system.reclass.storage.system.ceph_mon_cluster
+- system.reclass.storage.system.ceph_rgw_cluster
+- system.salt.minion.ca.salt_master
+- system.salt.minion.cert.proxy
+- system.sphinx.server.doc.reclass
+- system.keystone.client.single
+- system.keystone.client.service.aodh
+- system.keystone.client.service.ceilometer
+- system.keystone.client.service.nova21
+- system.keystone.client.service.nova-placement
+- system.keystone.client.service.glare
+- system.keystone.client.service.cinder3
+- system.keystone.client.service.radosgw-swift
+- system.mysql.client.single
+- system.reclass.storage.system.openstack_control_cluster
+- system.reclass.storage.system.openstack_compute_multi
+- system.reclass.storage.system.openstack_gateway_single
+- system.reclass.storage.system.openstack_dashboard_single
+- system.reclass.storage.system.stacklightv2_server_cluster
+- cluster.virtual-mcp-ocata-dvr-ceph-rgw.openstack.proxy
+- cluster.virtual-mcp-ocata-dvr-ceph-rgw.stacklight.proxy
+- cluster.virtual-mcp-ocata-dvr-ceph-rgw
+parameters:
+  _param:
+    reclass_data_repository: https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab
+    reclass_data_revision: master
+    salt_master_environment_repository: "https://github.com/tcpcloud"
+    salt_master_environment_revision: master
+    salt_api_password_hash: "$6$sGnRlxGf$al5jMCetLP.vfI/fTl3Z0N7Za1aeiexL487jAtyRABVfT3NlwZxQGVhO7S1N8OwS/34VHYwZQA8lkXwKMN/GS1"
+    reclass_config_master: 192.168.10.90
+    single_address: 172.16.10.100
+    salt_master_host: 127.0.0.1
+    salt_master_base_environment: prd
+    salt_minion_ca_host: ${linux:network:fqdn}
+  linux:
+    network:
+      interface:
+        ens4:
+          enabled: true
+          type: eth
+          proto: static
+          address: ${_param:single_address}
+          netmask: 255.255.255.0
+  nginx:
+    server:
+      site:
+        nginx_proxy_openstack_web:
+          proxy:
+            host: ${_param:openstack_proxy_address}
+        nginx_proxy_openstack_api_heat_cfn:
+          enabled: false
+  salt:
+    master:
+      reactor:
+        reclass/minion/classify:
+        - salt://reclass/reactor/node_register.sls
+  reclass:
+    storage:
+      class_mapping:
+        common_node:
+          expression: all
+          node_param:
+            single_address:
+              value_template: <<node_control_ip>>
+            linux_system_codename:
+              value_template: <<node_os>>
+            salt_master_host:
+              value_template: <<node_master_ip>>
+        infra_config:
+          expression: <<node_hostname>>__startswith__cfg
+          cluster_param:
+            infra_config_address:
+              value_template: <<node_control_ip>>
+            infra_config_deploy_address:
+              value_template: <<node_deploy_ip>>
+            cluster_domain:
+              value_template: <<node_domain>>
+        openstack_control01:
+          expression: <<node_hostname>>__equals__ctl01
+          cluster_param:
+            openstack_control_node01_address:
+              value_template: <<node_control_ip>>
+        openstack_control02:
+          expression: <<node_hostname>>__equals__ctl02
+          cluster_param:
+            openstack_control_node02_address:
+              value_template: <<node_control_ip>>
+        openstack_control03:
+          expression: <<node_hostname>>__equals__ctl03
+          cluster_param:
+            openstack_control_node03_address:
+              value_template: <<node_control_ip>>
+        openstack_compute:
+          expression: <<node_hostname>>__startswith__cmp
+          node_class:
+            value_template:
+            - cluster.<<node_cluster>>.openstack.compute
+          node_param:
+            tenant_address:
+              value_template: <<node_tenant_ip>>
+            external_address:
+              value_template: <<node_external_ip>>
+        openstack_gateway:
+          expression: <<node_hostname>>__startswith__gtw
+          node_class:
+            value_template:
+            - cluster.<<node_cluster>>.openstack.gateway
+          node_param:
+            tenant_address:
+              value_template: <<node_tenant_ip>>
+            external_address:
+              value_template: <<node_external_ip>>
+          cluster_param:
+            openstack_gateway_node01_address:
+              value_template: <<node_control_ip>>
+        stacklight_monitor_node01:
+          expression: <<node_hostname>>__equals__mon01
+          cluster_param:
+            stacklight_monitor_node01_address:
+              value_template: <<node_control_ip>>
+        stacklight_monitor_node02:
+          expression: <<node_hostname>>__equals__mon02
+          cluster_param:
+            stacklight_monitor_node02_address:
+              value_template: <<node_control_ip>>
+        stacklight_monitor_node03:
+          expression: <<node_hostname>>__equals__mon03
+          cluster_param:
+            stacklight_monitor_node03_address:
+              value_template: <<node_control_ip>>
+        ceph_osd_node01:
+          expression: <<node_hostname>>__equals__osd01
+          cluster_param:
+            ceph_osd_node01_address:
+              value_template: <<node_control_ip>>
+        ceph_osd_node02:
+          expression: <<node_hostname>>__equals__osd02
+          cluster_param:
+            ceph_osd_node02_address:
+              value_template: <<node_control_ip>>
+        ceph_mon_node01:
+          expression: <<node_hostname>>__equals__cmn01
+          cluster_param:
+            ceph_mon_node01_address:
+              value_template: <<node_control_ip>>
+        ceph_mon_node02:
+          expression: <<node_hostname>>__equals__cmn02
+          cluster_param:
+            ceph_mon_node02_address:
+              value_template: <<node_control_ip>>
+        ceph_mon_node03:
+          expression: <<node_hostname>>__equals__cmn03
+          cluster_param:
+            ceph_mon_node03_address:
+              value_template: <<node_control_ip>>
+        ceph_rgw_node01:
+          expression: <<node_hostname>>__equals__rgw01
+          cluster_param:
+            ceph_rgw_node01_address:
+              value_template: <<node_control_ip>>
+        ceph_rgw_node02:
+          expression: <<node_hostname>>__equals__rgw02
+          cluster_param:
+            ceph_rgw_node02_address:
+              value_template: <<node_control_ip>>
+        ceph_rgw_node03:
+          expression: <<node_hostname>>__equals__rgw03
+          cluster_param:
+            ceph_rgw_node03_address:
+              value_template: <<node_control_ip>>
+        openstack_dashboard:
+          expression: <<node_hostname>>__startswith__prx
+          node_class:
+            value_template:
+            - cluster.<<node_cluster>>.openstack.dashboard
+            - cluster.<<node_cluster>>.openstack.proxy
+          cluster_param:
+            openstack_proxy_node01_address:
+              value_template: <<node_control_ip>>
+      node:
+        openstack_control_node01:
+          classes:
+          - service.galera.master.cluster
+          params:
+            mysql_cluster_role: master
+            linux_system_codename: xenial
+        openstack_control_node02:
+          classes:
+          - service.galera.slave.cluster
+          params:
+            mysql_cluster_role: slave
+            linux_system_codename: xenial
+        openstack_control_node03:
+          classes:
+          - service.galera.slave.cluster
+          params:
+            mysql_cluster_role: slave
+            linux_system_codename: xenial
+        stacklight_server_node01:
+          classes:
+          - system.influxdb.server.single
+          - system.influxdb.database.stacklight
+        openstack_compute_node01:
+          params:
+            single_address: 172.16.10.105
+            tenant_address: 10.1.0.105
+            external_address: 10.16.0.105
+        openstack_compute_node02:
+          params:
+            single_address: 172.16.10.106
+            tenant_address: 10.1.0.106
+            external_address: 10.16.0.106
+        openstack_gateway_node01:
+          params:
+            tenant_address: 10.1.0.110
+            external_address: 10.16.0.110
+        openstack_proxy_node01:
+          classes:
+          - cluster.virtual-mcp-ocata-dvr-ceph-rgw.openstack.proxy
+          params:
+            linux_system_codename: xenial
+        infra_storage_rack01:
+          name: osd0<<count>>
+          domain: ${_param:cluster_domain}
+          classes:
+          - cluster.${_param:cluster_name}.ceph.osd
+          repeat:
+            count: 2
+            start: 1
+            digits: 1
+            params:
+              ceph_crush_parent:
+                value: rack<<count>>
+                start: 1
+              single_address:
+                value: ceph_osd_node<<count>>_address
+                start: 1
+                digits: 2
+                interpolate: true
+          params:
+            salt_master_host: ${_param:reclass_config_master}
+            linux_system_codename: xenial
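For reference, the infra_storage_rack01 definition above relies on the reclass
storage "repeat" expansion. A rough sketch of the node entries it is expected
to generate under reclass:storage:node (illustrative only; the exact output
depends on the salt-formula-reclass storage logic) is:

    osd01.<cluster_domain>:
      classes:
      - cluster.<cluster_name>.ceph.osd
      params:
        ceph_crush_parent: rack1
        single_address: ${_param:ceph_osd_node01_address}
        salt_master_host: ${_param:reclass_config_master}
        linux_system_codename: xenial
    osd02.<cluster_domain>:
      classes:
      - cluster.<cluster_name>.ceph.osd
      params:
        ceph_crush_parent: rack2
        single_address: ${_param:ceph_osd_node02_address}
        salt_master_host: ${_param:reclass_config_master}
        linux_system_codename: xenial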