Add Ceph Nautilus for proposed deployments
Change-Id: I6af70cc918a935a331a6da3cb9f0873d58437717
related-prod: PROD-34912
diff --git a/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml b/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
index e37392e..52f4f8f 100644
--- a/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
+++ b/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
@@ -766,15 +766,15 @@
ceph_osd_journal_size: 20
ceph_osd_bond_mode: "active-backup"
ceph_osd_data_partition_prefix: ""
+ ceph_osd_block_partition_prefix: ""
ceph_public_network_allocation: storage
- ceph_public_network: "10.167.11.0/24"
ceph_cluster_network: "10.167.11.0/24"
ceph_osd_single_address_ranges: "10.167.11.200-10.167.11.202"
ceph_osd_deploy_address_ranges: "172.16.164.8-172.16.164.10"
ceph_osd_storage_address_ranges: "10.167.11.200-10.167.11.202"
ceph_osd_backend_address_ranges: "10.167.12.200-10.167.12.202"
ceph_osd_data_disks: "/dev/sdb"
- ceph_osd_journal_or_block_db_disks: "/dev/sdb"
+ ceph_osd_journal_or_block_db_disks: "/dev/sdc"
ceph_osd_mode: "separated"
ceph_osd_deploy_nic: "eth0"
ceph_osd_primary_first_nic: "eth1"
@@ -792,6 +792,16 @@
ceph_rgw_node02_hostname: "rgw02"
ceph_rgw_node03_address: "10.167.11.78"
ceph_rgw_node03_hostname: "rgw03"
+ # FIXME: temporary duplication — remove this block once the Ceph public
+ # network definition above is dropped (see PROD-34912)
+ ceph_public_network: "10.167.11.0/24"
+ ceph_mon_node01_ceph_public_address: "10.167.11.66"
+ ceph_mon_node02_ceph_public_address: "10.167.11.67"
+ ceph_mon_node03_ceph_public_address: "10.167.11.68"
+ ceph_rgw_node01_ceph_public_address: "10.167.11.76"
+ ceph_rgw_node02_ceph_public_address: "10.167.11.77"
+ ceph_rgw_node03_ceph_public_address: "10.167.11.78"
+ ceph_osd_ceph_public_address_ranges: "10.167.11.200-10.167.11.202"
+ # end of FIXME block
rsync_fernet_rotation: 'True'
compute_padding_with_zeros: True
designate_backend: powerdns