Add Ceph Nautilus for proposed deployments
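
Switch the proposed bare-metal and heat CI/CD deployments to Ceph
Nautilus and align their cookiecutter contexts with the
contexts/ceph/*.yml presets (collocated block.db, multiple OSDs per
device, encrypted devices). The contexts gain explicit BlueStore
parameters (ceph_osds_per_device, ceph_osd_data_size, ceph_osd_dmcrypt,
ceph_osd_block_partition_prefix), a separate journal/block.db disk where
the preset expects one, and per-node Ceph public addresses kept behind a
FIXME block until the Ceph public network is deleted.
heat-cicd-pike-dvr-sl stays on Luminous and only picks up the
encrypted-devices settings.

A minimal sketch of the resulting Ceph section, with values taken from
the bm-cicd-queens-ovs-maas context in this change (abridged; other
templates use their own versions, disks and address ranges):

  ceph_enabled: 'True'
  ceph_version: "nautilus"
  ceph_osd_backend: "bluestore"
  ceph_osds_per_device: '1'
  ceph_osd_data_size: ''
  ceph_osd_dmcrypt: False
  ceph_osd_data_disks: "/dev/sdb"
  ceph_osd_journal_or_block_db_disks: ""
  ceph_osd_block_partition_prefix: ''
  # FIXME: remove this block after the Ceph public network is deleted
  ceph_public_network: 10.167.11.0/24
  ceph_mon_node01_ceph_public_address: 10.167.11.66
  ceph_rgw_node01_ceph_public_address: 10.167.11.76
  ceph_osd_ceph_public_address_ranges: 10.167.11.201-10.167.11.203
  # -end fixme
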
Related-PROD: PROD-34912
Change-Id: I6af70cc918a935a331a6da3cb9f0873d58437717
diff --git a/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml b/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
index e37392e..52f4f8f 100644
--- a/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
+++ b/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
@@ -766,15 +766,15 @@
ceph_osd_journal_size: 20
ceph_osd_bond_mode: "active-backup"
ceph_osd_data_partition_prefix: ""
+ ceph_osd_block_partition_prefix: ""
ceph_public_network_allocation: storage
- ceph_public_network: "10.167.11.0/24"
ceph_cluster_network: "10.167.11.0/24"
ceph_osd_single_address_ranges: "10.167.11.200-10.167.11.202"
ceph_osd_deploy_address_ranges: "172.16.164.8-172.16.164.10"
ceph_osd_storage_address_ranges: "10.167.11.200-10.167.11.202"
ceph_osd_backend_address_ranges: "10.167.12.200-10.167.12.202"
ceph_osd_data_disks: "/dev/sdb"
- ceph_osd_journal_or_block_db_disks: "/dev/sdb"
+ ceph_osd_journal_or_block_db_disks: "/dev/sdc"
ceph_osd_mode: "separated"
ceph_osd_deploy_nic: "eth0"
ceph_osd_primary_first_nic: "eth1"
@@ -792,6 +792,16 @@
ceph_rgw_node02_hostname: "rgw02"
ceph_rgw_node03_address: "10.167.11.78"
ceph_rgw_node03_hostname: "rgw03"
+ # FIXME: remove this block after the Ceph public network is deleted
+ ceph_public_network: 10.167.11.0/24
+ ceph_mon_node01_ceph_public_address: 10.167.11.66
+ ceph_mon_node02_ceph_public_address: 10.167.11.67
+ ceph_mon_node03_ceph_public_address: 10.167.11.68
+ ceph_rgw_node01_ceph_public_address: 10.167.11.76
+ ceph_rgw_node02_ceph_public_address: 10.167.11.77
+ ceph_rgw_node03_ceph_public_address: 10.167.11.78
+ ceph_osd_ceph_public_address_ranges: 10.167.11.201-10.167.11.203
+ # -end fixme
rsync_fernet_rotation: 'True'
compute_padding_with_zeros: True
designate_backend: powerdns
diff --git a/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml b/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
index 2785866..187fd0f 100644
--- a/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
+++ b/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
@@ -772,9 +772,13 @@
use_default_network_scheme: 'True'
sriov_network_subnet: 192.168.10.0/24
ceph_enabled: 'True'
- ceph_version: "luminous"
+ ceph_version: "nautilus"
ceph_hyper_converged: "False"
+ # Apply Ceph settings from contexts/ceph/nautilus-collocated-block-db.yml
ceph_osd_backend: "bluestore"
+ ceph_osds_per_device: '1'
+ ceph_osd_data_size: ''
+ ceph_osd_dmcrypt: False
ceph_osd_count: "3"
ceph_osd_node_count: 3
ceph_osd_block_db_size: 20
@@ -782,14 +786,24 @@
ceph_osd_bond_mode: "active-backup"
ceph_osd_data_partition_prefix: ""
ceph_public_network_allocation: storage
- ceph_public_network: "10.167.11.0/24"
ceph_cluster_network: "10.167.11.0/24"
ceph_osd_single_address_ranges: "10.167.11.200-10.167.11.202"
ceph_osd_deploy_address_ranges: "172.16.164.8-172.16.164.10"
ceph_osd_storage_address_ranges: "10.167.11.200-10.167.11.202"
ceph_osd_backend_address_ranges: "10.167.12.200-10.167.12.202"
+ # FIXME: remove this block after the Ceph public network is deleted
+ ceph_public_network: 10.167.11.0/24
+ ceph_mon_node01_ceph_public_address: 10.167.11.66
+ ceph_mon_node02_ceph_public_address: 10.167.11.67
+ ceph_mon_node03_ceph_public_address: 10.167.11.68
+ ceph_rgw_node01_ceph_public_address: 10.167.11.76
+ ceph_rgw_node02_ceph_public_address: 10.167.11.77
+ ceph_rgw_node03_ceph_public_address: 10.167.11.78
+ ceph_osd_ceph_public_address_ranges: 10.167.11.201-10.167.11.203
+ # -end fixme
ceph_osd_data_disks: "/dev/sdb"
- ceph_osd_journal_or_block_db_disks: "/dev/sdb"
+ ceph_osd_journal_or_block_db_disks: ""
+ ceph_osd_block_partition_prefix: ''
ceph_osd_mode: "separated"
ceph_osd_deploy_nic: "eth0"
ceph_osd_primary_first_nic: "eth1"
diff --git a/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt-context-cookiecutter-contrail.yaml
index cc98b5f..5724200 100644
--- a/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt-context-cookiecutter-contrail.yaml
@@ -833,9 +833,9 @@
ceph_osd_journal_size: 20
ceph_osd_bond_mode: "active-backup"
ceph_osd_data_partition_prefix: ""
+ ceph_osd_block_partition_prefix: ""
ceph_public_network_allocation: storage
- ceph_public_network: "==IPV4_NET_CONTROL_PREFIX==.0/24"
ceph_cluster_network: "==IPV4_NET_CONTROL_PREFIX==.0/24"
# for 2018.11.0+
@@ -845,7 +845,7 @@
ceph_osd_backend_address_ranges: "==IPV4_NET_TENANT_PREFIX==.200-==IPV4_NET_TENANT_PREFIX==.202"
ceph_osd_data_disks: "/dev/sdb"
- ceph_osd_journal_or_block_db_disks: "/dev/sdb"
+ ceph_osd_journal_or_block_db_disks: "/dev/sdc"
ceph_osd_mode: "separated"
ceph_osd_deploy_nic: "eth0"
ceph_osd_primary_first_nic: "eth1"
@@ -876,6 +876,16 @@
#ceph_rgw_node03_deploy_address: "172.16.48.78"
ceph_rgw_node03_address: "==IPV4_NET_CONTROL_PREFIX==.78"
ceph_rgw_node03_hostname: "rgw03"
+ # FIXME: remove this block after the Ceph public network is deleted
+ ceph_public_network: "==IPV4_NET_CONTROL_PREFIX==.0/24"
+ ceph_mon_node01_ceph_public_address: "==IPV4_NET_CONTROL_PREFIX==.66"
+ ceph_mon_node02_ceph_public_address: "==IPV4_NET_CONTROL_PREFIX==.67"
+ ceph_mon_node03_ceph_public_address: "==IPV4_NET_CONTROL_PREFIX==.68"
+ ceph_rgw_node01_ceph_public_address: "==IPV4_NET_CONTROL_PREFIX==.76"
+ ceph_rgw_node02_ceph_public_address: "==IPV4_NET_CONTROL_PREFIX==.77"
+ ceph_rgw_node03_ceph_public_address: "==IPV4_NET_CONTROL_PREFIX==.78"
+ ceph_osd_ceph_public_address_ranges: "==IPV4_NET_CONTROL_PREFIX==.201-==IPV4_NET_CONTROL_PREFIX==.203"
+ # -end fixme
manila_enabled: 'False'
designate_backend: powerdns
designate_enabled: 'True'
diff --git a/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt-context-cookiecutter-contrail.yaml
index 523e30e..84546ec 100644
--- a/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt-context-cookiecutter-contrail.yaml
@@ -823,10 +823,15 @@
openldap_domain: heat-bm-cicd-queens-contrail-sl.local
openldap_enabled: 'True'
openldap_organisation: ${_param:cluster_name}
+ # Apply Ceph settings from contexts/ceph/nautilus-multiple-osd.yml
ceph_enabled: 'True'
- ceph_version: "luminous"
+ ceph_version: "nautilus"
ceph_hyper_converged: "False"
ceph_osd_backend: "bluestore"
+ ceph_osds_per_device: '1'
+ ceph_osd_data_size: ''
+ ceph_osd_dmcrypt: False
+ ceph_osd_journal_partitoning: true
ceph_osd_count: "3"
ceph_osd_node_count: 3
ceph_osd_block_db_size: 20
@@ -835,8 +840,16 @@
ceph_osd_data_partition_prefix: ""
ceph_public_network_allocation: storage
- ceph_public_network: "==IPV4_NET_CONTROL_PREFIX==.0/24"
ceph_cluster_network: "==IPV4_NET_CONTROL_PREFIX==.0/24"
+ # FIXME: remove this block after the Ceph public network is deleted
+ ceph_public_network: "==IPV4_NET_CONTROL_PREFIX==.0/24"
+ ceph_mon_node01_ceph_public_address: "==IPV4_NET_CONTROL_PREFIX==.66"
+ ceph_mon_node02_ceph_public_address: "==IPV4_NET_CONTROL_PREFIX==.67"
+ ceph_mon_node03_ceph_public_address: "==IPV4_NET_CONTROL_PREFIX==.68"
+ ceph_rgw_node01_ceph_public_address: "==IPV4_NET_CONTROL_PREFIX==.76"
+ ceph_rgw_node02_ceph_public_address: "==IPV4_NET_CONTROL_PREFIX==.77"
+ ceph_rgw_node03_ceph_public_address: "==IPV4_NET_CONTROL_PREFIX==.78"
+ ceph_osd_ceph_public_address_ranges: "==IPV4_NET_CONTROL_PREFIX==.201-==IPV4_NET_CONTROL_PREFIX==.203"
# for 2018.11.0+
ceph_osd_single_address_ranges: "==IPV4_NET_CONTROL_PREFIX==.200-==IPV4_NET_CONTROL_PREFIX==.202"
@@ -845,7 +858,8 @@
ceph_osd_backend_address_ranges: "==IPV4_NET_TENANT_PREFIX==.200-==IPV4_NET_TENANT_PREFIX==.202"
ceph_osd_data_disks: "/dev/sdb"
- ceph_osd_journal_or_block_db_disks: "/dev/sdb"
+ ceph_osd_block_partition_prefix: ''
+ ceph_osd_journal_or_block_db_disks: "/dev/sdc"
ceph_osd_mode: "separated"
ceph_osd_deploy_nic: "eth0"
ceph_osd_primary_first_nic: "eth1"
diff --git a/tcp_tests/templates/heat-cicd-pike-dvr-sl/salt-context-cookiecutter.yaml b/tcp_tests/templates/heat-cicd-pike-dvr-sl/salt-context-cookiecutter.yaml
index 6af4ce3..e7e23e5 100644
--- a/tcp_tests/templates/heat-cicd-pike-dvr-sl/salt-context-cookiecutter.yaml
+++ b/tcp_tests/templates/heat-cicd-pike-dvr-sl/salt-context-cookiecutter.yaml
@@ -43,13 +43,17 @@
ceph_mon_node02_hostname: cmn02
ceph_mon_node03_address: 10.167.4.68
ceph_mon_node03_hostname: cmn03
+ # Apply Ceph settings from contexts/ceph/luminous-encrypted-devices.yml
ceph_osd_backend: bluestore
+ ceph_osds_per_device: '1'
+ ceph_osd_data_size: ''
ceph_osd_block_db_size: '3'
+ ceph_osd_dmcrypt: True
ceph_osd_bond_mode: active-backup
ceph_osd_data_partition_prefix: ""
ceph_osd_count: '3'
ceph_osd_data_disks: "/dev/vdb"
- ceph_osd_journal_or_block_db_disks: "/dev/vdb"
+ ceph_osd_journal_or_block_db_disks: "/dev/vdc"
ceph_osd_node_count: '3'
ceph_osd_journal_size: '3'
ceph_osd_deploy_nic: "eth0"
@@ -61,7 +65,7 @@
ceph_osd_storage_address_ranges: 10.167.4.201-10.167.4.203
ceph_public_network_allocation: storage
ceph_osd_mode: "separated"
- ceph_public_network: 10.167.4.0/24
+
ceph_rgw_address: 10.167.4.75
ceph_rgw_hostname: rgw
ceph_rgw_node01_address: 10.167.4.76
@@ -71,6 +75,16 @@
ceph_rgw_node03_address: 10.167.4.78
ceph_rgw_node03_hostname: rgw03
ceph_version: "luminous"
+ # FIXME: remove this block after the Ceph public network is deleted
+ ceph_public_network: 10.167.4.0/24
+ ceph_mon_node01_ceph_public_address: 10.167.4.66
+ ceph_mon_node02_ceph_public_address: 10.167.4.67
+ ceph_mon_node03_ceph_public_address: 10.167.4.68
+ ceph_rgw_node01_ceph_public_address: 10.167.4.76
+ ceph_rgw_node02_ceph_public_address: 10.167.4.77
+ ceph_rgw_node03_ceph_public_address: 10.167.4.78
+ ceph_osd_ceph_public_address_ranges: 10.167.4.201-10.167.4.203
+ # -end fixme
ceph_osd_block_partition_prefix: ''
cicd_control_node01_address: 10.167.4.91
cicd_control_node01_hostname: cid01
diff --git a/tcp_tests/templates/heat-cicd-queens-contrail41-sl/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/heat-cicd-queens-contrail41-sl/salt-context-cookiecutter-contrail.yaml
index 0a079c7..5ca9d6b 100644
--- a/tcp_tests/templates/heat-cicd-queens-contrail41-sl/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/heat-cicd-queens-contrail41-sl/salt-context-cookiecutter-contrail.yaml
@@ -243,19 +243,34 @@
openldap_domain: heat-cicd-queens-contrail41-sl.local
openldap_enabled: 'True'
openldap_organisation: ${_param:cluster_name}
+ # Apply Ceph settings from contexts/ceph/nautilus-multiple-osd.yml
ceph_enabled: 'True'
- ceph_version: "luminous"
+ ceph_version: "nautilus"
ceph_hyper_converged: "False"
ceph_osd_backend: "bluestore"
+ ceph_osd_journal_partitoning: true
+ ceph_osds_per_device: '1'
+ ceph_osd_data_size: ''
ceph_osd_count: "3"
ceph_osd_node_count: 3
ceph_osd_block_db_size: 3
ceph_osd_journal_size: 3
ceph_osd_bond_mode: "active-backup"
+ ceph_osd_dmcrypt: False
ceph_osd_data_partition_prefix: ""
+ ceph_osd_block_partition_prefix: ""
ceph_public_network_allocation: storage
- ceph_public_network: "==IPV4_NET_CONTROL_PREFIX==.0/24"
+
ceph_cluster_network: "==IPV4_NET_CONTROL_PREFIX==.0/24"
+ # FIXME: remove this block after the Ceph public network is deleted
+ ceph_public_network: "==IPV4_NET_CONTROL_PREFIX==.0/24"
+ ceph_mon_node01_ceph_public_address: "==IPV4_NET_CONTROL_PREFIX==.66"
+ ceph_mon_node02_ceph_public_address: "==IPV4_NET_CONTROL_PREFIX==.67"
+ ceph_mon_node03_ceph_public_address: "==IPV4_NET_CONTROL_PREFIX==.68"
+ ceph_rgw_node01_ceph_public_address: "==IPV4_NET_CONTROL_PREFIX==.76"
+ ceph_rgw_node02_ceph_public_address: "==IPV4_NET_CONTROL_PREFIX==.77"
+ ceph_rgw_node03_ceph_public_address: "==IPV4_NET_CONTROL_PREFIX==.78"
+ ceph_osd_ceph_public_address_ranges: "==IPV4_NET_CONTROL_PREFIX==.201-==IPV4_NET_CONTROL_PREFIX==.203"
# for 2018.11.0+
ceph_osd_single_address_ranges: "==IPV4_NET_CONTROL_PREFIX==.201-==IPV4_NET_CONTROL_PREFIX==.203"
@@ -263,8 +278,8 @@
ceph_osd_storage_address_ranges: "==IPV4_NET_CONTROL_PREFIX==.201-==IPV4_NET_CONTROL_PREFIX==.203"
ceph_osd_backend_address_ranges: "==IPV4_NET_TENANT_PREFIX==.201-==IPV4_NET_TENANT_PREFIX==.203"
- ceph_osd_data_disks: "/dev/vdb"
- ceph_osd_journal_or_block_db_disks: "/dev/vdb"
+ ceph_osd_data_disks: "/dev/vdc"
+ ceph_osd_journal_or_block_db_disks: "/dev/vdd"
ceph_osd_mode: "separated"
ceph_osd_deploy_nic: "eth0"
ceph_osd_primary_first_nic: "eth1"
diff --git a/tcp_tests/templates/heat-cicd-queens-dvr-sl/salt-context-cookiecutter.yaml b/tcp_tests/templates/heat-cicd-queens-dvr-sl/salt-context-cookiecutter.yaml
index 3ca199f..a1b5508 100644
--- a/tcp_tests/templates/heat-cicd-queens-dvr-sl/salt-context-cookiecutter.yaml
+++ b/tcp_tests/templates/heat-cicd-queens-dvr-sl/salt-context-cookiecutter.yaml
@@ -43,13 +43,17 @@
ceph_mon_node02_hostname: cmn02
ceph_mon_node03_address: 10.167.4.68
ceph_mon_node03_hostname: cmn03
+ # Apply Ceph settings from contexts/ceph/nautilus-encrypted-devices.yml
ceph_osd_backend: bluestore
+ ceph_osds_per_device: '1'
+ ceph_osd_data_size: ''
+ ceph_osd_dmcrypt: True
ceph_osd_block_db_size: '3'
ceph_osd_data_partition_prefix: ""
ceph_osd_bond_mode: active-backup
ceph_osd_count: '3'
- ceph_osd_data_disks: "/dev/vdb"
- ceph_osd_journal_or_block_db_disks: "/dev/vdb"
+ ceph_osd_data_disks: "/dev/vdc"
+ ceph_osd_journal_or_block_db_disks: ""
ceph_osd_mode: "separated"
ceph_osd_node_count: '3'
ceph_osd_journal_size: '3'
@@ -60,7 +64,6 @@
ceph_osd_deploy_address_ranges: 10.167.5.70-10.167.5.72
ceph_osd_backend_address_ranges: 10.167.6.201-10.167.6.203
ceph_osd_storage_address_ranges: 10.167.4.201-10.167.4.203
- ceph_public_network: 10.167.4.0/24
ceph_public_network_allocation: storage
ceph_rgw_address: 10.167.4.75
ceph_rgw_hostname: rgw
@@ -70,8 +73,18 @@
ceph_rgw_node02_hostname: rgw02
ceph_rgw_node03_address: 10.167.4.78
ceph_rgw_node03_hostname: rgw03
- ceph_version: "luminous"
+ ceph_version: "nautilus"
ceph_osd_block_partition_prefix: ''
+ # FIXME: remove this block after the Ceph public network is deleted
+ ceph_public_network: 10.167.4.0/24
+ ceph_mon_node01_ceph_public_address: 10.167.4.66
+ ceph_mon_node02_ceph_public_address: 10.167.4.67
+ ceph_mon_node03_ceph_public_address: 10.167.4.68
+ ceph_rgw_node01_ceph_public_address: 10.167.4.76
+ ceph_rgw_node02_ceph_public_address: 10.167.4.77
+ ceph_rgw_node03_ceph_public_address: 10.167.4.78
+ ceph_osd_ceph_public_address_ranges: 10.167.4.201-10.167.4.203
+ # -end fixme
cicd_control_node01_address: 10.167.4.91
cicd_control_node01_hostname: cid01
cicd_control_node02_address: 10.167.4.92