Add Ceph Nautilus for released deployments
Related-prod: PROD-34912
Change-Id: I765cd33a6813cd673bd26f03e0b170784e4696d6
diff --git a/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/salt-context-cookiecutter-contrail.yaml
index 16d5e18..ad9d79b 100644
--- a/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/salt-context-cookiecutter-contrail.yaml
@@ -251,15 +251,20 @@
openldap_enabled: 'True'
openldap_organisation: ${_param:cluster_name}
ceph_enabled: 'True'
- ceph_version: "luminous"
+ # Apply settings from contexts/ceph/nautilus-multiple-osd.yml
+ ceph_version: "nautilus"
ceph_hyper_converged: "False"
ceph_osd_backend: "bluestore"
+ ceph_osds_per_device: '1'
+ ceph_osd_data_size: ''
ceph_osd_count: "3"
ceph_osd_node_count: 3
ceph_osd_block_db_size: 3
ceph_osd_journal_size: 3
+ ceph_osd_dmcrypt: False
ceph_osd_bond_mode: "active-backup"
ceph_osd_data_partition_prefix: ""
+ ceph_osd_block_partition_prefix: ""
ceph_public_network_allocation: storage
ceph_public_network: "==IPV4_NET_CONTROL_PREFIX==.0/24"
ceph_cluster_network: "==IPV4_NET_CONTROL_PREFIX==.0/24"
@@ -270,8 +275,8 @@
ceph_osd_storage_address_ranges: "==IPV4_NET_CONTROL_PREFIX==.201-==IPV4_NET_CONTROL_PREFIX==.203"
ceph_osd_backend_address_ranges: "==IPV4_NET_TENANT_PREFIX==.201-==IPV4_NET_TENANT_PREFIX==.203"
- ceph_osd_data_disks: "/dev/vdb"
- ceph_osd_journal_or_block_db_disks: "/dev/vdb"
+ ceph_osd_data_disks: "/dev/vdd"
+ ceph_osd_journal_or_block_db_disks: ""
ceph_osd_mode: "separated"
ceph_osd_deploy_nic: "eth0"
ceph_osd_primary_first_nic: "eth1"
diff --git a/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/salt-context-cookiecutter.yaml b/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/salt-context-cookiecutter.yaml
index e632f83..32a26ed 100644
--- a/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/salt-context-cookiecutter.yaml
+++ b/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/salt-context-cookiecutter.yaml
@@ -43,12 +43,14 @@
ceph_mon_node03_address: 10.167.4.68
ceph_mon_node03_hostname: cmn03
ceph_osd_backend: bluestore
+ ceph_osds_per_device: '3'
ceph_osd_block_db_size: '3'
+ ceph_osd_dmcrypt: False
ceph_osd_bond_mode: active-backup
ceph_osd_data_partition_prefix: ""
ceph_osd_count: '3'
- ceph_osd_data_disks: "/dev/vdb"
- ceph_osd_journal_or_block_db_disks: "/dev/vdb"
+ ceph_osd_data_disks: "/dev/vdd"
+ ceph_osd_journal_or_block_db_disks: "/dev/vde"
ceph_osd_node_count: '3'
ceph_osd_journal_size: '3'
ceph_osd_deploy_nic: "eth0"
diff --git a/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/salt-context-cookiecutter.yaml b/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/salt-context-cookiecutter.yaml
index d729d43..18d7a6b 100644
--- a/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/salt-context-cookiecutter.yaml
+++ b/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/salt-context-cookiecutter.yaml
@@ -1,3 +1,4 @@
+# Ceph Nautilus multiple osd
default_context:
jenkins_cfg_admin_password: r00tme
auditd_enabled: 'False'
@@ -43,13 +44,17 @@
ceph_mon_node02_hostname: cmn02
ceph_mon_node03_address: 10.167.4.68
ceph_mon_node03_hostname: cmn03
+ # Apply changes for ceph from contexts/ceph/nautilus-encrypted-devices.yml
ceph_osd_backend: bluestore
+ ceph_osds_per_device: '3'
+ ceph_osd_data_size: '14'
+ ceph_osd_dmcrypt: True
ceph_osd_block_db_size: '3'
ceph_osd_data_partition_prefix: ""
ceph_osd_bond_mode: active-backup
ceph_osd_count: '3'
- ceph_osd_data_disks: "/dev/vdb"
- ceph_osd_journal_or_block_db_disks: "/dev/vdb"
+ ceph_osd_data_disks: "/dev/vdd"
+ ceph_osd_journal_or_block_db_disks: "/dev/vde"
ceph_osd_mode: "separated"
ceph_osd_node_count: '3'
ceph_osd_journal_size: '3'
@@ -70,7 +75,8 @@
ceph_rgw_node02_hostname: rgw02
ceph_rgw_node03_address: 10.167.4.78
ceph_rgw_node03_hostname: rgw03
- ceph_version: luminous
+ ceph_version: "nautilus"
+ ceph_osd_block_partition_prefix: ''
cicd_control_node01_address: 10.167.4.91
cicd_control_node01_hostname: cid01
cicd_control_node02_address: 10.167.4.92