Migrate Ceph Pike deployment to CICD
* Fix node hostnames in underlay
* Fix cookiecutter context
* Add cid (CI/CD) nodes
* Delete deprecated YAML files for deploy
* Align generated files
* Migrate to cid usage
* Add LMA (StackLight)
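
For reference, a minimal sketch of the environment variables the renamed
templates read (variable names are taken from underlay.yaml/salt.yaml in this
change; the image paths are placeholders, and the first two exports are
optional because underlay.yaml provides the same defaults):

    export LAB_CONFIG_NAME=cookied-cicd-pike-dvr-ceph
    export ENV_NAME="${LAB_CONFIG_NAME}_proposed_${BUILD_NUMBER:-0}"
    export MCP_IMAGE_PATH1604=/path/to/mcp_ubuntu_1604_image.qcow2    # pre-configured control plane image (placeholder path)
    export IMAGE_PATH_CFG01_DAY01=/path/to/cfg01-day01.qcow2          # day01 image for cfg01 (placeholder path)
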
Change-Id: I34f889bea6b8094f692d441b32f057da5763cd7e
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
similarity index 67%
rename from tcp_tests/templates/cookied-mcp-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
rename to tcp_tests/templates/cookied-cicd-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
index 22e4442..672e74b 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
@@ -68,9 +68,46 @@
ceph_rgw_node03_address: 10.167.4.78
ceph_rgw_node03_hostname: rgw03
ceph_version: luminous
- cicd_enabled: 'False'
- cluster_domain: cookied-mcp-pike-dvr-ceph.local
- cluster_name: cookied-mcp-pike-dvr-ceph
+ cicd_control_node01_address: 10.167.4.91
+ cicd_control_node01_hostname: cid01
+ cicd_control_node02_address: 10.167.4.92
+ cicd_control_node02_hostname: cid02
+ cicd_control_node03_address: 10.167.4.93
+ cicd_control_node03_hostname: cid03
+ cicd_control_vip_address: 10.167.4.90
+ cicd_control_vip_hostname: cid
+ cicd_enabled: 'True'
+ cicd_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpAIBAAKCAQEAv64AnFbEuuOQHLlmMkmaZ+Hh/8hJ+VfFpJ/MzW1wWzYyhis7
+ 3A8rxNFWJ/I1/LJSsFI8qU0DpxjFjS9LMTTFXhDPPpuzgRLwfVusEmuQdXjOiT34
+ AIs07Q4w1nlvJ2+/l788ie1AEfnewd9erUHOs8Wt/PT3OOM/0ikY7EibvYF4L1Lb
+ xGRKYnUkY7G3eal9XcQpsTzAFRXoK3WafbCFBFsfzEWOhx1T+tn1SwaxPYJDt1OB
+ B1s77enFtBwbmbd0m1F1ufSXmdWea2xF3+5caS6tps/hwhCoOSJUQb7+dK4ri8og
+ q2YIhfEptrMP1R+nVqEY76P31aa/YSw4zOvcQwIDAQABAoIBAQCLKOzQlD4n4ObT
+ s9Z6U+2B1gCaDvOFzy9yoYGy8u1Li0GLHwBKd8kzDzgZsEN5vo1B7bKUx5ELU3S5
+ V8ijZMiVzmZn8eqUnwdyO4flp6otXxOzmAXhfy9hm5fhXjBQ1VSn+vMcv95wLpSG
+ 9IBsEQbchXwX1lFWP8Yp8iRiByTqoz6A7qSxRzIOtq1/coYS9Vcy7VZDMiUjqvuc
+ pYvwYHvrgeYqxLXyDRzbZX1BbkSoNI/5VwxLb9IMG901IXph0r4V3uVgnnq+Xzkk
+ MoOfmB3cyOrvtWblZAjkyA+jzTs/QNALRUeI7wUeh4FvlwEGHE6v5G4G28zOS0vL
+ 7IEhCqThAoGBAOeyDO07b060l+NOO+Jkv+NV31VD0w3S4TMyLPVSxXsrRPoHM9RM
+ udi6lewmALE4wk2Lc1Il6n0UrUGVbXxf55NJp2BQoSic+ZK2nTki0cZ/CkUDVNwY
+ R0WtWE0i3J+eF3e8j9VYm1mIlv0aDoYeH4qCp5is/JanvLy4MUl6tM7/AoGBANPJ
+ XheDO5lmqq1ejDTo3GAzYuAs44dQLDs0znEuuaUKZ4MKgQ4ax0L9n0MxvsuUGVcN
+ Nm7fZS4uMY3zLCOLcAXyD1jXY210gmOgFdXeYrH+2kSmqfflV8KHOLCatxLzRtbe
+ KBflcrEnrpUVNGKlpZaYr+4AyapXeMuXIxwveva9AoGAYtoDS9/UwHaqau+A+zlS
+ 6TJFA8LZNAepz0b0CYLUAJXYavhRs508mWwZ9NPN7c6yj5UUkZLdtZnxxY50VOEy
+ ExQUljIwX/yBOogxEiR57b9b6U/fj7vIBMFNcDOUf4Far9pCX5rbRNrS2I+abLxD
+ ZrwRt0Duz3QnQTkwxhHVPI8CgYAaIjQJJLl7AW84O32DneRrvouJ7CAbd2ot2CNN
+ Vh20XudNBUPNkMJb4t3/Nak8h8bktg2sesaKf0rAIGym6jLlmOwJ43IydHkOgBeR
+ r4JwQml+pS4+F7/Pkk4NhNnobbqlEv7RjA+uCp6BaP9w2M3pGmhDLzezXF3ciYbc
+ mINM5QKBgQCyM9ZWwSiA0D3oitnhs7C4eC0IHBfnSoa7f40osKm4VvmqKBFgRu8L
+ qYK9qX++pUm4sk0q7poGUscc1udMlejAkfc/HLIlUi6MM+S7ZQ2NHtnZ7COZa5O4
+ 9fG8FTiigLvMHka9ihYXtPbyGvusCaqyHp3D9VyOT+WsyM5eJe40lA==
+ -----END RSA PRIVATE KEY-----
+ cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/rgCcVsS645AcuWYySZpn4eH/yEn5V8Wkn8zNbXBbNjKGKzvcDyvE0VYn8jX8slKwUjypTQOnGMWNL0sxNMVeEM8+m7OBEvB9W6wSa5B1eM6JPfgAizTtDjDWeW8nb7+XvzyJ7UAR+d7B316tQc6zxa389Pc44z/SKRjsSJu9gXgvUtvEZEpidSRjsbd5qX1dxCmxPMAVFegrdZp9sIUEWx/MRY6HHVP62fVLBrE9gkO3U4EHWzvt6cW0HBuZt3SbUXW59JeZ1Z5rbEXf7lxpLq2mz+HCEKg5IlRBvv50riuLyiCrZgiF8Sm2sw/VH6dWoRjvo/fVpr9hLDjM69xD
+ cluster_domain: cookied-cicd-pike-dvr-ceph.local
+ cluster_name: cookied-cicd-pike-dvr-ceph
compute_bond_mode: active-backup
compute_padding_with_zeros: 'False'
compute_primary_first_nic: eth1
@@ -88,7 +125,7 @@
deployment_type: physical
dns_server01: 172.18.176.6
dns_server02: 172.18.208.44
- email_address: obutenko@mirantis.com
+ email_address: test@mirantis.com
gainsight_service_enabled: 'False'
gateway_primary_first_nic: eth1
gateway_primary_second_nic: eth2
@@ -125,17 +162,20 @@
nova_vnc_tls_enabled: 'True'
offline_deployment: 'False'
opencontrail_enabled: 'False'
- openssh_groups: ''
+ openldap_domain: ${_param:cluster_name}.local
+ openldap_enabled: 'True'
+ openldap_organisation: ${_param:cluster_name}
+ openssh_groups: cicd
openstack_benchmark_node01_address: 10.167.4.95
openstack_benchmark_node01_hostname: bmk01
openstack_cluster_size: compact
openstack_compute_count: '2'
openstack_compute_rack01_hostname: cmp
- openstack_compute_rack01_single_subnet: 172.16.10
- openstack_compute_rack01_tenant_subnet: 10.1.0
- openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
- openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
- openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
+ openstack_compute_rack01_single_subnet: 10.167.4
+ openstack_compute_rack01_tenant_subnet: 10.167.6
+ openstack_compute_single_address_ranges: 10.167.4.105-10.167.4.106
+ openstack_compute_deploy_address_ranges: 10.167.5.105-10.167.5.106
+ openstack_compute_tenant_address_ranges: 10.167.6.105-10.167.6.106
openstack_control_address: 10.167.4.10
openstack_control_hostname: ctl
openstack_control_node01_address: 10.167.4.11
@@ -203,23 +243,49 @@
shared_reclass_branch: 'proposed'
shared_reclass_url: https://github.com/Mirantis/reclass-system-salt-model.git
sriov_network_subnet: 10.55.0.0/16
- stacklight_enabled: 'False'
- stacklight_version: '2'
static_ips_on_deploy_network_enabled: 'False'
tenant_network_gateway: 10.167.6.1
tenant_network_netmask: 255.255.255.0
tenant_network_subnet: 10.167.6.0/24
- tenant_telemetry_enabled: 'True'
+ tenant_telemetry_enabled: 'False'
tenant_vlan: '20'
upstream_proxy_enabled: 'False'
use_default_network_scheme: 'True'
version: proposed
vnf_onboarding_enabled: 'False'
- openstack_telemetry_address: 172.16.10.83
+ openstack_telemetry_address: 10.167.4.83
openstack_telemetry_hostname: mdb
- openstack_telemetry_node01_address: 172.16.10.84
+ openstack_telemetry_node01_address: 10.167.4.84
openstack_telemetry_node01_hostname: mdb01
- openstack_telemetry_node02_address: 172.16.10.85
+ openstack_telemetry_node02_address: 10.167.4.85
openstack_telemetry_node02_hostname: mdb02
- openstack_telemetry_node03_address: 172.16.10.86
- openstack_telemetry_node03_hostname: mdb03
\ No newline at end of file
+ openstack_telemetry_node03_address: 10.167.4.86
+ openstack_telemetry_node03_hostname: mdb03
+ fluentd_enabled: 'True'
+ stacklight_enabled: 'True'
+ stacklight_log_address: 10.167.4.60
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: 10.167.4.61
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: 10.167.4.62
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: 10.167.4.63
+ stacklight_log_node03_hostname: log03
+ stacklight_monitor_address: 10.167.4.70
+ stacklight_monitor_hostname: mon
+ stacklight_monitor_node01_address: 10.167.4.71
+ stacklight_monitor_node01_hostname: mon01
+ stacklight_monitor_node02_address: 10.167.4.72
+ stacklight_monitor_node02_hostname: mon02
+ stacklight_monitor_node03_address: 10.167.4.73
+ stacklight_monitor_node03_hostname: mon03
+ stacklight_telemetry_address: 10.167.4.50
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: 10.167.4.51
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: 10.167.4.52
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: 10.167.4.53
+ stacklight_telemetry_node03_hostname: mtr03
+ stacklight_version: '2'
+ stacklight_long_term_storage_type: prometheus
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/environment_context.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/environment_context.yaml
new file mode 100644
index 0000000..339956e
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/environment_context.yaml
@@ -0,0 +1,308 @@
+nodes:
+ cfg01:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ classes:
+ - environment.cookied-cicd-pike-dvr-ceph.override_ntp_virtual
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_static_ctl
+
+ cid01:
+ reclass_storage_name: cicd_control_node01
+ roles:
+ - cicd_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cid02:
+ reclass_storage_name: cicd_control_node02
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cid03:
+ reclass_storage_name: cicd_control_node03
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl01:
+ reclass_storage_name: openstack_control_node01
+ roles:
+ - infra_kvm
+ - openstack_control_leader
+ - openstack_database_leader
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl02:
+ reclass_storage_name: openstack_control_node02
+ roles:
+ - infra_kvm
+ - openstack_control
+ - openstack_database
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl03:
+ reclass_storage_name: openstack_control_node03
+ roles:
+ - infra_kvm
+ - openstack_control
+ - openstack_database
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ prx01:
+ reclass_storage_name: openstack_proxy_node01
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ # Generator-based computes. For compatibility only
+ cmp<<count>>:
+ reclass_storage_name: openstack_compute_rack01
+ roles:
+ - openstack_compute
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: bond0_ab_ovs_vxlan_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
+
+ gtw01:
+ reclass_storage_name: openstack_gateway_node01
+ roles:
+ - openstack_gateway
+ - linux_system_codename_xenial
+ classes:
+ - system.linux.system.repo.mcp.apt_mirantis.docker
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: bond0_ab_ovs_vxlan_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
+
+ osd<<count>>:
+ reclass_storage_name: ceph_osd_rack01
+ roles:
+ - ceph_osd
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cmn01:
+ reclass_storage_name: ceph_mon_node01
+ roles:
+ - ceph_mon
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cmn02:
+ reclass_storage_name: ceph_mon_node02
+ roles:
+ - ceph_mon
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cmn03:
+ reclass_storage_name: ceph_mon_node03
+ roles:
+ - ceph_mon
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ rgw01:
+ reclass_storage_name: ceph_rgw_node01
+ roles:
+ - ceph_rgw
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ rgw02:
+ reclass_storage_name: ceph_rgw_node02
+ roles:
+ - ceph_rgw
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ rgw03:
+ reclass_storage_name: ceph_rgw_node03
+ roles:
+ - ceph_rgw
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mon01:
+ reclass_storage_name: stacklight_server_node01
+ roles:
+ - stacklightv2_server_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mon02:
+ reclass_storage_name: stacklight_server_node02
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mon03:
+ reclass_storage_name: stacklight_server_node03
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log01:
+ reclass_storage_name: stacklight_log_node01
+ roles:
+ - stacklight_log_leader_v2
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log02:
+ reclass_storage_name: stacklight_log_node02
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log03:
+ reclass_storage_name: stacklight_log_node03
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr01:
+ reclass_storage_name: stacklight_telemetry_node01
+ roles:
+ - stacklight_telemetry_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr02:
+ reclass_storage_name: stacklight_telemetry_node02
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr03:
+ reclass_storage_name: stacklight_telemetry_node03
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/salt.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/salt.yaml
new file mode 100644
index 0000000..4b9c68c
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/salt.yaml
@@ -0,0 +1,14 @@
+{% from 'cookied-cicd-pike-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-cicd-pike-dvr-ceph/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-cicd-pike-dvr-ceph/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for other salt model repository parameters
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay--meta-data.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/underlay--meta-data.yaml
similarity index 100%
rename from tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay--meta-data.yaml
rename to tcp_tests/templates/cookied-cicd-pike-dvr-ceph/underlay--meta-data.yaml
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/underlay--user-data1604.yaml
similarity index 82%
rename from tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay--user-data1604.yaml
rename to tcp_tests/templates/cookied-cicd-pike-dvr-ceph/underlay--user-data1604.yaml
index 979424f..edc0343 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/underlay--user-data1604.yaml
@@ -31,18 +31,27 @@
- sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- sudo resolvconf -u
+ # Enable grub menu using updated config below
+ - update-grub
+
# Prepare network connection
- sudo ifup ens3
#- sudo route add default gw {gateway} {interface_name}
# Create swap
- - fallocate -l 4G /swapfile
+ - fallocate -l 16G /swapfile
- chmod 600 /swapfile
- mkswap /swapfile
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
- path: /etc/network/interfaces
content: |
auto ens3
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/underlay.yaml
similarity index 67%
rename from tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay.yaml
rename to tcp_tests/templates/cookied-cicd-pike-dvr-ceph/underlay.yaml
index b36f8be..45d4a10 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-ceph/underlay.yaml
@@ -1,41 +1,48 @@
{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
-{% import 'cookied-mcp-pike-dvr-ceph/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'cookied-mcp-pike-dvr-ceph/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'cookied-mcp-pike-dvr-ceph/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+{% import 'cookied-cicd-pike-dvr-ceph/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-cicd-pike-dvr-ceph/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
---
aliases:
- &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
- &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-pike-dvr-ceph') %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-pike-dvr-ceph') %}
{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp1.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp2.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMN01 = os_env('HOSTNAME_CMN01', 'cmn01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMN02 = os_env('HOSTNAME_CMN02', 'cmn02.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMN03 = os_env('HOSTNAME_CMN03', 'cmn03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_RGW01 = os_env('HOSTNAME_CMN01', 'rgw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_RGW02 = os_env('HOSTNAME_CMN02', 'rgw02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_RGW03 = os_env('HOSTNAME_CMN03', 'rgw03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_RGW01 = os_env('HOSTNAME_RGW01', 'rgw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_RGW02 = os_env('HOSTNAME_RGW02', 'rgw02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_RGW03 = os_env('HOSTNAME_RGW03', 'rgw03.' + DOMAIN_NAME) %}
{% set HOSTNAME_OSD01 = os_env('HOSTNAME_OSD01', 'osd1.' + DOMAIN_NAME) %}
{% set HOSTNAME_OSD02 = os_env('HOSTNAME_OSD02', 'osd2.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MDB01 = os_env('HOSTNAME_MDB01', 'mdb01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MDB02 = os_env('HOSTNAME_MDB02', 'mdb02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_MDB03 = os_env('HOSTNAME_MDB03', 'mdb03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
template:
devops_settings:
- env_name: {{ os_env('ENV_NAME', 'cookied-mcp-pike-dvr-ceph_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+ env_name: {{ os_env('ENV_NAME', 'cookied-cicd-pike-dvr-ceph_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
address_pools:
private-pool01:
@@ -50,6 +57,10 @@
default_{{ HOSTNAME_CTL03 }}: +13
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_CID }}: +90
+ default_{{ HOSTNAME_CID01 }}: +91
+ default_{{ HOSTNAME_CID02 }}: +92
+ default_{{ HOSTNAME_CID03 }}: +93
default_{{ HOSTNAME_OSD01 }}: +94
default_{{ HOSTNAME_OSD02 }}: +95
default_{{ HOSTNAME_CMN01 }}: +96
@@ -60,11 +71,17 @@
default_{{ HOSTNAME_RGW03 }}: +78
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_MDB01 }}: +84
- default_{{ HOSTNAME_MDB02 }}: +85
- default_{{ HOSTNAME_MDB03 }}: +86
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +51
+ default_{{ HOSTNAME_MTR02 }}: +52
+ default_{{ HOSTNAME_MTR03 }}: +53
ip_ranges:
- dhcp: [+70, -10]
+ dhcp: [+10, -10]
admin-pool01:
net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
@@ -78,6 +95,10 @@
default_{{ HOSTNAME_CTL03 }}: +13
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_CID }}: +90
+ default_{{ HOSTNAME_CID01 }}: +91
+ default_{{ HOSTNAME_CID02 }}: +92
+ default_{{ HOSTNAME_CID03 }}: +93
default_{{ HOSTNAME_OSD01 }}: +94
default_{{ HOSTNAME_OSD02 }}: +95
default_{{ HOSTNAME_CMN01 }}: +96
@@ -88,9 +109,15 @@
default_{{ HOSTNAME_RGW03 }}: +78
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_MDB01 }}: +84
- default_{{ HOSTNAME_MDB02 }}: +85
- default_{{ HOSTNAME_MDB03 }}: +86
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +51
+ default_{{ HOSTNAME_MTR02 }}: +52
+ default_{{ HOSTNAME_MTR03 }}: +53
ip_ranges:
dhcp: [+70, -10]
@@ -116,9 +143,15 @@
default_{{ HOSTNAME_RGW03 }}: +78
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_MDB01 }}: +84
- default_{{ HOSTNAME_MDB02 }}: +85
- default_{{ HOSTNAME_MDB03 }}: +86
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +51
+ default_{{ HOSTNAME_MTR02 }}: +52
+ default_{{ HOSTNAME_MTR03 }}: +53
ip_ranges:
dhcp: [+10, -10]
@@ -144,9 +177,15 @@
default_{{ HOSTNAME_RGW03 }}: +78
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_MDB01 }}: +84
- default_{{ HOSTNAME_MDB02 }}: +85
- default_{{ HOSTNAME_MDB03 }}: +86
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +51
+ default_{{ HOSTNAME_MTR02 }}: +52
+ default_{{ HOSTNAME_MTR03 }}: +53
ip_ranges:
dhcp: [+130, +230]
@@ -156,6 +195,7 @@
name: devops.driver.libvirt
params:
connection_string: !os_env CONNECTION_STRING, qemu:///system
+ storage_pool_name: !os_env STORAGE_POOL_NAME, default
stp: False
hpet: False
enable_acpi: true
@@ -169,34 +209,34 @@
external: external-pool01
l2_network_devices:
- private:
- address_pool: private-pool01
- dhcp: false
- forward:
- mode: route
-
admin:
address_pool: admin-pool01
dhcp: true
forward:
mode: nat
+ private:
+ address_pool: private-pool01
+ dhcp: false
+ forward:
+ mode: route
+
tenant:
address_pool: tenant-pool01
dhcp: false
external:
address_pool: external-pool01
- dhcp: true
+ dhcp: false
forward:
mode: route
group_volumes:
- name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ source_image: {{ os_env('MCP_IMAGE_PATH1604') }} # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
format: qcow2
- name: cfg01_day01_image # Pre-configured day01 image
- source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+ source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
format: qcow2
- name: mcp_ubuntu_1604_image # Pre-configured image for control plane
source_image: !os_env MCP_IMAGE_PATH1604
@@ -206,25 +246,20 @@
- name: {{ HOSTNAME_CFG01 }}
role: salt_master
params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
+ vcpu: {{ os_env('CFG_NODE_CPU', 3) }}
+ memory: {{ os_env('CFG_NODE_MEMORY', 8192) }}
boot:
- hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
volumes:
- name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
+ capacity: {{ os_env('CFG_NODE_VOLUME_SIZE', 150) }}
backing_store: cfg01_day01_image
format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
+ - name: config
capacity: 1
format: raw
device: cdrom
bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_cfg01
interfaces:
- label: ens3
@@ -245,7 +280,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
+ memory: !os_env SLAVE_NODE_MEMORY, 6144
boot:
- hd
cloud_init_volume_name: iso
@@ -283,7 +318,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
+ memory: !os_env SLAVE_NODE_MEMORY, 6144
boot:
- hd
cloud_init_volume_name: iso
@@ -309,7 +344,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 16384
+ memory: !os_env SLAVE_NODE_MEMORY, 6144
boot:
- hd
cloud_init_volume_name: iso
@@ -331,93 +366,6 @@
interfaces: *interfaces
network_config: *network_config
- - name: {{ HOSTNAME_MDB01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MDB02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MDB03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 8192
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: mcp_ubuntu_1604_image
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- name: {{ HOSTNAME_CMN01 }}
role: salt_minion
params:
@@ -744,7 +692,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 4
- memory: !os_env SLAVE_NODE_MEMORY, 4096
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
boot:
- hd
cloud_init_volume_name: iso
@@ -765,3 +713,315 @@
interfaces: *all_interfaces
network_config: *all_network_config
+
+ - name: {{ HOSTNAME_CID01 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+ memory: {{ os_env('CID_NODE_MEMORY', 6144) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CID02 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+ memory: {{ os_env('CID_NODE_MEMORY', 6144) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CID03 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+ memory: {{ os_env('CID_NODE_MEMORY', 6144) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/core.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/core.yaml
deleted file mode 100644
index 08a3c00..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/core.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
-
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/openstack.yaml
deleted file mode 100644
index 8531cc3..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/openstack.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-
-{% import 'shared-ceph.yaml' as SHARED_CEPH with context %}
-
-{{ SHARED_CEPH.MACRO_INSTALL_CEPH_MONS() }}
-
-{{ SHARED_CEPH.MACRO_INSTALL_CEPH_MGR() }}
-
-{{ SHARED_CEPH.MACRO_INSTALL_CEPH_OSD_AND_RADOSGW() }}
-
-{{ SHARED_CEPH.CONNECT_CEPH_TO_SERVICES() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_REDIS() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_GNOCCHI() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_PANKO() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CEILOMETER() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_AODH() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/salt.yaml
deleted file mode 100644
index e90b99b..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/salt.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_CMP01 with context %}
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_CMP02 with context %}
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import DOMAIN_NAME with context %}
-
-{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-
-{% import 'shared-salt.yaml' as SHARED with context %}
-
-{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
-{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
-{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
-{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
-{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay--user-data-cfg01.yaml
deleted file mode 100644
index 48562ad..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay--user-data-cfg01.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-| # All the data below will be stored as a string object
- #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
-
- ssh_pwauth: True
- users:
- - name: root
- sudo: ALL=(ALL) NOPASSWD:ALL
- shell: /bin/bash
- ssh_authorized_keys:
- {% for key in config.underlay.ssh_keys %}
- - ssh-rsa {{ key['public'] }}
- {% endfor %}
-
- disable_root: false
- chpasswd:
- list: |
- root:r00tme
- expire: False
-
- bootcmd:
- # Enable root access
- - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
- output:
- all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
-
- runcmd:
- # Configure dhclient
- - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
- - sudo resolvconf -u
-
- # Prepare network connection
- - sudo ifdown ens3
- - sudo ip r d default || true # remove existing default route to get it from dhcp
- - sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
-
- # Create swap
- - fallocate -l 4G /swapfile
- - chmod 600 /swapfile
- - mkswap /swapfile
- - swapon /swapfile
- - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
- - mkdir -p /srv/salt/reclass/nodes
- - systemctl enable salt-master
- - systemctl enable salt-minion
- - systemctl start salt-master
- - systemctl start salt-minion
- - salt-call -l info --timeout=120 test.ping
-
- write_files:
- - path: /etc/network/interfaces
- content: |
- auto ens3
- iface ens3 inet dhcp
-
- - path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
- Host *
- ServerAliveInterval 300
- ServerAliveCountMax 10
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/vcp-context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/vcp-context-environment.yaml
deleted file mode 100644
index cc7acd1..0000000
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/vcp-context-environment.yaml
+++ /dev/null
@@ -1,207 +0,0 @@
-nodes:
- cfg01.cookied-mcp-pike-dvr-ceph.local:
- reclass_storage_name: infra_config_node01
- roles:
- - infra_config
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl01.cookied-mcp-pike-dvr-ceph.local:
- reclass_storage_name: openstack_control_node01
- roles:
- - infra_kvm
- - openstack_control_leader
- - openstack_database_leader
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl02.cookied-mcp-pike-dvr-ceph.local:
- reclass_storage_name: openstack_control_node02
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- ctl03.cookied-mcp-pike-dvr-ceph.local:
- reclass_storage_name: openstack_control_node03
- roles:
- - infra_kvm
- - openstack_control
- - openstack_database
- - openstack_message_queue
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- prx01.cookied-mcp-pike-dvr-ceph.local:
- reclass_storage_name: openstack_proxy_node01
- roles:
- - openstack_proxy
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- # Generator-based computes. For compatibility only
- cmp<<count>>.cookied-mcp-pike-dvr-ceph.local:
- reclass_storage_name: openstack_compute_rack01
- roles:
- - openstack_compute
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- gtw01.cookied-mcp-pike-dvr-ceph.local:
- reclass_storage_name: openstack_gateway_node01
- roles:
- - openstack_gateway
- - linux_system_codename_xenial
- classes:
- - system.linux.system.repo.mcp.apt_mirantis.docker
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
- ens5:
- role: bond0_ab_ovs_vxlan_mesh
- ens6:
- role: bond1_ab_ovs_floating
-
- osd<<count>>.cookied-mcp-pike-dvr-ceph.local:
- reclass_storage_name: ceph_osd_rack01
- roles:
- - ceph_osd
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- cmn01.cookied-mcp-pike-dvr-ceph.local:
- reclass_storage_name: ceph_mon_node01
- roles:
- - ceph_mon
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- cmn02.cookied-mcp-pike-dvr-ceph.local:
- reclass_storage_name: ceph_mon_node02
- roles:
- - ceph_mon
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- cmn03.cookied-mcp-pike-dvr-ceph.local:
- reclass_storage_name: ceph_mon_node03
- roles:
- - ceph_mon
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- rgw01.cookied-mcp-pike-dvr-ceph.local:
- reclass_storage_name: ceph_rgw_node01
- roles:
- - ceph_rgw
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- rgw02.cookied-mcp-pike-dvr-ceph.local:
- reclass_storage_name: ceph_rgw_node02
- roles:
- - ceph_rgw
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- rgw03.cookied-mcp-pike-dvr-ceph.local:
- reclass_storage_name: ceph_rgw_node03
- roles:
- - ceph_rgw
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mdb01.cookied-mcp-pike-dvr-ceph.local:
- reclass_storage_name: openstack_telemetry_node01
- roles:
- - linux_system_codename_xenial
- - openstack_telemetry
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mdb02.cookied-mcp-pike-dvr-ceph.local:
- reclass_storage_name: openstack_telemetry_node02
- roles:
- - linux_system_codename_xenial
- - openstack_telemetry
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
-
- mdb03.cookied-mcp-pike-dvr-ceph.local:
- reclass_storage_name: openstack_telemetry_node03
- roles:
- - linux_system_codename_xenial
- - openstack_telemetry
- interfaces:
- ens3:
- role: single_dhcp
- ens4:
- role: single_ctl
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/openstack.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/openstack.yaml
index 636187b..e9cd9a1 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/openstack.yaml
@@ -1,8 +1,8 @@
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'cookied-mcp-pike-dvr-ceph/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'cookied-mcp-queens-dvr-ceph/underlay.yaml' import HOSTNAME_GTW01 with context %}
{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ceph.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dvr-ceph.yaml
similarity index 96%
rename from tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ceph.yaml
rename to tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dvr-ceph.yaml
index d4377b7..d854cb2 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ceph.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dvr-ceph.yaml
@@ -1,16 +1,17 @@
{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
-{% set LAB_CONFIG_NAME = 'cookied-mcp-pike-dvr-ceph' %}
+{% set LAB_CONFIG_NAME = 'cookied-cicd-pike-dvr-ceph' %}
# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
# Path to the context files used to render Cluster and Environment models
{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-dvr-ceph.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['vcp-context-environment.yaml'] %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment_context.yaml'] %}
{% import 'shared-salt.yaml' as SHARED with context %}
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
diff --git a/tcp_tests/templates/shared-salt.yaml b/tcp_tests/templates/shared-salt.yaml
index bfa601a..1535db6 100644
--- a/tcp_tests/templates/shared-salt.yaml
+++ b/tcp_tests/templates/shared-salt.yaml
@@ -20,7 +20,8 @@
# Currently we support 2 salt version that can be set over bellow var
{% set SALT_VERSION = os_env('SALT_VERSION','2017.7') %}
{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-{% set FORMULA_REPOSITORY = os_env('FORMULA_REPOSITORY', 'deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME} ' + REPOSITORY_SUITE + ' salt extra') %}
+#{% set FORMULA_REPOSITORY = os_env('FORMULA_REPOSITORY', 'deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME} ' + REPOSITORY_SUITE + ' salt extra') %}
+{% set FORMULA_REPOSITORY = os_env('FORMULA_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE+ "/salt-formulas"+"/${DISTRIB_CODENAME} ${DISTRIB_CODENAME} main") %}
{% set FORMULA_GPG = os_env('FORMULA_GPG', 'http://apt.mirantis.com/public.gpg') %}
#{# set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME}/salt/2016.3 " + REPOSITORY_SUITE + " main") #}
# Note repo is changed so new one looks like defined bellow
diff --git a/tcp_tests/tests/system/test_ovs_pike_ceph.py b/tcp_tests/tests/system/test_ovs_pike_ceph.py
deleted file mode 100644
index 1f62a94..0000000
--- a/tcp_tests/tests/system/test_ovs_pike_ceph.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2018 Mirantis, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import pytest
-
-from tcp_tests import logger
-from tcp_tests import settings
-
-LOG = logger.logger
-
-
-@pytest.mark.deploy
-class TestInstallOvsPikeCeph(object):
- """Test class for test openstack with ceph and ovs deploy"""
-
- @pytest.mark.grab_versions
- @pytest.mark.fail_snapshot
- def test_pike_ceph_all_ovs_install(self, underlay,
- openstack_deployed,
- ceph_deployed,
- openstack_actions,
- tempest_actions):
- """Test for deploying pike ovs with ceph and check it
- Scenario:
- 1. Prepare salt on hosts
- 2. Setup controller nodes
- 3. Setup compute nodes
- 4. Setup ceph RBD, replication factor 2 for cinder, nova, glance
- 5. Run tempest
-
- """
- openstack_actions._salt.local(
- tgt='*', fun='cmd.run',
- args='service ntp stop; ntpd -gq; service ntp start')
-
- if settings.RUN_TEMPEST:
- tempest_actions.prepare_and_run_tempest()
- LOG.info("*************** DONE **************")