Merge "Fix genrated models for labs designed for static models"
diff --git a/tcp_tests/managers/openstack_manager.py b/tcp_tests/managers/openstack_manager.py
index e54f4fe..bbf9315 100644
--- a/tcp_tests/managers/openstack_manager.py
+++ b/tcp_tests/managers/openstack_manager.py
@@ -119,8 +119,8 @@
"-e SOURCE_FILE=keystonercv3 "
"-e CUSTOM='--pattern {1}' "
"-v /root/:/home/rally "
- "-v /var/log/:/home/rally/rally_reports/ {2} "
- "-v /etc/ssl/certs/:/etc/ssl/certs/ >> image.output"
+ "-v /var/log/:/home/rally/rally_reports/ "
+ "-v /etc/ssl/certs/:/etc/ssl/certs/ {2} >> image.output"
.format(conf_name, pattern, registry))
else:
cmd = ("docker run --rm --net=host "
@@ -129,8 +129,8 @@
"-e SET=full "
"-e SOURCE_FILE=keystonercv3 "
"-v /root/:/home/rally "
- "-v /var/log/:/home/rally/rally_reports/ {2} "
- "-v /etc/ssl/certs/:/etc/ssl/certs/ >> image.output"
+ "-v /var/log/:/home/rally/rally_reports/ "
+ "-v /etc/ssl/certs/:/etc/ssl/certs/ {2} >> image.output"
.format(conf_name, pattern, registry))
LOG.info("Running tempest testing on node {0} using the following "
"command:\n{1}".format(target_name[0], cmd))
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/common-services.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/common-services.yaml
new file mode 100644
index 0000000..c0c50a4
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/common-services.yaml
@@ -0,0 +1,49 @@
+{% from 'cookied-bm-ocata-cicd-pipeline/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+- description: Approve cfg01 ssh key for jenkins user
+ cmd: mkdir -p /var/lib/jenkins/.ssh && ssh-keyscan cfg01 > /var/lib/jenkins/.ssh/known_hosts && chown jenkins /var/lib/jenkins/.ssh/known_hosts
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Install jq for parsing JSON output
+ cmd: apt install -y jq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+# Install support services
+- description: Install keepalived on first cluster node (*01*)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster and *01*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Install keepalived
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install haproxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' state.sls haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check haproxy status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.status haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Restart rsyslog
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.restart rsyslog
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/lab04-physical-inventory-nfv.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/lab04-physical-inventory-nfv.yaml
new file mode 100644
index 0000000..1baa7be
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/lab04-physical-inventory-nfv.yaml
@@ -0,0 +1,143 @@
+nodes:
+ cfg01.ocata-cicd.local:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ # Physical nodes
+
+ cid01.ocata-cicd.local:
+ reclass_storage_name: cicd_control_node01
+ roles:
+ - cicd_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f0:
+ role: single_mgm
+ deploy_address: 172.16.49.70
+ enp2s0f1:
+ role: single_vlan_ctl
+ single_address: 10.167.8.91
+
+ cid02.ocata-cicd.local:
+ reclass_storage_name: cicd_control_node02
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f0:
+ role: single_mgm
+ deploy_address: 172.16.49.71
+ enp2s0f1:
+ role: single_vlan_ctl
+ single_address: 10.167.8.92
+
+ cid03.ocata-cicd.local:
+ reclass_storage_name: cicd_control_node03
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f0:
+ role: single_mgm
+ deploy_address: 172.16.49.72
+ enp2s0f1:
+ role: single_vlan_ctl
+ single_address: 10.167.8.93
+
+ kvm01.ocata-cicd.local:
+ reclass_storage_name: infra_kvm_node01
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_mgm
+ enp9s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+
+ kvm02.ocata-cicd.local:
+ reclass_storage_name: infra_kvm_node02
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_mgm
+ enp9s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+
+ kvm03.ocata-cicd.local:
+ reclass_storage_name: infra_kvm_node03
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_mgm
+ enp9s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+
+ cmp001.ocata-cicd.local:
+ reclass_storage_name: openstack_compute_node01
+ roles:
+ - openstack_compute_dpdk
+ - features_lvm_backend
+ - linux_system_codename_xenial
+ - openstack_compute_sriov
+ interfaces:
+ enp2s0f0:
+ role: single_mgm
+ deploy_address: 172.16.49.73
+ enp2s0f1:
+ role: single_vlan_ctl
+ single_address: 10.167.8.101
+ enp5s0f0:
+ role: single_ovs_dpdk_prv
+ tenant_address: 192.168.0.101
+ dpdk_pci: "0000:05:00.0"
+
+ cmp002.ocata-cicd.local:
+ reclass_storage_name: openstack_compute_node02
+ roles:
+ - openstack_compute_dpdk
+ - features_lvm_backend
+ - linux_system_codename_xenial
+ - openstack_compute_sriov
+ interfaces:
+ enp2s0f0:
+ role: single_mgm
+ deploy_address: 172.16.49.74
+ enp2s0f1:
+ role: single_vlan_ctl
+ single_address: 10.167.8.102
+ enp5s0f0:
+ role: single_ovs_dpdk_prv
+ tenant_address: 192.168.0.102
+ dpdk_pci: "0000:05:00.0"
+
+ gtw01.ocata-cicd.local:
+ reclass_storage_name: openstack_gateway_node01
+ roles:
+ - openstack_gateway
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_mgm
+ deploy_address: 172.16.49.75
+ enp9s0f1:
+ role: bond0_ab_dvr_vxlan_ctl_mesh_floating
+
+# gtw02.ocata-cicd.local:
+# reclass_storage_name: openstack_gateway_node02
+# roles:
+# - openstack_gateway
+# - linux_system_codename_xenial
+# interfaces:
+# enp10s0f0:
+# role: single_mgm
+# enp10s0f1:
+# role: bond0_ab_dvr_vlan_ctl_prv_floating
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/lab04-physical-inventory.yaml
new file mode 100644
index 0000000..d17abc9
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/lab04-physical-inventory.yaml
@@ -0,0 +1,135 @@
+nodes:
+ cfg01.ocata-cicd.local:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ # Physical nodes
+
+ cid01.ocata-cicd.local:
+ reclass_storage_name: cicd_control_node01
+ roles:
+ - cicd_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f0:
+ role: single_mgm
+ deploy_address: 172.16.49.70
+ enp2s0f1:
+ role: single_vlan_ctl
+ single_address: 10.167.8.91
+
+ cid02.ocata-cicd.local:
+ reclass_storage_name: cicd_control_node02
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f0:
+ role: single_mgm
+ deploy_address: 172.16.49.71
+ enp2s0f1:
+ role: single_vlan_ctl
+ single_address: 10.167.8.92
+
+ cid03.ocata-cicd.local:
+ reclass_storage_name: cicd_control_node03
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f0:
+ role: single_mgm
+ deploy_address: 172.16.49.72
+ enp2s0f1:
+ role: single_vlan_ctl
+ single_address: 10.167.8.93
+
+ kvm01.ocata-cicd.local:
+ reclass_storage_name: infra_kvm_node01
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_mgm
+ enp9s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+
+ kvm02.ocata-cicd.local:
+ reclass_storage_name: infra_kvm_node02
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_mgm
+ enp9s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+
+ kvm03.ocata-cicd.local:
+ reclass_storage_name: infra_kvm_node03
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_mgm
+ enp9s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+
+ cmp001.ocata-cicd.local:
+ reclass_storage_name: openstack_compute_node01
+ roles:
+ - openstack_compute
+ - features_lvm_backend
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f0:
+ role: single_mgm
+ deploy_address: 172.16.49.73
+ enp2s0f1:
+ role: bond0_ab_ovs_vxlan_ctl_mesh
+ single_address: 10.167.8.101
+ tenant_address: 192.168.0.101
+
+ cmp002.ocata-cicd.local:
+ reclass_storage_name: openstack_compute_node02
+ roles:
+ - openstack_compute
+ - features_lvm_backend
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f0:
+ role: single_mgm
+ deploy_address: 172.16.49.74
+ enp2s0f1:
+ role: bond0_ab_ovs_vxlan_ctl_mesh
+ single_address: 10.167.8.102
+ tenant_address: 192.168.0.102
+
+ gtw01.ocata-cicd.local:
+ reclass_storage_name: openstack_gateway_node01
+ roles:
+ - openstack_gateway
+ - linux_system_codename_xenial
+ interfaces:
+ enp9s0f0:
+ role: single_mgm
+ deploy_address: 172.16.49.75
+ enp9s0f1:
+ role: bond0_ab_dvr_vxlan_ctl_mesh_floating
+
+# gtw02.ocata-cicd.local:
+# reclass_storage_name: openstack_gateway_node02
+# roles:
+# - openstack_gateway
+# - linux_system_codename_xenial
+# interfaces:
+# enp10s0f0:
+# role: single_mgm
+# enp10s0f1:
+# role: bond0_ab_dvr_vlan_ctl_prv_floating
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd-nfv.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd-nfv.yaml
new file mode 100644
index 0000000..3a2ada7
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd-nfv.yaml
@@ -0,0 +1,170 @@
+default_context:
+ bmk_enabled: 'False'
+ ceph_enabled: 'False'
+ cicd_control_node01_address: 10.167.8.91
+ cicd_control_node01_hostname: cid01
+ cicd_control_node02_address: 10.167.8.92
+ cicd_control_node02_hostname: cid02
+ cicd_control_node03_address: 10.167.8.93
+ cicd_control_node03_hostname: cid03
+ cicd_control_vip_address: 10.167.8.90
+ cicd_control_vip_hostname: cid
+ cicd_enabled: 'True'
+ cicd_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpAIBAAKCAQEAxhMRv9nCgnLGtWFLfq8r97sPjEIA3eNvVfYWV87S2TjtiUyT
+ Bknw/eXC0l2SGN3q7NXaA6sqtiVgk2dw0DfhGOKuWucr5C3pyRDMZOpj98gNzwNX
+ X6kUmqS8MjALcpw68qVnq0cuQ0RNY8gxbLGIGWZx29AdcHtczWqce3LXDllVLp/U
+ XLIcZuoB3Z2JvKWMo+QF7bKFJB8+O6tBXuRDJblVQFds/dG6cdLXGOTu3TvD/iu8
+ FcDOPDqTff7j2669DmTV6NU2lzuCd5WOIaRmX0bmyOripMtTRZvj83W3ZvLZO/7I
+ L9sXB1S/IpL09P1Fq5rMK3rDXBAS7CY/fBOBUwIDAQABAoIBAAyR4zt4l0ZuADw8
+ e20Dstea0GbHPYTXTRZ4cnyKDIlqenCPInlwsdF7Bj0RYRcg5iq3e4lmEGTUxGc2
+ VktwcGGC6SutpfRqEX8ICOCSm1t7H502ihHxCfSyZZsNv3w7e+YwJv8Qmlf8eqjN
+ aI2MSqXKAYnAkE22FnKWxG11IT6wwgIEB01z5DbnNv3FToIOGAnX1wB8cOP5IS5C
+ E89Q9X6YqB/k+L5Xms+WfkHiqDBlkGfH1M97CmjDrR1wDapqCxSVjt/qIaSBoyMi
+ NiXweo+P1ZZs9myQbOMX/mS8svCEVM/EiQe6M+K4wSNZNxIzZWCozMHYTAQRIcfF
+ SRIMMrECgYEA65ExKZuoi2MGTdqFH04sZuz6sH3hXI7er2ol6mmhXZALJWd+Sk9a
+ j/eq9HKBGhc41LeowIYrcwMiUlXhoZ17AiGALgG1UG5btomvwADd9DuzmJYaZ8VO
+ xiLG5Y1czIyh4JapPOdlP0oj2Sy+nlIsCCpmmYbZK+qvbkT0nnWJzskCgYEA10FY
+ rENlLZ6L34f4c9Ow+GpI/z3+QP6By3xwOQTZOLV+gBQeOHO45TfIXT2hsAXbfcAA
+ 0pvXeKhQ89a7A24L/3UCex/gV4BivghIVai1Lh6/++LC4s3Ue68+CQCm4Q1zTDqk
+ GTVtEH7r2Bq9Wm08vPpkLwiHYOJhVGTGGvEpkTsCgYEAlv2WjHvoeYd8Z/ST9W/v
+ B/4H5Y6aH/C3jao1KV6Rft4wNsZybYEVpEf0fQDT/Xw7AiXCdbzKJssweaPwnt3J
+ FaGRfmu74xUJliQE0cX8fmqyADDeNfuDNX7fDA4jGD1gGQuY6J/NBtcnyTFj8Sjs
+ bkN3RhroIr0nuz9ZqCPgs/kCgYEAt1s5fltWRzrDaOA4uek72Q8oKQuUlaZ1x3Cz
+ Y06G/jBTliQM7gddGxueOBZ0sSz8H6y6xqvrKiMt+dcSrEREQhFY0KqBfeeltLv3
+ acfwtV2KKbSqT2oHMmg/DooYnKHJcciN2c9RnPiQSx/T5cAhOdSMHChGsTeEss+4
+ lGCTCNsCgYAm8R8A3XZ2TPl0iMM9LxUJGXBFs5r/GkeM1XMXQmnNDzoDbaLhkj/F
+ 7UayG1rOOYl64oYmGZ4UPYTzyDDcb4m/ZiqQsoAmNjXoKykvMhhBnGUb4CZj0J/b
+ QppRJ86CYR17df+W+4TLhXcn41afuO+8TCBLUKpb1jeJ5hifWctK5Q==
+ -----END RSA PRIVATE KEY-----
+ cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDGExG/2cKCcsa1YUt+ryv3uw+MQgDd429V9hZXztLZOO2JTJMGSfD95cLSXZIY3ers1doDqyq2JWCTZ3DQN+EY4q5a5yvkLenJEMxk6mP3yA3PA1dfqRSapLwyMAtynDrypWerRy5DRE1jyDFssYgZZnHb0B1we1zNapx7ctcOWVUun9Rcshxm6gHdnYm8pYyj5AXtsoUkHz47q0Fe5EMluVVAV2z90bpx0tcY5O7dO8P+K7wVwM48OpN9/uPbrr0OZNXo1TaXO4J3lY4hpGZfRubI6uKky1NFm+Pzdbdm8tk7/sgv2xcHVL8ikvT0/UWrmswresNcEBLsJj98E4FT
+ cluster_domain: ocata-cicd.local
+ cluster_name: ocata-cicd
+ compute_bond_mode: active-backup
+ compute_primary_first_nic: eth1
+ compute_primary_second_nic: eth2
+ context_seed: azAQwe19yxcUFK03MqWVyrJtQ6ZblWAFaDDiITqT9ed7jJnj6EdHQpgyizt1Gxdg
+ control_network_netmask: 255.255.255.0
+ control_network_subnet: 10.167.8.0/24
+ control_vlan: '2422'
+ cookiecutter_template_branch: master
+ cookiecutter_template_credentials: gerrit
+ cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+ deploy_network_gateway: 172.16.49.126
+ deploy_network_netmask: 255.255.255.192
+ deploy_network_subnet: 172.16.49.64/26
+ deployment_type: physical
+ dns_server01: 8.8.8.8
+ dns_server02: 8.8.4.4
+ email_address: sgudz@mirantis.com
+ gateway_primary_first_nic: eth1
+ gateway_primary_second_nic: eth2
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: 10.167.8.241
+ infra_kvm01_deploy_address: 172.16.49.67
+ infra_kvm01_hostname: kvm01
+ infra_kvm02_control_address: 10.167.8.242
+ infra_kvm02_deploy_address: 172.16.49.68
+ infra_kvm02_hostname: kvm02
+ infra_kvm03_control_address: 10.167.8.243
+ infra_kvm03_deploy_address: 172.16.49.69
+ infra_kvm03_hostname: kvm03
+ infra_kvm_vip_address: 10.167.8.240
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ kubernetes_enabled: 'False'
+ local_repositories: 'False'
+ maas_deploy_address: 172.16.49.66
+ maas_hostname: cfg01
+ mcp_version: stable
+ offline_deployment: 'False'
+ opencontrail_enabled: 'False'
+ openldap_domain: ocata-cicd.local
+ openldap_enabled: 'True'
+ openldap_organisation: ${_param:cluster_name}
+ openstack_benchmark_node01_address: 10.167.8.95
+ openstack_benchmark_node01_hostname: bmk01
+ openstack_cluster_size: compact
+ openstack_compute_count: '2'
+ openstack_compute_rack01_hostname: cmp
+ openstack_compute_rack01_single_subnet: 10.167.8
+ openstack_compute_rack01_tenant_subnet: 192.168.0
+ openstack_control_address: 10.167.8.10
+ openstack_control_hostname: ctl
+ openstack_control_node01_address: 10.167.8.11
+ openstack_control_node01_hostname: ctl01
+ openstack_control_node02_address: 10.167.8.12
+ openstack_control_node02_hostname: ctl02
+ openstack_control_node03_address: 10.167.8.13
+ openstack_control_node03_hostname: ctl03
+ openstack_database_address: 10.167.8.50
+ openstack_database_hostname: dbs
+ openstack_database_node01_address: 10.167.8.51
+ openstack_database_node01_hostname: dbs01
+ openstack_database_node02_address: 10.167.8.52
+ openstack_database_node02_hostname: dbs02
+ openstack_database_node03_address: 10.167.8.53
+ openstack_database_node03_hostname: dbs03
+ openstack_enabled: 'True'
+ openstack_gateway_node01_address: 10.167.8.224
+ openstack_gateway_node01_hostname: gtw01
+ openstack_gateway_node01_tenant_address: 192.168.0.6
+ openstack_gateway_node02_address: 10.167.8.225
+ openstack_gateway_node02_hostname: gtw02
+ openstack_gateway_node02_tenant_address: 192.168.0.7
+ openstack_gateway_node03_address: 10.167.8.226
+ openstack_gateway_node03_hostname: gtw03
+ openstack_gateway_node03_tenant_address: 192.168.0.8
+ openstack_message_queue_address: 10.167.8.40
+ openstack_message_queue_hostname: msg
+ openstack_message_queue_node01_address: 10.167.8.41
+ openstack_message_queue_node01_hostname: msg01
+ openstack_message_queue_node02_address: 10.167.8.42
+ openstack_message_queue_node02_hostname: msg02
+ openstack_message_queue_node03_address: 10.167.8.43
+ openstack_message_queue_node03_hostname: msg03
+ openstack_network_engine: ovs
+ openstack_neutron_qos: 'False'
+ openstack_neutron_vlan_aware_vms: 'False'
+ openstack_nfv_dpdk_enabled: 'True'
+ openstack_nfv_sriov_enabled: 'True'
+ openstack_nfv_sriov_network: physnet1
+ openstack_nfv_sriov_numvfs: '7'
+ openstack_nfv_sriov_pf_nic: enp5s0f1
+ openstack_nova_compute_hugepages_count: '16'
+ openstack_nova_compute_nfv_req_enabled: 'True'
+ openstack_nova_cpu_pinning: 1,2,7,8
+ openstack_ovs_dvr_enabled: 'False'
+ openstack_ovs_encapsulation_type: vxlan
+ openstack_proxy_address: 10.167.8.80
+ openstack_proxy_hostname: prx
+ openstack_proxy_node01_address: 10.167.8.81
+ openstack_proxy_node01_hostname: prx01
+ openstack_proxy_node02_address: 10.167.8.82
+ openstack_proxy_node02_hostname: prx02
+ openstack_upgrade_node01_address: 10.167.8.19
+ openstack_version: ocata
+ oss_enabled: 'False'
+ oss_node03_address: ${_param:stacklight_monitor_node03_address}
+ platform: openstack_enabled
+ public_host: ${_param:openstack_proxy_address}
+ publication_method: email
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ salt_api_password: KEfAXxIWJykUBv0v8uKWdI2v4cBG5A07
+ salt_api_password_hash: $6$XBCrfheG$2q48l7h1giiqF2sdp7CFtLQQi8pcMa6K5A8cPYQmhuGqJtzv08YXVqYyhkHARzl1VBLVf.aTMY6d0M5naM5WU0
+ salt_master_address: 172.16.49.66
+ salt_master_hostname: cfg01
+ salt_master_management_address: 172.16.49.66
+ shared_reclass_refspec: refs/changes/44/16144/1
+ shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+ stacklight_enabled: 'False'
+ stacklight_version: '2'
+ static_ips_on_deploy_network_enabled: 'False'
+ tenant_network_gateway: 192.168.0.1
+ tenant_network_netmask: 255.255.255.0
+ tenant_network_subnet: 192.168.0.0/24
+ tenant_vlan: '2423'
+ upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'True'
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd.yaml
new file mode 100644
index 0000000..225c6f0
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd.yaml
@@ -0,0 +1,245 @@
+default_context:
+ bmk_enabled: 'True'
+ ceph_enabled: 'False'
+ cicd_control_node01_address: 10.167.8.91
+ cicd_control_node01_hostname: cid01
+ cicd_control_node02_address: 10.167.8.92
+ cicd_control_node02_hostname: cid02
+ cicd_control_node03_address: 10.167.8.93
+ cicd_control_node03_hostname: cid03
+ cicd_control_vip_address: 10.167.8.90
+ cicd_control_vip_hostname: cid
+ cicd_enabled: 'True'
+ cicd_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpAIBAAKCAQEA0i/Kw7U6RvH3ELsRZwtfX5ZSaV2tkRiWqCgpSK7fvZn/YYuy
+ f+DiMuviyb1a2rmgQgckJjGlQeEdtETyh+lAYUMCxcJUqiPMNtbs4GDqqpc91Nv+
+ /Qa5JXGuInQC4L5MAx2BeJ4Swc1jnDMLXZ21zmcZwK3Uo5ENyhQqlwe2QZ+SzH76
+ U7DQgSCU3xlI/ieBDiC4w4HLj3z3X36J6bb3x33DVh02UCaWCwdU0v+PGW+v4RBt
+ RYMAuCnErOXGQysgwcpyqSpBvZ6QRdktIeAyK5X8RVLx7Tta5BPVB8jbseF/0N2C
+ wxmovDbnUzJMF8lRUuuqXzKPfXeHjDbWWOpzeQIDAQABAoIBAQCdnXsmwWr/Mol8
+ QVuucwfFDVulabszWEX5uYwj1gcwDiBFDNYBMSlO2DbL47QS2ypC+UnxXj83pNF6
+ kk1w/8foZ7DXjX+hypBj+03MgMDYxet3CLYxFe6XFqVnbqmN0QX9PA4P3jHgpN1y
+ j5CRVie0rPnR1Ejlk2vMwsyhTwYsNFmCqW0IC5qWHPyMGiOnFTcCYUIA8IZGkbvS
+ dYdVavAxwLVgmBoWVegg7eOL6hB3W1BZHxra/TEiHy0gsTSMr/l8YRqsDX8A91D1
+ mOMYqaEfqtJMuAKQwJDsXFiqXjsctQyUcx8TcPgsDTB/B8OPt9g8JdcmUUnWMaa9
+ mj6i9VdVAoGBAPtdkkgKGPHxq4bxWKrwzaI1gUPlPikxA+rVeplNvF0pzt4yUHD6
+ l56ash7Yl6E3n1s2mLh4IMYw8a5QZffHf51cQBtHkg72u4dXVUWLUGIzwsIkx6Zu
+ kmIfWCkC3uMbcE7o1pQSGgc3nUUeD4K6KBqUWxinEzCIYu3a909MANL/AoGBANYP
+ 2/troZi9SfYkxF/ZdCeufDsSj4DZICzG7wbKMUJ1jDpegPBT4D8d1Nip7tj10q/P
+ 4HdRrv0p5ZsFgtua9Una+ICpjpkEqjGgHUBBD7o/+Jjd8cmE57DSNSMUTVY8mwi3
+ OuKNI5itlU/8gwrAtEjV5qcgsUQSH6zZm69sx9GHAoGAb78ee/Y5Nr4YYipDH+Nj
+ l3wv1k5AfzFyK2DyWtrrJYOjmrZFeqR86R6elwX1Cs/egT9ZT5DgCRvTJYpKeofv
+ HLbZLQd5UDuyDK3vk7YmazHVoFeXhk7bttF2cOz8x3v4RqxOUI9nkNPRj9uYS6aU
+ k5RmlyQXbNkFGfbhQRhpuWMCgYBqPFKzk3YOhJrJQvQGkbgY3XqgIpT/oEJclpoX
+ 547M6eOfMDmTjDz1dgulP6jfGhjm0icXcH2E/R8LcY/BB0WG0tqTmBLCFFOW71y0
+ /9UbXRY2X6fYmFIYKire7vt2uftDNxRNTTiGVFeO68XpNzwCDc470XjITKpVsWWX
+ iUgdzwKBgQCs+hCR03K+6npMnANOBEi4I91pJQlbcMBK5fvX1w4VAwBIEu4Z58JM
+ kBbpgmANffYjTWxMIEGvdq9yZWwKCw7pv8pu/dU63MS5ST10K+7/QaLejabCskzP
+ Hjy15CfE5Iy/6aBIEz30uW/B/THd0icbGKSaFjor+sW7S2GqJvE9ow==
+ -----END RSA PRIVATE KEY-----
+ cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSL8rDtTpG8fcQuxFnC19fllJpXa2RGJaoKClIrt+9mf9hi7J/4OIy6+LJvVrauaBCByQmMaVB4R20RPKH6UBhQwLFwlSqI8w21uzgYOqqlz3U2/79Brklca4idALgvkwDHYF4nhLBzWOcMwtdnbXOZxnArdSjkQ3KFCqXB7ZBn5LMfvpTsNCBIJTfGUj+J4EOILjDgcuPfPdffonptvfHfcNWHTZQJpYLB1TS/48Zb6/hEG1FgwC4KcSs5cZDKyDBynKpKkG9npBF2S0h4DIrlfxFUvHtO1rkE9UHyNux4X/Q3YLDGai8NudTMkwXyVFS66pfMo99d4eMNtZY6nN5
+ cluster_domain: ocata-cicd.local
+ cluster_name: ocata-cicd
+ compute_bond_mode: active-backup
+ compute_primary_first_nic: eth1
+ compute_primary_second_nic: eth2
+ context_seed: TyDcKDMkhMxUlgpgsB0sXCfWQGyBdEJDIBpcRnfTlwS89gRFQZBQbyxYB9gy0kcQ
+ control_network_netmask: 255.255.255.0
+ control_network_subnet: 10.167.8.0/24
+ control_vlan: '2422'
+ cookiecutter_template_branch: master
+ cookiecutter_template_credentials: gerrit
+ cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+ deploy_network_gateway: 172.16.49.126
+ deploy_network_netmask: 255.255.255.192
+ deploy_network_subnet: 172.16.49.64/26
+ deployment_type: physical
+ dns_server01: 8.8.8.8
+ dns_server02: 8.8.4.4
+ email_address: sgudz@mirantis.com
+ gateway_primary_first_nic: eth1
+ gateway_primary_second_nic: eth2
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: 10.167.8.241
+ infra_kvm01_deploy_address: 172.16.49.67
+ infra_kvm01_hostname: kvm01
+ infra_kvm02_control_address: 10.167.8.242
+ infra_kvm02_deploy_address: 172.16.49.68
+ infra_kvm02_hostname: kvm02
+ infra_kvm03_control_address: 10.167.8.243
+ infra_kvm03_deploy_address: 172.16.49.69
+ infra_kvm03_hostname: kvm03
+ infra_kvm_vip_address: 10.167.8.240
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ kubernetes_enabled: 'False'
+ local_repositories: 'False'
+ maas_deploy_address: 172.16.49.66
+ maas_hostname: cfg01
+ mcp_version: stable
+ offline_deployment: 'False'
+ opencontrail_enabled: 'False'
+ openldap_domain: ocata-cicd.local
+ openldap_enabled: 'True'
+ openldap_organisation: ${_param:cluster_name}
+ openstack_benchmark_node01_address: 10.167.8.95
+ openstack_benchmark_node01_hostname: bmk01
+ openstack_compute_count: '2'
+ openstack_compute_rack01_hostname: cmp
+ openstack_compute_rack01_single_subnet: 10.167.8
+ openstack_compute_rack01_tenant_subnet: 192.168.0
+ openstack_control_address: 10.167.8.10
+ openstack_control_hostname: ctl
+ openstack_control_node01_address: 10.167.8.11
+ openstack_control_node01_hostname: ctl01
+ openstack_control_node02_address: 10.167.8.12
+ openstack_control_node02_hostname: ctl02
+ openstack_control_node03_address: 10.167.8.13
+ openstack_control_node03_hostname: ctl03
+ openstack_database_address: 10.167.8.50
+ openstack_database_hostname: dbs
+ openstack_database_node01_address: 10.167.8.51
+ openstack_database_node01_hostname: dbs01
+ openstack_database_node02_address: 10.167.8.52
+ openstack_database_node02_hostname: dbs02
+ openstack_database_node03_address: 10.167.8.53
+ openstack_database_node03_hostname: dbs03
+ openstack_enabled: 'True'
+ openstack_gateway_node01_address: 10.167.8.224
+ openstack_gateway_node01_hostname: gtw01
+ openstack_gateway_node01_tenant_address: 192.168.0.6
+ openstack_gateway_node02_address: 10.167.8.225
+ openstack_gateway_node02_hostname: gtw02
+ openstack_gateway_node02_tenant_address: 192.168.0.7
+ openstack_gateway_node03_address: 10.167.8.226
+ openstack_gateway_node03_hostname: gtw03
+ openstack_gateway_node03_tenant_address: 192.168.0.8
+ openstack_message_queue_address: 10.167.8.40
+ openstack_message_queue_hostname: msg
+ openstack_message_queue_node01_address: 10.167.8.41
+ openstack_message_queue_node01_hostname: msg01
+ openstack_message_queue_node02_address: 10.167.8.42
+ openstack_message_queue_node02_hostname: msg02
+ openstack_message_queue_node03_address: 10.167.8.43
+ openstack_message_queue_node03_hostname: msg03
+ openstack_network_engine: ovs
+ openstack_neutron_qos: 'False'
+ openstack_neutron_vlan_aware_vms: 'False'
+ openstack_nfv_dpdk_enabled: 'False'
+ openstack_nfv_sriov_enabled: 'False'
+ openstack_nova_compute_nfv_req_enabled: 'False'
+ openstack_ovs_dvr_enabled: 'True'
+ openstack_ovs_encapsulation_type: vxlan
+ openstack_proxy_address: 10.167.8.80
+ openstack_proxy_hostname: prx
+ openstack_proxy_node01_address: 10.167.8.81
+ openstack_proxy_node01_hostname: prx01
+ openstack_proxy_node02_address: 10.167.8.82
+ openstack_proxy_node02_hostname: prx02
+ openstack_upgrade_node01_address: 10.167.8.19
+ openstack_version: ocata
+ oss_address: ${_param:stacklight_monitor_address}
+ oss_cis_cert: ${_param:oss_openstack_cert}
+ oss_cis_domain_id: ${_param:oss_openstack_domain_id}
+ oss_cis_enabled: 'True'
+ oss_cis_jobs_repository: https://github.com/Mirantis/rundeck-cis-jobs.git
+ oss_cis_jobs_repository_branch: master
+ oss_cis_password: ${_param:oss_openstack_password}
+ oss_cis_project: ${_param:oss_openstack_project}
+ oss_cis_username: ${_param:oss_openstack_username}
+ oss_cleanup_service_enabled: 'True'
+ oss_cleanup_service_os_credentials_path: ${_param:oss_openstack_credentials_path}
+ oss_cleanup_service_password: ${_param:oss_openstack_password}
+ oss_cleanup_service_project: ${_param:oss_openstack_project}
+ oss_cleanup_service_project_domain_id: ${_param:oss_openstack_username_domain_id}
+ oss_cleanup_service_username: ${_param:oss_openstack_username}
+ oss_enabled: 'True'
+ oss_node01_address: ${_param:stacklight_monitor_node01_address}
+ oss_node02_address: ${_param:stacklight_monitor_node02_address}
+ oss_node03_address: ${_param:stacklight_monitor_node03_address}
+ oss_notification_email_from: sgudz@mirantis.com
+ oss_notification_email_recipients: sgudz@mirantis.com
+ oss_openstack_auth_url: http://172.17.16.190:5000/v3
+ oss_openstack_cert: ' -----BEGIN CERTIFICATE----- MIIE0DCCA7igAwIBAgIBBzANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx
+ EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT AAGjggEaMIIBFjAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNV
+ 91cxG7685C/b+LrTW+C05+Z5Yg4MotdqY3MxtfWoSKQ7CC2iXZDXtHwlTxFWMMS2 -----END CERTIFICATE-----
+ -----BEGIN CERTIFICATE----- dGhvcml0eSAtIEcyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv3Fi
+ 9kIDN0zmiN/VryTyscPfzLXs4Jlet0lUIDyUGAzHHFIYSaRt4bNYC8...'
+ oss_openstack_credentials_path: /srv/volumes/rundeck/storage
+ oss_openstack_domain_id: default
+ oss_openstack_endpoint_type: public
+ oss_openstack_password: nova
+ oss_openstack_project: admin
+ oss_openstack_ssl_verify: 'False'
+ oss_openstack_username: admin
+ oss_runbook_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpAIBAAKCAQEA0i/Kw7U6RvH3ELsRZwtfX5ZSaV2tkRiWqCgpSK7fvZn/YYuy
+ f+DiMuviyb1a2rmgQgckJjGlQeEdtETyh+lAYUMCxcJUqiPMNtbs4GDqqpc91Nv+
+ /Qa5JXGuInQC4L5MAx2BeJ4Swc1jnDMLXZ21zmcZwK3Uo5ENyhQqlwe2QZ+SzH76
+ U7DQgSCU3xlI/ieBDiC4w4HLj3z3X36J6bb3x33DVh02UCaWCwdU0v+PGW+v4RBt
+ RYMAuCnErOXGQysgwcpyqSpBvZ6QRdktIeAyK5X8RVLx7Tta5BPVB8jbseF/0N2C
+ wxmovDbnUzJMF8lRUuuqXzKPfXeHjDbWWOpzeQIDAQABAoIBAQCdnXsmwWr/Mol8
+ QVuucwfFDVulabszWEX5uYwj1gcwDiBFDNYBMSlO2DbL47QS2ypC+UnxXj83pNF6
+ kk1w/8foZ7DXjX+hypBj+03MgMDYxet3CLYxFe6XFqVnbqmN0QX9PA4P3jHgpN1y
+ j5CRVie0rPnR1Ejlk2vMwsyhTwYsNFmCqW0IC5qWHPyMGiOnFTcCYUIA8IZGkbvS
+ dYdVavAxwLVgmBoWVegg7eOL6hB3W1BZHxra/TEiHy0gsTSMr/l8YRqsDX8A91D1
+ mOMYqaEfqtJMuAKQwJDsXFiqXjsctQyUcx8TcPgsDTB/B8OPt9g8JdcmUUnWMaa9
+ mj6i9VdVAoGBAPtdkkgKGPHxq4bxWKrwzaI1gUPlPikxA+rVeplNvF0pzt4yUHD6
+ l56ash7Yl6E3n1s2mLh4IMYw8a5QZffHf51cQBtHkg72u4dXVUWLUGIzwsIkx6Zu
+ kmIfWCkC3uMbcE7o1pQSGgc3nUUeD4K6KBqUWxinEzCIYu3a909MANL/AoGBANYP
+ 2/troZi9SfYkxF/ZdCeufDsSj4DZICzG7wbKMUJ1jDpegPBT4D8d1Nip7tj10q/P
+ 4HdRrv0p5ZsFgtua9Una+ICpjpkEqjGgHUBBD7o/+Jjd8cmE57DSNSMUTVY8mwi3
+ OuKNI5itlU/8gwrAtEjV5qcgsUQSH6zZm69sx9GHAoGAb78ee/Y5Nr4YYipDH+Nj
+ l3wv1k5AfzFyK2DyWtrrJYOjmrZFeqR86R6elwX1Cs/egT9ZT5DgCRvTJYpKeofv
+ HLbZLQd5UDuyDK3vk7YmazHVoFeXhk7bttF2cOz8x3v4RqxOUI9nkNPRj9uYS6aU
+ k5RmlyQXbNkFGfbhQRhpuWMCgYBqPFKzk3YOhJrJQvQGkbgY3XqgIpT/oEJclpoX
+ 547M6eOfMDmTjDz1dgulP6jfGhjm0icXcH2E/R8LcY/BB0WG0tqTmBLCFFOW71y0
+ /9UbXRY2X6fYmFIYKire7vt2uftDNxRNTTiGVFeO68XpNzwCDc470XjITKpVsWWX
+ iUgdzwKBgQCs+hCR03K+6npMnANOBEi4I91pJQlbcMBK5fvX1w4VAwBIEu4Z58JM
+ kBbpgmANffYjTWxMIEGvdq9yZWwKCw7pv8pu/dU63MS5ST10K+7/QaLejabCskzP
+ Hjy15CfE5Iy/6aBIEz30uW/B/THd0icbGKSaFjor+sW7S2GqJvE9ow==
+ -----END RSA PRIVATE KEY-----
+ oss_runbook_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSL8rDtTpG8fcQuxFnC19fllJpXa2RGJaoKClIrt+9mf9hi7J/4OIy6+LJvVrauaBCByQmMaVB4R20RPKH6UBhQwLFwlSqI8w21uzgYOqqlz3U2/79Brklca4idALgvkwDHYF4nhLBzWOcMwtdnbXOZxnArdSjkQ3KFCqXB7ZBn5LMfvpTsNCBIJTfGUj+J4EOILjDgcuPfPdffonptvfHfcNWHTZQJpYLB1TS/48Zb6/hEG1FgwC4KcSs5cZDKyDBynKpKkG9npBF2S0h4DIrlfxFUvHtO1rkE9UHyNux4X/Q3YLDGai8NudTMkwXyVFS66pfMo99d4eMNtZY6nN5
+ oss_rundeck_external_datasource_enabled: 'False'
+ oss_security_audit_enabled: 'True'
+ oss_security_audit_os_credentials_path: ${_param:oss_openstack_credentials_path}
+ oss_security_audit_password: ${_param:oss_openstack_password}
+ oss_security_audit_project: ${_param:oss_openstack_project}
+ oss_security_audit_project_domain_id: ${_param:oss_openstack_domain_id}
+ oss_security_audit_user_domain_id: ${_param:oss_openstack_domain_id}
+ oss_security_audit_username: ${_param:oss_openstack_username}
+ oss_sfdc_support_enabled: 'True'
+ platform: openstack_enabled
+ public_host: ${_param:openstack_proxy_address}
+ publication_method: email
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ rundeck_forward_iframe: 'False'
+ rundeck_iframe_host: ${_param:openstack_proxy_address}
+ rundeck_iframe_port: ${_param:haproxy_rundeck_exposed_port}
+ rundeck_iframe_ssl: 'False'
+ salt_api_password: qFc7FCrLBnWkeLxRWWQeezH5dmuI5vsI
+ salt_api_password_hash: $6$hbWiURAY$Sfzk6dzos6j1B8gFDK6WoGNDk0I2Bd2IOarWDGOflgY2sBpUJ4KTq1Uw241ri933/ROHTSuhNcodmDe13i5gS.
+ salt_master_address: 172.16.49.66
+ salt_master_hostname: cfg01
+ salt_master_management_address: 172.16.49.66
+ sfdc_auth_url: asas
+ sfdc_consumer_key: admin
+ sfdc_consumer_secret: admin
+ sfdc_environment_id: admin
+ sfdc_organization_id: admin
+ sfdc_password: admin
+ sfdc_sandbox_enabled: 'False'
+ sfdc_username: admin
+ shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+ stacklight_enabled: 'True'
+ stacklight_version: '2'
+ static_ips_on_deploy_network_enabled: 'False'
+ tenant_network_gateway: ''
+ tenant_network_netmask: 255.255.255.0
+ tenant_network_subnet: 192.168.0.0/24
+ tenant_vlan: '2423'
+ upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'True'
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-environment.yaml
new file mode 100644
index 0000000..9e8ef5d
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-environment.yaml
@@ -0,0 +1,101 @@
+nodes:
+ # Virtual Control Plane nodes
+
+ ctl01.ocata-cicd.local:
+ reclass_storage_name: openstack_control_node01
+ roles:
+ - openstack_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ ctl02.ocata-cicd.local:
+ reclass_storage_name: openstack_control_node02
+ roles:
+ - openstack_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ ctl03.ocata-cicd.local:
+ reclass_storage_name: openstack_control_node03
+ roles:
+ - openstack_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ dbs01.ocata-cicd.local:
+ reclass_storage_name: openstack_database_node01
+ roles:
+ - openstack_database_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ dbs02.ocata-cicd.local:
+ reclass_storage_name: openstack_database_node02
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ dbs03.ocata-cicd.local:
+ reclass_storage_name: openstack_database_node03
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ msg01.ocata-cicd.local:
+ reclass_storage_name: openstack_message_queue_node01
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ msg02.ocata-cicd.local:
+ reclass_storage_name: openstack_message_queue_node02
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ msg03.ocata-cicd.local:
+ reclass_storage_name: openstack_message_queue_node03
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ prx01.ocata-cicd.local:
+ reclass_storage_name: openstack_proxy_node01
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ prx02.ocata-cicd.local:
+ reclass_storage_name: openstack_proxy_node02
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt.yaml
new file mode 100644
index 0000000..40e8d68
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt.yaml
@@ -0,0 +1,139 @@
+{% from 'cookied-bm-ocata-cicd-pipeline/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-ocata-cicd-pipeline/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'cookied-bm-ocata-cicd-pipeline/underlay.yaml' import LAB_CONFIG_NAME with context %}
+
+{% set NFV_ENABLED = os_env('NFV_ENABLED','false') %}
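+# When NFV_ENABLED is 'true', the DPDK/SR-IOV cookiecutter context and NFV physical
+# inventory are selected below; otherwise the plain OVS context and inventory are used.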
+
+{%- if NFV_ENABLED == 'true' %}
+{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-initial-cicd-nfv.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory-nfv.yaml'] %}
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-ocata-cicd-nfv') %}
+{%- else %}
+{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-initial-cicd.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml'] %}
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-ocata-cicd') %}
+{%- endif %}
+
+{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
+{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
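+# These VLAN defaults match control_vlan '2422' and tenant_vlan '2423' defined in the
+# cookiecutter context files of this template.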
+
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja" "jenkins"') }}
+
+- description: 'Workaround for typo in salt.minion.service (https://gerrit.mcp.mirantis.net/#/c/14806/)'
+ cmd: |
+ git clone https://gerrit.mcp.mirantis.net/salt-formulas/salt /tmp/salt-formula-salt;
+ pushd /tmp/salt-formula-salt;
+ git fetch https://gerrit.mcp.mirantis.net/salt-formulas/salt refs/changes/06/14806/1 && git checkout FETCH_HEAD;
+ popd;
+ cp /tmp/salt-formula-salt/salt/minion/service.sls /usr/share/salt-formulas/env/salt/minion/service.sls;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+- description: "Workaround for rack01 compute generator"
+ cmd: |
+ set -e;
+ # Remove rack01 key
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ # Add openstack_compute_node definition from system
+ reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml --merge;
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Remove cicd nodes from VCP, because we have baremetal cicd nodes
+ cmd: |
+ sed -i 's/\-\ system\.salt\.control\.cluster\.cicd\_control\_cluster//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: "Workaround for PROD-16973"
+ cmd: |
+ set -e;
+ # Remove obsolete logging section key
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools del-key parameters.nova.controller.logging /srv/salt/reclass/classes/system/nova/control/cluster.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+ cmd: |
+ sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
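+# Note: the 16 hugepages of 1G size requested below correspond to
+# openstack_nova_compute_hugepages_count: '16' in the NFV cookiecutter context.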
+- description: WR for mounting 1G hugepages before linux.state
+ cmd: |
+ salt 'cmp*' state.sls linux.system.hugepages;
+ salt 'cmp*' cmd.run "mount -o mode=775,pagesize=1G -t hugetlbfs Hugetlbfs-kvm /mnt/hugepages_1G";
+ salt 'cmp*' cmd.run "echo 16 | sudo tee /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: WR for correct access to git repo from jenkins on cfg01 node
+ cmd: |
+ git clone --mirror https://github.com/Mirantis/mk-pipelines.git /home/repo/mk/mk-pipelines/;
+ git clone --mirror https://github.com/Mirantis/pipeline-library.git /home/repo/mcp-ci/pipeline-library/;
+ chown -R git:www-data /home/repo/mk/mk-pipelines/*;
+ chown -R git:www-data /home/repo/mcp-ci/pipeline-library/*;
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+#########################################
+# Configure all running salt minion nodes
+#########################################
+
+- description: Hack resolv.conf on VCP nodes for internal services access
+ cmd: |
+ salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not kvm* and not cmp* and not gtw* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Refresh pillars on all minions
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Sync all salt resources
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Show reclass-salt --top for generated nodes
+ cmd: reclass-salt --top -u /srv/salt/reclass/nodes/_generated/
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--meta-data.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--meta-data.yaml
new file mode 100644
index 0000000..a594a53
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+ instance-id: iid-local1
+ hostname: {hostname}
+ local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..c6314ad
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data-cfg01.yaml
@@ -0,0 +1,103 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Block access to SSH while node is preparing
+ - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Enable grub menu using updated config below
+ - update-grub
+
+ # Prepare network connection
+ - sudo ifdown ens3
+ - sudo ip r d default || true # remove existing default route to get it from dhcp
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ ############## TCP Cloud cfg01 node ##################
+ #- sleep 120
+ - echo "Preparing base OS"
+
+ - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+ - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
+ - which wget >/dev/null || (apt-get update; apt-get install -y wget);
+
+ # Configure Ubuntu mirrors
+ - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+ - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+ - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+
+ - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+ - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+ - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+ - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+ - apt-get clean
+ - apt-get update
+
+ # Install common packages
+ - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+ # Install salt-minion and stop it until it is configured
+ - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+ ########################################################
+ # Node is ready, allow SSH access
+ - echo "Allow SSH access ..."
+ - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ ########################################################
+
+ write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
+ - path: /root/.ssh/config
+ owner: root:root
+ permissions: '0600'
+ content: |
+ Host *
+ ServerAliveInterval 300
+ ServerAliveCountMax 10
+ StrictHostKeyChecking no
+ UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data1604.yaml
new file mode 100644
index 0000000..c3ffe17
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data1604.yaml
@@ -0,0 +1,100 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Block access to SSH while node is preparing
+ - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Enable grub menu using updated config below
+ - update-grub
+
+ # Prepare network connection
+ - sudo ifup {interface_name}
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+
+ ############## TCP Cloud cfg01 node ##################
+ #- sleep 120
+ - echo "Preparing base OS"
+ - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+ - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
+ - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
+ # Configure Ubuntu mirrors
+ - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+ - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+ - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+
+ - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+ - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+ - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+ - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+ - apt-get clean
+ - eatmydata apt-get update && apt-get -y upgrade
+
+ # Install common packages
+ - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+ # Install salt-minion and stop it until it is configured
+ - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+ # Install latest kernel
+ - eatmydata apt-get install -y linux-generic-hwe-16.04
+
+ ########################################################
+ # Node is ready, allow SSH access
+ #- echo "Allow SSH access ..."
+ #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ - reboot
+ ########################################################
+
+ write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ # The loopback network interface
+ auto lo
+ iface lo inet loopback
+ auto {interface_name}
+ iface {interface_name} inet dhcp
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay.yaml
new file mode 100644
index 0000000..bff4542
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay.yaml
@@ -0,0 +1,688 @@
+# Set the repository suite, one of: 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-ocata-cicd-pipeline') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+# {% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) %}
+
+{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.66') %}
+{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.49.67') %}
+{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.49.68') %}
+{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.16.49.69') %}
+{% set ETH0_IP_ADDRESS_CID01 = os_env('ETH0_IP_ADDRESS_CID01', '172.16.49.70') %}
+{% set ETH0_IP_ADDRESS_CID02 = os_env('ETH0_IP_ADDRESS_CID02', '172.16.49.71') %}
+{% set ETH0_IP_ADDRESS_CID03 = os_env('ETH0_IP_ADDRESS_CID03', '172.16.49.72') %}
+{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '172.16.49.73') %}
+{% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '172.16.49.74') %}
+# {% set ETH0_IP_ADDRESS_CMP003 = os_env('ETH0_IP_ADDRESS_CMP003', '172.16.167.140') %}
+{% set ETH0_IP_ADDRESS_GTW01 = os_env('ETH0_IP_ADDRESS_GTW01', '172.16.49.75') %}
+# {% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '172.16.49.76') %}
+
+{% import 'cookied-bm-ocata-cicd-pipeline/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-bm-ocata-cicd-pipeline/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-bm-ocata-cicd-pipeline/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data {{ CLOUDINIT_USER_DATA }}
+
+
+template:
+ devops_settings:
+ env_name: {{ os_env('ENV_NAME', 'ocata-cicd_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+ address_pools:
+ admin-pool01:
+ net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.64/26:26') }}
+ params:
+ ip_reserved:
+ gateway: +62
+ l2_network_device: +61
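+ # With the default 172.16.49.64/26 admin pool, the '+62' offset resolves to
+ # 172.16.49.126, the deploy_network_gateway used in the cookiecutter contexts.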
+ default_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
+ default_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
+ default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
+ default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
+ default_{{ HOSTNAME_CID01 }}: {{ ETH0_IP_ADDRESS_CID01 }}
+ default_{{ HOSTNAME_CID02 }}: {{ ETH0_IP_ADDRESS_CID02 }}
+ default_{{ HOSTNAME_CID03 }}: {{ ETH0_IP_ADDRESS_CID03 }}
+ default_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
+ default_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
+ # default_{{ HOSTNAME_CMP003 }}: {{ ETH0_IP_ADDRESS_CMP003 }}
+ default_{{ HOSTNAME_GTW01 }}: {{ ETH0_IP_ADDRESS_GTW01 }}
+ # default_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
+ virtual_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
+ virtual_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
+ virtual_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
+ virtual_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
+ virtual_{{ HOSTNAME_CID01 }}: {{ ETH0_IP_ADDRESS_CID01 }}
+ virtual_{{ HOSTNAME_CID02 }}: {{ ETH0_IP_ADDRESS_CID02 }}
+ virtual_{{ HOSTNAME_CID03 }}: {{ ETH0_IP_ADDRESS_CID03 }}
+ virtual_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
+ virtual_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
+ # virtual_{{ HOSTNAME_CMP003 }}: {{ ETH0_IP_ADDRESS_CMP003 }}
+ virtual_{{ HOSTNAME_GTW01 }}: {{ ETH0_IP_ADDRESS_GTW01 }}
+ # virtual_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
+ #ip_ranges:
+ # dhcp: [+2, -4]
+ private-pool01:
+ net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.8.0/24:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+
+ tenant-pool01:
+ net: {{ os_env('TENANT_ADDRESS_POOL01', '10.167.10.0/24:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+
+ external-pool01:
+ net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.192/26:26') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: -2
+
+ groups:
+
+ - name: virtual
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: !os_env CONNECTION_STRING, qemu:///system
+ storage_pool_name: !os_env STORAGE_POOL_NAME, default
+ stp: False
+ hpet: False
+ enable_acpi: true
+ use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+
+ network_pools:
+ admin: admin-pool01
+
+ l2_network_devices:
+ # Ironic management interface
+ admin:
+ address_pool: admin-pool01
+ dhcp: false
+ parent_iface:
+ phys_dev: !os_env IRONIC_LAB_PXE_IFACE_0
+
+ group_volumes:
+ - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
+ source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ format: qcow2
+ - name: cfg01_day01_image # Pre-configured day01 image
+ source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+ format: qcow2
+
+ nodes:
+ - name: {{ HOSTNAME_CFG01 }}
+ role: salt_master
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cfg01_day01_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_cfg01
+
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
+ #- label: ens4
+ # l2_network_device: private
+ # interface_model: *interface_model
+ network_config:
+ ens3:
+ networks:
+ - admin
+ #ens4:
+ # networks:
+ # - private
+
+
+ - name: default
+ driver:
+ name: devops_driver_ironic
+ params:
+ os_auth_token: fake-token
+ ironic_url: !os_env IRONIC_URL # URL that will be used by fuel-devops
+ # to access Ironic API
+ # Agent URL that is accessible from the deploying node when nodes
+ # are bootstrapped with PXE. Usually the PXE/provision network address is used.
+ agent_kernel_url: !os_env IRONIC_AGENT_KERNEL_URL
+ agent_ramdisk_url: !os_env IRONIC_AGENT_RAMDISK_URL
+
+ network_pools:
+ admin: admin-pool01
+
+ nodes:
+
+ # - name: {{ HOSTNAME_CFG01 }}
+ # role: salt_master
+ # params:
+ # ipmi_user: !os_env IPMI_USER
+ # ipmi_password: !os_env IPMI_PASSWORD
+ # ipmi_previlegies: OPERATOR
+ # ipmi_host: !os_env IPMI_HOST_CFG01 # hostname or IP address
+ # ipmi_lan_interface: lanplus
+ # ipmi_port: 623
+
+ # root_volume_name: system # see 'volumes' below
+ # cloud_init_volume_name: iso # see 'volumes' below
+ # cloud_init_iface_up: enp3s0f1 # see 'interfaces' below.
+ # volumes:
+ # - name: system
+ # capacity: !os_env NODE_VOLUME_SIZE, 200
+
+ # # The same as for agent URL, here is an URL to the image that should be
+ # # used for deploy the node. It should also be accessible from deploying
+ # # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
+ # source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ # source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ # - name: iso # Volume with name 'iso' will be used
+ # # for store image with cloud-init metadata.
+
+ # cloudinit_meta_data: *cloudinit_meta_data
+ # cloudinit_user_data: *cloudinit_user_data_cfg01
+
+ # interfaces:
+ # - label: enp3s0f0 # Infra interface
+ # mac_address: !os_env ETH0_MAC_ADDRESS_CFG01
+ # - label: enp3s0f1
+ # l2_network_device: admin
+ # mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
+
+ # network_config:
+ # enp3s0f0:
+ # networks:
+ # - infra
+ # enp3s0f1:
+ # networks:
+ # - admin
+
+ - name: {{ HOSTNAME_KVM01 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_KVM01 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
+
+           # As with the agent URLs, this is the URL of the image used to deploy the node.
+           # It must also be accessible from the deploying node when nodes are provisioned
+           # by the agent. Usually a PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data
+
+ interfaces:
+ - label: enp9s0f0
+ l2_network_device: admin
+ mac_address: !os_env ETH0_MAC_ADDRESS_KVM01
+ - label: enp9s0f1
+ mac_address: !os_env ETH1_MAC_ADDRESS_KVM01
+
+ network_config:
+ enp9s0f0:
+ networks:
+ - admin
+ bond0:
+ networks:
+ - control
+ aggregation: active-backup
+ parents:
+ - enp9s0f1
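+      # Interface scheme for this and the following bare-metal nodes (assumed intent):
+      # the first NIC stays on the admin/PXE network, while bond0 in active-backup
+      # mode carries the control network over the remaining NIC(s).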
+
+ - name: {{ HOSTNAME_KVM02 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_KVM02 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
+
+           # As with the agent URLs, this is the URL of the image used to deploy the node.
+           # It must also be accessible from the deploying node when nodes are provisioned
+           # by the agent. Usually a PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data
+
+ interfaces:
+ - label: enp9s0f0
+ l2_network_device: admin
+ mac_address: !os_env ETH0_MAC_ADDRESS_KVM02
+ - label: enp9s0f1
+ mac_address: !os_env ETH1_MAC_ADDRESS_KVM02
+
+ network_config:
+ enp9s0f0:
+ networks:
+ - admin
+ bond0:
+ networks:
+ - control
+ aggregation: active-backup
+ parents:
+ - enp9s0f1
+
+ - name: {{ HOSTNAME_KVM03 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_KVM03 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ # cloud_init_iface_up: eno1 # see 'interfaces' below.
+ cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
+
+           # As with the agent URLs, this is the URL of the image used to deploy the node.
+           # It must also be accessible from the deploying node when nodes are provisioned
+           # by the agent. Usually a PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data
+
+ interfaces:
+ # - label: eno1
+ - label: enp9s0f0
+ l2_network_device: admin
+ mac_address: !os_env ETH0_MAC_ADDRESS_KVM03
+ # - label: eno2
+ - label: enp9s0f1
+ mac_address: !os_env ETH1_MAC_ADDRESS_KVM03
+
+ network_config:
+ # eno1:
+ enp9s0f0:
+ networks:
+ - admin
+ bond0:
+ networks:
+ - control
+ aggregation: active-backup
+ parents:
+ - enp9s0f1
+
+ - name: {{ HOSTNAME_CID01 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_CID01 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ cloud_init_iface_up: enp2s0f0 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
+
+           # As with the agent URLs, this is the URL of the image used to deploy the node.
+           # It must also be accessible from the deploying node when nodes are provisioned
+           # by the agent. Usually a PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data
+
+ interfaces:
+ - label: enp2s0f0
+ l2_network_device: admin
+ mac_address: !os_env ETH0_MAC_ADDRESS_CID01
+ - label: enp2s0f1
+ mac_address: !os_env ETH1_MAC_ADDRESS_CID01
+
+ network_config:
+ enp2s0f0:
+ networks:
+ - admin
+ bond0:
+ networks:
+ - control
+ aggregation: active-backup
+ parents:
+ - enp2s0f1
+
+    - name: {{ HOSTNAME_CID02 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_CID02 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ cloud_init_iface_up: enp2s0f0 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
+
+           # As with the agent URLs, this is the URL of the image used to deploy the node.
+           # It must also be accessible from the deploying node when nodes are provisioned
+           # by the agent. Usually a PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data
+
+ interfaces:
+ - label: enp2s0f0
+ l2_network_device: admin
+ mac_address: !os_env ETH0_MAC_ADDRESS_CID02
+ - label: enp2s0f1
+ mac_address: !os_env ETH1_MAC_ADDRESS_CID02
+
+ network_config:
+ enp2s0f0:
+ networks:
+ - admin
+ bond0:
+ networks:
+ - control
+ aggregation: active-backup
+ parents:
+ - enp2s0f1
+
+ - name: {{ HOSTNAME_CID03 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_CID03 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ cloud_init_iface_up: enp2s0f0 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
+
+           # As with the agent URLs, this is the URL of the image used to deploy the node.
+           # It must also be accessible from the deploying node when nodes are provisioned
+           # by the agent. Usually a PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data
+
+ interfaces:
+ - label: enp2s0f0
+ l2_network_device: admin
+ mac_address: !os_env ETH0_MAC_ADDRESS_CID03
+ - label: enp2s0f1
+ mac_address: !os_env ETH1_MAC_ADDRESS_CID03
+
+ network_config:
+ enp2s0f0:
+ networks:
+ - admin
+ bond0:
+ networks:
+ - control
+ aggregation: active-backup
+ parents:
+ - enp2s0f1
+
+ - name: {{ HOSTNAME_CMP001 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_CMP001 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ # cloud_init_iface_up: enp3s0f0 # see 'interfaces' below.
+ cloud_init_iface_up: enp2s0f1 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
+
+           # As with the agent URLs, this is the URL of the image used to deploy the node.
+           # It must also be accessible from the deploying node when nodes are provisioned
+           # by the agent. Usually a PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data
+
+ interfaces:
+ - label: enp2s0f0
+ mac_address: !os_env ETH0_MAC_ADDRESS_CMP001
+ - label: enp2s0f1
+ l2_network_device: admin
+ mac_address: !os_env ETH1_MAC_ADDRESS_CMP001
+ - label: enp5s0f0
+ mac_address: !os_env ETH2_MAC_ADDRESS_CMP001
+ features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
+ - label: enp5s0f1
+ mac_address: !os_env ETH3_MAC_ADDRESS_CMP001
+ features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
+ # - label: enp5s0f2
+ # mac_address: !os_env ETH4_MAC_ADDRESS_CMP001
+ # features: ['dpdk', 'dpdk_pci: 0000:05:00.2']
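+        # The 'dpdk' / 'dpdk_pci: <pci-address>' features above mark NICs that are
+        # expected (assumed handling) to be bound to DPDK by the deployment templates;
+        # the PCI addresses are specific to this lab's hardware.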
+
+ network_config:
+ enp2s0f0:
+ networks:
+ - admin
+ bond0:
+ networks:
+ - control
+ aggregation: active-backup
+ parents:
+ - enp5s0f0
+ - enp5s0f1
+
+ - name: {{ HOSTNAME_CMP002 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_CMP002 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ # cloud_init_iface_up: eno1 # see 'interfaces' below.
+ cloud_init_iface_up: enp2s0f1 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
+
+           # As with the agent URLs, this is the URL of the image used to deploy the node.
+           # It must also be accessible from the deploying node when nodes are provisioned
+           # by the agent. Usually a PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data
+
+ interfaces:
+ # - label: eno1
+ - label: enp2s0f0
+ mac_address: !os_env ETH0_MAC_ADDRESS_CMP002
+ # - label: eth0
+ - label: enp2s0f1
+ l2_network_device: admin
+ mac_address: !os_env ETH1_MAC_ADDRESS_CMP002
+ # - label: eth3
+ - label: enp5s0f0
+ mac_address: !os_env ETH2_MAC_ADDRESS_CMP002
+ features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
+ # - label: eth2
+ - label: enp5s0f1
+ mac_address: !os_env ETH3_MAC_ADDRESS_CMP002
+ features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
+ # - label: eth4
+ # mac_address: !os_env ETH4_MAC_ADDRESS_CMP002
+ # features: ['dpdk', 'dpdk_pci: 0000:0b:00.0']
+
+ network_config:
+ enp2s0f1:
+ networks:
+ - admin
+ bond0:
+ networks:
+ - control
+ aggregation: active-backup
+ parents:
+ - enp5s0f0
+ - enp5s0f1
+
+ - name: {{ HOSTNAME_GTW01 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_GTW01 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
+
+           # As with the agent URLs, this is the URL of the image used to deploy the node.
+           # It must also be accessible from the deploying node when nodes are provisioned
+           # by the agent. Usually a PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data
+
+ interfaces:
+ - label: enp9s0f0
+ l2_network_device: admin
+ mac_address: !os_env ETH0_MAC_ADDRESS_GTW01
+ - label: enp9s0f1
+ mac_address: !os_env ETH1_MAC_ADDRESS_GTW01
+
+ network_config:
+ enp9s0f0:
+ networks:
+ - admin
+ bond0:
+ networks:
+ - control
+ aggregation: active-backup
+ parents:
+ - enp9s0f1
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml
index c823df7..7f50cef 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml
@@ -8,7 +8,7 @@
# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-pike-dvr.yaml' %}
+{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-pike-dvr-ssl.yaml' %}
{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
{% import 'shared-salt.yaml' as SHARED with context %}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml
new file mode 100644
index 0000000..9fb83bf
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/_context-cookiecutter-mcp-pike-dvr-ssl.yaml
@@ -0,0 +1,154 @@
+default_context:
+ bmk_enabled: 'False'
+ ceph_enabled: 'False'
+ cicd_enabled: 'False'
+ cluster_domain: virtual-mcp-pike-dvr-ssl.local
+ cluster_name: virtual-mcp-pike-dvr-ssl
+ compute_bond_mode: active-backup
+ compute_primary_first_nic: eth1
+ compute_primary_second_nic: eth2
+ context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
+ control_network_netmask: 255.255.255.0
+ control_network_subnet: 172.16.10.0/24
+ control_vlan: '10'
+ cookiecutter_template_branch: master
+ cookiecutter_template_credentials: gerrit
+ cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+ deploy_network_gateway: 192.168.10.1
+ deploy_network_netmask: 255.255.255.0
+ deploy_network_subnet: 192.168.10.0/24
+ deployment_type: physical
+ dns_server01: 8.8.8.8
+ dns_server02: 8.8.4.4
+ email_address: ddmitriev@mirantis.com
+ gateway_primary_first_nic: eth1
+ gateway_primary_second_nic: eth2
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: 172.16.10.101
+ infra_kvm01_deploy_address: 192.168.10.101
+ infra_kvm01_hostname: kvm01
+ infra_kvm02_control_address: 172.16.10.102
+ infra_kvm02_deploy_address: 192.168.10.102
+ infra_kvm02_hostname: kvm02
+ infra_kvm03_control_address: 172.16.10.103
+ infra_kvm03_deploy_address: 192.168.10.103
+ infra_kvm03_hostname: kvm03
+ infra_kvm_vip_address: 172.16.10.100
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ kubernetes_enabled: 'False'
+ local_repositories: 'False'
+ maas_deploy_address: 192.168.10.90
+ maas_hostname: cfg01
+ mcp_version: stable
+ offline_deployment: 'False'
+ opencontrail_enabled: 'False'
+ openstack_benchmark_node01_address: 172.16.10.95
+ openstack_benchmark_node01_hostname: bmk01
+ openstack_cluster_size: compact
+ openstack_compute_count: '100'
+ openstack_compute_rack01_hostname: cmp
+ openstack_compute_rack01_single_subnet: 172.16.10
+ openstack_compute_rack01_tenant_subnet: 10.1.0
+ openstack_control_address: 172.16.10.100
+ openstack_control_hostname: ctl
+ openstack_control_node01_address: 172.16.10.101
+ openstack_control_node01_hostname: ctl01
+ openstack_control_node02_address: 172.16.10.102
+ openstack_control_node02_hostname: ctl02
+ openstack_control_node03_address: 172.16.10.103
+ openstack_control_node03_hostname: ctl03
+ openstack_database_address: 172.16.10.100
+ openstack_database_hostname: ctl
+ openstack_database_node01_address: 172.16.10.101
+ openstack_database_node01_hostname: ctl01
+ openstack_database_node02_address: 172.16.10.102
+ openstack_database_node02_hostname: ctl02
+ openstack_database_node03_address: 172.16.10.103
+ openstack_database_node03_hostname: ctl03
+ openstack_enabled: 'True'
+ openstack_gateway_node01_address: 172.16.10.110
+ openstack_gateway_node01_hostname: gtw01
+ openstack_gateway_node01_tenant_address: 10.1.0.6
+ openstack_gateway_node02_address: 172.16.10.111
+ openstack_gateway_node02_hostname: gtw02
+ openstack_gateway_node02_tenant_address: 10.1.0.7
+ openstack_gateway_node03_address: 172.16.10.112
+ openstack_gateway_node03_hostname: gtw03
+ openstack_gateway_node03_tenant_address: 10.1.0.8
+ openstack_message_queue_address: 172.16.10.100
+ openstack_message_queue_hostname: ctl
+ openstack_message_queue_node01_address: 172.16.10.101
+ openstack_message_queue_node01_hostname: ctl01
+ openstack_message_queue_node02_address: 172.16.10.102
+ openstack_message_queue_node02_hostname: ctl02
+ openstack_message_queue_node03_address: 172.16.10.103
+ openstack_message_queue_node03_hostname: ctl03
+ openstack_network_engine: ovs
+ openstack_neutron_qos: 'False'
+ openstack_neutron_vlan_aware_vms: 'False'
+ openstack_nfv_dpdk_enabled: 'False'
+ openstack_nfv_sriov_enabled: 'False'
+ openstack_nova_compute_nfv_req_enabled: 'False'
+ openstack_ovs_dvr_enabled: 'True'
+ openstack_ovs_encapsulation_type: vxlan
+ openstack_proxy_address: 172.16.10.80
+ openstack_proxy_hostname: prx
+ openstack_proxy_node01_address: 172.16.10.121
+ openstack_proxy_node01_hostname: prx01
+ openstack_proxy_node02_address: 172.16.10.122
+ openstack_proxy_node02_hostname: prx02
+ openstack_upgrade_node01_address: 172.16.10.19
+ openstack_version: pike
+ oss_enabled: 'False'
+ oss_node03_address: ${_param:stacklight_monitor_node03_address}
+ oss_notification_app_id: '24'
+ oss_notification_sender_password: password
+ oss_notification_smtp_port: '587'
+ oss_notification_webhook_login_id: '13'
+ platform: openstack_enabled
+ public_host: ${_param:openstack_proxy_address}
+ publication_method: email
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
+ salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
+ salt_master_address: 172.16.10.90
+ salt_master_hostname: cfg01
+ salt_master_management_address: 192.168.10.90
+ shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+ stacklight_enabled: 'True'
+ stacklight_log_address: 172.16.10.70
+ stacklight_log_hostname: mon
+ stacklight_log_node01_address: 172.16.10.107
+ stacklight_log_node01_hostname: mon01
+ stacklight_log_node02_address: 172.16.10.108
+ stacklight_log_node02_hostname: mon02
+ stacklight_log_node03_address: 172.16.10.109
+ stacklight_log_node03_hostname: mon03
+ stacklight_monitor_address: 172.16.10.70
+ stacklight_monitor_hostname: mon
+ stacklight_monitor_node01_address: 172.16.10.107
+ stacklight_monitor_node01_hostname: mon01
+ stacklight_monitor_node02_address: 172.16.10.108
+ stacklight_monitor_node02_hostname: mon02
+ stacklight_monitor_node03_address: 172.16.10.109
+ stacklight_monitor_node03_hostname: mon03
+ stacklight_notification_address: alerts@localhost
+ stacklight_notification_smtp_host: 127.0.0.1
+ stacklight_telemetry_address: 172.16.10.70
+ stacklight_telemetry_hostname: mon
+ stacklight_telemetry_node01_address: 172.16.10.107
+ stacklight_telemetry_node01_hostname: mon01
+ stacklight_telemetry_node02_address: 172.16.10.108
+ stacklight_telemetry_node02_hostname: mon02
+ stacklight_telemetry_node03_address: 172.16.10.109
+ stacklight_telemetry_node03_hostname: mon03
+ stacklight_version: '2'
+ static_ips_on_deploy_network_enabled: 'False'
+ tenant_network_gateway: 10.1.0.1
+ tenant_network_netmask: 255.255.255.0
+ tenant_network_subnet: 10.1.0.0/24
+ tenant_vlan: '20'
+ upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'False'
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/_context-environment.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/_context-environment.yaml
new file mode 100644
index 0000000..dcb1e40
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/_context-environment.yaml
@@ -0,0 +1,165 @@
+nodes:
+ cfg01.virtual-mcp-pike-dvr-ssl.local:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ ctl01.virtual-mcp-pike-dvr-ssl.local:
+ reclass_storage_name: openstack_control_node01
+ roles:
+ - infra_kvm
+ - openstack_control_leader
+ - openstack_database_leader
+ - openstack_message_queue
+ - features_designate_pool_manager_database
+ - features_designate_pool_manager
+ - features_designate_pool_manager_keystone
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ ctl02.virtual-mcp-pike-dvr-ssl.local:
+ reclass_storage_name: openstack_control_node02
+ roles:
+ - infra_kvm
+ - openstack_control
+ - openstack_database
+ - openstack_message_queue
+ - features_designate_pool_manager_database
+ - features_designate_pool_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ ctl03.virtual-mcp-pike-dvr-ssl.local:
+ reclass_storage_name: openstack_control_node03
+ roles:
+ - infra_kvm
+ - openstack_control
+ - openstack_database
+ - openstack_message_queue
+ - features_designate_pool_manager_database
+ - features_designate_pool_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ prx01.virtual-mcp-pike-dvr-ssl.local:
+ reclass_storage_name: openstack_proxy_node01
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ mon01.virtual-mcp-pike-dvr-ssl.local:
+ reclass_storage_name: stacklight_server_node01
+ roles:
+ - stacklightv2_server_leader
+ - stacklight_telemetry_leader
+ - stacklight_log_leader_v2
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ mon02.virtual-mcp-pike-dvr-ssl.local:
+ reclass_storage_name: stacklight_server_node02
+ roles:
+ - stacklightv2_server
+ - stacklight_telemetry
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ mon03.virtual-mcp-pike-dvr-ssl.local:
+ reclass_storage_name: stacklight_server_node03
+ roles:
+ - stacklightv2_server
+ - stacklight_telemetry
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ # Generator-based computes. For compatibility only
+ cmp<<count>>.virtual-mcp-pike-dvr-ssl.local:
+ reclass_storage_name: openstack_compute_rack01
+ roles:
+ - openstack_compute
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: bond0_ab_ovs_vxlan_ctl_mesh
+ ens5:
+ role: bond0_ab_ovs_vxlan_ctl_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
+
+ gtw01.virtual-mcp-pike-dvr-ssl.local:
+ reclass_storage_name: openstack_gateway_node01
+ roles:
+ - openstack_gateway
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: bond0_ab_ovs_vxlan_ctl_mesh
+ ens5:
+ role: bond0_ab_ovs_vxlan_ctl_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
+
+ dns01.virtual-mcp-pike-dvr-ssl.local:
+ reclass_storage_name: openstack_dns_node01
+ roles:
+ - features_designate_pool_manager_dns
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+ single_address: ${_param:openstack_dns_node01_address}
+
+ dns02.virtual-mcp-pike-dvr-ssl.local:
+ reclass_storage_name: openstack_dns_node02
+ roles:
+ - features_designate_pool_manager_dns
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+ single_address: ${_param:openstack_dns_node02_address}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/common-services.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/common-services.yaml
new file mode 100644
index 0000000..39e9398
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/common-services.yaml
@@ -0,0 +1,118 @@
+{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+# Install support services
+- description: Install keepalived on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster and *01*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Install keepalived
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Check the VIP
+ cmd: |
+ OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
+ echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
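+# The check above fails (non-zero exit) when no keepalived node reports the VIP: the
+# outer 'grep -B1' finds nothing unless some minion's 'ip a' output contains the
+# openstack_control_address, which is how the step asserts the VIP came up.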
+
+
+- description: Install glusterfs
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.service
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Setup glusterfs on primary controller
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check the gluster status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check the rabbitmq status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on first server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:master' state.sls galera
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on other servers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:slave' state.sls galera
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check mysql status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+
+- description: Install haproxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' state.sls haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check haproxy status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.status haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Restart rsyslog
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.restart rsyslog
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install memcached on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@memcached:server' state.sls memcached
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml
new file mode 100644
index 0000000..5d573d7
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml
@@ -0,0 +1,401 @@
+{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
+{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
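+# Assumed usage: OVERRIDE_POLICY acts as a flag; exporting any non-empty value (for
+# example OVERRIDE_POLICY=1) enables the policy-override steps below, which upload
+# overrides-policy.yml from this template directory over the generated control class.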
+
+# Install OpenStack control services
+
+{%- if OVERRIDE_POLICY != '' %}
+- description: Upload policy override
+ upload:
+ local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
+ local_filename: overrides-policy.yml
+ remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/
+ node_name: {{ HOSTNAME_CFG01 }}
+
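+# The next two steps wrap the generated control class (assumed intent): overrides-policy.yml
+# is prefixed with a 'classes:' include of the original control class and then symlinked in
+# place of control.yml, so the policy overrides extend rather than replace the base class.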
+- description: Create custom cluster control class
+ cmd: echo -e "classes:\n- cluster.{{ LAB_CONFIG_NAME }}.openstack.control_orig\n$(cat /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml)" > /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml
+ node_name: {{ HOSTNAME_CFG01 }}
+
+- description: Rename control classes
+ cmd: mv /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control_orig.yml &&
+ ln -s /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml &&
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+ node_name: {{ HOSTNAME_CFG01 }}
+{%- endif %}
+
+- description: Nginx
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nginx:server' state.sls salt.minion
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Deploy nginx proxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nginx:server' state.sls nginx
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Install glance on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server' state.sls glance -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install keystone service (note that different fernet keys are created on different nodes)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 15}
+ skip_fail: false
+
+- description: Restart apache due to PROD-10477
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Check apache status due to PROD-10477
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to be created)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server' state.sls glusterfs.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Update fernet keys for keystone server on the mounted glusterfs volume
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Populate keystone services/tenants/admins
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:client' state.sls keystone.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check keystone service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check glance image-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install nova on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nova:controller' state.sls nova -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check nova service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova --debug service-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+
+- description: Install cinder
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@cinder:controller' state.sls cinder -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check cinder list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install neutron service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@neutron:server' state.sls neutron -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install neutron on gtw node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@neutron:gateway' state.sls neutron
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+# Install designate
+- description: Install powerdns
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@powerdns:server' state.sls powerdns.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install designate
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@designate:server' state.sls designate -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 10}
+ skip_fail: false
+
+- description: Check neutron agent-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install heat service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@heat:server' state.sls heat -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check heat service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 10}
+ skip_fail: false
+
+
+- description: Deploy horizon dashboard
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@horizon:server' state.sls horizon
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+# Install compute node
+
+- description: Apply formulas for compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Re-apply (as in the docs) formulas for compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check IP on computes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+ 'ip a'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 10, delay: 30}
+ skip_fail: false
+
+
+# Upload cirros image
+
+- description: Upload cirros image on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 30}
+ skip_fail: false
+
+- description: Register image in glance
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create net04_external
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create subnet_external
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create net04'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create subnet_net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create router
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-create net04_router01'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set gateway
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Add interface
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+#- description: Allow all tcp
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+# '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 30}
+# skip_fail: false
+#
+#- description: Allow all icmp
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+# '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 30}
+# skip_fail: false
+
+- description: sync time
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+ 'service ntp stop; ntpd -gq; service ntp start'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+# Configure cinder-volume salt-call PROD-13167
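+# The fdisk input below ("nn\np\n\n\n\nw") is assumed to mean: new partition, primary,
+# accept the default partition number and first/last sectors, then write the table,
+# i.e. create a single primary partition spanning /dev/vdb.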
+- description: Set disks 01
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set disks 02
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL02 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set disks 03
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create partitions 01
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create partitions 02
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL02 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create partitions 03
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: create volume_group
+ cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Install cinder-volume
+ cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Install crudini
+ cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Temporary workaround to set enabled_backends value 01
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Temporary workaround to set enabled_backends value 02
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL02 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Temporary workaround to set enabled_backends value 03
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Install docker.io on gtw
+ cmd: salt-call cmd.run 'apt-get install docker.io -y'
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Restart cinder volume
+ cmd: |
+ salt -C 'I@cinder:controller' service.restart cinder-volume;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: create rc file on cfg
+ cmd: scp ctl01:/root/keystonercv3 /root
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Copy rc file
+ cmd: scp /root/keystonercv3 gtw01:/root
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/overrides-policy.yml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/overrides-policy.yml
new file mode 100644
index 0000000..1f35a6b
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/overrides-policy.yml
@@ -0,0 +1,40 @@
+parameters:
+ nova:
+ controller:
+ policy:
+ context_is_admin: 'role:admin or role:administrator'
+ 'compute:create': 'rule:admin_or_owner'
+ 'compute:create:attach_network':
+ cinder:
+ controller:
+ policy:
+ 'volume:delete': 'rule:admin_or_owner'
+ 'volume:extend':
+ neutron:
+ server:
+ policy:
+ create_subnet: 'rule:admin_or_network_owner'
+ 'get_network:queue_id': 'rule:admin_only'
+ 'create_network:shared':
+ glance:
+ server:
+ policy:
+ publicize_image: "role:admin"
+ add_member:
+ keystone:
+ server:
+ policy:
+ admin_or_token_subject: 'rule:admin_required or rule:token_subject'
+ heat:
+ server:
+ policy:
+ context_is_admin: 'role:admin and is_admin_project:True'
+ deny_stack_user: 'not role:heat_stack_user'
+ deny_everybody: '!'
+ 'cloudformation:ValidateTemplate': 'rule:deny_everybody'
+ 'cloudformation:DescribeStackResources':
+ ceilometer:
+ server:
+ policy:
+ segregation: 'rule:context_is_admin'
+ 'telemetry:get_resource':
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/salt.yaml
new file mode 100644
index 0000000..d56e449
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/salt.yaml
@@ -0,0 +1,45 @@
+{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# Other salt model repository parameters see in shared-salt.yaml
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd"') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
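+# Assumed intent of the "Hack ..." steps below: drop the addresses that the underlay
+# assigned to ens4 on the gtw/cmp nodes so that the Salt-driven network states can
+# reconfigure the interface (e.g. move it into OVS bridges/bonds) without conflicts.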
+- description: Hack gtw node
+ cmd: salt '{{ HOSTNAME_GTW01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.110/24 dev ens4; ip addr flush dev ens4";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Hack cmp01 node
+ cmd: salt '{{ HOSTNAME_CMP01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.105/24 dev ens4; ip addr flush dev ens4";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Hack cmp02 node
+ cmd: salt '{{ HOSTNAME_CMP02 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.106/24 dev ens4; ip addr flush dev ens4";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/sl.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/sl.yaml
new file mode 100644
index 0000000..f492e73
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/sl.yaml
@@ -0,0 +1,177 @@
+{% from 'virtual-mcp-pike-dvr-ssl/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
+# Install docker swarm
+- description: Install keepalived on mon nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'mon*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check the VIP on mon nodes
+ cmd: |
+ SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+ echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Configure docker service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install docker swarm on master node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Send grains to the swarm slave nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Update mine
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Refresh modules
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Rerun swarm on slaves for proper token population
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Configure slave nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: List registered Docker swarm nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+# Install slv2 infra
+- description: Install telegraf
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
+ cmd: |
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Configure fluentd
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install elasticsearch server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install kibana server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install elasticsearch client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install kibana client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check influxdb
+ cmd: |
+ INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+ echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
+ if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
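+# The step above applies the influxdb state only when an 'influxdb:server' pillar is
+# present; the test.ping probe is a cheap presence check, and skip_fail is true because
+# InfluxDB is assumed to be optional in this lab.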
+
+# Collect grains needed to configure the services
+
+- description: Get grains
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Sync modules
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Update mine
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 15}
+ skip_fail: false
+
+# Configure the services running in Docker Swarm
+- description: Install prometheus alertmanager
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: run docker state
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: docker ps
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Configure Grafana dashboards and datasources
+ cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Run salt minion to create cert files
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
+{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+ instance-id: iid-local1
+ hostname: {hostname}
+ local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..da7908d
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml
@@ -0,0 +1,61 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Prepare network connection
+ - sudo ifdown ens3
+ - sudo ip r d default || true # remove existing default route to get it from dhcp
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+
+ write_files:
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
+ - path: /root/.ssh/config
+ owner: root:root
+ permissions: '0600'
+ content: |
+ Host *
+ ServerAliveInterval 300
+ ServerAliveCountMax 10
+ StrictHostKeyChecking no
+ UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay--user-data1604.yaml
new file mode 100644
index 0000000..3fbb777
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay--user-data1604.yaml
@@ -0,0 +1,50 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Prepare network connection
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ write_files:
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay.yaml
new file mode 100644
index 0000000..32c0e91
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay.yaml
@@ -0,0 +1,516 @@
+# Set the repository suite: 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% import 'virtual-mcp-pike-dvr-ssl/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'virtual-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'virtual-mcp-pike-dvr-ssl/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-pike-dvr-ssl') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+
+template:
+ devops_settings:
+ env_name: {{ os_env('ENV_NAME', 'virtual-mcp-pike-dvr-ssl_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+ address_pools:
+ private-pool01:
+ net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +107
+ default_{{ HOSTNAME_MON02 }}: +108
+ default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+90, -10]
+
+ admin-pool01:
+ net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +90
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +107
+ default_{{ HOSTNAME_MON02 }}: +108
+ default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+90, -10]
+
+ tenant-pool01:
+ net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +107
+ default_{{ HOSTNAME_MON02 }}: +108
+ default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+10, -10]
+
+ external-pool01:
+ net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +107
+ default_{{ HOSTNAME_MON02 }}: +108
+ default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+10, -10]
+
+
+ groups:
+ - name: default
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: !os_env CONNECTION_STRING, qemu:///system
+ storage_pool_name: !os_env STORAGE_POOL_NAME, default
+ stp: False
+ hpet: False
+ enable_acpi: true
+ use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+ use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+
+ network_pools:
+ admin: admin-pool01
+ private: private-pool01
+ tenant: tenant-pool01
+ external: external-pool01
+
+ l2_network_devices:
+ private:
+ address_pool: private-pool01
+ dhcp: true
+ forward:
+ mode: route
+
+ admin:
+ address_pool: admin-pool01
+ dhcp: true
+ forward:
+ mode: nat
+
+ tenant:
+ address_pool: tenant-pool01
+ dhcp: true
+
+ external:
+ address_pool: external-pool01
+ dhcp: true
+ forward:
+ mode: nat
+
+
+ group_volumes:
+ - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
+ source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ format: qcow2
+ - name: cfg01_day01_image # Pre-configured day01 image
+ source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+ format: qcow2
+ - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
+ source_image: !os_env MCP_IMAGE_PATH1604
+ format: qcow2
+
+ nodes:
+ - name: {{ HOSTNAME_CFG01 }}
+ role: salt_master
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cfg01_day01_image
+ format: qcow2
+        - name: iso  # Volume named 'iso' is used to store
+                     # the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_cfg01
+
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config:
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+
+ - name: {{ HOSTNAME_CTL01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 12288
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+        - name: iso  # Volume named 'iso' is used to store
+                     # the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
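+      # 'interfaces' and 'network_config' below are defined as YAML anchors and
+      # reused by ctl02/ctl03/mon*/prx01 via '*interfaces' and '*network_config'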
+ interfaces: &interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config: &network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+
+ - name: {{ HOSTNAME_CTL02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 12288
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+        - name: iso  # Volume named 'iso' is used to store
+                     # the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CTL03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 12288
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+        - name: iso  # Volume named 'iso' is used to store
+                     # the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+        - name: iso  # Volume named 'iso' is used to store
+                     # the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+        - name: iso  # Volume named 'iso' is used to store
+                     # the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+        - name: iso  # Volume named 'iso' is used to store
+                     # the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_PRX01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+        - name: iso  # Volume named 'iso' is used to store
+                     # the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+
+ - name: {{ HOSTNAME_CMP01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+        - name: iso  # Volume named 'iso' is used to store
+                     # the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+
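+      # Compute and gateway nodes attach to all four networks (admin, private, tenant, external);
+      # the anchors below are reused by cmp02 and gtw01 via '*all_interfaces' / '*all_network_config'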
+ interfaces: &all_interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ - label: ens5
+ l2_network_device: tenant
+ interface_model: *interface_model
+ - label: ens6
+ l2_network_device: external
+ interface_model: *interface_model
+ network_config: &all_network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+ ens5:
+ networks:
+ - tenant
+ ens6:
+ networks:
+ - external
+
+ - name: {{ HOSTNAME_CMP02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+        - name: iso  # Volume named 'iso' is used to store
+                     # the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_GTW01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+        - name: iso  # Volume named 'iso' is used to store
+                     # the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml b/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml
index 595695f..738463b 100644
--- a/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml
@@ -11,7 +11,7 @@
{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "docker"') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
diff --git a/tcp_tests/tests/system/test_pipeline_deploy.py b/tcp_tests/tests/system/test_pipeline_deploy.py
new file mode 100644
index 0000000..81e5e8b
--- /dev/null
+++ b/tcp_tests/tests/system/test_pipeline_deploy.py
@@ -0,0 +1,76 @@
+# Copyright 2016 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import pytest
+from tcp_tests.managers.jenkins.client import JenkinsClient
+
+from tcp_tests import logger
+
+LOG = logger.logger
+
+
+@pytest.mark.deploy
+class TestPipeline(object):
+ """Test class for testing deploy via Pipelines"""
+
+ @pytest.mark.fail_snapshot
+ def test_pipeline(self, show_step, underlay,
+ common_services_deployed, salt_deployed):
+ """Runner for Juniper contrail-tests
+
+ Scenario:
+ 1. Prepare salt on hosts.
+            2. Set up controller nodes.
+            3. Set up compute nodes.
+            4. Deploy OpenStack via pipelines.
+            5. Deploy CICD via pipelines.
+ """
+ nodes = underlay.node_names()
+ LOG.info("Nodes - {}".format(nodes))
+ cfg_node = 'cfg01.ocata-cicd.local'
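+        # get_pillar() returns a list of {minion: value} mappings; the URL is
+        # extracted from the first item for the cfg01 node below.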
+ salt_api = salt_deployed.get_pillar(
+ cfg_node, '_param:jenkins_salt_api_url')
+ salt_api = salt_api[0].get(cfg_node)
+ jenkins = JenkinsClient(
+ host='http://172.16.49.66:8081',
+ username='admin',
+ password='r00tme')
+
+        # Create the parameter list for the OpenStack deploy job
+ params = jenkins.make_defults_params('deploy_openstack')
+ params['SALT_MASTER_URL'] = salt_api
+ params['STACK_INSTALL'] = 'core,kvm,openstack,ovs'
+ show_step(4)
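+        # run_build() returns a (job_name, build_id) pair that is used to poll
+        # the build status until it finishes.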
+ build = jenkins.run_build('deploy_openstack', params)
+ jenkins.wait_end_of_build(
+ name=build[0],
+ build_id=build[1],
+ timeout=60 * 60 * 4)
+ result = jenkins.build_info(name=build[0],
+ build_id=build[1])['result']
+ assert result == 'SUCCESS', "Deploy openstack was failed"
+
+        # Change the stack list to deploy CICD
+ show_step(5)
+ params['STACK_INSTALL'] = 'cicd'
+ build = jenkins.run_build('deploy_openstack', params)
+ jenkins.wait_end_of_build(
+ name=build[0],
+ build_id=build[1],
+ timeout=60 * 60 * 2)
+ result = jenkins.build_info(name=build[0],
+ build_id=build[1])['result']
+ assert result == 'SUCCESS', "Deploy CICD was failed"
+
+ LOG.info("*************** DONE **************")