Added model for initial Ocata deploy with CI/CD
Changed template name to cookied-bm-ocata-cicd-pipeline

Change-Id: I9854b32869251582e2bb52dcae9d09053d734a82
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/common-services.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/common-services.yaml
new file mode 100644
index 0000000..c0c50a4
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/common-services.yaml
@@ -0,0 +1,49 @@
+{% from 'cookied-bm-ocata-cicd-pipeline/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+- description: Approve cfg01 ssh key for jenkins user
+  cmd: mkdir -p /var/lib/jenkins/.ssh && ssh-keyscan cfg01 > /var/lib/jenkins/.ssh/known_hosts && chown jenkins /var/lib/jenkins/.ssh/known_hosts
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 15}
+  skip_fail: false
+
+- description: Install jq to parse JSON output
+  cmd: apt install -y jq
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 15}
+  skip_fail: false
+
+# Install support services
+- description: Install keepalived on ctl01
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster and *01*' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: true
+
+- description: Install keepalived
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster' state.sls keepalived
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Install haproxy
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' state.sls haproxy
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check haproxy status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' service.status haproxy
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Restart rsyslog
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' service.restart rsyslog
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
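
The -C flag in the steps above selects minions with a Salt compound matcher (a pillar match such as I@keepalived:cluster combined with the glob *01*). A quick, non-destructive way to preview such a target before applying a state (a sketch; run on the Salt master, e.g. cfg01):

    # List the minions the compound target resolves to (harmless check)
    salt -C 'I@keepalived:cluster and *01*' test.ping

    # Dry-run the state on the same target; test=True reports pending
    # changes without applying them
    salt -C 'I@keepalived:cluster and *01*' state.sls keepalived test=True
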
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/lab04-physical-inventory-nfv.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/lab04-physical-inventory-nfv.yaml
new file mode 100644
index 0000000..1baa7be
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/lab04-physical-inventory-nfv.yaml
@@ -0,0 +1,143 @@
+nodes:
+    cfg01.ocata-cicd.local:
+      reclass_storage_name: infra_config_node01
+      roles:
+      - infra_config
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+    # Physical nodes
+
+    cid01.ocata-cicd.local:
+      reclass_storage_name: cicd_control_node01
+      roles:
+      - cicd_control_leader
+      - linux_system_codename_xenial
+      interfaces:
+        enp2s0f0:
+          role: single_mgm
+          deploy_address: 172.16.49.70
+        enp2s0f1:
+          role: single_vlan_ctl
+          single_address: 10.167.8.91
+
+    cid02.ocata-cicd.local:
+      reclass_storage_name: cicd_control_node02
+      roles:
+      - cicd_control_manager
+      - linux_system_codename_xenial
+      interfaces:
+        enp2s0f0:
+          role: single_mgm
+          deploy_address: 172.16.49.71
+        enp2s0f1:
+          role: single_vlan_ctl
+          single_address: 10.167.8.92
+
+    cid03.ocata-cicd.local:
+      reclass_storage_name: cicd_control_node03
+      roles:
+      - cicd_control_manager
+      - linux_system_codename_xenial
+      interfaces:
+        enp2s0f0:
+          role: single_mgm
+          deploy_address: 172.16.49.72
+        enp2s0f1:
+          role: single_vlan_ctl
+          single_address: 10.167.8.93
+
+    kvm01.ocata-cicd.local:
+      reclass_storage_name: infra_kvm_node01
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        enp9s0f0:
+          role: single_mgm
+        enp9s0f1:
+          role: bond0_ab_ovs_vlan_ctl
+
+    kvm02.ocata-cicd.local:
+      reclass_storage_name: infra_kvm_node02
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        enp9s0f0:
+          role: single_mgm
+        enp9s0f1:
+          role: bond0_ab_ovs_vlan_ctl
+
+    kvm03.ocata-cicd.local:
+      reclass_storage_name: infra_kvm_node03
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        enp9s0f0:
+          role: single_mgm
+        enp9s0f1:
+          role: bond0_ab_ovs_vlan_ctl
+
+    cmp001.ocata-cicd.local:
+      reclass_storage_name: openstack_compute_node01
+      roles:
+      - openstack_compute_dpdk
+      - features_lvm_backend
+      - linux_system_codename_xenial
+      - openstack_compute_sriov
+      interfaces:
+        enp2s0f0:
+          role: single_mgm
+          deploy_address: 172.16.49.73
+        enp2s0f1:
+          role: single_vlan_ctl
+          single_address: 10.167.8.101
+        enp5s0f0:
+          role: single_ovs_dpdk_prv
+          tenant_address: 192.168.0.101
+          dpdk_pci: "0000:05:00.0"
+
+    cmp002.ocata-cicd.local:
+      reclass_storage_name: openstack_compute_node02
+      roles:
+      - openstack_compute_dpdk
+      - features_lvm_backend
+      - linux_system_codename_xenial
+      - openstack_compute_sriov
+      interfaces:
+        enp2s0f0:
+          role: single_mgm
+          deploy_address: 172.16.49.74
+        enp2s0f1:
+          role: single_vlan_ctl
+          single_address: 10.167.8.102
+        enp5s0f0:
+          role: single_ovs_dpdk_prv
+          tenant_address: 192.168.0.102
+          dpdk_pci: "0000:05:00.0"
+
+    gtw01.ocata-cicd.local:
+      reclass_storage_name: openstack_gateway_node01
+      roles:
+      - openstack_gateway
+      - linux_system_codename_xenial
+      interfaces:
+        enp9s0f0:
+          role: single_mgm
+          deploy_address: 172.16.49.75
+        enp9s0f1:
+          role: bond0_ab_dvr_vxlan_ctl_mesh_floating
+
+#    gtw02.ocata-cicd.local:
+#      reclass_storage_name: openstack_gateway_node02
+#      roles:
+#      - openstack_gateway
+#      - linux_system_codename_xenial
+#      interfaces:
+#        enp10s0f0:
+#          role: single_mgm
+#        enp10s0f1:
+#          role: bond0_ab_dvr_vlan_ctl_prv_floating
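
Each inventory entry maps a physical NIC to a network role: single_mgm for the deploy network, single_vlan_ctl for the control VLAN, and single_ovs_dpdk_prv (plus dpdk_pci) for the DPDK-backed tenant network. A minimal syntax check before the model generator consumes the file (a sketch; assumes PyYAML is installed and the repo-relative path below):

    python -c "import sys, yaml; yaml.safe_load(open(sys.argv[1])); print('YAML OK')" \
        tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/lab04-physical-inventory-nfv.yaml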

diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/lab04-physical-inventory.yaml
new file mode 100644
index 0000000..d17abc9
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/lab04-physical-inventory.yaml
@@ -0,0 +1,135 @@
+nodes:
+    cfg01.ocata-cicd.local:
+      reclass_storage_name: infra_config_node01
+      roles:
+      - infra_config
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+    # Physical nodes
+
+    cid01.ocata-cicd.local:
+      reclass_storage_name: cicd_control_node01
+      roles:
+      - cicd_control_leader
+      - linux_system_codename_xenial
+      interfaces:
+        enp2s0f0:
+          role: single_mgm
+          deploy_address: 172.16.49.70
+        enp2s0f1:
+          role: single_vlan_ctl
+          single_address: 10.167.8.91
+
+    cid02.ocata-cicd.local:
+      reclass_storage_name: cicd_control_node02
+      roles:
+      - cicd_control_manager
+      - linux_system_codename_xenial
+      interfaces:
+        enp2s0f0:
+          role: single_mgm
+          deploy_address: 172.16.49.71
+        enp2s0f1:
+          role: single_vlan_ctl
+          single_address: 10.167.8.92
+
+    cid03.ocata-cicd.local:
+      reclass_storage_name: cicd_control_node03
+      roles:
+      - cicd_control_manager
+      - linux_system_codename_xenial
+      interfaces:
+        enp2s0f0:
+          role: single_mgm
+          deploy_address: 172.16.49.72
+        enp2s0f1:
+          role: single_vlan_ctl
+          single_address: 10.167.8.93
+
+    kvm01.ocata-cicd.local:
+      reclass_storage_name: infra_kvm_node01
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        enp9s0f0:
+          role: single_mgm
+        enp9s0f1:
+          role: bond0_ab_ovs_vlan_ctl
+
+    kvm02.ocata-cicd.local:
+      reclass_storage_name: infra_kvm_node02
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        enp9s0f0:
+          role: single_mgm
+        enp9s0f1:
+          role: bond0_ab_ovs_vlan_ctl
+
+    kvm03.ocata-cicd.local:
+      reclass_storage_name: infra_kvm_node03
+      roles:
+      - infra_kvm
+      - linux_system_codename_xenial
+      interfaces:
+        enp9s0f0:
+          role: single_mgm
+        enp9s0f1:
+          role: bond0_ab_ovs_vlan_ctl
+
+    cmp001.ocata-cicd.local:
+      reclass_storage_name: openstack_compute_node01
+      roles:
+      - openstack_compute
+      - features_lvm_backend
+      - linux_system_codename_xenial
+      interfaces:
+        enp2s0f0:
+          role: single_mgm
+          deploy_address: 172.16.49.73
+        enp2s0f1:
+          role: bond0_ab_ovs_vxlan_ctl_mesh
+          single_address: 10.167.8.101
+          tenant_address: 192.168.0.101
+
+    cmp002.ocata-cicd.local:
+      reclass_storage_name: openstack_compute_node02
+      roles:
+      - openstack_compute
+      - features_lvm_backend
+      - linux_system_codename_xenial
+      interfaces:
+        enp2s0f0:
+          role: single_mgm
+          deploy_address: 172.16.49.74
+        enp2s0f1:
+          role: bond0_ab_ovs_vxlan_ctl_mesh
+          single_address: 10.167.8.102
+          tenant_address: 192.168.0.102
+
+    gtw01.ocata-cicd.local:
+      reclass_storage_name: openstack_gateway_node01
+      roles:
+      - openstack_gateway
+      - linux_system_codename_xenial
+      interfaces:
+        enp9s0f0:
+          role: single_mgm
+          deploy_address: 172.16.49.75
+        enp9s0f1:
+          role: bond0_ab_dvr_vxlan_ctl_mesh_floating
+
+#    gtw02.ocata-cicd.local:
+#      reclass_storage_name: openstack_gateway_node02
+#      roles:
+#      - openstack_gateway
+#      - linux_system_codename_xenial
+#      interfaces:
+#        enp10s0f0:
+#          role: single_mgm
+#        enp10s0f1:
+#          role: bond0_ab_dvr_vlan_ctl_prv_floating
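
This file is the non-NFV variant of the inventory above: the compute nodes carry the plain openstack_compute role and a VXLAN mesh bond (bond0_ab_ovs_vxlan_ctl_mesh) instead of the DPDK/SR-IOV roles and the dedicated enp5s0f0 interface. The intended delta between the two inventories can be reviewed directly:

    diff -u \
        tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/lab04-physical-inventory.yaml \
        tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/lab04-physical-inventory-nfv.yaml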

diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd-nfv.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd-nfv.yaml
new file mode 100644
index 0000000..3a2ada7
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd-nfv.yaml
@@ -0,0 +1,170 @@
+default_context:
+  bmk_enabled: 'False'
+  ceph_enabled: 'False'
+  cicd_control_node01_address: 10.167.8.91
+  cicd_control_node01_hostname: cid01
+  cicd_control_node02_address: 10.167.8.92
+  cicd_control_node02_hostname: cid02
+  cicd_control_node03_address: 10.167.8.93
+  cicd_control_node03_hostname: cid03
+  cicd_control_vip_address: 10.167.8.90
+  cicd_control_vip_hostname: cid
+  cicd_enabled: 'True'
+  cicd_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEpAIBAAKCAQEAxhMRv9nCgnLGtWFLfq8r97sPjEIA3eNvVfYWV87S2TjtiUyT
+    Bknw/eXC0l2SGN3q7NXaA6sqtiVgk2dw0DfhGOKuWucr5C3pyRDMZOpj98gNzwNX
+    X6kUmqS8MjALcpw68qVnq0cuQ0RNY8gxbLGIGWZx29AdcHtczWqce3LXDllVLp/U
+    XLIcZuoB3Z2JvKWMo+QF7bKFJB8+O6tBXuRDJblVQFds/dG6cdLXGOTu3TvD/iu8
+    FcDOPDqTff7j2669DmTV6NU2lzuCd5WOIaRmX0bmyOripMtTRZvj83W3ZvLZO/7I
+    L9sXB1S/IpL09P1Fq5rMK3rDXBAS7CY/fBOBUwIDAQABAoIBAAyR4zt4l0ZuADw8
+    e20Dstea0GbHPYTXTRZ4cnyKDIlqenCPInlwsdF7Bj0RYRcg5iq3e4lmEGTUxGc2
+    VktwcGGC6SutpfRqEX8ICOCSm1t7H502ihHxCfSyZZsNv3w7e+YwJv8Qmlf8eqjN
+    aI2MSqXKAYnAkE22FnKWxG11IT6wwgIEB01z5DbnNv3FToIOGAnX1wB8cOP5IS5C
+    E89Q9X6YqB/k+L5Xms+WfkHiqDBlkGfH1M97CmjDrR1wDapqCxSVjt/qIaSBoyMi
+    NiXweo+P1ZZs9myQbOMX/mS8svCEVM/EiQe6M+K4wSNZNxIzZWCozMHYTAQRIcfF
+    SRIMMrECgYEA65ExKZuoi2MGTdqFH04sZuz6sH3hXI7er2ol6mmhXZALJWd+Sk9a
+    j/eq9HKBGhc41LeowIYrcwMiUlXhoZ17AiGALgG1UG5btomvwADd9DuzmJYaZ8VO
+    xiLG5Y1czIyh4JapPOdlP0oj2Sy+nlIsCCpmmYbZK+qvbkT0nnWJzskCgYEA10FY
+    rENlLZ6L34f4c9Ow+GpI/z3+QP6By3xwOQTZOLV+gBQeOHO45TfIXT2hsAXbfcAA
+    0pvXeKhQ89a7A24L/3UCex/gV4BivghIVai1Lh6/++LC4s3Ue68+CQCm4Q1zTDqk
+    GTVtEH7r2Bq9Wm08vPpkLwiHYOJhVGTGGvEpkTsCgYEAlv2WjHvoeYd8Z/ST9W/v
+    B/4H5Y6aH/C3jao1KV6Rft4wNsZybYEVpEf0fQDT/Xw7AiXCdbzKJssweaPwnt3J
+    FaGRfmu74xUJliQE0cX8fmqyADDeNfuDNX7fDA4jGD1gGQuY6J/NBtcnyTFj8Sjs
+    bkN3RhroIr0nuz9ZqCPgs/kCgYEAt1s5fltWRzrDaOA4uek72Q8oKQuUlaZ1x3Cz
+    Y06G/jBTliQM7gddGxueOBZ0sSz8H6y6xqvrKiMt+dcSrEREQhFY0KqBfeeltLv3
+    acfwtV2KKbSqT2oHMmg/DooYnKHJcciN2c9RnPiQSx/T5cAhOdSMHChGsTeEss+4
+    lGCTCNsCgYAm8R8A3XZ2TPl0iMM9LxUJGXBFs5r/GkeM1XMXQmnNDzoDbaLhkj/F
+    7UayG1rOOYl64oYmGZ4UPYTzyDDcb4m/ZiqQsoAmNjXoKykvMhhBnGUb4CZj0J/b
+    QppRJ86CYR17df+W+4TLhXcn41afuO+8TCBLUKpb1jeJ5hifWctK5Q==
+    -----END RSA PRIVATE KEY-----
+  cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDGExG/2cKCcsa1YUt+ryv3uw+MQgDd429V9hZXztLZOO2JTJMGSfD95cLSXZIY3ers1doDqyq2JWCTZ3DQN+EY4q5a5yvkLenJEMxk6mP3yA3PA1dfqRSapLwyMAtynDrypWerRy5DRE1jyDFssYgZZnHb0B1we1zNapx7ctcOWVUun9Rcshxm6gHdnYm8pYyj5AXtsoUkHz47q0Fe5EMluVVAV2z90bpx0tcY5O7dO8P+K7wVwM48OpN9/uPbrr0OZNXo1TaXO4J3lY4hpGZfRubI6uKky1NFm+Pzdbdm8tk7/sgv2xcHVL8ikvT0/UWrmswresNcEBLsJj98E4FT
+  cluster_domain: ocata-cicd.local
+  cluster_name: ocata-cicd
+  compute_bond_mode: active-backup
+  compute_primary_first_nic: eth1
+  compute_primary_second_nic: eth2
+  context_seed: azAQwe19yxcUFK03MqWVyrJtQ6ZblWAFaDDiITqT9ed7jJnj6EdHQpgyizt1Gxdg
+  control_network_netmask: 255.255.255.0
+  control_network_subnet: 10.167.8.0/24
+  control_vlan: '2422'
+  cookiecutter_template_branch: master
+  cookiecutter_template_credentials: gerrit
+  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+  deploy_network_gateway: 172.16.49.126
+  deploy_network_netmask: 255.255.255.192
+  deploy_network_subnet: 172.16.49.64/26
+  deployment_type: physical
+  dns_server01: 8.8.8.8
+  dns_server02: 8.8.4.4
+  email_address: sgudz@mirantis.com
+  gateway_primary_first_nic: eth1
+  gateway_primary_second_nic: eth2
+  infra_bond_mode: active-backup
+  infra_deploy_nic: eth0
+  infra_kvm01_control_address: 10.167.8.241
+  infra_kvm01_deploy_address: 172.16.49.67
+  infra_kvm01_hostname: kvm01
+  infra_kvm02_control_address: 10.167.8.242
+  infra_kvm02_deploy_address: 172.16.49.68
+  infra_kvm02_hostname: kvm02
+  infra_kvm03_control_address: 10.167.8.243
+  infra_kvm03_deploy_address: 172.16.49.69
+  infra_kvm03_hostname: kvm03
+  infra_kvm_vip_address: 10.167.8.240
+  infra_primary_first_nic: eth1
+  infra_primary_second_nic: eth2
+  kubernetes_enabled: 'False'
+  local_repositories: 'False'
+  maas_deploy_address: 172.16.49.66
+  maas_hostname: cfg01
+  mcp_version: stable
+  offline_deployment: 'False'
+  opencontrail_enabled: 'False'
+  openldap_domain: ocata-cicd.local
+  openldap_enabled: 'True'
+  openldap_organisation: ${_param:cluster_name}
+  openstack_benchmark_node01_address: 10.167.8.95
+  openstack_benchmark_node01_hostname: bmk01
+  openstack_cluster_size: compact
+  openstack_compute_count: '2'
+  openstack_compute_rack01_hostname: cmp
+  openstack_compute_rack01_single_subnet: 10.167.8
+  openstack_compute_rack01_tenant_subnet: 192.168.0
+  openstack_control_address: 10.167.8.10
+  openstack_control_hostname: ctl
+  openstack_control_node01_address: 10.167.8.11
+  openstack_control_node01_hostname: ctl01
+  openstack_control_node02_address: 10.167.8.12
+  openstack_control_node02_hostname: ctl02
+  openstack_control_node03_address: 10.167.8.13
+  openstack_control_node03_hostname: ctl03
+  openstack_database_address: 10.167.8.50
+  openstack_database_hostname: dbs
+  openstack_database_node01_address: 10.167.8.51
+  openstack_database_node01_hostname: dbs01
+  openstack_database_node02_address: 10.167.8.52
+  openstack_database_node02_hostname: dbs02
+  openstack_database_node03_address: 10.167.8.53
+  openstack_database_node03_hostname: dbs03
+  openstack_enabled: 'True'
+  openstack_gateway_node01_address: 10.167.8.224
+  openstack_gateway_node01_hostname: gtw01
+  openstack_gateway_node01_tenant_address: 192.168.0.6
+  openstack_gateway_node02_address: 10.167.8.225
+  openstack_gateway_node02_hostname: gtw02
+  openstack_gateway_node02_tenant_address: 192.168.0.7
+  openstack_gateway_node03_address: 10.167.8.226
+  openstack_gateway_node03_hostname: gtw03
+  openstack_gateway_node03_tenant_address: 192.168.0.8
+  openstack_message_queue_address: 10.167.8.40
+  openstack_message_queue_hostname: msg
+  openstack_message_queue_node01_address: 10.167.8.41
+  openstack_message_queue_node01_hostname: msg01
+  openstack_message_queue_node02_address: 10.167.8.42
+  openstack_message_queue_node02_hostname: msg02
+  openstack_message_queue_node03_address: 10.167.8.43
+  openstack_message_queue_node03_hostname: msg03
+  openstack_network_engine: ovs
+  openstack_neutron_qos: 'False'
+  openstack_neutron_vlan_aware_vms: 'False'
+  openstack_nfv_dpdk_enabled: 'True'
+  openstack_nfv_sriov_enabled: 'True'
+  openstack_nfv_sriov_network: physnet1
+  openstack_nfv_sriov_numvfs: '7'
+  openstack_nfv_sriov_pf_nic: enp5s0f1
+  openstack_nova_compute_hugepages_count: '16'
+  openstack_nova_compute_nfv_req_enabled: 'True'
+  openstack_nova_cpu_pinning: 1,2,7,8
+  openstack_ovs_dvr_enabled: 'False'
+  openstack_ovs_encapsulation_type: vxlan
+  openstack_proxy_address: 10.167.8.80
+  openstack_proxy_hostname: prx
+  openstack_proxy_node01_address: 10.167.8.81
+  openstack_proxy_node01_hostname: prx01
+  openstack_proxy_node02_address: 10.167.8.82
+  openstack_proxy_node02_hostname: prx02
+  openstack_upgrade_node01_address: 10.167.8.19
+  openstack_version: ocata
+  oss_enabled: 'False'
+  oss_node03_address: ${_param:stacklight_monitor_node03_address}
+  platform: openstack_enabled
+  public_host: ${_param:openstack_proxy_address}
+  publication_method: email
+  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  salt_api_password: KEfAXxIWJykUBv0v8uKWdI2v4cBG5A07
+  salt_api_password_hash: $6$XBCrfheG$2q48l7h1giiqF2sdp7CFtLQQi8pcMa6K5A8cPYQmhuGqJtzv08YXVqYyhkHARzl1VBLVf.aTMY6d0M5naM5WU0
+  salt_master_address: 172.16.49.66
+  salt_master_hostname: cfg01
+  salt_master_management_address: 172.16.49.66
+  shared_reclass_refspec: refs/changes/44/16144/1
+  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+  stacklight_enabled: 'False'
+  stacklight_version: '2'
+  static_ips_on_deploy_network_enabled: 'False'
+  tenant_network_gateway: 192.168.0.1
+  tenant_network_netmask: 255.255.255.0
+  tenant_network_subnet: 192.168.0.0/24
+  tenant_vlan: '2423'
+  upstream_proxy_enabled: 'False'
+  use_default_network_scheme: 'True'
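
cicd_private_key and cicd_public_key above are an ordinary PEM-encoded RSA keypair. If the key ever needs to be rotated, a compatible pair can be generated with OpenSSH (a sketch; the output file name is illustrative):

    # -m PEM keeps the traditional PEM encoding used above (OpenSSH 7.8+;
    # older releases emit PEM by default); -N '' means no passphrase
    ssh-keygen -t rsa -b 2048 -m PEM -N '' -f ./cicd_deploy_key
    cat ./cicd_deploy_key       # value for cicd_private_key
    cat ./cicd_deploy_key.pub   # value for cicd_public_key
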
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd.yaml
new file mode 100644
index 0000000..225c6f0
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-cookiecutter-initial-cicd.yaml
@@ -0,0 +1,245 @@
+default_context:
+  bmk_enabled: 'True'
+  ceph_enabled: 'False'
+  cicd_control_node01_address: 10.167.8.91
+  cicd_control_node01_hostname: cid01
+  cicd_control_node02_address: 10.167.8.92
+  cicd_control_node02_hostname: cid02
+  cicd_control_node03_address: 10.167.8.93
+  cicd_control_node03_hostname: cid03
+  cicd_control_vip_address: 10.167.8.90
+  cicd_control_vip_hostname: cid
+  cicd_enabled: 'True'
+  cicd_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEpAIBAAKCAQEA0i/Kw7U6RvH3ELsRZwtfX5ZSaV2tkRiWqCgpSK7fvZn/YYuy
+    f+DiMuviyb1a2rmgQgckJjGlQeEdtETyh+lAYUMCxcJUqiPMNtbs4GDqqpc91Nv+
+    /Qa5JXGuInQC4L5MAx2BeJ4Swc1jnDMLXZ21zmcZwK3Uo5ENyhQqlwe2QZ+SzH76
+    U7DQgSCU3xlI/ieBDiC4w4HLj3z3X36J6bb3x33DVh02UCaWCwdU0v+PGW+v4RBt
+    RYMAuCnErOXGQysgwcpyqSpBvZ6QRdktIeAyK5X8RVLx7Tta5BPVB8jbseF/0N2C
+    wxmovDbnUzJMF8lRUuuqXzKPfXeHjDbWWOpzeQIDAQABAoIBAQCdnXsmwWr/Mol8
+    QVuucwfFDVulabszWEX5uYwj1gcwDiBFDNYBMSlO2DbL47QS2ypC+UnxXj83pNF6
+    kk1w/8foZ7DXjX+hypBj+03MgMDYxet3CLYxFe6XFqVnbqmN0QX9PA4P3jHgpN1y
+    j5CRVie0rPnR1Ejlk2vMwsyhTwYsNFmCqW0IC5qWHPyMGiOnFTcCYUIA8IZGkbvS
+    dYdVavAxwLVgmBoWVegg7eOL6hB3W1BZHxra/TEiHy0gsTSMr/l8YRqsDX8A91D1
+    mOMYqaEfqtJMuAKQwJDsXFiqXjsctQyUcx8TcPgsDTB/B8OPt9g8JdcmUUnWMaa9
+    mj6i9VdVAoGBAPtdkkgKGPHxq4bxWKrwzaI1gUPlPikxA+rVeplNvF0pzt4yUHD6
+    l56ash7Yl6E3n1s2mLh4IMYw8a5QZffHf51cQBtHkg72u4dXVUWLUGIzwsIkx6Zu
+    kmIfWCkC3uMbcE7o1pQSGgc3nUUeD4K6KBqUWxinEzCIYu3a909MANL/AoGBANYP
+    2/troZi9SfYkxF/ZdCeufDsSj4DZICzG7wbKMUJ1jDpegPBT4D8d1Nip7tj10q/P
+    4HdRrv0p5ZsFgtua9Una+ICpjpkEqjGgHUBBD7o/+Jjd8cmE57DSNSMUTVY8mwi3
+    OuKNI5itlU/8gwrAtEjV5qcgsUQSH6zZm69sx9GHAoGAb78ee/Y5Nr4YYipDH+Nj
+    l3wv1k5AfzFyK2DyWtrrJYOjmrZFeqR86R6elwX1Cs/egT9ZT5DgCRvTJYpKeofv
+    HLbZLQd5UDuyDK3vk7YmazHVoFeXhk7bttF2cOz8x3v4RqxOUI9nkNPRj9uYS6aU
+    k5RmlyQXbNkFGfbhQRhpuWMCgYBqPFKzk3YOhJrJQvQGkbgY3XqgIpT/oEJclpoX
+    547M6eOfMDmTjDz1dgulP6jfGhjm0icXcH2E/R8LcY/BB0WG0tqTmBLCFFOW71y0
+    /9UbXRY2X6fYmFIYKire7vt2uftDNxRNTTiGVFeO68XpNzwCDc470XjITKpVsWWX
+    iUgdzwKBgQCs+hCR03K+6npMnANOBEi4I91pJQlbcMBK5fvX1w4VAwBIEu4Z58JM
+    kBbpgmANffYjTWxMIEGvdq9yZWwKCw7pv8pu/dU63MS5ST10K+7/QaLejabCskzP
+    Hjy15CfE5Iy/6aBIEz30uW/B/THd0icbGKSaFjor+sW7S2GqJvE9ow==
+    -----END RSA PRIVATE KEY-----
+  cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSL8rDtTpG8fcQuxFnC19fllJpXa2RGJaoKClIrt+9mf9hi7J/4OIy6+LJvVrauaBCByQmMaVB4R20RPKH6UBhQwLFwlSqI8w21uzgYOqqlz3U2/79Brklca4idALgvkwDHYF4nhLBzWOcMwtdnbXOZxnArdSjkQ3KFCqXB7ZBn5LMfvpTsNCBIJTfGUj+J4EOILjDgcuPfPdffonptvfHfcNWHTZQJpYLB1TS/48Zb6/hEG1FgwC4KcSs5cZDKyDBynKpKkG9npBF2S0h4DIrlfxFUvHtO1rkE9UHyNux4X/Q3YLDGai8NudTMkwXyVFS66pfMo99d4eMNtZY6nN5
+  cluster_domain: ocata-cicd.local
+  cluster_name: ocata-cicd
+  compute_bond_mode: active-backup
+  compute_primary_first_nic: eth1
+  compute_primary_second_nic: eth2
+  context_seed: TyDcKDMkhMxUlgpgsB0sXCfWQGyBdEJDIBpcRnfTlwS89gRFQZBQbyxYB9gy0kcQ
+  control_network_netmask: 255.255.255.0
+  control_network_subnet: 10.167.8.0/24
+  control_vlan: '2422'
+  cookiecutter_template_branch: master
+  cookiecutter_template_credentials: gerrit
+  cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+  deploy_network_gateway: 172.16.49.126
+  deploy_network_netmask: 255.255.255.192
+  deploy_network_subnet: 172.16.49.64/26
+  deployment_type: physical
+  dns_server01: 8.8.8.8
+  dns_server02: 8.8.4.4
+  email_address: sgudz@mirantis.com
+  gateway_primary_first_nic: eth1
+  gateway_primary_second_nic: eth2
+  infra_bond_mode: active-backup
+  infra_deploy_nic: eth0
+  infra_kvm01_control_address: 10.167.8.241
+  infra_kvm01_deploy_address: 172.16.49.67
+  infra_kvm01_hostname: kvm01
+  infra_kvm02_control_address: 10.167.8.242
+  infra_kvm02_deploy_address: 172.16.49.68
+  infra_kvm02_hostname: kvm02
+  infra_kvm03_control_address: 10.167.8.243
+  infra_kvm03_deploy_address: 172.16.49.69
+  infra_kvm03_hostname: kvm03
+  infra_kvm_vip_address: 10.167.8.240
+  infra_primary_first_nic: eth1
+  infra_primary_second_nic: eth2
+  kubernetes_enabled: 'False'
+  local_repositories: 'False'
+  maas_deploy_address: 172.16.49.66
+  maas_hostname: cfg01
+  mcp_version: stable
+  offline_deployment: 'False'
+  opencontrail_enabled: 'False'
+  openldap_domain: ocata-cicd.local
+  openldap_enabled: 'True'
+  openldap_organisation: ${_param:cluster_name}
+  openstack_benchmark_node01_address: 10.167.8.95
+  openstack_benchmark_node01_hostname: bmk01
+  openstack_compute_count: '2'
+  openstack_compute_rack01_hostname: cmp
+  openstack_compute_rack01_single_subnet: 10.167.8
+  openstack_compute_rack01_tenant_subnet: 192.168.0
+  openstack_control_address: 10.167.8.10
+  openstack_control_hostname: ctl
+  openstack_control_node01_address: 10.167.8.11
+  openstack_control_node01_hostname: ctl01
+  openstack_control_node02_address: 10.167.8.12
+  openstack_control_node02_hostname: ctl02
+  openstack_control_node03_address: 10.167.8.13
+  openstack_control_node03_hostname: ctl03
+  openstack_database_address: 10.167.8.50
+  openstack_database_hostname: dbs
+  openstack_database_node01_address: 10.167.8.51
+  openstack_database_node01_hostname: dbs01
+  openstack_database_node02_address: 10.167.8.52
+  openstack_database_node02_hostname: dbs02
+  openstack_database_node03_address: 10.167.8.53
+  openstack_database_node03_hostname: dbs03
+  openstack_enabled: 'True'
+  openstack_gateway_node01_address: 10.167.8.224
+  openstack_gateway_node01_hostname: gtw01
+  openstack_gateway_node01_tenant_address: 192.168.0.6
+  openstack_gateway_node02_address: 10.167.8.225
+  openstack_gateway_node02_hostname: gtw02
+  openstack_gateway_node02_tenant_address: 192.168.0.7
+  openstack_gateway_node03_address: 10.167.8.226
+  openstack_gateway_node03_hostname: gtw03
+  openstack_gateway_node03_tenant_address: 192.168.0.8
+  openstack_message_queue_address: 10.167.8.40
+  openstack_message_queue_hostname: msg
+  openstack_message_queue_node01_address: 10.167.8.41
+  openstack_message_queue_node01_hostname: msg01
+  openstack_message_queue_node02_address: 10.167.8.42
+  openstack_message_queue_node02_hostname: msg02
+  openstack_message_queue_node03_address: 10.167.8.43
+  openstack_message_queue_node03_hostname: msg03
+  openstack_network_engine: ovs
+  openstack_neutron_qos: 'False'
+  openstack_neutron_vlan_aware_vms: 'False'
+  openstack_nfv_dpdk_enabled: 'False'
+  openstack_nfv_sriov_enabled: 'False'
+  openstack_nova_compute_nfv_req_enabled: 'False'
+  openstack_ovs_dvr_enabled: 'True'
+  openstack_ovs_encapsulation_type: vxlan
+  openstack_proxy_address: 10.167.8.80
+  openstack_proxy_hostname: prx
+  openstack_proxy_node01_address: 10.167.8.81
+  openstack_proxy_node01_hostname: prx01
+  openstack_proxy_node02_address: 10.167.8.82
+  openstack_proxy_node02_hostname: prx02
+  openstack_upgrade_node01_address: 10.167.8.19
+  openstack_version: ocata
+  oss_address: ${_param:stacklight_monitor_address}
+  oss_cis_cert: ${_param:oss_openstack_cert}
+  oss_cis_domain_id: ${_param:oss_openstack_domain_id}
+  oss_cis_enabled: 'True'
+  oss_cis_jobs_repository: https://github.com/Mirantis/rundeck-cis-jobs.git
+  oss_cis_jobs_repository_branch: master
+  oss_cis_password: ${_param:oss_openstack_password}
+  oss_cis_project: ${_param:oss_openstack_project}
+  oss_cis_username: ${_param:oss_openstack_username}
+  oss_cleanup_service_enabled: 'True'
+  oss_cleanup_service_os_credentials_path: ${_param:oss_openstack_credentials_path}
+  oss_cleanup_service_password: ${_param:oss_openstack_password}
+  oss_cleanup_service_project: ${_param:oss_openstack_project}
+  oss_cleanup_service_project_domain_id: ${_param:oss_openstack_username_domain_id}
+  oss_cleanup_service_username: ${_param:oss_openstack_username}
+  oss_enabled: 'True'
+  oss_node01_address: ${_param:stacklight_monitor_node01_address}
+  oss_node02_address: ${_param:stacklight_monitor_node02_address}
+  oss_node03_address: ${_param:stacklight_monitor_node03_address}
+  oss_notification_email_from: sgudz@mirantis.com
+  oss_notification_email_recipients: sgudz@mirantis.com
+  oss_openstack_auth_url: http://172.17.16.190:5000/v3
+  oss_openstack_cert: '      -----BEGIN CERTIFICATE----- MIIE0DCCA7igAwIBAgIBBzANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx
+    EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT AAGjggEaMIIBFjAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNV
+    91cxG7685C/b+LrTW+C05+Z5Yg4MotdqY3MxtfWoSKQ7CC2iXZDXtHwlTxFWMMS2 -----END CERTIFICATE-----
+    -----BEGIN CERTIFICATE----- dGhvcml0eSAtIEcyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv3Fi
+    9kIDN0zmiN/VryTyscPfzLXs4Jlet0lUIDyUGAzHHFIYSaRt4bNYC8...'
+  oss_openstack_credentials_path: /srv/volumes/rundeck/storage
+  oss_openstack_domain_id: default
+  oss_openstack_endpoint_type: public
+  oss_openstack_password: nova
+  oss_openstack_project: admin
+  oss_openstack_ssl_verify: 'False'
+  oss_openstack_username: admin
+  oss_runbook_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEpAIBAAKCAQEA0i/Kw7U6RvH3ELsRZwtfX5ZSaV2tkRiWqCgpSK7fvZn/YYuy
+    f+DiMuviyb1a2rmgQgckJjGlQeEdtETyh+lAYUMCxcJUqiPMNtbs4GDqqpc91Nv+
+    /Qa5JXGuInQC4L5MAx2BeJ4Swc1jnDMLXZ21zmcZwK3Uo5ENyhQqlwe2QZ+SzH76
+    U7DQgSCU3xlI/ieBDiC4w4HLj3z3X36J6bb3x33DVh02UCaWCwdU0v+PGW+v4RBt
+    RYMAuCnErOXGQysgwcpyqSpBvZ6QRdktIeAyK5X8RVLx7Tta5BPVB8jbseF/0N2C
+    wxmovDbnUzJMF8lRUuuqXzKPfXeHjDbWWOpzeQIDAQABAoIBAQCdnXsmwWr/Mol8
+    QVuucwfFDVulabszWEX5uYwj1gcwDiBFDNYBMSlO2DbL47QS2ypC+UnxXj83pNF6
+    kk1w/8foZ7DXjX+hypBj+03MgMDYxet3CLYxFe6XFqVnbqmN0QX9PA4P3jHgpN1y
+    j5CRVie0rPnR1Ejlk2vMwsyhTwYsNFmCqW0IC5qWHPyMGiOnFTcCYUIA8IZGkbvS
+    dYdVavAxwLVgmBoWVegg7eOL6hB3W1BZHxra/TEiHy0gsTSMr/l8YRqsDX8A91D1
+    mOMYqaEfqtJMuAKQwJDsXFiqXjsctQyUcx8TcPgsDTB/B8OPt9g8JdcmUUnWMaa9
+    mj6i9VdVAoGBAPtdkkgKGPHxq4bxWKrwzaI1gUPlPikxA+rVeplNvF0pzt4yUHD6
+    l56ash7Yl6E3n1s2mLh4IMYw8a5QZffHf51cQBtHkg72u4dXVUWLUGIzwsIkx6Zu
+    kmIfWCkC3uMbcE7o1pQSGgc3nUUeD4K6KBqUWxinEzCIYu3a909MANL/AoGBANYP
+    2/troZi9SfYkxF/ZdCeufDsSj4DZICzG7wbKMUJ1jDpegPBT4D8d1Nip7tj10q/P
+    4HdRrv0p5ZsFgtua9Una+ICpjpkEqjGgHUBBD7o/+Jjd8cmE57DSNSMUTVY8mwi3
+    OuKNI5itlU/8gwrAtEjV5qcgsUQSH6zZm69sx9GHAoGAb78ee/Y5Nr4YYipDH+Nj
+    l3wv1k5AfzFyK2DyWtrrJYOjmrZFeqR86R6elwX1Cs/egT9ZT5DgCRvTJYpKeofv
+    HLbZLQd5UDuyDK3vk7YmazHVoFeXhk7bttF2cOz8x3v4RqxOUI9nkNPRj9uYS6aU
+    k5RmlyQXbNkFGfbhQRhpuWMCgYBqPFKzk3YOhJrJQvQGkbgY3XqgIpT/oEJclpoX
+    547M6eOfMDmTjDz1dgulP6jfGhjm0icXcH2E/R8LcY/BB0WG0tqTmBLCFFOW71y0
+    /9UbXRY2X6fYmFIYKire7vt2uftDNxRNTTiGVFeO68XpNzwCDc470XjITKpVsWWX
+    iUgdzwKBgQCs+hCR03K+6npMnANOBEi4I91pJQlbcMBK5fvX1w4VAwBIEu4Z58JM
+    kBbpgmANffYjTWxMIEGvdq9yZWwKCw7pv8pu/dU63MS5ST10K+7/QaLejabCskzP
+    Hjy15CfE5Iy/6aBIEz30uW/B/THd0icbGKSaFjor+sW7S2GqJvE9ow==
+    -----END RSA PRIVATE KEY-----
+  oss_runbook_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSL8rDtTpG8fcQuxFnC19fllJpXa2RGJaoKClIrt+9mf9hi7J/4OIy6+LJvVrauaBCByQmMaVB4R20RPKH6UBhQwLFwlSqI8w21uzgYOqqlz3U2/79Brklca4idALgvkwDHYF4nhLBzWOcMwtdnbXOZxnArdSjkQ3KFCqXB7ZBn5LMfvpTsNCBIJTfGUj+J4EOILjDgcuPfPdffonptvfHfcNWHTZQJpYLB1TS/48Zb6/hEG1FgwC4KcSs5cZDKyDBynKpKkG9npBF2S0h4DIrlfxFUvHtO1rkE9UHyNux4X/Q3YLDGai8NudTMkwXyVFS66pfMo99d4eMNtZY6nN5
+  oss_rundeck_external_datasource_enabled: 'False'
+  oss_security_audit_enabled: 'True'
+  oss_security_audit_os_credentials_path: ${_param:oss_openstack_credentials_path}
+  oss_security_audit_password: ${_param:oss_openstack_password}
+  oss_security_audit_project: ${_param:oss_openstack_project}
+  oss_security_audit_project_domain_id: ${_param:oss_openstack_domain_id}
+  oss_security_audit_user_domain_id: ${_param:oss_openstack_domain_id}
+  oss_security_audit_username: ${_param:oss_openstack_username}
+  oss_sfdc_support_enabled: 'True'
+  platform: openstack_enabled
+  public_host: ${_param:openstack_proxy_address}
+  publication_method: email
+  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  rundeck_forward_iframe: 'False'
+  rundeck_iframe_host: ${_param:openstack_proxy_address}
+  rundeck_iframe_port: ${_param:haproxy_rundeck_exposed_port}
+  rundeck_iframe_ssl: 'False'
+  salt_api_password: qFc7FCrLBnWkeLxRWWQeezH5dmuI5vsI
+  salt_api_password_hash: $6$hbWiURAY$Sfzk6dzos6j1B8gFDK6WoGNDk0I2Bd2IOarWDGOflgY2sBpUJ4KTq1Uw241ri933/ROHTSuhNcodmDe13i5gS.
+  salt_master_address: 172.16.49.66
+  salt_master_hostname: cfg01
+  salt_master_management_address: 172.16.49.66
+  sfdc_auth_url: asas
+  sfdc_consumer_key: admin
+  sfdc_consumer_secret: admin
+  sfdc_environment_id: admin
+  sfdc_organization_id: admin
+  sfdc_password: admin
+  sfdc_sandbox_enabled: 'False'
+  sfdc_username: admin
+  shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+  stacklight_enabled: 'True'
+  stacklight_version: '2'
+  static_ips_on_deploy_network_enabled: 'False'
+  tenant_network_gateway: ''
+  tenant_network_netmask: 255.255.255.0
+  tenant_network_subnet: 192.168.0.0/24
+  tenant_vlan: '2423'
+  upstream_proxy_enabled: 'False'
+  use_default_network_scheme: 'True'
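
salt_api_password_hash is a crypt(3) SHA-512 hash of salt_api_password, as its $6$ prefix indicates. If the password is changed, either of the following produces a compatible hash (a sketch; mkpasswd ships in the whois package, and openssl needs 1.1.1+ for -6):

    mkpasswd -m sha-512 'qFc7FCrLBnWkeLxRWWQeezH5dmuI5vsI'
    openssl passwd -6 'qFc7FCrLBnWkeLxRWWQeezH5dmuI5vsI'
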
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-environment.yaml
new file mode 100644
index 0000000..9e8ef5d
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-environment.yaml
@@ -0,0 +1,101 @@
+nodes:
+    # Virtual Control Plane nodes
+
+    ctl01.ocata-cicd.local:
+      reclass_storage_name: openstack_control_node01
+      roles:
+      - openstack_control_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    ctl02.ocata-cicd.local:
+      reclass_storage_name: openstack_control_node02
+      roles:
+      - openstack_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    ctl03.ocata-cicd.local:
+      reclass_storage_name: openstack_control_node03
+      roles:
+      - openstack_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    dbs01.ocata-cicd.local:
+      reclass_storage_name: openstack_database_node01
+      roles:
+      - openstack_database_leader
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    dbs02.ocata-cicd.local:
+      reclass_storage_name: openstack_database_node02
+      roles:
+      - openstack_database
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    dbs03.ocata-cicd.local:
+      reclass_storage_name: openstack_database_node03
+      roles:
+      - openstack_database
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    msg01.ocata-cicd.local:
+      reclass_storage_name: openstack_message_queue_node01
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    msg02.ocata-cicd.local:
+      reclass_storage_name: openstack_message_queue_node02
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    msg03.ocata-cicd.local:
+      reclass_storage_name: openstack_message_queue_node03
+      roles:
+      - openstack_message_queue
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    prx01.ocata-cicd.local:
+      reclass_storage_name: openstack_proxy_node01
+      roles:
+      - openstack_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
+
+    prx02.ocata-cicd.local:
+      reclass_storage_name: openstack_proxy_node02
+      roles:
+      - openstack_proxy
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_ctl
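
This environment context is merged with one of the physical inventories above (see ENVIRONMENT_CONTEXT_NAMES in salt.yaml below), so every node should appear in exactly one of the two files. A quick merged view of all node names (a sketch; assumes PyYAML and both files in the current directory):

    python - <<'EOF'
    import yaml
    nodes = {}
    for f in ('salt-context-environment.yaml', 'lab04-physical-inventory.yaml'):
        nodes.update(yaml.safe_load(open(f))['nodes'])
    print('\n'.join(sorted(nodes)))
    EOF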

diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt.yaml
new file mode 100644
index 0000000..1856947
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt.yaml
@@ -0,0 +1,127 @@
+{% from 'cookied-bm-ocata-cicd-pipeline/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-ocata-cicd-pipeline/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set NFV_ENABLED = os_env('NFV_ENABLED','false') %}
+
+{%- if NFV_ENABLED == 'true' %}
+{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-initial-cicd-nfv.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory-nfv.yaml'] %}
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-ocata-cicd-nfv') %}
+{%- else %}
+{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-initial-cicd.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml'] %}
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-ocata-cicd') %}
+{%- endif %}
+
+{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
+{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
+
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja" "jenkins"') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+- description: "Workaround for rack01 compute generator"
+  cmd: |
+    set -e;
+    # Remove rack01 key
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    # Add openstack_compute_node definition from system
+    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml --merge;
+
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Remove cicd nodes from the VCP, because baremetal cicd nodes are used instead
+  cmd: |
+    sed -i 's/\-\ system\.salt\.control\.cluster\.cicd\_control\_cluster//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+
+- description: "Workaround for PROD-16973"
+  cmd: |
+    set -e;
+    # Remove obsolete logging section key
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools del-key parameters.nova.controller.logging /srv/salt/reclass/classes/system/nova/control/cluster.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+  cmd: |
+    sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+    sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
+- description: Workaround for mounting 1G hugepages before linux.state
+  cmd: |
+    salt 'cmp*' state.sls linux.system.hugepages;
+    salt 'cmp*' cmd.run "mount -o mode=775,pagesize=1G -t hugetlbfs Hugetlbfs-kvm /mnt/hugepages_1G";
+    salt 'cmp*' cmd.run "echo 16 | sudo  tee  /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages";
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: Workaround for correct access to git repos from jenkins on the cfg01 node
+  cmd: |
+    git clone --mirror https://github.com/Mirantis/mk-pipelines.git /home/repo/mk/mk-pipelines/;
+    git clone --mirror https://github.com/Mirantis/pipeline-library.git /home/repo/mcp-ci/pipeline-library/;
+    chown -R git:www-data /home/repo/mk/mk-pipelines/*;
+    chown -R git:www-data /home/repo/mcp-ci/pipeline-library/*;
+
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+#########################################
+# Configure all running salt minion nodes
+#########################################
+
+- description: Hack resolv.conf on VCP nodes for internal services access
+  cmd: |
+    salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not kvm* and not cmp* and not gtw* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Refresh pillars on all minions
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Sync all salt resources
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Show reclass-salt --top for generated nodes
+  cmd: reclass-salt --top -u /srv/salt/reclass/nodes/_generated/
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
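
After the workarounds above, it is easy to confirm on cfg01 that the virtual CI/CD cluster is really gone from the model and that the generated nodes still render (a sketch reusing commands already present in this template):

    # Prints any leftover references; falls through to "removed" when clean
    grep -r 'cicd_control_cluster' /srv/salt/reclass/classes/cluster/ || echo "removed"
    # Re-render the generated node definitions
    reclass-salt --top -u /srv/salt/reclass/nodes/_generated/
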
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--meta-data.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--meta-data.yaml
new file mode 100644
index 0000000..a594a53
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+  instance-id: iid-local1
+  hostname: {hostname}
+  local-hostname: {hostname}

diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..c6314ad
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data-cfg01.yaml
@@ -0,0 +1,103 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Block access to SSH while node is preparing
+   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   - sudo ifdown ens3
+   - sudo ip r d default || true  # remove existing default route to get it from dhcp
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
+
+   ############## TCP Cloud cfg01 node ##################
+   #- sleep 120
+   - echo "Preparing base OS"
+
+   - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+   - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
+   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
+
+   # Configure Ubuntu mirrors
+   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+
+   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+   - apt-get clean
+   - apt-get update
+
+   # Install common packages
+   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+   # Install salt-minion and stop it until it is configured
+   - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+   ########################################################
+   # Node is ready, allow SSH access
+   - echo "Allow SSH access ..."
+   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   ########################################################
+
+  write_files:
+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+     content: |
+         GRUB_RECORDFAIL_TIMEOUT=30
+         GRUB_TIMEOUT=3
+         GRUB_TIMEOUT_STYLE=menu
+
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+
+   - path: /root/.ssh/config
+     owner: root:root
+     permissions: '0600'
+     content: |
+          Host *
+            ServerAliveInterval 300
+            ServerAliveCountMax 10
+            StrictHostKeyChecking no
+            UserKnownHostsFile /dev/null
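
The bootcmd/runcmd pair above implements a simple SSH gate: port 22 is dropped before provisioning starts, and the very same rule is deleted once the node is ready. In isolation the pattern is:

    iptables -A INPUT -p tcp --dport 22 -j DROP   # bootcmd: close SSH early
    # ... provisioning runs with SSH blocked ...
    iptables -D INPUT -p tcp --dport 22 -j DROP   # runcmd: reopen SSH last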

diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data1604.yaml
new file mode 100644
index 0000000..c3ffe17
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data1604.yaml
@@ -0,0 +1,100 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Block access to SSH while node is preparing
+   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   - sudo ifup {interface_name}
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 4G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+
+   ############## TCP Cloud cfg01 node ##################
+   #- sleep 120
+   - echo "Preparing base OS"
+   - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+   - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
+   - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
+   # Configure Ubuntu mirrors
+   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+
+   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+   - apt-get clean
+   - eatmydata apt-get update && apt-get -y upgrade
+
+   # Install common packages
+   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+   # Install salt-minion and stop it until it is configured
+   - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+   # Install latest kernel
+   - eatmydata apt-get install -y linux-generic-hwe-16.04
+
+   ########################################################
+   # Node is ready, allow SSH access
+   #- echo "Allow SSH access ..."
+   #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+   - reboot
+   ########################################################
+
+  write_files:
+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+     content: |
+         GRUB_RECORDFAIL_TIMEOUT=30
+         GRUB_TIMEOUT=3
+         GRUB_TIMEOUT_STYLE=menu
+
+   - path: /etc/network/interfaces
+     content: |
+          # The loopback network interface
+          auto lo
+          iface lo inet loopback
+          auto {interface_name}
+          iface {interface_name} inet dhcp
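
Unlike the cfg01 user-data, this template installs the HWE kernel and reboots instead of reopening SSH, so the node comes back up on the new kernel. Once it is reachable again, the upgrade can be verified with:

    uname -r                           # expect an HWE (4.x) kernel on xenial
    dpkg -l linux-generic-hwe-16.04    # meta-package should show state "ii"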

diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay.yaml
new file mode 100644
index 0000000..bff4542
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay.yaml
@@ -0,0 +1,688 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-ocata-cicd-pipeline') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+# {% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) %}
+
+{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.66') %}
+{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.49.67') %}
+{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.49.68') %}
+{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.16.49.69') %}
+{% set ETH0_IP_ADDRESS_CID01 = os_env('ETH0_IP_ADDRESS_CID01', '172.16.49.70') %}
+{% set ETH0_IP_ADDRESS_CID02 = os_env('ETH0_IP_ADDRESS_CID02', '172.16.49.71') %}
+{% set ETH0_IP_ADDRESS_CID03 = os_env('ETH0_IP_ADDRESS_CID03', '172.16.49.72') %}
+{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '172.16.49.73') %}
+{% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '172.16.49.74') %}
+# {% set ETH0_IP_ADDRESS_CMP003 = os_env('ETH0_IP_ADDRESS_CMP003', '172.16.167.140') %}
+{% set ETH0_IP_ADDRESS_GTW01 = os_env('ETH0_IP_ADDRESS_GTW01', '172.16.49.75') %}
+# {% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '172.16.49.76') %}
+
+{% import 'cookied-bm-ocata-cicd-pipeline/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-bm-ocata-cicd-pipeline/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-bm-ocata-cicd-pipeline/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data {{ CLOUDINIT_USER_DATA }}
+
+
+template:
+  devops_settings:
+    env_name: {{ os_env('ENV_NAME', 'ocata-cicd_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+    address_pools:
+      admin-pool01:
+        net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.64/26:26') }}
+        params:
+          ip_reserved:
+            gateway: +62
+            l2_network_device: +61
+            default_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
+            default_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
+            default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
+            default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
+            default_{{ HOSTNAME_CID01 }}: {{ ETH0_IP_ADDRESS_CID01 }}
+            default_{{ HOSTNAME_CID02 }}: {{ ETH0_IP_ADDRESS_CID02 }}
+            default_{{ HOSTNAME_CID03 }}: {{ ETH0_IP_ADDRESS_CID03 }}
+            default_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
+            default_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
+            # default_{{ HOSTNAME_CMP003 }}: {{ ETH0_IP_ADDRESS_CMP003 }}
+            default_{{ HOSTNAME_GTW01 }}: {{ ETH0_IP_ADDRESS_GTW01 }}
+            # default_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
+            virtual_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
+            virtual_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
+            virtual_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
+            virtual_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
+            virtual_{{ HOSTNAME_CID01 }}: {{ ETH0_IP_ADDRESS_CID01 }}
+            virtual_{{ HOSTNAME_CID02 }}: {{ ETH0_IP_ADDRESS_CID02 }}
+            virtual_{{ HOSTNAME_CID03 }}: {{ ETH0_IP_ADDRESS_CID03 }}
+            virtual_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
+            virtual_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
+            # virtual_{{ HOSTNAME_CMP003 }}: {{ ETH0_IP_ADDRESS_CMP003 }}
+            virtual_{{ HOSTNAME_GTW01 }}: {{ ETH0_IP_ADDRESS_GTW01 }}
+            # virtual_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
+          #ip_ranges:
+          #    dhcp: [+2, -4]
+      private-pool01:
+        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.8.0/24:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+
+      tenant-pool01:
+        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.167.10.0/24:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+
+      external-pool01:
+        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.192/26:26') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: -2
+
+    groups:
+
+      - name: virtual
+        driver:
+          name: devops.driver.libvirt
+          params:
+            connection_string: !os_env CONNECTION_STRING, qemu:///system
+            storage_pool_name: !os_env STORAGE_POOL_NAME, default
+            stp: false
+            hpet: false
+            enable_acpi: true
+            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+
+        network_pools:
+          admin: admin-pool01
+
+        l2_network_devices:
+          # Ironic management interface
+          admin:
+            address_pool: admin-pool01
+            dhcp: false
+            parent_iface:
+              phys_dev: !os_env IRONIC_LAB_PXE_IFACE_0
+
+        group_volumes:
+         - name: cloudimage1604    # This name is used as the 'backing_store' option for node volumes.
+           source_image: !os_env IMAGE_PATH1604  # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+           format: qcow2
+         - name: cfg01_day01_image               # Pre-configured day01 image
+           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2, falling back to IMAGE_PATH1604
+           format: qcow2
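+        # Node volumes below reference these images by name; e.g. the cfg01
+        # 'system' volume sets 'backing_store: cfg01_day01_image'.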
+
+        nodes:
+          - name: {{ HOSTNAME_CFG01 }}
+            role: salt_master
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 4
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cfg01_day01_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_cfg01
+
+              interfaces:
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                  mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
+                #- label: ens4
+                #  l2_network_device: private
+                #  interface_model: *interface_model
+              network_config:
+                ens3:
+                  networks:
+                    - admin
+                #ens4:
+                #  networks:
+                #    - private
+
+
+      - name: default
+        driver:
+          name: devops_driver_ironic
+          params:
+            os_auth_token: fake-token
+            ironic_url: !os_env IRONIC_URL  # URL that fuel-devops uses
+                                            # to access the Ironic API
+            # The agent URLs must be accessible from the deploying node when nodes
+            # are bootstrapped over PXE; usually an address on the PXE/provision network is used.
+            agent_kernel_url: !os_env IRONIC_AGENT_KERNEL_URL
+            agent_ramdisk_url: !os_env IRONIC_AGENT_RAMDISK_URL
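+            # Illustrative values only -- the real URLs depend on the lab:
+            #   export IRONIC_URL='http://10.0.175.2:6385/'
+            #   export IRONIC_AGENT_KERNEL_URL='http://10.0.175.2:8080/tinyipa.vmlinuz'
+            #   export IRONIC_AGENT_RAMDISK_URL='http://10.0.175.2:8080/tinyipa.gz'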
+
+        network_pools:
+          admin: admin-pool01
+
+        nodes:
+
+        #  - name: {{ HOSTNAME_CFG01 }}
+        #    role: salt_master
+        #    params:
+        #      ipmi_user: !os_env IPMI_USER
+        #      ipmi_password: !os_env IPMI_PASSWORD
+        #      ipmi_previlegies: OPERATOR
+        #      ipmi_host: !os_env IPMI_HOST_CFG01  # hostname or IP address
+        #      ipmi_lan_interface: lanplus
+        #      ipmi_port: 623
+
+        #      root_volume_name: system     # see 'volumes' below
+        #      cloud_init_volume_name: iso  # see 'volumes' below
+        #      cloud_init_iface_up: enp3s0f1  # see 'interfaces' below.
+        #      volumes:
+        #        - name: system
+        #          capacity: !os_env NODE_VOLUME_SIZE, 200
+
+        #          # As with the agent URLs, this is the URL of the image used to
+        #          # deploy the node. It must also be accessible from the deploying
+        #          # node while nodes are provisioned by the agent; usually an address on the PXE/provision network is used.
+        #          source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+        #          source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+        #        - name: iso  # The volume named 'iso' is used to store
+        #                     # the image with cloud-init metadata.
+
+        #          cloudinit_meta_data: *cloudinit_meta_data
+        #          cloudinit_user_data: *cloudinit_user_data_cfg01
+
+        #      interfaces:
+        #        - label: enp3s0f0  # Infra interface
+        #          mac_address: !os_env ETH0_MAC_ADDRESS_CFG01
+        #        - label: enp3s0f1
+        #          l2_network_device: admin
+        #          mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
+
+        #      network_config:
+        #        enp3s0f0:
+        #          networks:
+        #           - infra
+        #        enp3s0f1:
+        #          networks:
+        #           - admin
+
+          - name: {{ HOSTNAME_KVM01 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_KVM01  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URLs, this is the URL of the image used to
+                  # deploy the node. It must also be accessible from the deploying
+                  # node while nodes are provisioned by the agent; usually an address on the PXE/provision network is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data
+
+              interfaces:
+                - label: enp9s0f0
+                  l2_network_device: admin
+                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM01
+                - label: enp9s0f1
+                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM01
+
+              network_config:
+                enp9s0f0:
+                  networks:
+                   - admin
+                bond0:
+                  networks:
+                   - control
+                  aggregation: active-backup
+                  parents:
+                   - enp9s0f1
+
+          - name: {{ HOSTNAME_KVM02 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_KVM02  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URLs, this is the URL of the image used to
+                  # deploy the node. It must also be accessible from the deploying
+                  # node while nodes are provisioned by the agent; usually an address on the PXE/provision network is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data
+
+              interfaces:
+                - label: enp9s0f0
+                  l2_network_device: admin
+                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM02
+                - label: enp9s0f1
+                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM02
+
+              network_config:
+                enp9s0f0:
+                  networks:
+                   - admin
+                bond0:
+                  networks:
+                   - control
+                  aggregation: active-backup
+                  parents:
+                   - enp9s0f1
+
+          - name: {{ HOSTNAME_KVM03 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_KVM03  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              # cloud_init_iface_up: eno1  # see 'interfaces' below.
+              cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URLs, this is the URL of the image used to
+                  # deploy the node. It must also be accessible from the deploying
+                  # node while nodes are provisioned by the agent; usually an address on the PXE/provision network is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data
+
+              interfaces:
+                # - label: eno1
+                - label: enp9s0f0
+                  l2_network_device: admin
+                  mac_address: !os_env ETH0_MAC_ADDRESS_KVM03
+                # - label: eno2
+                - label: enp9s0f1
+                  mac_address: !os_env ETH1_MAC_ADDRESS_KVM03
+
+              network_config:
+                # eno1:
+                enp9s0f0:
+                  networks:
+                   - admin
+                bond0:
+                  networks:
+                   - control
+                  aggregation: active-backup
+                  parents:
+                   - enp9s0f1
+
+          - name: {{ HOSTNAME_CID01 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_CID01  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              cloud_init_iface_up: enp2s0f0  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URLs, this is the URL of the image used to
+                  # deploy the node. It must also be accessible from the deploying
+                  # node while nodes are provisioned by the agent; usually an address on the PXE/provision network is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data
+
+              interfaces:
+                - label: enp2s0f0
+                  l2_network_device: admin
+                  mac_address: !os_env ETH0_MAC_ADDRESS_CID01
+                - label: enp2s0f1
+                  mac_address: !os_env ETH1_MAC_ADDRESS_CID01
+
+              network_config:
+                enp2s0f0:
+                  networks:
+                   - admin
+                bond0:
+                  networks:
+                   - control
+                  aggregation: active-backup
+                  parents:
+                   - enp2s0f1
+
+          - name: {{ HOSTNAME_CID02 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_CID02  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              cloud_init_iface_up: enp2s0f0  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URLs, this is the URL of the image used to
+                  # deploy the node. It must also be accessible from the deploying
+                  # node while nodes are provisioned by the agent; usually an address on the PXE/provision network is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data
+
+              interfaces:
+                - label: enp2s0f0
+                  l2_network_device: admin
+                  mac_address: !os_env ETH0_MAC_ADDRESS_CID02
+                - label: enp2s0f1
+                  mac_address: !os_env ETH1_MAC_ADDRESS_CID02
+
+              network_config:
+                enp2s0f0:
+                  networks:
+                   - admin
+                bond0:
+                  networks:
+                   - control
+                  aggregation: active-backup
+                  parents:
+                   - enp2s0f1
+
+          - name: {{ HOSTNAME_CID03 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_CID03  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              cloud_init_iface_up: enp2s0f0  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URLs, this is the URL of the image used to
+                  # deploy the node. It must also be accessible from the deploying
+                  # node while nodes are provisioned by the agent; usually an address on the PXE/provision network is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data
+
+              interfaces:
+                - label: enp2s0f0
+                  l2_network_device: admin
+                  mac_address: !os_env ETH0_MAC_ADDRESS_CID03
+                - label: enp2s0f1
+                  mac_address: !os_env ETH1_MAC_ADDRESS_CID03
+
+              network_config:
+                enp2s0f0:
+                  networks:
+                   - admin
+                bond0:
+                  networks:
+                   - control
+                  aggregation: active-backup
+                  parents:
+                   - enp2s0f1
+
+          - name: {{ HOSTNAME_CMP001 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_CMP001  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              # cloud_init_iface_up: enp3s0f0  # see 'interfaces' below.
+              cloud_init_iface_up: enp2s0f1  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URLs, this is the URL of the image used to
+                  # deploy the node. It must also be accessible from the deploying
+                  # node while nodes are provisioned by the agent; usually an address on the PXE/provision network is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data
+
+              interfaces:
+                - label: enp2s0f0
+                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP001
+                - label: enp2s0f1
+                  l2_network_device: admin
+                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP001
+                - label: enp5s0f0
+                  mac_address: !os_env ETH2_MAC_ADDRESS_CMP001
+                  features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
+                - label: enp5s0f1
+                  mac_address: !os_env ETH3_MAC_ADDRESS_CMP001
+                  features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
+                # - label: enp5s0f2
+                #   mac_address: !os_env ETH4_MAC_ADDRESS_CMP001
+                #   features: ['dpdk', 'dpdk_pci: 0000:05:00.2']
+
+              network_config:
+                enp2s0f1:
+                  networks:
+                   - admin
+                bond0:
+                  networks:
+                   - control
+                  aggregation: active-backup
+                  parents:
+                   - enp5s0f0
+                   - enp5s0f1
+
+          - name: {{ HOSTNAME_CMP002 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_CMP002  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              # cloud_init_iface_up: eno1  # see 'interfaces' below.
+              cloud_init_iface_up: enp2s0f1  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URLs, this is the URL of the image used to
+                  # deploy the node. It must also be accessible from the deploying
+                  # node while nodes are provisioned by the agent; usually an address on the PXE/provision network is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data
+
+              interfaces:
+                # - label: eno1
+                - label: enp2s0f0
+                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP002
+                # - label: eth0
+                - label: enp2s0f1
+                  l2_network_device: admin
+                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP002
+                # - label: eth3
+                - label: enp5s0f0
+                  mac_address: !os_env ETH2_MAC_ADDRESS_CMP002
+                  features: ['dpdk', 'dpdk_pci: 0000:05:00.0']
+                # - label: eth2
+                - label: enp5s0f1
+                  mac_address: !os_env ETH3_MAC_ADDRESS_CMP002
+                  features: ['dpdk', 'dpdk_pci: 0000:05:00.1']
+                # - label: eth4
+                #   mac_address: !os_env ETH4_MAC_ADDRESS_CMP002
+                #   features: ['dpdk', 'dpdk_pci: 0000:0b:00.0']
+
+              network_config:
+                enp2s0f1:
+                  networks:
+                   - admin
+                bond0:
+                  networks:
+                   - control
+                  aggregation: active-backup
+                  parents:
+                   - enp5s0f0
+                   - enp5s0f1
+
+          - name: {{ HOSTNAME_GTW01 }}
+            role: salt_minion
+            params:
+              ipmi_user: !os_env IPMI_USER
+              ipmi_password: !os_env IPMI_PASSWORD
+              ipmi_previlegies: OPERATOR
+              ipmi_host: !os_env IPMI_HOST_GTW01  # hostname or IP address
+              ipmi_lan_interface: lanplus
+              ipmi_port: 623
+
+              root_volume_name: system     # see 'volumes' below
+              cloud_init_volume_name: iso  # see 'volumes' below
+              cloud_init_iface_up: enp9s0f0  # see 'interfaces' below.
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 200
+
+                  # As with the agent URLs, this is the URL of the image used to
+                  # deploy the node. It must also be accessible from the deploying
+                  # node while nodes are provisioned by the agent; usually an address on the PXE/provision network is used.
+                  source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+                  source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+                - name: iso  # The volume named 'iso' is used to store
+                             # the image with cloud-init metadata.
+
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data
+
+              interfaces:
+                - label: enp9s0f0
+                  l2_network_device: admin
+                  mac_address: !os_env ETH0_MAC_ADDRESS_GTW01
+                - label: enp9s0f1
+                  mac_address: !os_env ETH1_MAC_ADDRESS_GTW01
+
+              network_config:
+                enp9s0f0:
+                  networks:
+                   - admin
+                bond0:
+                  networks:
+                   - control
+                  aggregation: active-backup
+                  parents:
+                   - enp9s0f1
diff --git a/tcp_tests/tests/system/test_pipeline_deploy.py b/tcp_tests/tests/system/test_pipeline_deploy.py
new file mode 100644
index 0000000..81e5e8b
--- /dev/null
+++ b/tcp_tests/tests/system/test_pipeline_deploy.py
@@ -0,0 +1,76 @@
+#    Copyright 2016 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import pytest
+from tcp_tests.managers.jenkins.client import JenkinsClient
+
+from tcp_tests import logger
+
+LOG = logger.logger
+
+
+@pytest.mark.deploy
+class TestPipeline(object):
+    """Test class for testing deploy via Pipelines"""
+
+    @pytest.mark.fail_snapshot
+    def test_pipeline(self, show_step, underlay,
+                      common_services_deployed, salt_deployed):
+        """Runner for Juniper contrail-tests
+
+        Scenario:
+            1. Prepare salt on hosts
+            2. Set up controller nodes
+            3. Set up compute nodes
+            4. Deploy OpenStack via pipelines
+            5. Deploy CICD via pipelines
+        """
+        nodes = underlay.node_names()
+        LOG.info("Nodes - {}".format(nodes))
+        cfg_node = 'cfg01.ocata-cicd.local'
+        salt_api = salt_deployed.get_pillar(
+            cfg_node, '_param:jenkins_salt_api_url')
+        salt_api = salt_api[0].get(cfg_node)
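+        # The Jenkins endpoint and credentials below are the static defaults
+        # for this lab template; presumably they match the deployed model.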
+        jenkins = JenkinsClient(
+            host='http://172.16.49.66:8081',
+            username='admin',
+            password='r00tme')
+
+        # Create the parameter list for the OpenStack deployment
+        params = jenkins.make_defults_params('deploy_openstack')
+        params['SALT_MASTER_URL'] = salt_api
+        params['STACK_INSTALL'] = 'core,kvm,openstack,ovs'
+        show_step(4)
+        build = jenkins.run_build('deploy_openstack', params)
+        jenkins.wait_end_of_build(
+            name=build[0],
+            build_id=build[1],
+            timeout=60 * 60 * 4)
+        result = jenkins.build_info(name=build[0],
+                                    build_id=build[1])['result']
+        assert result == 'SUCCESS', "Deploy openstack was failed"
+
+        # Change the parameters for the CICD deployment
+        show_step(5)
+        params['STACK_INSTALL'] = 'cicd'
+        build = jenkins.run_build('deploy_openstack', params)
+        jenkins.wait_end_of_build(
+            name=build[0],
+            build_id=build[1],
+            timeout=60 * 60 * 2)
+        result = jenkins.build_info(name=build[0],
+                                    build_id=build[1])['result']
+        assert result == 'SUCCESS', "Deploy CICD was failed"
+
+        LOG.info("*************** DONE **************")