Support changes for 2019.2.0 release

Change-Id: Ia2e00ef74198818c8f73fa6fff08c47c947e1e52
diff --git a/tcp_tests/helpers/utils.py b/tcp_tests/helpers/utils.py
index a16a3a3..f0424e6 100644
--- a/tcp_tests/helpers/utils.py
+++ b/tcp_tests/helpers/utils.py
@@ -252,12 +252,14 @@
 
     def __get_file(self, mode="r"):
         if self.host:
+            keys = map(paramiko.RSAKey.from_private_key,
+                       map(StringIO.StringIO, self.__private_keys))
             remote = ssh_client.SSHClient(
                 host=self.host,
                 port=self.port,
                 username=self.username,
                 password=self.__password,
-                private_keys=self.__private_keys)
+                private_keys=keys)
 
             return remote.open(self.__file_path, mode=mode)
         else:
diff --git a/tcp_tests/managers/jenkins/client.py b/tcp_tests/managers/jenkins/client.py
index afc8900..e713e71 100644
--- a/tcp_tests/managers/jenkins/client.py
+++ b/tcp_tests/managers/jenkins/client.py
@@ -7,6 +7,8 @@
 
 from devops.helpers import helpers
 
+from requests.exceptions import ConnectionError
+
 
 class JenkinsWrapper(jenkins.Jenkins):
     """Workaround for the bug:
@@ -122,7 +124,11 @@
             print(prefix, end='')
 
         def building():
-            status = not self.build_info(name, build_id)['building']
+            try:
+                status = not self.build_info(name, build_id)['building']
+            except ConnectionError:
+                status = False
+
             if verbose:
                 time_str = time.strftime("%H:%M:%S")
                 prefix = "\n" + job_output_prefix.format(
diff --git a/tcp_tests/managers/saltmanager.py b/tcp_tests/managers/saltmanager.py
index 1e1640f..c2bcc05 100644
--- a/tcp_tests/managers/saltmanager.py
+++ b/tcp_tests/managers/saltmanager.py
@@ -277,9 +277,10 @@
         # because previous authentication most probably is not valid
         # before or after time sync.
         self.__api = None
-        self.run_state(
-            tgt,
-            'cmd.run', 'service ntp stop; if [ -x /usr/sbin/ntpdate ]; then ntpdate -s ntp.ubuntu.com; else ntpd -gq ; fi; service ntp start')  # noqa
+        if not settings.SKIP_SYNC_TIME:
+            self.run_state(
+                tgt,
+                'cmd.run', 'service ntp stop; if [ -x /usr/sbin/ntpdate ]; then ntpdate -s ntp.ubuntu.com; else ntpd -gq ; fi; service ntp start')  # noqa
         new_time_res = self.run_state(tgt, 'cmd.run', 'date')
         for node_name, time in sorted(new_time_res[0]['return'][0].items()):
             LOG.info("{0}: {1}".format(node_name, time))
diff --git a/tcp_tests/settings.py b/tcp_tests/settings.py
index e1fa137..166b492 100644
--- a/tcp_tests/settings.py
+++ b/tcp_tests/settings.py
@@ -91,3 +91,6 @@
 
 EXTERNAL_ADDRESS_POOL_NAME = os.environ.get('EXTERNAL_ADDRESS_POOL_NAME',
                                             'external-pool01')
+
+STACK_INSTALL = os.environ.get('STACK_INSTALL', None)
+SKIP_SYNC_TIME = get_var_as_bool("SKIP_SYNC_TIME", False)
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/core.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/core.yaml
deleted file mode 100644
index c782dfa..0000000
--- a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/core.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-{% from 'physical-mcp-ocata-offline-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-
-- description: Approve cfg01 ssh key for jenkins user
-  cmd: mkdir -p /var/lib/jenkins/.ssh && ssh-keyscan cfg01 > /var/lib/jenkins/.ssh/known_hosts && chown jenkins /var/lib/jenkins/.ssh/known_hosts
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 15}
-  skip_fail: false
-
-
-- description: Install jq for parse json output
-  cmd: apt install -y jq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 15}
-  skip_fail: false
-
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/openstack.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/openstack.yaml
deleted file mode 100644
index 4f2df46..0000000
--- a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/openstack.yaml
+++ /dev/null
@@ -1,161 +0,0 @@
-{% from 'physical-mcp-ocata-offline-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install OpenStack control services
-
-- description: Install glance on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-     -C 'I@glance:server' state.sls glance -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install keystone service (note that different fernet keys are created on different nodes)
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' state.sls keystone.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 15}
-  skip_fail: false
-
-- description: Restart apache due to PROD-10477
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 15}
-  skip_fail: false
-
-- description: Check apache status to PROD-10477
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 15}
-  skip_fail: false
-
-- description: Mount glusterfs.client volumes (resuires created 'keystone' and 'glusterfs' system users)
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glance:server' state.sls glusterfs.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' state.sls keystone.server -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Populate keystone services/tenants/admins
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:client' state.sls keystone.client
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check keystone service-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check glance image-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install nova on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nova:controller' state.sls nova -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check nova service-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova service-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install cinder
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@cinder:controller' state.sls cinder -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check cinder list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install neutron service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@neutron:server' state.sls neutron -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install neutron on gtw node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@neutron:gateway' state.sls neutron
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Check neutron agent-list
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Install heat service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@heat:server' state.sls heat -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check heat service
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-
-- description: Deploy horizon dashboard
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@horizon:server' state.sls horizon
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-# Install compute node
-
-- description: Apply formulas for compute node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Re-apply(as in doc) formulas for compute node
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Check IP on computes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
-    'ip a'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 10, delay: 30}
-  skip_fail: false
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/salt.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/salt.yaml
deleted file mode 100644
index 8faef2b..0000000
--- a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/salt.yaml
+++ /dev/null
@@ -1,51 +0,0 @@
-{% from 'physical-mcp-ocata-offline-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-# {% from 'physical-mcp-ocata-offline-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
-# {% from 'physical-mcp-ocata-offline-ovs/underlay.yaml' import DOMAIN_NAME with context %}
-
-# Environment model name stored in https://github.com/Mirantis/tcp-qa/tree/master/tcp_tests/environments
-# {% set ENVIRONMENT_MODEL_NAME = os_env('ENVIRONMENT_MODEL_NAME','physical-mcp-ocata-offline-ovs') %}
-
-# {% import 'shared-salt.yaml' as SHARED with context %}
-
-- description: Wait while a salt-minion is started
-  cmd: timeout 90s bash -c 'while ! systemctl is-active salt-minion; do sleep 10; echo salt-minion isnt run; done'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Refresh pillars on master node
-  cmd: sleep 90; salt-call --hard-crash --state-output=mixed --state-verbose=False saltutil.refresh_pillar
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Sync all salt resources on master node
-  cmd: sleep 60; salt-call --hard-crash --state-output=mixed --state-verbose=False saltutil.sync_all && sleep 5
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: MaaS auth
-  cmd: bash -x  /var/lib/maas/.maas_login.sh
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Set upstream dns in MaaS
-  cmd: sleep 30; maas mirantis maas set-config name=upstream_dns value='10.10.0.15 8.8.8.8 8.8.4.4'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Setup NTP
-  cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False state.sls ntp.server
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Wait until salt is up
-  cmd: sleep 60
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/Readme.txt b/tcp_tests/templates/physical-mcp-offline-vxlan/Readme.txt
similarity index 100%
rename from tcp_tests/templates/physical-mcp-ocata-offline-ovs/Readme.txt
rename to tcp_tests/templates/physical-mcp-offline-vxlan/Readme.txt
diff --git a/tcp_tests/templates/physical-mcp-offline-vxlan/context-golden-ovs-offline-pike.yml b/tcp_tests/templates/physical-mcp-offline-vxlan/context-golden-ovs-offline-pike.yml
new file mode 100644
index 0000000..9ededfe
--- /dev/null
+++ b/tcp_tests/templates/physical-mcp-offline-vxlan/context-golden-ovs-offline-pike.yml
@@ -0,0 +1,2041 @@
+default_context:
+  # alertmanager_notification_email_enabled: 'True'
+  # alertmanager_notification_email_from: john.doe@example.org
+  # alertmanager_notification_email_hostname: 127.0.0.1
+  # alertmanager_notification_email_password: password
+  # alertmanager_notification_email_port: '587'
+  # alertmanager_notification_email_require_tls: 'True'
+  # alertmanager_notification_email_to: jane.doe@example.org
+  # auditd_enabled: 'False'
+# "=== CLUSTER GLOBALS ==="
+  cluster_domain: mcp-offline-vxlan.local
+  cluster_name: mcp-offline-vxlan
+  openldap_domain: mcp-offline-vxlan.local
+  mcp_version: 2019.2.0
+  cookiecutter_template_branch: master
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  shared_reclass_branch: ''
+  shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+  mcp_common_scripts_branch: ''
+
+# "=== COMPONENT SWITCHES ==="
+  cicd_enabled: 'True'
+  infra_enabled:      'True'
+  kubernetes_enabled: 'False'
+  ceph_enabled: 'True'
+  opencontrail_enabled: 'False'
+  openstack_enabled: 'True'
+  stacklight_enabled: 'True'
+  manila_enabled: 'False'
+
+# === CICD|INFRA COMPONENT SWITCHES ==="
+  maas_enabled: 'True'
+  openldap_enabled: 'True'
+  local_repositories: 'True'
+  offline_deployment: 'True'
+# Other
+  bmk_enabled: 'False'
+  upstream_proxy_enabled: 'False'
+  fluentd_enabled: 'True'
+## END of basic CC config
+  ceph_osd_count: '3'
+  openstack_compute_count: '4'
+  openscap_enabled: 'True'
+  openssh_groups: "qa_scale,oscore_devops,networking,stacklight,k8s_team,mcp_qa,drivetrain"
+  backup_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEogIBAAKCAQEArK5R0R/X4kmWID1A+/vIH/L2wT2SJOCZ9hw/byVVUANJUI0U
+    bDPap3fYMsC/w8/sfb0hI7QjXvKKTT1fSTzKwt8idMrSMQfnjPIbwcFfu6E5i7jv
+    RfqY4g9mspP6tkeoiolxw5BMdxnKaNIJ00T4/uIr4naiGK1OEqzFN9k6aFBMkYhh
+    IAahLM60Ja4ANy521LO2O5NDarVze9l3Mk77diKPm+OFkfc9XgxTxj92vhuG0B6X
+    7Z9nMuF4zxXZascu7wqiMkQnaq0pABJEcXbhLxjFvno7g8e5NuwYcTwuazkx8+Di
+    mdC1uTD91EFrvc7hnFHGzXU843xejkMDPaWt6wIDAQABAoIBAFDVvZku1empVuy6
+    n+wVvmYuX5KwVvSKa6hJKuzFxj0RfMWHdXL9hzL0KDICBwMMF4H5AkVrc09kLJfh
+    zfRD0AsPV1rjAyhOsfdJ8vJtqnn0DDM7HE9e8yiH2yRnn2qq+tv3YVkR+KmcWiNd
+    h5nt5L20dKSrsk/o+O5HEH+HYg8oGrmZYLmq5qOMLp2JGfKH7BS5a8z2fIrFdGeN
+    CStkFbv3vIgzu7X+S40s3b0tfgXz0Kdg+yUZb86i4qm3AjiWhb39jJ7wnw6m9dtQ
+    2ynBHRZs7Sir9C7nUJL0JicVg+w/Lpp4fBnR3Q7kuu7o2jYKMdykYsUtpnJ6Y3iF
+    il2pTgkCgYEA3jEwx7v+Ch7LN98cE0EWjQS9kGnQKwbbuz49IyKBLHXQqfwHQUvJ
+    nXRQSRQmHJUElzN3CfGq+FJz5xP7ZKWQdQZsEjMZ3hWvGIuJgLZAdUdq8bF64NVF
+    eaRinuaoKu5pzUr8FzkGsqItwgqgK1HU2peEmjdJHE6ZeF0RrPj8EkUCgYEAxvSS
+    jvn6fD+hnsHjEC+2i4GPn4l2lKMVu02UC22LKQeAJW1B4MbmZEeOaBYuQN+ARYIf
+    RLPP4HPgmod+23o7hYJsFwxDlBzkdY30MhTyW2MEvUx7ND1pelth40FmYtEWoZXq
+    7EC0dZYeC7fXXVHQOPHw3k1r+VQAR854HZ/P2m8CgYAKyN5yQ2iwOxBoFNc7BATB
+    MYK1+LzW6c1OiFXVV75nVwyaw5XikHgnwVaIRYcYCg+f+yqC/AztYqsngyvHldWu
+    KHKka4CM7AaErXkuGyqDxsSlGJU7Rn7JybXTuqRc+Rd/lG0p/2WY1BBeg7+EesNX
+    HNWd2qMc0Z/XXMUX3Tn29QKBgCIuyy99i6flVuNr9JNHX1nAvOhwOct36bfwsEr2
+    Wz6FZ2MGzpbMFbGNCc6gYPoMbIGExdKMAl9SCXkDZqxW5/scWW3sUYAVJrt71ET2
+    jF1fOeU8Sr7C/mhjYwIkrm6z9et1UpOc2mSJkkf5IiuKbvgZuYS4UKDZ6eJsev68
+    An5JAoGAJTQ1wQXo8Gp1oI1hXG70zoU9AjSVin6rs8cAWzoqdr5aLiTD1OtMeEMF
+    AupRxHgOVKb7RZoePcdusTEErWaYvxPS6vAeGgHf/aKIb8BDL4Rn7FS1DAa8R+s9
+    FN15hAhG2BGLujWvwLWL3aLlxmYWukSmx5QBa//TaFwIJvqF7HU=
+    -----END RSA PRIVATE KEY-----
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsrlHRH9fiSZYgPUD7+8gf8vbBPZIk4Jn2HD9vJVVQA0lQjRRsM9qnd9gywL/Dz+x9vSEjtCNe8opNPV9JPMrC3yJ0ytIxB+eM8hvBwV+7oTmLuO9F+pjiD2ayk/q2R6iKiXHDkEx3Gcpo0gnTRPj+4ividqIYrU4SrMU32TpoUEyRiGEgBqEszrQlrgA3LnbUs7Y7k0NqtXN72XcyTvt2Io+b44WR9z1eDFPGP3a+G4bQHpftn2cy4XjPFdlqxy7vCqIyRCdqrSkAEkRxduEvGMW+ejuDx7k27BhxPC5rOTHz4OKZ0LW5MP3UQWu9zuGcUcbNdTzjfF6OQwM9pa3r
+  ceph_cluster_network: 10.11.0.0/16
+
+
+
+  aptly_server_control_address: 10.11.0.14
+  aptly_server_deploy_address: 10.10.0.14
+  aptly_server_hostname: apt
+
+
+  # backend_network_netmask: 255.255.0.0
+  # backend_network_subnet: 10.11.0.0/16
+  backend_vlan: '1111'
+
+  ceph_hyper_converged: 'False'
+  ceph_mon_node01_deploy_address: 10.10.0.66
+  ceph_mon_node01_address: 10.11.0.66
+  ceph_mon_node01_ceph_public_address: 10.11.0.66
+  ceph_mon_node01_hostname: cmn01
+  ceph_mon_node02_deploy_address: 10.10.0.67
+  ceph_mon_node02_address: 10.11.0.67
+  ceph_mon_node02_ceph_public_address: 10.11.0.67
+  ceph_mon_node02_hostname: cmn02
+  ceph_mon_node03_deploy_address: 10.10.0.68
+  ceph_mon_node03_address: 10.11.0.68
+  ceph_mon_node03_ceph_public_address: 10.11.0.68
+  ceph_mon_node03_hostname: cmn03
+  ceph_osd_backend: bluestore
+  ceph_osd_block_db_size: '20'
+  ceph_osd_bond_mode: active-backup
+  ceph_osd_data_disks: /dev/sdb
+  ceph_osd_journal_or_block_db_disks: /dev/sdb
+  ceph_osd_node_count: '5'
+  ceph_osd_primary_first_nic: ten1
+  ceph_osd_primary_second_nic: ten2
+  ceph_osd_rack01_hostname: osd
+  ceph_osd_single_address_ranges: "10.11.0.201-10.11.0.203"
+  ceph_osd_deploy_address_ranges: "10.10.0.201-10.10.0.203"
+  ceph_osd_ceph_public_address_ranges: "10.11.0.201-10.11.0.203"
+  ceph_osd_backend_address_ranges: "10.13.0.201-10.13.0.203"
+  ceph_public_vlan: '1110'
+
+  ceph_public_network: 10.11.0.0/16
+  ceph_rgw_address: 10.11.0.75
+  ceph_rgw_hostname: rgw
+  ceph_rgw_node01_deploy_address: 10.10.0.76
+  ceph_rgw_node01_address: 10.11.0.76
+  ceph_rgw_node01_hostname: rgw01
+  ceph_rgw_node02_deploy_address: 10.10.0.77
+  ceph_rgw_node02_address: 10.11.0.77
+  ceph_rgw_node02_hostname: rgw02
+  ceph_rgw_node03_deploy_address: 10.10.0.78
+  ceph_rgw_node03_address: 10.11.0.78
+  ceph_rgw_node03_hostname: rgw03
+  ceph_version: luminous
+  cicd_control_node01_deploy_address: 10.10.0.91
+  cicd_control_node01_address: 10.11.0.91
+  cicd_control_node01_hostname: cid01
+  cicd_control_node02_deploy_address: 10.10.0.92
+  cicd_control_node02_address: 10.11.0.92
+  cicd_control_node02_hostname: cid02
+  cicd_control_node03_deploy_address: 10.10.0.93
+  cicd_control_node03_address: 10.11.0.93
+  cicd_control_node03_hostname: cid03
+  cicd_control_vip_address: 10.11.0.90
+  cicd_control_vip_hostname: cid
+  cicd_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEpAIBAAKCAQEAp+1TRJaP/LuTBLLgVEJpIFHvw8kE/WL0oCVfKaTbbAZHTrpi
+    4Mf3WDdlDrc9ujp4w8r9LC5HXOWLtIotzi9vlIUmvotDYF7H4vvcVVa3NTzpgZHq
+    1vKzraJv7Ay1c+iV5AlG5ExulyrDb7fHdUsh72BZmgLh4zjw21Hk3wEOx5UZ4804
+    eAZgTsTtBrfFtZ7GyF9Lqp2m72Jg7Zqb0VXCBuMi2giaoV3hdNtnftF5zzgMey9l
+    8PbPNvTuNuRo9Lnz3kFq5pnGf1tBRRqbAx7xxPy8q7pzrj9kw3LfJWAGBJN2z4kn
+    GDKvy5TjEIcKRYiKpFZ8tDsVnhJYUaePvrZ/wwIDAQABAoIBAGQ2Zsi5qTfDoJrf
+    PKxL7XySKSmHnuiv0xh29PFfJmqy3H8JD9TgwKTueepsQ/QMzODbFHb3LzMq8VJO
+    W8N933Kce2Cerxilt1enwfdThoXI0fi24ZRgxgVKuMv/UZHYLQsm1h2L1ztzE6pp
+    3CgNLDU0JISaT79Pzby0hIbolK9ZPccmdhcyXOo9T1Xa1hRxnn9zJX4I3B1HEgPr
+    GiZXSWIWDK1tTxb7M2QvXjp/3MGeI5JabxTzdlW6xJgrpTzx4Mms2GjXe5HO/vDq
+    TTv++oOcC+NlHrF64wrtkxAkV3ofA0m15KfZ4KljqinCdyZ07D9fm1kRZC3isLMz
+    xHx/oHECgYEA1UVGOFepQwPXnvZwDzSiOe0TeUy7yZ43r30rXrVf5+KwJDjaKsYW
+    msHzzxjnUopFelAPc5zIM3GQ0TWMtbxpGT5HzoGpDmhegwl4iCRXcBEA/mkcpV/N
+    VpeKUS8LFGu4XnbgJKuJs20rkoYCZSuEhSj1W2nB0u9tSRSzjMMI1m0CgYEAyZJd
+    LccoIh6SflYsYPUA4JTatPgVCZCPYf6tVIh9ov3v5ZbkSNe3N58odbu23dtY799k
+    RhCP4aGlB+VdbMJDsvK6OO3FZbleKdtgu/Eyhkf6BLidWNxRiH9mi4rNzhB3uRo/
+    DzCsH9Z+3aSGwn+kkXppX3GjUImalSmgm/CdkO8CgYEAtoqKpeLyLzf0sEzCcWFd
+    kTMIPgSdnt5bT0tVecTIh9ORbA91PLM72c40fogURqOvABfSgeQpv05u44JwI4VW
+    d5LFJ033gyt16baLgguJA5SqQxd4u1uah4lBjGc1lo70yXX6N6jTPc4tQ0aMekeb
+    L9Z0239TtNXVtn2PiOXOhKkCgYEAwzO0fKKDIReOMj5tV6+sG9DLQ7mDVfHiiLnD
+    TTuov3E/Io1PoaMVUQ4Wdn1Eh/DlXdZuGtPrIkwRr2XVZX9zZBZfdMX+BZbPs6U5
+    NohLr3KAkpXd+rHRW2hU/Al9aHLWHjFmo+U0qthjn2y2/B/0VNXAuacoytOXGaBo
+    YttPG40CgYA18z21jGveCpNwbov3G8eAeSWK6KudCNfjxJ2wqnjaLJQlbDoB89IQ
+    1yt4cQB15Tl2WhuCHSKUanPSG6ke8H4vNSht3aVXqHNFpOCwsfsAol2OcSHGrbhh
+    L+Ptf/em7cJ19QZEOKUGfBhsy6IdZE2+y/U5fbJwNTUMSUVxUfBIYQ==
+    -----END RSA PRIVATE KEY-----
+  cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCn7VNElo/8u5MEsuBUQmkgUe/DyQT9YvSgJV8ppNtsBkdOumLgx/dYN2UOtz26OnjDyv0sLkdc5Yu0ii3OL2+UhSa+i0NgXsfi+9xVVrc1POmBkerW8rOtom/sDLVz6JXkCUbkTG6XKsNvt8d1SyHvYFmaAuHjOPDbUeTfAQ7HlRnjzTh4BmBOxO0Gt8W1nsbIX0uqnabvYmDtmpvRVcIG4yLaCJqhXeF022d+0XnPOAx7L2Xw9s829O425Gj0ufPeQWrmmcZ/W0FFGpsDHvHE/LyrunOuP2TDct8lYAYEk3bPiScYMq/LlOMQhwpFiIqkVny0OxWeElhRp4++tn/D
+
+  compute_bond_mode: active-backup
+  compute_deploy_nic: one1
+  compute_padding_with_zeros: 'True'
+  compute_primary_first_nic: ten1
+  compute_primary_second_nic: ten2
+  context_seed: hfadwDXYaA63EQY0T1Vug9JKAjh6miJ3gdBNBgkBuag5fJvsp9dxZamK7Q9BbVY5
+  control_network_netmask: 255.255.0.0
+  control_network_subnet: 10.11.0.0/16
+  control_vlan: '1110'
+  # cookiecutter_template_credentials: gerrit
+  deploy_network_gateway: 10.10.0.1
+  deploy_network_netmask: 255.255.0.0
+  deploy_network_subnet: 10.10.0.0/16
+  deployment_type: physical
+  dns_server01: 10.10.0.15
+  dns_server02: 10.11.0.15
+  email_address: product@mirantis.com
+  gainsight_service_enabled: 'False'
+  gateway_deploy_nic: one1
+  gateway_primary_first_nic: ten1
+  gateway_primary_second_nic: ten2
+  infra_bond_mode: active-backup
+  infra_deploy_nic: one1
+  #infra_kvm_vip_address: 10.11.0.239
+  infra_kvm_vip_address: 10.11.0.239
+  infra_kvm01_control_address: 10.11.0.241
+  infra_kvm01_deploy_address: 10.10.0.241
+  infra_kvm01_hostname: kvm01
+  infra_kvm02_control_address: 10.11.0.242
+  infra_kvm02_deploy_address: 10.10.0.242
+  infra_kvm02_hostname: kvm02
+  infra_kvm03_control_address: 10.11.0.243
+  infra_kvm03_deploy_address: 10.10.0.243
+  infra_kvm03_hostname: kvm03
+  infra_kvm04_control_address: 10.11.0.244
+  infra_kvm04_deploy_address: 10.10.0.244
+  infra_kvm04_hostname: kvm04
+  infra_kvm05_control_address: 10.11.0.245
+  infra_kvm05_deploy_address: 10.10.0.245
+  infra_kvm05_hostname: kvm05
+  infra_kvm06_control_address: 10.11.0.246
+  infra_kvm06_deploy_address: 10.10.0.246
+  infra_kvm06_hostname: kvm06
+  infra_primary_first_nic: ten1
+  infra_primary_second_nic: ten2
+  # internal_proxy_enabled: 'False'
+  # keycloak_enabled: 'False'
+  # kubernetes_ctl_on_kvm: 'False'
+  local_docker_images: 'True'
+  local_pipelines: 'True'
+  local_python_repo: 'True'
+  local_repo_url: ${_param:aptly_server_deploy_address}
+  # no_platform: 'False'
+  nova_vnc_tls_enabled: 'True'
+  openldap_organisation: ${_param:cluster_name}
+  # openssh_groups: ''
+  # For tempest tests which require access to instances via floating ips
+  openstack_create_public_network: 'True'
+  openstack_public_neutron_subnet_gateway: 10.16.250.1
+  openstack_public_neutron_subnet_cidr: 10.16.0.0/16
+  openstack_public_neutron_subnet_allocation_start: 10.16.250.10
+  openstack_public_neutron_subnet_allocation_end: 10.16.254.254
+
+  openstack_benchmark_node01_deploy_address: 10.10.0.95
+  openstack_benchmark_node01_address: 10.11.0.95
+  openstack_benchmark_node01_hostname: bmk01
+  # openstack_cluster_size: small
+  # openstack_cluster_size: small
+  openstack_cluster_size: golden
+  openstack_compute_rack01_hostname: cmp
+  openstack_compute_single_address_ranges: "10.11.0.101-10.11.0.104"
+  openstack_compute_deploy_address_ranges: "10.10.0.101-10.10.0.104"
+  openstack_compute_tenant_address_ranges: "10.12.0.101-10.12.0.104"
+  openstack_compute_backend_address_ranges: "10.11.0.101-10.11.0.104"
+  openstack_control_address: 10.11.0.10
+  openstack_control_hostname: ctl
+  openstack_control_node01_address: 10.11.0.11
+  openstack_control_node01_deploy_address: 10.10.0.11
+  openstack_control_node01_hostname: ctl01
+  openstack_control_node02_address: 10.11.0.12
+  openstack_control_node02_deploy_address: 10.10.0.12
+  openstack_control_node02_hostname: ctl02
+  openstack_control_node03_address: 10.11.0.13
+  openstack_control_node03_deploy_address: 10.10.0.13
+  openstack_control_node03_hostname: ctl03
+  openstack_database_address: 10.11.0.50
+  openstack_database_hostname: dbs
+  openstack_database_node01_address: 10.11.0.51
+  openstack_database_node01_deploy_address: 10.10.0.51
+  openstack_database_node01_hostname: dbs01
+  openstack_database_node02_address: 10.11.0.52
+  openstack_database_node02_deploy_address: 10.10.0.52
+  openstack_database_node02_hostname: dbs02
+  openstack_database_node03_address: 10.11.0.53
+  openstack_database_node03_deploy_address: 10.10.0.53
+  openstack_database_node03_hostname: dbs03
+  openstack_gateway_node01_address: 10.11.0.224
+  openstack_gateway_node01_deploy_address: 10.10.0.224
+  openstack_gateway_node01_tenant_address: 10.12.0.6
+  openstack_gateway_node01_hostname: gtw01
+  openstack_gateway_node02_address: 10.11.0.225
+  openstack_gateway_node02_deploy_address: 10.10.0.225
+  openstack_gateway_node02_tenant_address: 10.12.0.7
+  openstack_gateway_node02_hostname: gtw02
+  openstack_gateway_node03_address: 10.11.0.226
+  openstack_gateway_node03_deploy_address: 10.10.0.226
+  openstack_gateway_node03_tenant_address: 10.12.0.8
+  openstack_gateway_node03_hostname: gtw03
+  openstack_message_queue_address: 10.11.0.40
+  openstack_message_queue_hostname: msg
+  openstack_message_queue_node01_address: 10.11.0.41
+  openstack_message_queue_node01_deploy_address: 10.10.0.41
+  openstack_message_queue_node01_hostname: msg01
+  openstack_message_queue_node02_address: 10.11.0.42
+  openstack_message_queue_node02_deploy_address: 10.10.0.42
+  openstack_message_queue_node02_hostname: msg02
+  openstack_message_queue_node03_address: 10.11.0.43
+  openstack_message_queue_node03_deploy_address: 10.10.0.43
+  openstack_message_queue_node03_hostname: msg03
+  openstack_barbican_address: 10.11.0.44
+  openstack_barbican_hostname: kmn
+  openstack_barbican_node01_address: 10.11.0.45
+  openstack_barbican_node01_deploy_address: 10.10.0.45
+  openstack_barbican_node01_hostname: kmn01
+  openstack_barbican_node02_address: 10.11.0.46
+  openstack_barbican_node02_deploy_address: 10.10.0.46
+  openstack_barbican_node02_hostname: kmn02
+  openstack_barbican_node03_address: 10.11.0.47
+  openstack_barbican_node03_deploy_address: 10.10.0.47
+  openstack_barbican_node03_hostname: kmn03
+  openstack_network_engine: ovs
+  # openstack_neutron_bgp_vpn_driver: bagpipe
+  openstack_neutron_l2gw: 'False'
+  openstack_neutron_bgp_vpn: 'False'
+  openstack_neutron_qos: 'True'
+  openstack_neutron_vlan_aware_vms: 'True'
+  openstack_nfv_dpdk_enabled: 'False'
+  openstack_nfv_sriov_enabled: 'False'
+  openstack_nova_compute_nfv_req_enabled: 'False'
+  # openstack_nova_compute_reserved_host_memory_mb: '900'
+  openstack_ovs_dvr_enabled: 'True'
+  openstack_ovs_encapsulation_type: vxlan
+  openstack_proxy_address: 10.11.0.80
+  # openstack_proxy_address: 10.16.250.9
+  openstack_proxy_vip_interface: ens6
+  openstack_proxy_hostname: prx
+  openstack_proxy_node01_address: 10.11.0.81
+  openstack_proxy_node01_deploy_address: 10.10.0.81
+  openstack_proxy_node01_hostname: prx01
+  openstack_proxy_node02_address: 10.11.0.82
+  openstack_proxy_node02_deploy_address: 10.10.0.82
+  openstack_proxy_node02_hostname: prx02
+  openstack_nginx_proxy_glance_image_size: 30000m
+  openstack_upgrade_node01_address: 10.11.0.19
+  openstack_upgrade_node01_deploy_address: 10.10.0.19
+  openstack_version: pike
+  ovsdb_server: 'ovsdbx:127.0.0.1:6632'
+  ironic_enabled: 'False'
+  openstack_baremetal_address: 10.11.0.5
+  openstack_baremetal_deploy_address: 10.10.0.5
+  openstack_baremetal_hostname: bmt
+  openstack_baremetal_node01_address: 10.11.0.6
+  openstack_baremetal_node02_address: 10.11.0.7
+  openstack_baremetal_node03_address: 10.11.0.8
+  openstack_baremetal_node01_deploy_address: 10.10.0.6
+  openstack_baremetal_node02_deploy_address: 10.10.0.7
+  openstack_baremetal_node03_deploy_address: 10.10.0.8
+  openstack_baremetal_node01_hostname: bmt01
+  openstack_baremetal_node02_hostname: bmt02
+  openstack_baremetal_node03_hostname: bmt03
+  openstack_baremetal_address_baremetal: 10.15.0.10
+  openstack_baremetal_node01_baremetal_address: 10.15.0.11
+  openstack_baremetal_node02_baremetal_address: 10.15.0.12
+  openstack_baremetal_node03_baremetal_address: 10.15.0.13
+  openstack_baremetal_neutron_subnet_cidr: 10.15.0.0/16
+  openstack_baremetal_neutron_subnet_allocation_start: 10.15.90.1
+  openstack_baremetal_neutron_subnet_allocation_end: 10.15.199.255
+  openstack_dns_hostname: dns
+  openstack_dns_node01_address: 10.11.0.54
+  openstack_dns_node01_deploy_address: 10.10.0.54
+  openstack_dns_node01_hostname: dns01
+  openstack_dns_node02_address: 10.11.0.55
+  openstack_dns_node02_deploy_address: 10.10.0.55
+  openstack_dns_node02_hostname: dns02
+
+  openstack_telemetry_address: 10.11.0.35
+  openstack_telemetry_hostname: mdb
+  openstack_telemetry_node01_address: 10.11.0.36
+  openstack_telemetry_node01_deploy_address: 10.10.0.36
+  # openstack_telemetry_node01_storage_address: 10.11.0.36
+  openstack_telemetry_node01_hostname: mdb01
+  openstack_telemetry_node02_address: 10.11.0.37
+  openstack_telemetry_node02_deploy_address: 10.10.0.37
+  # openstack_telemetry_node02_storage_address: 10.11.0.37
+  openstack_telemetry_node02_hostname: mdb02
+  openstack_telemetry_node03_address: 10.11.0.38
+  openstack_telemetry_node03_deploy_address: 10.10.0.38
+  # openstack_telemetry_node03_storage_address: 10.11.0.38
+  openstack_telemetry_node03_hostname: mdb03
+
+  # oss_enabled: 'False'
+  # oss_node03_address: ${_param:stacklight_monitor_node03_address}
+  # oss_webhook_app_id: '24'
+  # oss_webhook_login_id: '13'
+  # platform: openstack_enabled
+  public_host: ${_param:openstack_proxy_address}
+  publication_method: email
+  # physical_lab_setup: 'False'
+  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  salt_api_password: nlRtYRLbBuvqk3b2XJGrSp7HCcDW0Mgr
+  salt_api_password_hash: $6$IXWjGjZz$6YfimiwAzBxmb6hs1IZ2vzdslJiiwQXAN/PdlrxdxLWBlPLb57wkkFRd5wUwXoWAPfAkkZFhP8rUKE14ucQ3e1
+  salt_master_address: 10.11.0.15
+  salt_master_hostname: cfg01
+  salt_master_management_address: 10.10.0.15
+
+  # sriov_network_subnet: 10.55.0.0/16
+  stacklight_log_address: 10.11.0.60
+  stacklight_log_hostname: log
+  stacklight_log_node01_address: 10.11.0.61
+  stacklight_log_node01_deploy_address: 10.10.0.61
+  stacklight_log_node01_hostname: log01
+  stacklight_log_node02_address: 10.11.0.62
+  stacklight_log_node02_deploy_address: 10.10.0.62
+  stacklight_log_node02_hostname: log02
+  stacklight_log_node03_address: 10.11.0.63
+  stacklight_log_node03_deploy_address: 10.10.0.63
+  stacklight_log_node03_hostname: log03
+  stacklight_long_term_storage_type: prometheus
+  stacklight_monitor_address: 10.11.0.70
+  stacklight_monitor_hostname: mon
+  stacklight_monitor_node01_address: 10.11.0.71
+  stacklight_monitor_node01_deploy_address: 10.10.0.71
+  stacklight_monitor_node01_hostname: mon01
+  stacklight_monitor_node02_address: 10.11.0.72
+  stacklight_monitor_node02_deploy_address: 10.10.0.72
+  stacklight_monitor_node02_hostname: mon02
+  stacklight_monitor_node03_address: 10.11.0.73
+  stacklight_monitor_node03_deploy_address: 10.10.0.73
+  stacklight_monitor_node03_hostname: mon03
+  stacklight_telemetry_address: 10.11.0.85
+  stacklight_telemetry_hostname: mtr
+  stacklight_telemetry_node01_address: 10.11.0.86
+  stacklight_telemetry_node01_deploy_address: 10.10.0.86
+  stacklight_telemetry_node01_hostname: mtr01
+  stacklight_telemetry_node02_address: 10.11.0.87
+  stacklight_telemetry_node02_deploy_address: 10.10.0.87
+  stacklight_telemetry_node02_hostname: mtr02
+  stacklight_telemetry_node03_address: 10.11.0.88
+  stacklight_telemetry_node03_deploy_address: 10.10.0.88
+  stacklight_telemetry_node03_hostname: mtr03
+  stacklight_version: '2'
+  static_ips_on_deploy_network_enabled: 'True'
+  tenant_network_gateway: 10.12.0.1
+  tenant_network_netmask: 255.255.255.0
+  tenant_network_subnet: 10.12.0.0/16
+  tenant_vlan: '1111'
+  use_default_network_scheme: 'False'
+  vnf_onboarding_enabled: 'False'
+  designate_enabled: 'True'
+  galera_ssl_enabled: 'True'
+  openstack_mysql_x509_enabled: 'True'
+  rabbitmq_ssl_enabled: 'True'
+  openstack_rabbitmq_x509_enabled: 'True'
+  openstack_internal_protocol: 'https'
+  tenant_telemetry_enabled: 'True'
+  gnocchi_aggregation_storage: 'ceph'
+  barbican_enabled: 'True'
+  barbican_integration_enabled: 'False'
+  barbican_backend: 'dogtag'
+  openstack_octavia_enabled: 'True'
+  octavia_manager_cluster: 'False'
+  octavia_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEpAIBAAKCAQEAtjnPDJsQToHBtoqIo15mdSYpfi8z6DFMi8Gbo0KCN33OUn5u
+    OctbdtjUfeuhvI6px1SCnvyWi09Ft8eWwq+KwLCGKbUxLvqKltuJ7K3LIrGXkt+m
+    qZN4O9XKeVKfZH+mQWkkxRWgX2r8RKNV3GkdNtd74VjhP+R6XSKJQ1Z8b7eHM10v
+    6IjTY/jPczjK+eyCeEj4qbSnV8eKlqLhhquuSQRmUO2DRSjLVdpdf2BB4/BdWFsD
+    YOmX7mb8kpEr9vQ+c1JKMXDwD6ehzyU8kE+1kVm5zOeEy4HdYIMpvUfN49P1anRV
+    2ISQ1ZE+r22IAMKl0tekrGH0e/1NP1DF5rINMwIDAQABAoIBAQCkP/cgpaRNHyg8
+    ISKIHs67SWqdEm73G3ijgB+JSKmW2w7dzJgN//6xYUAnP/zIuM7PnJ0gMQyBBTMS
+    NBTv5spqZLKJZYivj6Tb1Ya8jupKm0jEWlMfBo2ZYVrfgFmrfGOfEebSvmuPlh9M
+    vuzlftmWVSSUOkjODmM9D6QpzgrbpktBuA/WpX+6esMTwJpOcQ5xZWEnHXnVzuTc
+    SncodVweE4gz6F1qorbqIJz8UAUQ5T0OZTdHzIS1IbamACHWaxQfixAO2s4+BoUK
+    ANGGZWkfneCxx7lthvY8DiKn7M5cSRnqFyDToGqaLezdkMNlGC7v3U11FF5blSEW
+    fL1o/HwBAoGBAOavhTr8eqezTchqZvarorFIq7HFWk/l0vguIotu6/wlh1V/KdF+
+    aLLHgPgJ5j+RrCMvTBoKqMeeHfVGrS2udEy8L1mK6b3meG+tMxU05OA55abmhYn7
+    7vF0q8XJmYIHIXmuCgF90R8Piscb0eaMlmHW9unKTKo8EOs5j+D8+AMJAoGBAMo4
+    8WW+D3XiD7fsymsfXalf7VpAt/H834QTbNZJweUWhg11eLutyahyyfjjHV200nNZ
+    cnU09DWKpBbLg7d1pyT69CNLXpNnxuWCt8oiUjhWCUpNqVm2nDJbUdlRFTzYb2fS
+    ZC4r0oQaPD5kMLSipjcwzMWe0PniySxNvKXKInFbAoGBAKxW2qD7uKKKuQSOQUft
+    aAksMmEIAHWKTDdvOA2VG6XvX5DHBLXmy08s7rPfqW06ZjCPCDq4Velzvgvc9koX
+    d/lP6cvqlL9za+x6p5wjPQ4rEt/CfmdcmOE4eY+1EgLrUt314LHGjjG3ScWAiirE
+    QyDrGOIGaYoQf89L3KqIMr0JAoGARYAklw8nSSCUvmXHe+Gf0yKA9M/haG28dCwo
+    780RsqZ3FBEXmYk1EYvCFqQX56jJ25MWX2n/tJcdpifz8Q2ikHcfiTHSI187YI34
+    lKQPFgWb08m1NnwoWrY//yx63BqWz1vjymqNQ5GwutC8XJi5/6Xp+tGGiRuEgJGH
+    EIPUKpkCgYAjBIVMkpNiLCREZ6b+qjrPV96ed3iTUt7TqP7yGlFI/OkORFS38xqC
+    hBP6Fk8iNWuOWQD+ohM/vMMnvIhk5jwlcwn+kF0ra04gi5KBFWSh/ddWMJxUtPC1
+    2htvlEc6zQAR6QfqXHmwhg1hP81JcpqpicQzCMhkzLoR1DC6stXdLg==
+    -----END RSA PRIVATE KEY-----
+  octavia_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC2Oc8MmxBOgcG2ioijXmZ1Jil+LzPoMUyLwZujQoI3fc5Sfm45y1t22NR966G8jqnHVIKe/JaLT0W3x5bCr4rAsIYptTEu+oqW24nsrcsisZeS36apk3g71cp5Up9kf6ZBaSTFFaBfavxEo1XcaR0213vhWOE/5HpdIolDVnxvt4czXS/oiNNj+M9zOMr57IJ4SPiptKdXx4qWouGGq65JBGZQ7YNFKMtV2l1/YEHj8F1YWwNg6ZfuZvySkSv29D5zUkoxcPAPp6HPJTyQT7WRWbnM54TLgd1ggym9R83j0/VqdFXYhJDVkT6vbYgAwqXS16SsYfR7/U0/UMXmsg0z
+
+
+## MAAS BEGIN
+  maas_deploy_address: 10.10.0.15
+  maas_deploy_network_name: deploy_network
+  maas_deploy_range_end: 10.10.254.255
+  maas_deploy_range_start: 10.10.254.1
+  maas_deploy_vlan: '0'
+  maas_fabric_name: deploy_fabric
+  maas_hostname: cfg01
+  maas_machines: |
+    kvm04:
+      distro_series: "xenial"
+      # hwe_kernel: "hwe-16.04"
+      # pxe_interface_mac:
+      pxe_interface_mac: "0c:c4:7a:33:1f:e4"
+      interfaces:
+        one1:
+          mac: "0c:c4:7a:33:1f:e4"
+          mode: "static"
+          ip: ${_param:infra_kvm_node04_deploy_address}
+          subnet: "10.10.0.0/16"
+          gateway: ${_param:deploy_network_gateway}
+          name: one1
+        one2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:33:1f:e5"
+          name: one2
+        ten1:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:1e:3e:ea"
+          name: ten1
+        ten2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:1e:3e:eb"
+          name: ten2
+      power_parameters:
+        power_user: ${_param:ipmi_user}
+        power_pass: ${_param:ipmi_password}
+        power_address: ${_param:ipmi_kvm04_ip}
+        power_type: ipmi
+      disk_layout:
+        type: custom
+        bootable_device: sda
+        disk:
+          vgroot:
+            devices:
+              - sda
+            type: lvm
+            volume:
+              root:
+                mount: /
+                size: 1T
+                type: ext4
+
+    kvm05:
+      distro_series: "xenial"
+      # hwe_kernel: "hwe-16.04"
+      pxe_interface_mac: "0c:c4:7a:33:20:fc"
+      interfaces:
+        one1:
+          mac: "0c:c4:7a:33:20:fc"
+          mode: "static"
+          ip: ${_param:infra_kvm_node05_deploy_address}
+          subnet: "10.10.0.0/16"
+          gateway: ${_param:deploy_network_gateway}
+          name: one1
+        one2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:33:20:fd"
+          name: one2
+        ten1:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:1e:3e:e6"
+          name: ten1
+        ten2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:1e:3e:e7"
+          name: ten2
+      power_parameters:
+        power_user: ${_param:ipmi_user}
+        power_pass: ${_param:ipmi_password}
+        power_address: ${_param:ipmi_kvm05_ip}
+        power_type: ipmi
+      disk_layout:
+        type: custom
+        bootable_device: sda
+        disk:
+          vgroot:
+            devices:
+              - sda
+            type: lvm
+            volume:
+              root:
+                mount: /
+                size: 1T
+                type: ext4
+
+    kvm06:
+      distro_series: "xenial"
+      # hwe_kernel: "hwe-16.04"
+      pxe_interface_mac: "0c:c4:7a:31:fb:b6"
+      interfaces:
+        one1:
+          mac: "0c:c4:7a:31:fb:b6"
+          mode: "static"
+          ip: ${_param:infra_kvm_node06_deploy_address}
+          subnet: "10.10.0.0/16"
+          gateway: ${_param:deploy_network_gateway}
+          name: one1
+        one2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:31:fb:b7"
+          name: one2
+        ten1:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:1e:3e:fa"
+          name: ten1
+        ten2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:1e:3e:fb"
+          name: ten2
+      power_parameters:
+        power_user: ${_param:ipmi_user}
+        power_pass: ${_param:ipmi_password}
+        power_address: ${_param:ipmi_kvm06_ip}
+        power_type: ipmi
+      disk_layout:
+        type: custom
+        bootable_device: sda
+        disk:
+          vgroot:
+            devices:
+              - sda
+            type: lvm
+            volume:
+              root:
+                mount: /
+                size: 1T
+                type: ext4
+
+    kvm01:
+      distro_series: "xenial"
+      # hwe_kernel: "hwe-16.04"
+      pxe_interface_mac: "0c:c4:7a:31:f0:12"
+      interfaces:
+        one1:
+          mac: "0c:c4:7a:31:f0:12"
+          mode: "static"
+          ip: ${_param:infra_kvm_node01_deploy_address}
+          subnet: "10.10.0.0/16"
+          gateway: ${_param:deploy_network_gateway}
+          name: one1
+        one2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:31:f0:13"
+          name: one2
+        fourty1:
+          mode: UNCONFIGURED
+          mac: "00:02:c9:44:82:70"
+          name: ten1
+        fourty2:
+          mode: UNCONFIGURED
+          mac: "00:02:c9:44:82:71"
+          name: ten2
+      power_parameters:
+        power_user: ${_param:ipmi_user}
+        power_pass: ${_param:ipmi_password}
+        power_address: ${_param:ipmi_kvm01_ip}
+        power_type: ipmi
+      disk_layout:
+        type: custom
+        bootable_device: sda
+        disk:
+          vgroot:
+            devices:
+              - sda
+            type: lvm
+            volume:
+              root:
+                mount: /
+                size: 120G
+                type: ext4
+
+    kvm02:
+      distro_series: "xenial"
+      # hwe_kernel: "hwe-16.04"
+      pxe_interface_mac: "0c:c4:7a:31:ef:bc"
+      interfaces:
+        one1:
+          mac: "0c:c4:7a:31:ef:bc"
+          mode: "static"
+          ip: ${_param:infra_kvm_node02_deploy_address}
+          subnet: "10.10.0.0/16"
+          gateway: ${_param:deploy_network_gateway}
+          name: one1
+        one2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:31:ef:bd"
+          name: one2
+        fourty1:
+          mode: UNCONFIGURED
+          mac: "00:02:c9:44:81:40"
+          name: ten1
+        fourty2:
+          mode: UNCONFIGURED
+          mac: "00:02:c9:44:81:41"
+          name: ten2
+      power_parameters:
+        power_user: ${_param:ipmi_user}
+        power_pass: ${_param:ipmi_password}
+        power_address: ${_param:ipmi_kvm02_ip}
+        power_type: ipmi
+      disk_layout:
+        type: custom
+        bootable_device: sda
+        disk:
+          vgroot:
+            devices:
+              - sda
+            type: lvm
+            volume:
+              root:
+                mount: /
+                size: 120G
+                type: ext4
+
+    kvm03:
+      distro_series: "xenial"
+      # hwe_kernel: "hwe-16.04"
+      pxe_interface_mac: "0c:c4:7a:31:ef:aa"
+      interfaces:
+        one1:
+          mac: "0c:c4:7a:31:ef:aa"
+          mode: "static"
+          ip: ${_param:infra_kvm_node03_deploy_address}
+          subnet: "10.10.0.0/16"
+          gateway: ${_param:deploy_network_gateway}
+          name: one1
+        one2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:31:ef:ab"
+          name: one2
+        fourty1:
+          mode: UNCONFIGURED
+          mac: "e4:1d:2d:72:23:b1"
+          name: ten1
+        fourty2:
+          mode: UNCONFIGURED
+          mac: "e4:1d:2d:72:23:b2"
+          name: ten2
+      power_parameters:
+        power_user: ${_param:ipmi_user}
+        power_pass: ${_param:ipmi_password}
+        power_address: ${_param:ipmi_kvm03_ip}
+        power_type: ipmi
+      disk_layout:
+        type: custom
+        bootable_device: sda
+        disk:
+          vgroot:
+            devices:
+              - sda
+            type: lvm
+            volume:
+              root:
+                mount: /
+                size: 120G
+                type: ext4
+
+    cmp001:
+      distro_series: "xenial"
+      # hwe_kernel: "hwe-16.04"
+      pxe_interface_mac: "0c:c4:7a:6d:33:12"
+      interfaces:
+        one1:
+          mac: "0c:c4:7a:6d:33:12"
+          mode: "static"
+          ip: 10.10.0.101
+          subnet: "10.10.0.0/16"
+          gateway: ${_param:deploy_network_gateway}
+          name: one1
+        one2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:6d:33:13"
+          name: one2
+        ten1:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:92:8b:bc"
+          name: ten1
+        ten2:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:92:8b:bd"
+          name: ten2
+        ten3:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:89:65:bc"
+          name: ten3
+        ten4:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:89:65:bd"
+          name: ten4
+      power_parameters:
+        power_user: ${_param:ipmi_user}
+        power_pass: ${_param:ipmi_password}
+        power_address: ${_param:ipmi_cmp001_ip}
+        power_type: ipmi
+      disk_layout:
+        type: custom
+        bootable_device: sdc
+        disk:
+          vgroot:
+            devices:
+              - sdc
+            type: lvm
+            volume:
+              root:
+                mount: /
+                size: 240G
+                type: ext4
+
+    cmp002:
+      distro_series: "xenial"
+      # hwe_kernel: "hwe-16.04"
+      pxe_interface_mac: "0c:c4:7a:aa:cb:6a"
+      interfaces:
+        one1:
+          mac: "0c:c4:7a:aa:cb:6a"
+          mode: "static"
+          ip: 10.10.0.102
+          subnet: "10.10.0.0/16"
+          gateway: ${_param:deploy_network_gateway}
+          name: one1
+        one2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:aa:cb:6b"
+          name: one2
+        ten1:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:58:41:d0"
+          name: ten1
+        ten2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:58:41:d1"
+          name: ten2
+        ten3:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:1f:00:18"
+          name: ten3
+        ten4:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:1f:00:19"
+          name: ten4
+      power_parameters:
+        power_user: ${_param:ipmi_user}
+        power_pass: ${_param:ipmi_password}
+        power_address: ${_param:ipmi_cmp002_ip}
+        power_type: ipmi
+      disk_layout:
+        type: custom
+        bootable_device: sda
+        disk:
+          vgroot:
+            devices:
+              - sda
+            type: lvm
+            volume:
+              root:
+                mount: /
+                size: 240G
+                type: ext4
+
+    cmp003:
+      distro_series: "xenial"
+      # hwe_kernel: "hwe-16.04"
+      pxe_interface_mac: "0c:c4:7a:aa:91:5a"
+      interfaces:
+        one1:
+          mac: "0c:c4:7a:aa:91:5a"
+          mode: "static"
+          ip: 10.10.0.103
+          subnet: "10.10.0.0/16"
+          gateway: ${_param:deploy_network_gateway}
+          name: one1
+        one2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:aa:91:5b"
+          name: one2
+        ten1:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:97:e5:9c"
+          name: ten1
+        ten2:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:97:e5:9d"
+          name: ten2
+        ten3:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:a4:70:7c"
+          name: ten3
+        ten4:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:a4:70:7d"
+          name: ten4
+      power_parameters:
+        power_user: ${_param:ipmi_user}
+        power_pass: ${_param:ipmi_password}
+        power_address: ${_param:ipmi_cmp003_ip}
+        power_type: ipmi
+      disk_layout:
+        type: custom
+        bootable_device: sdc
+        disk:
+          vgroot:
+            devices:
+              - sdc
+            type: lvm
+            volume:
+              root:
+                mount: /
+                size: 240G
+                type: ext4
+
+    cmp004:
+      distro_series: "xenial"
+      # hwe_kernel: "hwe-16.04"
+      pxe_interface_mac: "0c:c4:7a:aa:cb:5a"
+      interfaces:
+        one1:
+          mac: "0c:c4:7a:aa:cb:5a"
+          mode: "static"
+          ip: 10.10.0.104
+          subnet: "10.10.0.0/16"
+          gateway: ${_param:deploy_network_gateway}
+          name: one1
+        one2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:aa:cb:5b"
+          name: one2
+        ten1:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:87:17:90"
+          name: ten1
+        ten2:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:87:17:91"
+          name: ten2
+        ten3:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:89:45:74"
+          name: ten3
+        ten4:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:89:45:75"
+          name: ten4
+      power_parameters:
+        power_user: ${_param:ipmi_user}
+        power_pass: ${_param:ipmi_password}
+        power_address: ${_param:ipmi_cmp004_ip}
+        power_type: ipmi
+      disk_layout:
+        type: custom
+        bootable_device: sdc
+        disk:
+          vgroot:
+            devices:
+              - sdc
+            type: lvm
+            volume:
+              root:
+                mount: /
+                size: 240G
+                type: ext4
+
+    # OSD Nodes
+    osd001:
+      distro_series: "xenial"
+      # hwe_kernel: "hwe-16.04"
+      pxe_interface_mac: "0c:c4:7a:aa:cb:7a"
+      interfaces:
+        one1:
+          mac: "0c:c4:7a:aa:cb:7a"
+          mode: "static"
+          ip: 10.10.0.201
+          subnet: "10.10.0.0/16"
+          gateway: ${_param:deploy_network_gateway}
+          name: one1
+        one2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:aa:cb:7b"
+          name: one2
+        ten1:
+          mode: UNCONFIGURED
+          mac: "90:e2:ba:1a:6c:98"
+          name: ten1
+        ten2:
+          mode: UNCONFIGURED
+          mac: "90:e2:ba:1a:6c:99"
+          name: ten2
+        ten3:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:97:df:84"
+          name: ten3
+        ten4:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:97:df:85"
+          name: ten4
+      power_parameters:
+        power_user: ${_param:ipmi_user}
+        power_pass: ${_param:ipmi_password}
+        power_address: ${_param:ipmi_osd001_ip}
+        power_type: ipmi
+      disk_layout:
+        type: custom
+        bootable_device: sda
+        disk:
+          vgroot:
+            devices:
+              - sda
+            type: lvm
+            volume:
+              root:
+                mount: /
+                size: 240G
+                type: ext4
+
+    osd002:
+      distro_series: "xenial"
+      # hwe_kernel: "hwe-16.04"
+      pxe_interface_mac: "0c:c4:7a:6d:2e:1e"
+      interfaces:
+        one1:
+          mac: "0c:c4:7a:6d:2e:1e"
+          mode: "static"
+          ip: 10.10.0.202
+          subnet: "10.10.0.0/16"
+          gateway: ${_param:deploy_network_gateway}
+          name: one1
+        one2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:6d:2e:1f"
+          name: one2
+        ten1:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:8d:6d:20"
+          name: ten1
+        ten2:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:8d:6d:21"
+          name: ten2
+        ten3:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:8d:70:e8"
+          name: ten3
+        ten4:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:8d:70:e9"
+          name: ten4
+      power_parameters:
+        power_user: ${_param:ipmi_user}
+        power_pass: ${_param:ipmi_password}
+        power_address: ${_param:ipmi_osd002_ip}
+        power_type: ipmi
+      disk_layout:
+        type: custom
+        bootable_device: sdc
+        disk:
+          vgroot:
+            devices:
+              - sdc
+            type: lvm
+            volume:
+              root:
+                mount: /
+                size: 240G
+                type: ext4
+
+    osd003:
+      distro_series: "xenial"
+      # hwe_kernel: "hwe-16.04"
+      pxe_interface_mac: "0c:c4:7a:aa:d1:10"
+      interfaces:
+        one1:
+          mac: "0c:c4:7a:aa:d1:10"
+          mode: "static"
+          ip: 10.10.0.203
+          subnet: "10.10.0.0/16"
+          gateway: ${_param:deploy_network_gateway}
+          name: one1
+        one2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:aa:d1:11"
+          name: one2
+        ten1:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:8a:c1:90"
+          name: ten1
+        ten2:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:8a:c1:91"
+          name: ten2
+        ten3:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:92:a3:10"
+          name: ten3
+        ten4:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:92:a3:11"
+          name: ten4
+      power_parameters:
+        power_user: ${_param:ipmi_user}
+        power_pass: ${_param:ipmi_password}
+        power_address: ${_param:ipmi_osd003_ip}
+        power_type: ipmi
+      disk_layout:
+        type: custom
+        bootable_device: sdc
+        disk:
+          vgroot:
+            devices:
+              - sdc
+            type: lvm
+            volume:
+              root:
+                mount: /
+                size: 240G
+                type: ext4
+
+    # GTW Nodes
+    gtw01:
+      distro_series: "xenial"
+      # hwe_kernel: "hwe-16.04"
+      pxe_interface_mac: "0c:c4:7a:aa:d6:76"
+      interfaces:
+        one1:
+          mac: "0c:c4:7a:aa:d6:76"
+          mode: "static"
+          ip: 10.10.0.224
+          subnet: "10.10.0.0/16"
+          gateway: ${_param:deploy_network_gateway}
+          name: one1
+        one2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:aa:d6:77"
+          name: one2
+        ten1:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:7c:5d:90"
+          name: ten1
+        ten2:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:7c:5d:91"
+          name: ten2
+        ten3:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:8d:3e:e8"
+          name: ten3
+        ten4:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:8d:3e:e9"
+          name: ten4
+      power_parameters:
+        power_user: ${_param:ipmi_user}
+        power_pass: ${_param:ipmi_password}
+        power_address: ${_param:ipmi_gtw01_ip}
+        power_type: ipmi
+      disk_layout:
+        type: custom
+        bootable_device: sda
+        disk:
+          vgroot:
+            devices:
+              - sda
+            type: lvm
+            volume:
+              root:
+                mount: /
+                size: 1T
+                type: ext4
+
+    gtw02:
+      distro_series: "xenial"
+      # hwe_kernel: "hwe-16.04"
+      pxe_interface_mac: "0c:c4:7a:aa:c9:64"
+      interfaces:
+        one1:
+          mac: "0c:c4:7a:aa:c9:64"
+          mode: "static"
+          ip: 10.10.0.225
+          subnet: "10.10.0.0/16"
+          gateway: ${_param:deploy_network_gateway}
+          name: one1
+        one2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:aa:c9:65"
+          name: one2
+        ten1:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:7c:a8:4c"
+          name: ten1
+        ten2:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:7c:a8:4d"
+          name: ten2
+        ten3:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:8d:13:b8"
+          name: ten3
+        ten4:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:8d:13:b9"
+          name: ten4
+      power_parameters:
+        power_user: ${_param:ipmi_user}
+        power_pass: ${_param:ipmi_password}
+        power_address: ${_param:ipmi_gtw02_ip}
+        power_type: ipmi
+      disk_layout:
+        type: custom
+        bootable_device: sdc
+        disk:
+          vgroot:
+            devices:
+              - sdc
+            type: lvm
+            volume:
+              root:
+                mount: /
+                size: 1T
+                type: ext4
+
+    gtw03:
+      distro_series: "xenial"
+      # hwe_kernel: "hwe-16.04"
+      pxe_interface_mac: "0c:c4:7a:aa:cb:78"
+      interfaces:
+        one1:
+          mac: "0c:c4:7a:aa:cb:78"
+          mode: "static"
+          ip: 10.10.0.226
+          subnet: "10.10.0.0/16"
+          gateway: ${_param:deploy_network_gateway}
+          name: one1
+        one2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:aa:cb:79"
+          name: one2
+        ten1:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:8a:80:48"
+          name: ten1
+        ten2:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:8a:80:49"
+          name: ten2
+        ten3:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:89:6e:40"
+          name: ten3
+        ten4:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:89:6e:41"
+          name: ten4
+      power_parameters:
+        power_user: ${_param:ipmi_user}
+        power_pass: ${_param:ipmi_password}
+        power_address: ${_param:ipmi_gtw03_ip}
+        power_type: ipmi
+      disk_layout:
+        bootable_device: sdc
+        type: custom
+        disk:
+          vgroot:
+            devices:
+              - sdc
+            type: lvm
+            volume:
+              root:
+                mount: /
+                size: 1T
+                type: ext4
+## MAAS END
+
+
+
+
+nodes:
+  # cfg01.mcp-offline-vxlan.local:
+  #   reclass_storage_name: infra_config_node01
+  #   classes:
+  #   - cluster.${_param:cluster_name}.infra.config
+  #   - cluster.${_param:cluster_name}.infra.networking.ens5_config
+  #   roles:
+  #   - infra_config
+  #   - linux_system_codename_xenial
+  #   interfaces:
+  #     ens3:
+  #       role: single_mgm
+  #       deploy_address: 10.10.0.15
+  #       deploy_network_netmask: 255.255.0.0
+  #       deploy_network_gateway: 10.10.0.1
+  #     ens4:
+  #       role: single_ctl
+  #       single_address: 10.11.0.15
+
+  apt.mcp-offline-vxlan.local:
+    reclass_storage_name: aptly_server_node01
+    roles:
+    - linux_system_codename_xenial
+    classes:
+    - cluster.${_param:cluster_name}.infra
+    interfaces:
+      ens3:
+        role: single_mgm
+        deploy_address: 10.10.0.14
+        deploy_network_netmask: 255.255.0.0
+        deploy_network_gateway: 10.10.0.1
+
+  kvm01.mcp-offline-vxlan.local:
+    reclass_storage_name: infra_kvm_node01
+    roles:
+    - infra_kvm
+    - linux_system_codename_xenial
+    # - features_vcp_gateway
+    interfaces:
+      one1:
+        role: single_mgm
+        deploy_address: 10.10.0.241
+        deploy_network_netmask: 255.255.0.0
+        deploy_network_gateway: 10.10.0.1
+      ten1:
+        role: bond0_ab_nondvr_vxlan_ctl_mesh_raw
+        single_address: 10.11.0.241
+        tenant_address: 10.12.0.241
+      ten2:
+        role: bond0_ab_nondvr_vxlan_ctl_mesh_raw
+        single_address: 10.11.0.241
+        tenant_address: 10.12.0.241
+
+  kvm02.mcp-offline-vxlan.local:
+    reclass_storage_name: infra_kvm_node02
+    roles:
+    - infra_kvm
+    - linux_system_codename_xenial
+    # - features_vcp_gateway
+    interfaces:
+      one1:
+        role: single_mgm
+        deploy_address: 10.10.0.242
+        deploy_network_netmask: 255.255.0.0
+        deploy_network_gateway: 10.10.0.1
+      ten1:
+        role: bond0_ab_nondvr_vxlan_ctl_mesh_raw
+        single_address: 10.11.0.242
+        tenant_address: 10.12.0.242
+      ten2:
+        role: bond0_ab_nondvr_vxlan_ctl_mesh_raw
+        single_address: 10.11.0.242
+        tenant_address: 10.12.0.242
+
+  kvm03.mcp-offline-vxlan.local:
+    reclass_storage_name: infra_kvm_node03
+    roles:
+    - infra_kvm
+    - linux_system_codename_xenial
+    # - features_vcp_gateway
+    interfaces:
+      one1:
+        role: single_mgm
+        deploy_address: 10.10.0.243
+        deploy_network_netmask: 255.255.0.0
+        deploy_network_gateway: 10.10.0.1
+      ten1:
+        role: bond0_ab_nondvr_vxlan_ctl_mesh_raw
+        single_address: 10.11.0.243
+        tenant_address: 10.12.0.243
+      ten2:
+        role: bond0_ab_nondvr_vxlan_ctl_mesh_raw
+        single_address: 10.11.0.243
+        tenant_address: 10.12.0.243
+
+  kvm04.mcp-offline-vxlan.local:
+    reclass_storage_name: infra_kvm_node04
+    roles:
+    - infra_kvm_wo_gluster
+    - linux_system_codename_xenial
+    - salt_master_host
+    # - features_vcp_gateway
+    interfaces:
+      one1:
+        role: single_mgm
+        deploy_address: 10.10.0.244
+        deploy_network_netmask: 255.255.0.0
+        deploy_network_gateway: 10.10.0.1
+      ten1:
+        role: bond0_ab_nondvr_vxlan_ctl_mesh_raw
+        single_address: 10.11.0.244
+        tenant_address: 10.12.0.244
+      ten2:
+        role: bond0_ab_nondvr_vxlan_ctl_mesh_raw
+        single_address: 10.11.0.244
+        tenant_address: 10.12.0.244
+
+  kvm05.mcp-offline-vxlan.local:
+    reclass_storage_name: infra_kvm_node05
+    roles:
+    - infra_kvm_wo_gluster
+    - linux_system_codename_xenial
+    - salt_master_host
+    # - features_vcp_gateway
+    interfaces:
+      one1:
+        role: single_mgm
+        deploy_address: 10.10.0.245
+        deploy_network_netmask: 255.255.0.0
+        deploy_network_gateway: 10.10.0.1
+      ten1:
+        role: bond0_ab_nondvr_vxlan_ctl_mesh_raw
+        single_address: 10.11.0.245
+        tenant_address: 10.12.0.245
+      ten2:
+        role: bond0_ab_nondvr_vxlan_ctl_mesh_raw
+        single_address: 10.11.0.245
+        tenant_address: 10.12.0.245
+
+  kvm06.mcp-offline-vxlan.local:
+    reclass_storage_name: infra_kvm_node06
+    roles:
+    - infra_kvm_wo_gluster
+    - linux_system_codename_xenial
+    - salt_master_host
+    # - features_vcp_gateway
+    interfaces:
+      one1:
+        role: single_mgm
+        deploy_address: 10.10.0.246
+        deploy_network_netmask: 255.255.0.0
+        deploy_network_gateway: 10.10.0.1
+      ten1:
+        role: bond0_ab_nondvr_vxlan_ctl_mesh_raw
+        single_address: 10.11.0.246
+        tenant_address: 10.12.0.246
+      ten2:
+        role: bond0_ab_nondvr_vxlan_ctl_mesh_raw
+        single_address: 10.11.0.246
+        tenant_address: 10.12.0.246
+
+  cmp<<count>>.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_compute_rack01
+    roles:
+    - openstack_compute
+    - linux_system_codename_xenial
+    interfaces:
+      one1:
+        role: single_mgm
+      ten1:
+        role: bond0_ab_dvr_vxlan_ctl_mesh_floating
+        # role: bond0_ab_dvr_vxlan_ctl_mesh_external_vlan
+        # external_vlan: 1112
+        # external_address: 10.16.0.224
+        # external_network_netmask: 255.255.0.0
+      ten2:
+        role: bond0_ab_dvr_vxlan_ctl_mesh_floating
+        # role: bond0_ab_dvr_vxlan_ctl_mesh_external_vlan
+        # external_vlan: 1112
+
+  osd<<count>>.mcp-offline-vxlan.local:
+    reclass_storage_name: ceph_osd_rack01
+    roles:
+    - ceph_osd
+    - linux_system_codename_xenial
+    interfaces:
+      one1:
+        role: single_mgm
+      ten1:
+        role: bond0_ab_vlan_ceph_public_backend
+      ten2:
+        role: bond0_ab_vlan_ceph_public_backend
+
+  gtw01.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_gateway_node01
+    roles:
+    - openstack_gateway_octavia
+    - linux_system_codename_xenial
+    interfaces:
+      one1:
+        role: single_mgm
+        deploy_address: 10.10.0.224
+        deploy_network_netmask: 255.255.0.0
+        deploy_network_gateway: 10.10.0.1
+      ten1:
+        role: bond0_ab_dvr_vxlan_ctl_mesh_external_vlan
+        # role: bond0_ab_dvr_vxlan_ctl_mesh_external
+        # role: bond0_ab_dvr_vxlan_ctl_mesh_floating
+        single_address: 10.11.0.224
+        tenant_address: 10.12.0.224
+        external_address: 10.16.0.224
+        external_network_netmask: 255.255.0.0
+        external_vlan: 1112
+      ten2:
+        role: bond0_ab_dvr_vxlan_ctl_mesh_external_vlan
+        # role: bond0_ab_dvr_vxlan_ctl_mesh_external
+        # role: bond0_ab_dvr_vxlan_ctl_mesh_floating
+        single_address: 10.11.0.224
+        tenant_address: 10.12.0.224
+        external_address: 10.16.0.224
+        external_network_netmask: 255.255.0.0
+        external_vlan: 1112
+
+  gtw02.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_gateway_node02
+    roles:
+    - openstack_gateway
+    - linux_system_codename_xenial
+    interfaces:
+      one1:
+        role: single_mgm
+        deploy_address: 10.10.0.225
+        deploy_network_netmask: 255.255.0.0
+        deploy_network_gateway: 10.10.0.1
+      ten1:
+        role: bond0_ab_dvr_vxlan_ctl_mesh_external_vlan
+        # role: bond0_ab_dvr_vxlan_ctl_mesh_floating
+        # role: bond0_ab_dvr_vxlan_ctl_mesh_external
+        single_address: 10.11.0.225
+        tenant_address: 10.12.0.225
+        external_address: 10.16.0.225
+        external_network_netmask: 255.255.0.0
+        external_vlan: 1112
+      ten2:
+        role: bond0_ab_dvr_vxlan_ctl_mesh_external_vlan
+        # role: bond0_ab_dvr_vxlan_ctl_mesh_floating
+        # role: bond0_ab_dvr_vxlan_ctl_mesh_external
+        single_address: 10.11.0.225
+        tenant_address: 10.12.0.225
+        external_address: 10.16.0.225
+        external_network_netmask: 255.255.0.0
+        external_vlan: 1112
+
+  gtw03.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_gateway_node03
+    roles:
+    - openstack_gateway
+    - linux_system_codename_xenial
+    interfaces:
+      one1:
+        role: single_mgm
+        deploy_address: 10.10.0.226
+        deploy_network_netmask: 255.255.0.0
+        deploy_network_gateway: 10.10.0.1
+      ten1:
+        role: bond0_ab_dvr_vxlan_ctl_mesh_external_vlan
+        # role: bond0_ab_dvr_vxlan_ctl_mesh_floating
+        # role: bond0_ab_dvr_vxlan_ctl_mesh_external
+        single_address: 10.11.0.226
+        tenant_address: 10.12.0.226
+        external_address: 10.16.0.226
+        external_network_netmask: 255.255.0.0
+        external_vlan: 1112
+      ten2:
+        role: bond0_ab_dvr_vxlan_ctl_mesh_external_vlan
+        # role: bond0_ab_dvr_vxlan_ctl_mesh_floating
+        # role: bond0_ab_dvr_vxlan_ctl_mesh_external
+        single_address: 10.11.0.226
+        tenant_address: 10.12.0.226
+        external_address: 10.16.0.226
+        external_network_netmask: 255.255.0.0
+        external_vlan: 1112
+
+
+  ######
+  ######
+  #
+  # VCP Nodes
+  #
+  ######
+  ######
+
+  ctl01.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_control_node01
+    roles:
+    - openstack_control_leader
+    - linux_system_codename_xenial
+    - features_control_external_vlan
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+      # ens2:
+        # deploy_address: 10.10.0.11
+        # deploy_network_netmask: 255.255.0.0
+        # deploy_network_gateway: 10.10.0.1
+        # keepalived_vip_priority: 103
+
+  ctl02.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_control_node02
+    roles:
+    - openstack_control
+    - linux_system_codename_xenial
+    - features_control_external_vlan
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+      # ens2:
+        # deploy_address: 10.10.0.12
+        # deploy_network_netmask: 255.255.0.0
+        # deploy_network_gateway: 10.10.0.1
+        # keepalived_vip_priority: 102
+
+  ctl03.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_control_node03
+    roles:
+    - openstack_control
+    - linux_system_codename_xenial
+    - features_control_external_vlan
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+      # ens2:
+        # deploy_address: 10.10.0.13
+        # deploy_network_netmask: 255.255.0.0
+        # deploy_network_gateway: 10.10.0.1
+        # keepalived_vip_priority: 101
+
+  # gtw01.mcp-offline-vxlan.local:
+  #   reclass_storage_name: openstack_gateway_node01
+  #   roles:
+  #   - openstack_gateway
+  #   - linux_system_codename_xenial
+  #   interfaces:
+  #     ens2:
+  #       role: single_mgm
+  #     ens3:
+  #       role: single_ctl
+  #     ens4:
+  #       role: bond0_ab_ovs_vxlan_mesh
+  #     ens5:
+  #       role: bond0_ab_ovs_vxlan_mesh
+
+  # gtw02.mcp-offline-vxlan.local:
+  #   reclass_storage_name: openstack_gateway_node02
+  #   roles:
+  #   - openstack_gateway
+  #   - linux_system_codename_xenial
+  #   interfaces:
+  #     ens2:
+  #       role: single_mgm
+  #     ens3:
+  #       role: single_ctl
+  #     ens4:
+  #       role: bond0_ab_ovs_vxlan_mesh
+  #     ens5:
+  #       role: bond0_ab_ovs_vxlan_mesh
+
+  # gtw03.mcp-offline-vxlan.local:
+  #   reclass_storage_name: openstack_gateway_node03
+  #   roles:
+  #   - openstack_gateway
+  #   - linux_system_codename_xenial
+  #   interfaces:
+  #     ens2:
+  #       role: single_mgm
+  #     ens3:
+  #       role: single_ctl
+  #     ens4:
+  #       role: bond0_ab_ovs_vxlan_mesh
+  #     ens5:
+  #       role: bond0_ab_ovs_vxlan_mesh
+
+  cid01.mcp-offline-vxlan.local:
+    reclass_storage_name: cicd_control_node01
+    roles:
+    - cicd_control_leader
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+      # ens2:
+        # deploy_address: 10.10.0.91
+        # deploy_network_netmask: 255.255.0.0
+        # deploy_network_gateway: 10.10.0.1
+
+  cid02.mcp-offline-vxlan.local:
+    reclass_storage_name: cicd_control_node02
+    roles:
+    - cicd_control_manager
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+      # ens2:
+        # deploy_address: 10.10.0.92
+        # deploy_network_netmask: 255.255.0.0
+        # deploy_network_gateway: 10.10.0.1
+
+  cid03.mcp-offline-vxlan.local:
+    reclass_storage_name: cicd_control_node03
+    roles:
+    - cicd_control_manager
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+      # ens2:
+        # deploy_address: 10.10.0.93
+        # deploy_network_netmask: 255.255.0.0
+        # deploy_network_gateway: 10.10.0.1
+
+  dbs01.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_database_node01
+    roles:
+    - openstack_database_leader
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+      # ens2:
+        # deploy_address: 10.10.0.51
+        # deploy_network_netmask: 255.255.0.0
+        # deploy_network_gateway: 10.10.0.1
+
+  dbs02.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_database_node02
+    roles:
+    - openstack_database
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+      # ens2:
+        # deploy_address: 10.10.0.52
+        # deploy_network_netmask: 255.255.0.0
+        # deploy_network_gateway: 10.10.0.1
+
+  dbs03.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_database_node03
+    roles:
+    - openstack_database
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+      # ens2:
+        # deploy_address: 10.10.0.53
+        # deploy_network_netmask: 255.255.0.0
+        # deploy_network_gateway: 10.10.0.1
+
+  msg01.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_message_queue_node01
+    roles:
+    - openstack_message_queue
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  msg02.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_message_queue_node02
+    roles:
+    - openstack_message_queue
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  msg03.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_message_queue_node03
+    roles:
+    - openstack_message_queue
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  prx01.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_proxy_node01
+    roles:
+    - openstack_proxy
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  prx02.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_proxy_node02
+    roles:
+    - openstack_proxy
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  dns01.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_dns_node01
+    roles:
+    - openstack_dns
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  dns02.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_dns_node02
+    roles:
+    - openstack_dns
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  mon01.mcp-offline-vxlan.local:
+    reclass_storage_name: stacklight_server_node01
+    roles:
+    - stacklightv2_server_leader
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  mon02.mcp-offline-vxlan.local:
+    reclass_storage_name: stacklight_server_node02
+    roles:
+    - stacklightv2_server
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  mon03.mcp-offline-vxlan.local:
+    reclass_storage_name: stacklight_server_node03
+    roles:
+    - stacklightv2_server
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  log01.mcp-offline-vxlan.local:
+    reclass_storage_name: stacklight_log_node01
+    roles:
+    - stacklight_log_leader_v2
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  log02.mcp-offline-vxlan.local:
+    reclass_storage_name: stacklight_log_node02
+    roles:
+    - stacklight_log
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  log03.mcp-offline-vxlan.local:
+    reclass_storage_name: stacklight_log_node03
+    roles:
+    - stacklight_log
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  mtr01.mcp-offline-vxlan.local:
+    reclass_storage_name: stacklight_telemetry_node01
+    roles:
+    - stacklight_telemetry_leader
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  mtr02.mcp-offline-vxlan.local:
+    reclass_storage_name: stacklight_telemetry_node02
+    roles:
+    - stacklight_telemetry
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  mtr03.mcp-offline-vxlan.local:
+    reclass_storage_name: stacklight_telemetry_node03
+    roles:
+    - stacklight_telemetry
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+
+  cmn01.mcp-offline-vxlan.local:
+    reclass_storage_name: ceph_mon_node01
+    roles:
+    - ceph_mon
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  cmn02.mcp-offline-vxlan.local:
+    reclass_storage_name: ceph_mon_node02
+    roles:
+    - ceph_mon
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  cmn03.mcp-offline-vxlan.local:
+    reclass_storage_name: ceph_mon_node03
+    roles:
+    - ceph_mon
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  rgw01.mcp-offline-vxlan.local:
+    reclass_storage_name: ceph_rgw_node01
+    roles:
+    - ceph_rgw
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  rgw02.mcp-offline-vxlan.local:
+    reclass_storage_name: ceph_rgw_node02
+    roles:
+    - ceph_rgw
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  rgw03.mcp-offline-vxlan.local:
+    reclass_storage_name: ceph_rgw_node03
+    roles:
+    - ceph_rgw
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  mdb01.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_telemetry_node01
+    roles:
+    - openstack_telemetry
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  mdb02.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_telemetry_node02
+    roles:
+    - openstack_telemetry
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  mdb03.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_telemetry_node03
+    roles:
+    - openstack_telemetry
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  kmn01.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_barbican_node01
+    roles:
+    - openstack_barbican
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  kmn02.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_barbican_node02
+    roles:
+    - openstack_barbican
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  kmn03.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_barbican_node03
+    roles:
+    - openstack_barbican
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
diff --git a/tcp_tests/templates/physical-mcp-offline-vxlan/context-golden-ovs-offline-queens.yml b/tcp_tests/templates/physical-mcp-offline-vxlan/context-golden-ovs-offline-queens.yml
new file mode 100644
index 0000000..9ededfe
--- /dev/null
+++ b/tcp_tests/templates/physical-mcp-offline-vxlan/context-golden-ovs-offline-queens.yml
@@ -0,0 +1,2041 @@
+default_context:
+  # alertmanager_notification_email_enabled: 'True'
+  # alertmanager_notification_email_from: john.doe@example.org
+  # alertmanager_notification_email_hostname: 127.0.0.1
+  # alertmanager_notification_email_password: password
+  # alertmanager_notification_email_port: '587'
+  # alertmanager_notification_email_require_tls: 'True'
+  # alertmanager_notification_email_to: jane.doe@example.org
+  # auditd_enabled: 'False'
+# "=== CLUSTER GLOBALS ==="
+  cluster_domain: mcp-offline-vxlan.local
+  cluster_name: mcp-offline-vxlan
+  openldap_domain: mcp-offline-vxlan.local
+  mcp_version: 2019.2.0
+  cookiecutter_template_branch: master
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  shared_reclass_branch: ''
+  shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+  mcp_common_scripts_branch: ''
+
+# "=== COMPONENT SWITCHES ==="
+  cicd_enabled: 'True'
+  infra_enabled: 'True'
+  kubernetes_enabled: 'False'
+  ceph_enabled: 'True'
+  opencontrail_enabled: 'False'
+  openstack_enabled: 'True'
+  stacklight_enabled: 'True'
+  manila_enabled: 'False'
+
+# === CICD|INFRA COMPONENT SWITCHES ==="
+  maas_enabled: 'True'
+  openldap_enabled: 'True'
+  local_repositories: 'True'
+  offline_deployment: 'True'
+# Other
+  bmk_enabled: 'False'
+  upstream_proxy_enabled: 'False'
+  fluentd_enabled: 'True'
+## END of basic CC config
+  ceph_osd_count: '3'
+  openstack_compute_count: '4'
+  openscap_enabled: 'True'
+  openssh_groups: "qa_scale,oscore_devops,networking,stacklight,k8s_team,mcp_qa,drivetrain"
+  backup_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEogIBAAKCAQEArK5R0R/X4kmWID1A+/vIH/L2wT2SJOCZ9hw/byVVUANJUI0U
+    bDPap3fYMsC/w8/sfb0hI7QjXvKKTT1fSTzKwt8idMrSMQfnjPIbwcFfu6E5i7jv
+    RfqY4g9mspP6tkeoiolxw5BMdxnKaNIJ00T4/uIr4naiGK1OEqzFN9k6aFBMkYhh
+    IAahLM60Ja4ANy521LO2O5NDarVze9l3Mk77diKPm+OFkfc9XgxTxj92vhuG0B6X
+    7Z9nMuF4zxXZascu7wqiMkQnaq0pABJEcXbhLxjFvno7g8e5NuwYcTwuazkx8+Di
+    mdC1uTD91EFrvc7hnFHGzXU843xejkMDPaWt6wIDAQABAoIBAFDVvZku1empVuy6
+    n+wVvmYuX5KwVvSKa6hJKuzFxj0RfMWHdXL9hzL0KDICBwMMF4H5AkVrc09kLJfh
+    zfRD0AsPV1rjAyhOsfdJ8vJtqnn0DDM7HE9e8yiH2yRnn2qq+tv3YVkR+KmcWiNd
+    h5nt5L20dKSrsk/o+O5HEH+HYg8oGrmZYLmq5qOMLp2JGfKH7BS5a8z2fIrFdGeN
+    CStkFbv3vIgzu7X+S40s3b0tfgXz0Kdg+yUZb86i4qm3AjiWhb39jJ7wnw6m9dtQ
+    2ynBHRZs7Sir9C7nUJL0JicVg+w/Lpp4fBnR3Q7kuu7o2jYKMdykYsUtpnJ6Y3iF
+    il2pTgkCgYEA3jEwx7v+Ch7LN98cE0EWjQS9kGnQKwbbuz49IyKBLHXQqfwHQUvJ
+    nXRQSRQmHJUElzN3CfGq+FJz5xP7ZKWQdQZsEjMZ3hWvGIuJgLZAdUdq8bF64NVF
+    eaRinuaoKu5pzUr8FzkGsqItwgqgK1HU2peEmjdJHE6ZeF0RrPj8EkUCgYEAxvSS
+    jvn6fD+hnsHjEC+2i4GPn4l2lKMVu02UC22LKQeAJW1B4MbmZEeOaBYuQN+ARYIf
+    RLPP4HPgmod+23o7hYJsFwxDlBzkdY30MhTyW2MEvUx7ND1pelth40FmYtEWoZXq
+    7EC0dZYeC7fXXVHQOPHw3k1r+VQAR854HZ/P2m8CgYAKyN5yQ2iwOxBoFNc7BATB
+    MYK1+LzW6c1OiFXVV75nVwyaw5XikHgnwVaIRYcYCg+f+yqC/AztYqsngyvHldWu
+    KHKka4CM7AaErXkuGyqDxsSlGJU7Rn7JybXTuqRc+Rd/lG0p/2WY1BBeg7+EesNX
+    HNWd2qMc0Z/XXMUX3Tn29QKBgCIuyy99i6flVuNr9JNHX1nAvOhwOct36bfwsEr2
+    Wz6FZ2MGzpbMFbGNCc6gYPoMbIGExdKMAl9SCXkDZqxW5/scWW3sUYAVJrt71ET2
+    jF1fOeU8Sr7C/mhjYwIkrm6z9et1UpOc2mSJkkf5IiuKbvgZuYS4UKDZ6eJsev68
+    An5JAoGAJTQ1wQXo8Gp1oI1hXG70zoU9AjSVin6rs8cAWzoqdr5aLiTD1OtMeEMF
+    AupRxHgOVKb7RZoePcdusTEErWaYvxPS6vAeGgHf/aKIb8BDL4Rn7FS1DAa8R+s9
+    FN15hAhG2BGLujWvwLWL3aLlxmYWukSmx5QBa//TaFwIJvqF7HU=
+    -----END RSA PRIVATE KEY-----
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsrlHRH9fiSZYgPUD7+8gf8vbBPZIk4Jn2HD9vJVVQA0lQjRRsM9qnd9gywL/Dz+x9vSEjtCNe8opNPV9JPMrC3yJ0ytIxB+eM8hvBwV+7oTmLuO9F+pjiD2ayk/q2R6iKiXHDkEx3Gcpo0gnTRPj+4ividqIYrU4SrMU32TpoUEyRiGEgBqEszrQlrgA3LnbUs7Y7k0NqtXN72XcyTvt2Io+b44WR9z1eDFPGP3a+G4bQHpftn2cy4XjPFdlqxy7vCqIyRCdqrSkAEkRxduEvGMW+ejuDx7k27BhxPC5rOTHz4OKZ0LW5MP3UQWu9zuGcUcbNdTzjfF6OQwM9pa3r
+  ceph_cluster_network: 10.11.0.0/16
+
+
+
+  aptly_server_control_address: 10.11.0.14
+  aptly_server_deploy_address: 10.10.0.14
+  aptly_server_hostname: apt
+
+
+  # backend_network_netmask: 255.255.0.0
+  # backend_network_subnet: 10.11.0.0/16
+  backend_vlan: '1111'
+
+  ceph_hyper_converged: 'False'
+  ceph_mon_node01_deploy_address: 10.10.0.66
+  ceph_mon_node01_address: 10.11.0.66
+  ceph_mon_node01_ceph_public_address: 10.11.0.66
+  ceph_mon_node01_hostname: cmn01
+  ceph_mon_node02_deploy_address: 10.10.0.67
+  ceph_mon_node02_address: 10.11.0.67
+  ceph_mon_node02_ceph_public_address: 10.11.0.67
+  ceph_mon_node02_hostname: cmn02
+  ceph_mon_node03_deploy_address: 10.10.0.68
+  ceph_mon_node03_address: 10.11.0.68
+  ceph_mon_node03_ceph_public_address: 10.11.0.68
+  ceph_mon_node03_hostname: cmn03
+  ceph_osd_backend: bluestore
+  ceph_osd_block_db_size: '20'
+  ceph_osd_bond_mode: active-backup
+  ceph_osd_data_disks: /dev/sdb
+  ceph_osd_journal_or_block_db_disks: /dev/sdb
+  ceph_osd_node_count: '5'
+  ceph_osd_primary_first_nic: ten1
+  ceph_osd_primary_second_nic: ten2
+  ceph_osd_rack01_hostname: osd
+  ceph_osd_single_address_ranges: "10.11.0.201-10.11.0.203"
+  ceph_osd_deploy_address_ranges: "10.10.0.201-10.10.0.203"
+  ceph_osd_ceph_public_address_ranges: "10.11.0.201-10.11.0.203"
+  ceph_osd_backend_address_ranges: "10.13.0.201-10.13.0.203"
+  ceph_public_vlan: '1110'
+
+  ceph_public_network: 10.11.0.0/16
+  ceph_rgw_address: 10.11.0.75
+  ceph_rgw_hostname: rgw
+  ceph_rgw_node01_deploy_address: 10.10.0.76
+  ceph_rgw_node01_address: 10.11.0.76
+  ceph_rgw_node01_hostname: rgw01
+  ceph_rgw_node02_deploy_address: 10.10.0.77
+  ceph_rgw_node02_address: 10.11.0.77
+  ceph_rgw_node02_hostname: rgw02
+  ceph_rgw_node03_deploy_address: 10.10.0.78
+  ceph_rgw_node03_address: 10.11.0.78
+  ceph_rgw_node03_hostname: rgw03
+  ceph_version: luminous
+  cicd_control_node01_deploy_address: 10.10.0.91
+  cicd_control_node01_address: 10.11.0.91
+  cicd_control_node01_hostname: cid01
+  cicd_control_node02_deploy_address: 10.10.0.92
+  cicd_control_node02_address: 10.11.0.92
+  cicd_control_node02_hostname: cid02
+  cicd_control_node03_deploy_address: 10.10.0.93
+  cicd_control_node03_address: 10.11.0.93
+  cicd_control_node03_hostname: cid03
+  cicd_control_vip_address: 10.11.0.90
+  cicd_control_vip_hostname: cid
+  cicd_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEpAIBAAKCAQEAp+1TRJaP/LuTBLLgVEJpIFHvw8kE/WL0oCVfKaTbbAZHTrpi
+    4Mf3WDdlDrc9ujp4w8r9LC5HXOWLtIotzi9vlIUmvotDYF7H4vvcVVa3NTzpgZHq
+    1vKzraJv7Ay1c+iV5AlG5ExulyrDb7fHdUsh72BZmgLh4zjw21Hk3wEOx5UZ4804
+    eAZgTsTtBrfFtZ7GyF9Lqp2m72Jg7Zqb0VXCBuMi2giaoV3hdNtnftF5zzgMey9l
+    8PbPNvTuNuRo9Lnz3kFq5pnGf1tBRRqbAx7xxPy8q7pzrj9kw3LfJWAGBJN2z4kn
+    GDKvy5TjEIcKRYiKpFZ8tDsVnhJYUaePvrZ/wwIDAQABAoIBAGQ2Zsi5qTfDoJrf
+    PKxL7XySKSmHnuiv0xh29PFfJmqy3H8JD9TgwKTueepsQ/QMzODbFHb3LzMq8VJO
+    W8N933Kce2Cerxilt1enwfdThoXI0fi24ZRgxgVKuMv/UZHYLQsm1h2L1ztzE6pp
+    3CgNLDU0JISaT79Pzby0hIbolK9ZPccmdhcyXOo9T1Xa1hRxnn9zJX4I3B1HEgPr
+    GiZXSWIWDK1tTxb7M2QvXjp/3MGeI5JabxTzdlW6xJgrpTzx4Mms2GjXe5HO/vDq
+    TTv++oOcC+NlHrF64wrtkxAkV3ofA0m15KfZ4KljqinCdyZ07D9fm1kRZC3isLMz
+    xHx/oHECgYEA1UVGOFepQwPXnvZwDzSiOe0TeUy7yZ43r30rXrVf5+KwJDjaKsYW
+    msHzzxjnUopFelAPc5zIM3GQ0TWMtbxpGT5HzoGpDmhegwl4iCRXcBEA/mkcpV/N
+    VpeKUS8LFGu4XnbgJKuJs20rkoYCZSuEhSj1W2nB0u9tSRSzjMMI1m0CgYEAyZJd
+    LccoIh6SflYsYPUA4JTatPgVCZCPYf6tVIh9ov3v5ZbkSNe3N58odbu23dtY799k
+    RhCP4aGlB+VdbMJDsvK6OO3FZbleKdtgu/Eyhkf6BLidWNxRiH9mi4rNzhB3uRo/
+    DzCsH9Z+3aSGwn+kkXppX3GjUImalSmgm/CdkO8CgYEAtoqKpeLyLzf0sEzCcWFd
+    kTMIPgSdnt5bT0tVecTIh9ORbA91PLM72c40fogURqOvABfSgeQpv05u44JwI4VW
+    d5LFJ033gyt16baLgguJA5SqQxd4u1uah4lBjGc1lo70yXX6N6jTPc4tQ0aMekeb
+    L9Z0239TtNXVtn2PiOXOhKkCgYEAwzO0fKKDIReOMj5tV6+sG9DLQ7mDVfHiiLnD
+    TTuov3E/Io1PoaMVUQ4Wdn1Eh/DlXdZuGtPrIkwRr2XVZX9zZBZfdMX+BZbPs6U5
+    NohLr3KAkpXd+rHRW2hU/Al9aHLWHjFmo+U0qthjn2y2/B/0VNXAuacoytOXGaBo
+    YttPG40CgYA18z21jGveCpNwbov3G8eAeSWK6KudCNfjxJ2wqnjaLJQlbDoB89IQ
+    1yt4cQB15Tl2WhuCHSKUanPSG6ke8H4vNSht3aVXqHNFpOCwsfsAol2OcSHGrbhh
+    L+Ptf/em7cJ19QZEOKUGfBhsy6IdZE2+y/U5fbJwNTUMSUVxUfBIYQ==
+    -----END RSA PRIVATE KEY-----
+  cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCn7VNElo/8u5MEsuBUQmkgUe/DyQT9YvSgJV8ppNtsBkdOumLgx/dYN2UOtz26OnjDyv0sLkdc5Yu0ii3OL2+UhSa+i0NgXsfi+9xVVrc1POmBkerW8rOtom/sDLVz6JXkCUbkTG6XKsNvt8d1SyHvYFmaAuHjOPDbUeTfAQ7HlRnjzTh4BmBOxO0Gt8W1nsbIX0uqnabvYmDtmpvRVcIG4yLaCJqhXeF022d+0XnPOAx7L2Xw9s829O425Gj0ufPeQWrmmcZ/W0FFGpsDHvHE/LyrunOuP2TDct8lYAYEk3bPiScYMq/LlOMQhwpFiIqkVny0OxWeElhRp4++tn/D
+
+  compute_bond_mode: active-backup
+  compute_deploy_nic: one1
+  compute_padding_with_zeros: 'True'
+  compute_primary_first_nic: ten1
+  compute_primary_second_nic: ten2
+  context_seed: hfadwDXYaA63EQY0T1Vug9JKAjh6miJ3gdBNBgkBuag5fJvsp9dxZamK7Q9BbVY5
+  control_network_netmask: 255.255.0.0
+  control_network_subnet: 10.11.0.0/16
+  control_vlan: '1110'
+  # cookiecutter_template_credentials: gerrit
+  deploy_network_gateway: 10.10.0.1
+  deploy_network_netmask: 255.255.0.0
+  deploy_network_subnet: 10.10.0.0/16
+  deployment_type: physical
+  dns_server01: 10.10.0.15
+  dns_server02: 10.11.0.15
+  email_address: product@mirantis.com
+  gainsight_service_enabled: 'False'
+  gateway_deploy_nic: one1
+  gateway_primary_first_nic: ten1
+  gateway_primary_second_nic: ten2
+  infra_bond_mode: active-backup
+  infra_deploy_nic: one1
+  #infra_kvm_vip_address: 10.11.0.239
+  infra_kvm_vip_address: 10.11.0.239
+  infra_kvm01_control_address: 10.11.0.241
+  infra_kvm01_deploy_address: 10.10.0.241
+  infra_kvm01_hostname: kvm01
+  infra_kvm02_control_address: 10.11.0.242
+  infra_kvm02_deploy_address: 10.10.0.242
+  infra_kvm02_hostname: kvm02
+  infra_kvm03_control_address: 10.11.0.243
+  infra_kvm03_deploy_address: 10.10.0.243
+  infra_kvm03_hostname: kvm03
+  infra_kvm04_control_address: 10.11.0.244
+  infra_kvm04_deploy_address: 10.10.0.244
+  infra_kvm04_hostname: kvm04
+  infra_kvm05_control_address: 10.11.0.245
+  infra_kvm05_deploy_address: 10.10.0.245
+  infra_kvm05_hostname: kvm05
+  infra_kvm06_control_address: 10.11.0.246
+  infra_kvm06_deploy_address: 10.10.0.246
+  infra_kvm06_hostname: kvm06
+  infra_primary_first_nic: ten1
+  infra_primary_second_nic: ten2
+  # internal_proxy_enabled: 'False'
+  # keycloak_enabled: 'False'
+  # kubernetes_ctl_on_kvm: 'False'
+  local_docker_images: 'True'
+  local_pipelines: 'True'
+  local_python_repo: 'True'
+  local_repo_url: ${_param:aptly_server_deploy_address}
+  # no_platform: 'False'
+  nova_vnc_tls_enabled: 'True'
+  openldap_organisation: ${_param:cluster_name}
+  # openssh_groups: ''
+  # For tempest tests which require access to instances via floating ips
+  openstack_create_public_network: 'True'
+  openstack_public_neutron_subnet_gateway: 10.16.250.1
+  openstack_public_neutron_subnet_cidr: 10.16.0.0/16
+  openstack_public_neutron_subnet_allocation_start: 10.16.250.10
+  openstack_public_neutron_subnet_allocation_end: 10.16.254.254
+
+  openstack_benchmark_node01_deploy_address: 10.10.0.95
+  openstack_benchmark_node01_address: 10.11.0.95
+  openstack_benchmark_node01_hostname: bmk01
+  # openstack_cluster_size: small
+  openstack_cluster_size: golden
+  openstack_compute_rack01_hostname: cmp
+  openstack_compute_single_address_ranges: "10.11.0.101-10.11.0.104"
+  openstack_compute_deploy_address_ranges: "10.10.0.101-10.10.0.104"
+  openstack_compute_tenant_address_ranges: "10.12.0.101-10.12.0.104"
+  openstack_compute_backend_address_ranges: "10.11.0.101-10.11.0.104"
+  openstack_control_address: 10.11.0.10
+  openstack_control_hostname: ctl
+  openstack_control_node01_address: 10.11.0.11
+  openstack_control_node01_deploy_address: 10.10.0.11
+  openstack_control_node01_hostname: ctl01
+  openstack_control_node02_address: 10.11.0.12
+  openstack_control_node02_deploy_address: 10.10.0.12
+  openstack_control_node02_hostname: ctl02
+  openstack_control_node03_address: 10.11.0.13
+  openstack_control_node03_deploy_address: 10.10.0.13
+  openstack_control_node03_hostname: ctl03
+  openstack_database_address: 10.11.0.50
+  openstack_database_hostname: dbs
+  openstack_database_node01_address: 10.11.0.51
+  openstack_database_node01_deploy_address: 10.10.0.51
+  openstack_database_node01_hostname: dbs01
+  openstack_database_node02_address: 10.11.0.52
+  openstack_database_node02_deploy_address: 10.10.0.52
+  openstack_database_node02_hostname: dbs02
+  openstack_database_node03_address: 10.11.0.53
+  openstack_database_node03_deploy_address: 10.10.0.53
+  openstack_database_node03_hostname: dbs03
+  openstack_gateway_node01_address: 10.11.0.224
+  openstack_gateway_node01_deploy_address: 10.10.0.224
+  openstack_gateway_node01_tenant_address: 10.12.0.6
+  openstack_gateway_node01_hostname: gtw01
+  openstack_gateway_node02_address: 10.11.0.225
+  openstack_gateway_node02_deploy_address: 10.10.0.225
+  openstack_gateway_node02_tenant_address: 10.12.0.7
+  openstack_gateway_node02_hostname: gtw02
+  openstack_gateway_node03_address: 10.11.0.226
+  openstack_gateway_node03_deploy_address: 10.10.0.226
+  openstack_gateway_node03_tenant_address: 10.12.0.8
+  openstack_gateway_node03_hostname: gtw03
+  openstack_message_queue_address: 10.11.0.40
+  openstack_message_queue_hostname: msg
+  openstack_message_queue_node01_address: 10.11.0.41
+  openstack_message_queue_node01_deploy_address: 10.10.0.41
+  openstack_message_queue_node01_hostname: msg01
+  openstack_message_queue_node02_address: 10.11.0.42
+  openstack_message_queue_node02_deploy_address: 10.10.0.42
+  openstack_message_queue_node02_hostname: msg02
+  openstack_message_queue_node03_address: 10.11.0.43
+  openstack_message_queue_node03_deploy_address: 10.10.0.43
+  openstack_message_queue_node03_hostname: msg03
+  openstack_barbican_address: 10.11.0.44
+  openstack_barbican_hostname: kmn
+  openstack_barbican_node01_address: 10.11.0.45
+  openstack_barbican_node01_deploy_address: 10.10.0.45
+  openstack_barbican_node01_hostname: kmn01
+  openstack_barbican_node02_address: 10.11.0.46
+  openstack_barbican_node02_deploy_address: 10.10.0.46
+  openstack_barbican_node02_hostname: kmn02
+  openstack_barbican_node03_address: 10.11.0.47
+  openstack_barbican_node03_deploy_address: 10.10.0.47
+  openstack_barbican_node03_hostname: kmn03
+  openstack_network_engine: ovs
+  # openstack_neutron_bgp_vpn_driver: bagpipe
+  openstack_neutron_l2gw: 'False'
+  openstack_neutron_bgp_vpn: 'False'
+  openstack_neutron_qos: 'True'
+  openstack_neutron_vlan_aware_vms: 'True'
+  openstack_nfv_dpdk_enabled: 'False'
+  openstack_nfv_sriov_enabled: 'False'
+  openstack_nova_compute_nfv_req_enabled: 'False'
+  # openstack_nova_compute_reserved_host_memory_mb: '900'
+  openstack_ovs_dvr_enabled: 'True'
+  openstack_ovs_encapsulation_type: vxlan
+  openstack_proxy_address: 10.11.0.80
+  # openstack_proxy_address: 10.16.250.9
+  openstack_proxy_vip_interface: ens6
+  openstack_proxy_hostname: prx
+  openstack_proxy_node01_address: 10.11.0.81
+  openstack_proxy_node01_deploy_address: 10.10.0.81
+  openstack_proxy_node01_hostname: prx01
+  openstack_proxy_node02_address: 10.11.0.82
+  openstack_proxy_node02_deploy_address: 10.10.0.82
+  openstack_proxy_node02_hostname: prx02
+  openstack_nginx_proxy_glance_image_size: 30000m
+  openstack_upgrade_node01_address: 10.11.0.19
+  openstack_upgrade_node01_deploy_address: 10.10.0.19
+  openstack_version: pike
+  ovsdb_server: 'ovsdbx:127.0.0.1:6632'
+  ironic_enabled: 'False'
+  openstack_baremetal_address: 10.11.0.5
+  openstack_baremetal_deploy_address: 10.10.0.5
+  openstack_baremetal_hostname: bmt
+  openstack_baremetal_node01_address: 10.11.0.6
+  openstack_baremetal_node02_address: 10.11.0.7
+  openstack_baremetal_node03_address: 10.11.0.8
+  openstack_baremetal_node01_deploy_address: 10.10.0.6
+  openstack_baremetal_node02_deploy_address: 10.10.0.7
+  openstack_baremetal_node03_deploy_address: 10.10.0.8
+  openstack_baremetal_node01_hostname: bmt01
+  openstack_baremetal_node02_hostname: bmt02
+  openstack_baremetal_node03_hostname: bmt03
+  openstack_baremetal_address_baremetal: 10.15.0.10
+  openstack_baremetal_node01_baremetal_address: 10.15.0.11
+  openstack_baremetal_node02_baremetal_address: 10.15.0.12
+  openstack_baremetal_node03_baremetal_address: 10.15.0.13
+  openstack_baremetal_neutron_subnet_cidr: 10.15.0.0/16
+  openstack_baremetal_neutron_subnet_allocation_start: 10.15.90.1
+  openstack_baremetal_neutron_subnet_allocation_end: 10.15.199.255
+  openstack_dns_hostname: dns
+  openstack_dns_node01_address: 10.11.0.54
+  openstack_dns_node01_deploy_address: 10.10.0.54
+  openstack_dns_node01_hostname: dns01
+  openstack_dns_node02_address: 10.11.0.55
+  openstack_dns_node02_deploy_address: 10.10.0.55
+  openstack_dns_node02_hostname: dns02
+
+  openstack_telemetry_address: 10.11.0.35
+  openstack_telemetry_hostname: mdb
+  openstack_telemetry_node01_address: 10.11.0.36
+  openstack_telemetry_node01_deploy_address: 10.10.0.36
+  # openstack_telemetry_node01_storage_address: 10.11.0.36
+  openstack_telemetry_node01_hostname: mdb01
+  openstack_telemetry_node02_address: 10.11.0.37
+  openstack_telemetry_node02_deploy_address: 10.10.0.37
+  # openstack_telemetry_node02_storage_address: 10.11.0.37
+  openstack_telemetry_node02_hostname: mdb02
+  openstack_telemetry_node03_address: 10.11.0.38
+  openstack_telemetry_node03_deploy_address: 10.10.0.38
+  # openstack_telemetry_node03_storage_address: 10.11.0.38
+  openstack_telemetry_node03_hostname: mdb03
+
+  # oss_enabled: 'False'
+  # oss_node03_address: ${_param:stacklight_monitor_node03_address}
+  # oss_webhook_app_id: '24'
+  # oss_webhook_login_id: '13'
+  # platform: openstack_enabled
+  public_host: ${_param:openstack_proxy_address}
+  publication_method: email
+  # physical_lab_setup: 'False'
+  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  salt_api_password: nlRtYRLbBuvqk3b2XJGrSp7HCcDW0Mgr
+  salt_api_password_hash: $6$IXWjGjZz$6YfimiwAzBxmb6hs1IZ2vzdslJiiwQXAN/PdlrxdxLWBlPLb57wkkFRd5wUwXoWAPfAkkZFhP8rUKE14ucQ3e1
+  salt_master_address: 10.11.0.15
+  salt_master_hostname: cfg01
+  salt_master_management_address: 10.10.0.15
+
+  # sriov_network_subnet: 10.55.0.0/16
+  stacklight_log_address: 10.11.0.60
+  stacklight_log_hostname: log
+  stacklight_log_node01_address: 10.11.0.61
+  stacklight_log_node01_deploy_address: 10.10.0.61
+  stacklight_log_node01_hostname: log01
+  stacklight_log_node02_address: 10.11.0.62
+  stacklight_log_node02_deploy_address: 10.10.0.62
+  stacklight_log_node02_hostname: log02
+  stacklight_log_node03_address: 10.11.0.63
+  stacklight_log_node03_deploy_address: 10.10.0.63
+  stacklight_log_node03_hostname: log03
+  stacklight_long_term_storage_type: prometheus
+  stacklight_monitor_address: 10.11.0.70
+  stacklight_monitor_hostname: mon
+  stacklight_monitor_node01_address: 10.11.0.71
+  stacklight_monitor_node01_deploy_address: 10.10.0.71
+  stacklight_monitor_node01_hostname: mon01
+  stacklight_monitor_node02_address: 10.11.0.72
+  stacklight_monitor_node02_deploy_address: 10.10.0.72
+  stacklight_monitor_node02_hostname: mon02
+  stacklight_monitor_node03_address: 10.11.0.73
+  stacklight_monitor_node03_deploy_address: 10.10.0.73
+  stacklight_monitor_node03_hostname: mon03
+  stacklight_telemetry_address: 10.11.0.85
+  stacklight_telemetry_hostname: mtr
+  stacklight_telemetry_node01_address: 10.11.0.86
+  stacklight_telemetry_node01_deploy_address: 10.10.0.86
+  stacklight_telemetry_node01_hostname: mtr01
+  stacklight_telemetry_node02_address: 10.11.0.87
+  stacklight_telemetry_node02_deploy_address: 10.10.0.87
+  stacklight_telemetry_node02_hostname: mtr02
+  stacklight_telemetry_node03_address: 10.11.0.88
+  stacklight_telemetry_node03_deploy_address: 10.10.0.88
+  stacklight_telemetry_node03_hostname: mtr03
+  stacklight_version: '2'
+  static_ips_on_deploy_network_enabled: 'True'
+  tenant_network_gateway: 10.12.0.1
+  tenant_network_netmask: 255.255.255.0
+  tenant_network_subnet: 10.12.0.0/16
+  tenant_vlan: '1111'
+  use_default_network_scheme: 'False'
+  vnf_onboarding_enabled: 'False'
+  designate_enabled: 'True'
+  galera_ssl_enabled: 'True'
+  openstack_mysql_x509_enabled: 'True'
+  rabbitmq_ssl_enabled: 'True'
+  openstack_rabbitmq_x509_enabled: 'True'
+  openstack_internal_protocol: 'https'
+  tenant_telemetry_enabled: 'True'
+  gnocchi_aggregation_storage: 'ceph'
+  barbican_enabled: 'True'
+  barbican_integration_enabled: 'False'
+  barbican_backend: 'dogtag'
+  openstack_octavia_enabled: 'True'
+  octavia_manager_cluster: 'False'
+  octavia_private_key: |-
+    -----BEGIN RSA PRIVATE KEY-----
+    MIIEpAIBAAKCAQEAtjnPDJsQToHBtoqIo15mdSYpfi8z6DFMi8Gbo0KCN33OUn5u
+    OctbdtjUfeuhvI6px1SCnvyWi09Ft8eWwq+KwLCGKbUxLvqKltuJ7K3LIrGXkt+m
+    qZN4O9XKeVKfZH+mQWkkxRWgX2r8RKNV3GkdNtd74VjhP+R6XSKJQ1Z8b7eHM10v
+    6IjTY/jPczjK+eyCeEj4qbSnV8eKlqLhhquuSQRmUO2DRSjLVdpdf2BB4/BdWFsD
+    YOmX7mb8kpEr9vQ+c1JKMXDwD6ehzyU8kE+1kVm5zOeEy4HdYIMpvUfN49P1anRV
+    2ISQ1ZE+r22IAMKl0tekrGH0e/1NP1DF5rINMwIDAQABAoIBAQCkP/cgpaRNHyg8
+    ISKIHs67SWqdEm73G3ijgB+JSKmW2w7dzJgN//6xYUAnP/zIuM7PnJ0gMQyBBTMS
+    NBTv5spqZLKJZYivj6Tb1Ya8jupKm0jEWlMfBo2ZYVrfgFmrfGOfEebSvmuPlh9M
+    vuzlftmWVSSUOkjODmM9D6QpzgrbpktBuA/WpX+6esMTwJpOcQ5xZWEnHXnVzuTc
+    SncodVweE4gz6F1qorbqIJz8UAUQ5T0OZTdHzIS1IbamACHWaxQfixAO2s4+BoUK
+    ANGGZWkfneCxx7lthvY8DiKn7M5cSRnqFyDToGqaLezdkMNlGC7v3U11FF5blSEW
+    fL1o/HwBAoGBAOavhTr8eqezTchqZvarorFIq7HFWk/l0vguIotu6/wlh1V/KdF+
+    aLLHgPgJ5j+RrCMvTBoKqMeeHfVGrS2udEy8L1mK6b3meG+tMxU05OA55abmhYn7
+    7vF0q8XJmYIHIXmuCgF90R8Piscb0eaMlmHW9unKTKo8EOs5j+D8+AMJAoGBAMo4
+    8WW+D3XiD7fsymsfXalf7VpAt/H834QTbNZJweUWhg11eLutyahyyfjjHV200nNZ
+    cnU09DWKpBbLg7d1pyT69CNLXpNnxuWCt8oiUjhWCUpNqVm2nDJbUdlRFTzYb2fS
+    ZC4r0oQaPD5kMLSipjcwzMWe0PniySxNvKXKInFbAoGBAKxW2qD7uKKKuQSOQUft
+    aAksMmEIAHWKTDdvOA2VG6XvX5DHBLXmy08s7rPfqW06ZjCPCDq4Velzvgvc9koX
+    d/lP6cvqlL9za+x6p5wjPQ4rEt/CfmdcmOE4eY+1EgLrUt314LHGjjG3ScWAiirE
+    QyDrGOIGaYoQf89L3KqIMr0JAoGARYAklw8nSSCUvmXHe+Gf0yKA9M/haG28dCwo
+    780RsqZ3FBEXmYk1EYvCFqQX56jJ25MWX2n/tJcdpifz8Q2ikHcfiTHSI187YI34
+    lKQPFgWb08m1NnwoWrY//yx63BqWz1vjymqNQ5GwutC8XJi5/6Xp+tGGiRuEgJGH
+    EIPUKpkCgYAjBIVMkpNiLCREZ6b+qjrPV96ed3iTUt7TqP7yGlFI/OkORFS38xqC
+    hBP6Fk8iNWuOWQD+ohM/vMMnvIhk5jwlcwn+kF0ra04gi5KBFWSh/ddWMJxUtPC1
+    2htvlEc6zQAR6QfqXHmwhg1hP81JcpqpicQzCMhkzLoR1DC6stXdLg==
+    -----END RSA PRIVATE KEY-----
+  octavia_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC2Oc8MmxBOgcG2ioijXmZ1Jil+LzPoMUyLwZujQoI3fc5Sfm45y1t22NR966G8jqnHVIKe/JaLT0W3x5bCr4rAsIYptTEu+oqW24nsrcsisZeS36apk3g71cp5Up9kf6ZBaSTFFaBfavxEo1XcaR0213vhWOE/5HpdIolDVnxvt4czXS/oiNNj+M9zOMr57IJ4SPiptKdXx4qWouGGq65JBGZQ7YNFKMtV2l1/YEHj8F1YWwNg6ZfuZvySkSv29D5zUkoxcPAPp6HPJTyQT7WRWbnM54TLgd1ggym9R83j0/VqdFXYhJDVkT6vbYgAwqXS16SsYfR7/U0/UMXmsg0z
+
+
+## MAAS BEGIN
+  maas_deploy_address: 10.10.0.15
+  maas_deploy_network_name: deploy_network
+  maas_deploy_range_end: 10.10.254.255
+  maas_deploy_range_start: 10.10.254.1
+  maas_deploy_vlan: '0'
+  maas_fabric_name: deploy_fabric
+  maas_hostname: cfg01
+  maas_machines: |
+    kvm04:
+      distro_series: "xenial"
+      # hwe_kernel: "hwe-16.04"
+      # pxe_interface_mac:
+      pxe_interface_mac: "0c:c4:7a:33:1f:e4"
+      interfaces:
+        one1:
+          mac: "0c:c4:7a:33:1f:e4"
+          mode: "static"
+          ip: ${_param:infra_kvm_node04_deploy_address}
+          subnet: "10.10.0.0/16"
+          gateway: ${_param:deploy_network_gateway}
+          name: one1
+        one2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:33:1f:e5"
+          name: one2
+        ten1:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:1e:3e:ea"
+          name: ten1
+        ten2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:1e:3e:eb"
+          name: ten2
+      power_parameters:
+        power_user: ${_param:ipmi_user}
+        power_pass: ${_param:ipmi_password}
+        power_address: ${_param:ipmi_kvm04_ip}
+        power_type: ipmi
+      disk_layout:
+        type: custom
+        bootable_device: sda
+        disk:
+          vgroot:
+            devices:
+              - sda
+            type: lvm
+            volume:
+              root:
+                mount: /
+                size: 1T
+                type: ext4
+
+    kvm05:
+      distro_series: "xenial"
+      # hwe_kernel: "hwe-16.04"
+      pxe_interface_mac: "0c:c4:7a:33:20:fc"
+      interfaces:
+        one1:
+          mac: "0c:c4:7a:33:20:fc"
+          mode: "static"
+          ip: ${_param:infra_kvm_node05_deploy_address}
+          subnet: "10.10.0.0/16"
+          gateway: ${_param:deploy_network_gateway}
+          name: one1
+        one2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:33:20:fd"
+          name: one2
+        ten1:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:1e:3e:e6"
+          name: ten1
+        ten2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:1e:3e:e7"
+          name: ten2
+      power_parameters:
+        power_user: ${_param:ipmi_user}
+        power_pass: ${_param:ipmi_password}
+        power_address: ${_param:ipmi_kvm05_ip}
+        power_type: ipmi
+      disk_layout:
+        type: custom
+        bootable_device: sda
+        disk:
+          vgroot:
+            devices:
+              - sda
+            type: lvm
+            volume:
+              root:
+                mount: /
+                size: 1T
+                type: ext4
+
+    kvm06:
+      distro_series: "xenial"
+      # hwe_kernel: "hwe-16.04"
+      pxe_interface_mac: "0c:c4:7a:31:fb:b6"
+      interfaces:
+        one1:
+          mac: "0c:c4:7a:31:fb:b6"
+          mode: "static"
+          ip: ${_param:infra_kvm_node06_deploy_address}
+          subnet: "10.10.0.0/16"
+          gateway: ${_param:deploy_network_gateway}
+          name: one1
+        one2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:31:fb:b7"
+          name: one2
+        ten1:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:1e:3e:fa"
+          name: ten1
+        ten2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:1e:3e:fb"
+          name: ten2
+      power_parameters:
+        power_user: ${_param:ipmi_user}
+        power_pass: ${_param:ipmi_password}
+        power_address: ${_param:ipmi_kvm06_ip}
+        power_type: ipmi
+      disk_layout:
+        type: custom
+        bootable_device: sda
+        disk:
+          vgroot:
+            devices:
+              - sda
+            type: lvm
+            volume:
+              root:
+                mount: /
+                size: 1T
+                type: ext4
+
+    kvm01:
+      distro_series: "xenial"
+      # hwe_kernel: "hwe-16.04"
+      pxe_interface_mac: "0c:c4:7a:31:f0:12"
+      interfaces:
+        one1:
+          mac: "0c:c4:7a:31:f0:12"
+          mode: "static"
+          ip: ${_param:infra_kvm_node01_deploy_address}
+          subnet: "10.10.0.0/16"
+          gateway: ${_param:deploy_network_gateway}
+          name: one1
+        one2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:31:f0:13"
+          name: one2
+        fourty1:
+          mode: UNCONFIGURED
+          mac: "00:02:c9:44:82:70"
+          name: ten1
+        fourty2:
+          mode: UNCONFIGURED
+          mac: "00:02:c9:44:82:71"
+          name: ten2
+      power_parameters:
+        power_user: ${_param:ipmi_user}
+        power_pass: ${_param:ipmi_password}
+        power_address: ${_param:ipmi_kvm01_ip}
+        power_type: ipmi
+      disk_layout:
+        type: custom
+        bootable_device: sda
+        disk:
+          vgroot:
+            devices:
+              - sda
+            type: lvm
+            volume:
+              root:
+                mount: /
+                size: 120G
+                type: ext4
+
+    kvm02:
+      distro_series: "xenial"
+      # hwe_kernel: "hwe-16.04"
+      pxe_interface_mac: "0c:c4:7a:31:ef:bc"
+      interfaces:
+        one1:
+          mac: "0c:c4:7a:31:ef:bc"
+          mode: "static"
+          ip: ${_param:infra_kvm_node02_deploy_address}
+          subnet: "10.10.0.0/16"
+          gateway: ${_param:deploy_network_gateway}
+          name: one1
+        one2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:31:ef:bd"
+          name: one2
+        fourty1:
+          mode: UNCONFIGURED
+          mac: "00:02:c9:44:81:40"
+          name: ten1
+        fourty2:
+          mode: UNCONFIGURED
+          mac: "00:02:c9:44:81:41"
+          name: ten2
+      power_parameters:
+        power_user: ${_param:ipmi_user}
+        power_pass: ${_param:ipmi_password}
+        power_address: ${_param:ipmi_kvm02_ip}
+        power_type: ipmi
+      disk_layout:
+        type: custom
+        bootable_device: sda
+        disk:
+          vgroot:
+            devices:
+              - sda
+            type: lvm
+            volume:
+              root:
+                mount: /
+                size: 120G
+                type: ext4
+
+    kvm03:
+      distro_series: "xenial"
+      # hwe_kernel: "hwe-16.04"
+      pxe_interface_mac: "0c:c4:7a:31:ef:aa"
+      interfaces:
+        one1:
+          mac: "0c:c4:7a:31:ef:aa"
+          mode: "static"
+          ip: ${_param:infra_kvm_node03_deploy_address}
+          subnet: "10.10.0.0/16"
+          gateway: ${_param:deploy_network_gateway}
+          name: one1
+        one2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:31:ef:ab"
+          name: one2
+        fourty1:
+          mode: UNCONFIGURED
+          mac: "e4:1d:2d:72:23:b1"
+          name: ten1
+        fourty2:
+          mode: UNCONFIGURED
+          mac: "e4:1d:2d:72:23:b2"
+          name: ten2
+      power_parameters:
+        power_user: ${_param:ipmi_user}
+        power_pass: ${_param:ipmi_password}
+        power_address: ${_param:ipmi_kvm03_ip}
+        power_type: ipmi
+      disk_layout:
+        type: custom
+        bootable_device: sda
+        disk:
+          vgroot:
+            devices:
+              - sda
+            type: lvm
+            volume:
+              root:
+                mount: /
+                size: 120G
+                type: ext4
+
+    cmp001:
+      distro_series: "xenial"
+      # hwe_kernel: "hwe-16.04"
+      pxe_interface_mac: "0c:c4:7a:6d:33:12"
+      interfaces:
+        one1:
+          mac: "0c:c4:7a:6d:33:12"
+          mode: "static"
+          ip: 10.10.0.101
+          subnet: "10.10.0.0/16"
+          gateway: ${_param:deploy_network_gateway}
+          name: one1
+        one2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:6d:33:13"
+          name: one2
+        ten1:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:92:8b:bc"
+          name: ten1
+        ten2:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:92:8b:bd"
+          name: ten2
+        ten3:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:89:65:bc"
+          name: ten3
+        ten4:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:89:65:bd"
+          name: ten4
+      power_parameters:
+        power_user: ${_param:ipmi_user}
+        power_pass: ${_param:ipmi_password}
+        power_address: ${_param:ipmi_cmp001_ip}
+        power_type: ipmi
+      disk_layout:
+        type: custom
+        bootable_device: sdc
+        disk:
+          vgroot:
+            devices:
+              - sdc
+            type: lvm
+            volume:
+              root:
+                mount: /
+                size: 240G
+                type: ext4
+
+    cmp002:
+      distro_series: "xenial"
+      # hwe_kernel: "hwe-16.04"
+      pxe_interface_mac: "0c:c4:7a:aa:cb:6a"
+      interfaces:
+        one1:
+          mac: "0c:c4:7a:aa:cb:6a"
+          mode: "static"
+          ip: 10.10.0.102
+          subnet: "10.10.0.0/16"
+          gateway: ${_param:deploy_network_gateway}
+          name: one1
+        one2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:aa:cb:6b"
+          name: one2
+        ten1:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:58:41:d0"
+          name: ten1
+        ten2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:58:41:d1"
+          name: ten2
+        ten3:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:1f:00:18"
+          name: ten3
+        ten4:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:1f:00:19"
+          name: ten4
+      power_parameters:
+        power_user: ${_param:ipmi_user}
+        power_pass: ${_param:ipmi_password}
+        power_address: ${_param:ipmi_cmp002_ip}
+        power_type: ipmi
+      disk_layout:
+        type: custom
+        bootable_device: sda
+        disk:
+          vgroot:
+            devices:
+              - sda
+            type: lvm
+            volume:
+              root:
+                mount: /
+                size: 240G
+                type: ext4
+
+    cmp003:
+      distro_series: "xenial"
+      # hwe_kernel: "hwe-16.04"
+      pxe_interface_mac: "0c:c4:7a:aa:91:5a"
+      interfaces:
+        one1:
+          mac: "0c:c4:7a:aa:91:5a"
+          mode: "static"
+          ip: 10.10.0.103
+          subnet: "10.10.0.0/16"
+          gateway: ${_param:deploy_network_gateway}
+          name: one1
+        one2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:aa:91:5b"
+          name: one2
+        ten1:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:97:e5:9c"
+          name: ten1
+        ten2:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:97:e5:9d"
+          name: ten2
+        ten3:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:a4:70:7c"
+          name: ten3
+        ten4:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:a4:70:7d"
+          name: ten4
+      power_parameters:
+        power_user: ${_param:ipmi_user}
+        power_pass: ${_param:ipmi_password}
+        power_address: ${_param:ipmi_cmp003_ip}
+        power_type: ipmi
+      disk_layout:
+        type: custom
+        bootable_device: sdc
+        disk:
+          vgroot:
+            devices:
+              - sdc
+            type: lvm
+            volume:
+              root:
+                mount: /
+                size: 240G
+                type: ext4
+
+    cmp004:
+      distro_series: "xenial"
+      # hwe_kernel: "hwe-16.04"
+      pxe_interface_mac: "0c:c4:7a:aa:cb:5a"
+      interfaces:
+        one1:
+          mac: "0c:c4:7a:aa:cb:5a"
+          mode: "static"
+          ip: 10.10.0.104
+          subnet: "10.10.0.0/16"
+          gateway: ${_param:deploy_network_gateway}
+          name: one1
+        one2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:aa:cb:5b"
+          name: one2
+        ten1:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:87:17:90"
+          name: ten1
+        ten2:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:87:17:91"
+          name: ten2
+        ten3:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:89:45:74"
+          name: ten3
+        ten4:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:89:45:75"
+          name: ten4
+      power_parameters:
+        power_user: ${_param:ipmi_user}
+        power_pass: ${_param:ipmi_password}
+        power_address: ${_param:ipmi_cmp004_ip}
+        power_type: ipmi
+      disk_layout:
+        type: custom
+        bootable_device: sdc
+        disk:
+          vgroot:
+            devices:
+              - sdc
+            type: lvm
+            volume:
+              root:
+                mount: /
+                size: 240G
+                type: ext4
+
+    # OSD Nodes
+    osd001:
+      distro_series: "xenial"
+      # hwe_kernel: "hwe-16.04"
+      pxe_interface_mac: "0c:c4:7a:aa:cb:7a"
+      interfaces:
+        one1:
+          mac: "0c:c4:7a:aa:cb:7a"
+          mode: "static"
+          ip: 10.10.0.201
+          subnet: "10.10.0.0/16"
+          gateway: ${_param:deploy_network_gateway}
+          name: one1
+        one2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:aa:cb:7b"
+          name: one2
+        ten1:
+          mode: UNCONFIGURED
+          mac: "90:e2:ba:1a:6c:98"
+          name: ten1
+        ten2:
+          mode: UNCONFIGURED
+          mac: "90:e2:ba:1a:6c:99"
+          name: ten2
+        ten3:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:97:df:84"
+          name: ten3
+        ten4:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:97:df:85"
+          name: ten4
+      power_parameters:
+        power_user: ${_param:ipmi_user}
+        power_pass: ${_param:ipmi_password}
+        power_address: ${_param:ipmi_osd001_ip}
+        power_type: ipmi
+      disk_layout:
+        type: custom
+        bootable_device: sda
+        disk:
+          vgroot:
+            devices:
+              - sda
+            type: lvm
+            volume:
+              root:
+                mount: /
+                size: 240G
+                type: ext4
+
+    osd002:
+      distro_series: "xenial"
+      # hwe_kernel: "hwe-16.04"
+      pxe_interface_mac: "0c:c4:7a:6d:2e:1e"
+      interfaces:
+        one1:
+          mac: "0c:c4:7a:6d:2e:1e"
+          mode: "static"
+          ip: 10.10.0.202
+          subnet: "10.10.0.0/16"
+          gateway: ${_param:deploy_network_gateway}
+          name: one1
+        one2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:6d:2e:1f"
+          name: one2
+        ten1:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:8d:6d:20"
+          name: ten1
+        ten2:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:8d:6d:21"
+          name: ten2
+        ten3:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:8d:70:e8"
+          name: ten3
+        ten4:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:8d:70:e9"
+          name: ten4
+      power_parameters:
+        power_user: ${_param:ipmi_user}
+        power_pass: ${_param:ipmi_password}
+        power_address: ${_param:ipmi_osd002_ip}
+        power_type: ipmi
+      disk_layout:
+        type: custom
+        bootable_device: sdc
+        disk:
+          vgroot:
+            devices:
+              - sdc
+            type: lvm
+            volume:
+              root:
+                mount: /
+                size: 240G
+                type: ext4
+
+    osd003:
+      distro_series: "xenial"
+      # hwe_kernel: "hwe-16.04"
+      pxe_interface_mac: "0c:c4:7a:aa:d1:10"
+      interfaces:
+        one1:
+          mac: "0c:c4:7a:aa:d1:10"
+          mode: "static"
+          ip: 10.10.0.203
+          subnet: "10.10.0.0/16"
+          gateway: ${_param:deploy_network_gateway}
+          name: one1
+        one2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:aa:d1:11"
+          name: one2
+        ten1:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:8a:c1:90"
+          name: ten1
+        ten2:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:8a:c1:91"
+          name: ten2
+        ten3:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:92:a3:10"
+          name: ten3
+        ten4:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:92:a3:11"
+          name: ten4
+      power_parameters:
+        power_user: ${_param:ipmi_user}
+        power_pass: ${_param:ipmi_password}
+        power_address: ${_param:ipmi_osd003_ip}
+        power_type: ipmi
+      disk_layout:
+        type: custom
+        bootable_device: sdc
+        disk:
+          vgroot:
+            devices:
+              - sdc
+            type: lvm
+            volume:
+              root:
+                mount: /
+                size: 240G
+                type: ext4
+
+    # GTW Nodes
+    gtw01:
+      distro_series: "xenial"
+      # hwe_kernel: "hwe-16.04"
+      pxe_interface_mac: "0c:c4:7a:aa:d6:76"
+      interfaces:
+        one1:
+          mac: "0c:c4:7a:aa:d6:76"
+          mode: "static"
+          ip: 10.10.0.224
+          subnet: "10.10.0.0/16"
+          gateway: ${_param:deploy_network_gateway}
+          name: one1
+        one2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:aa:d6:77"
+          name: one2
+        ten1:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:7c:5d:90"
+          name: ten1
+        ten2:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:7c:5d:91"
+          name: ten2
+        ten3:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:8d:3e:e8"
+          name: ten3
+        ten4:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:8d:3e:e9"
+          name: ten4
+      power_parameters:
+        power_user: ${_param:ipmi_user}
+        power_pass: ${_param:ipmi_password}
+        power_address: ${_param:ipmi_gtw01_ip}
+        power_type: ipmi
+      disk_layout:
+        type: custom
+        bootable_device: sda
+        disk:
+          vgroot:
+            devices:
+              - sda
+            type: lvm
+            volume:
+              root:
+                mount: /
+                size: 1T
+                type: ext4
+
+    gtw02:
+      distro_series: "xenial"
+      # hwe_kernel: "hwe-16.04"
+      pxe_interface_mac: "0c:c4:7a:aa:c9:64"
+      interfaces:
+        one1:
+          mac: "0c:c4:7a:aa:c9:64"
+          mode: "static"
+          ip: 10.10.0.225
+          subnet: "10.10.0.0/16"
+          gateway: ${_param:deploy_network_gateway}
+          name: one1
+        one2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:aa:c9:65"
+          name: one2
+        ten1:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:7c:a8:4c"
+          name: ten1
+        ten2:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:7c:a8:4d"
+          name: ten2
+        ten3:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:8d:13:b8"
+          name: ten3
+        ten4:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:8d:13:b9"
+          name: ten4
+      power_parameters:
+        power_user: ${_param:ipmi_user}
+        power_pass: ${_param:ipmi_password}
+        power_address: ${_param:ipmi_gtw02_ip}
+        power_type: ipmi
+      disk_layout:
+        type: custom
+        bootable_device: sdc
+        disk:
+          vgroot:
+            devices:
+              - sdc
+            type: lvm
+            volume:
+              root:
+                mount: /
+                size: 1T
+                type: ext4
+
+    gtw03:
+      distro_series: "xenial"
+      # hwe_kernel: "hwe-16.04"
+      pxe_interface_mac: "0c:c4:7a:aa:cb:78"
+      interfaces:
+        one1:
+          mac: "0c:c4:7a:aa:cb:78"
+          mode: "static"
+          ip: 10.10.0.226
+          subnet: "10.10.0.0/16"
+          gateway: ${_param:deploy_network_gateway}
+          name: one1
+        one2:
+          mode: UNCONFIGURED
+          mac: "0c:c4:7a:aa:cb:79"
+          name: one2
+        ten1:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:8a:80:48"
+          name: ten1
+        ten2:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:8a:80:49"
+          name: ten2
+        ten3:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:89:6e:40"
+          name: ten3
+        ten4:
+          mode: UNCONFIGURED
+          mac: "00:1b:21:89:6e:41"
+          name: ten4
+      power_parameters:
+        power_user: ${_param:ipmi_user}
+        power_pass: ${_param:ipmi_password}
+        power_address: ${_param:ipmi_gtw03_ip}
+        power_type: ipmi
+      disk_layout:
+        bootable_device: sdc
+        type: custom
+        disk:
+          vgroot:
+            devices:
+              - sdc
+            type: lvm
+            volume:
+              root:
+                mount: /
+                size: 1T
+                type: ext4
+## MAAS END
+
+
+
+
+nodes:
+  # cfg01.mcp-offline-vxlan.local:
+  #   reclass_storage_name: infra_config_node01
+  #   classes:
+  #   - cluster.${_param:cluster_name}.infra.config
+  #   - cluster.${_param:cluster_name}.infra.networking.ens5_config
+  #   roles:
+  #   - infra_config
+  #   - linux_system_codename_xenial
+  #   interfaces:
+  #     ens3:
+  #       role: single_mgm
+  #       deploy_address: 10.10.0.15
+  #       deploy_network_netmask: 255.255.0.0
+  #       deploy_network_gateway: 10.10.0.1
+  #     ens4:
+  #       role: single_ctl
+  #       single_address: 10.11.0.15
+
+  apt.mcp-offline-vxlan.local:
+    reclass_storage_name: aptly_server_node01
+    roles:
+    - linux_system_codename_xenial
+    classes:
+    - cluster.${_param:cluster_name}.infra
+    interfaces:
+      ens3:
+        role: single_mgm
+        deploy_address: 10.10.0.14
+        deploy_network_netmask: 255.255.0.0
+        deploy_network_gateway: 10.10.0.1
+
+  kvm01.mcp-offline-vxlan.local:
+    reclass_storage_name: infra_kvm_node01
+    roles:
+    - infra_kvm
+    - linux_system_codename_xenial
+    # - features_vcp_gateway
+    interfaces:
+      one1:
+        role: single_mgm
+        deploy_address: 10.10.0.241
+        deploy_network_netmask: 255.255.0.0
+        deploy_network_gateway: 10.10.0.1
+      ten1:
+        role: bond0_ab_nondvr_vxlan_ctl_mesh_raw
+        single_address: 10.11.0.241
+        tenant_address: 10.12.0.241
+      ten2:
+        role: bond0_ab_nondvr_vxlan_ctl_mesh_raw
+        single_address: 10.11.0.241
+        tenant_address: 10.12.0.241
+
+  kvm02.mcp-offline-vxlan.local:
+    reclass_storage_name: infra_kvm_node02
+    roles:
+    - infra_kvm
+    - linux_system_codename_xenial
+    # - features_vcp_gateway
+    interfaces:
+      one1:
+        role: single_mgm
+        deploy_address: 10.10.0.242
+        deploy_network_netmask: 255.255.0.0
+        deploy_network_gateway: 10.10.0.1
+      ten1:
+        role: bond0_ab_nondvr_vxlan_ctl_mesh_raw
+        single_address: 10.11.0.242
+        tenant_address: 10.12.0.242
+      ten2:
+        role: bond0_ab_nondvr_vxlan_ctl_mesh_raw
+        single_address: 10.11.0.242
+        tenant_address: 10.12.0.242
+
+  kvm03.mcp-offline-vxlan.local:
+    reclass_storage_name: infra_kvm_node03
+    roles:
+    - infra_kvm
+    - linux_system_codename_xenial
+    # - features_vcp_gateway
+    interfaces:
+      one1:
+        role: single_mgm
+        deploy_address: 10.10.0.243
+        deploy_network_netmask: 255.255.0.0
+        deploy_network_gateway: 10.10.0.1
+      ten1:
+        role: bond0_ab_nondvr_vxlan_ctl_mesh_raw
+        single_address: 10.11.0.243
+        tenant_address: 10.12.0.243
+      ten2:
+        role: bond0_ab_nondvr_vxlan_ctl_mesh_raw
+        single_address: 10.11.0.243
+        tenant_address: 10.12.0.243
+
+  kvm04.mcp-offline-vxlan.local:
+    reclass_storage_name: infra_kvm_node04
+    roles:
+    - infra_kvm_wo_gluster
+    - linux_system_codename_xenial
+    - salt_master_host
+    # - features_vcp_gateway
+    interfaces:
+      one1:
+        role: single_mgm
+        deploy_address: 10.10.0.244
+        deploy_network_netmask: 255.255.0.0
+        deploy_network_gateway: 10.10.0.1
+      ten1:
+        role: bond0_ab_nondvr_vxlan_ctl_mesh_raw
+        single_address: 10.11.0.244
+        tenant_address: 10.12.0.244
+      ten2:
+        role: bond0_ab_nondvr_vxlan_ctl_mesh_raw
+        single_address: 10.11.0.244
+        tenant_address: 10.12.0.244
+
+  kvm05.mcp-offline-vxlan.local:
+    reclass_storage_name: infra_kvm_node05
+    roles:
+    - infra_kvm_wo_gluster
+    - linux_system_codename_xenial
+    - salt_master_host
+    # - features_vcp_gateway
+    interfaces:
+      one1:
+        role: single_mgm
+        deploy_address: 10.10.0.245
+        deploy_network_netmask: 255.255.0.0
+        deploy_network_gateway: 10.10.0.1
+      ten1:
+        role: bond0_ab_nondvr_vxlan_ctl_mesh_raw
+        single_address: 10.11.0.245
+        tenant_address: 10.12.0.245
+      ten2:
+        role: bond0_ab_nondvr_vxlan_ctl_mesh_raw
+        single_address: 10.11.0.245
+        tenant_address: 10.12.0.245
+
+  kvm06.mcp-offline-vxlan.local:
+    reclass_storage_name: infra_kvm_node06
+    roles:
+    - infra_kvm_wo_gluster
+    - linux_system_codename_xenial
+    - salt_master_host
+    # - features_vcp_gateway
+    interfaces:
+      one1:
+        role: single_mgm
+        deploy_address: 10.10.0.246
+        deploy_network_netmask: 255.255.0.0
+        deploy_network_gateway: 10.10.0.1
+      ten1:
+        role: bond0_ab_nondvr_vxlan_ctl_mesh_raw
+        single_address: 10.11.0.246
+        tenant_address: 10.12.0.246
+      ten2:
+        role: bond0_ab_nondvr_vxlan_ctl_mesh_raw
+        single_address: 10.11.0.246
+        tenant_address: 10.12.0.246
+
+  cmp<<count>>.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_compute_rack01
+    roles:
+    - openstack_compute
+    - linux_system_codename_xenial
+    interfaces:
+      one1:
+        role: single_mgm
+      ten1:
+        role: bond0_ab_dvr_vxlan_ctl_mesh_floating
+        # role: bond0_ab_dvr_vxlan_ctl_mesh_external_vlan
+        # external_vlan: 1112
+        # external_address: 10.16.0.224
+        # external_network_netmask: 255.255.0.0
+      ten2:
+        role: bond0_ab_dvr_vxlan_ctl_mesh_floating
+        # role: bond0_ab_dvr_vxlan_ctl_mesh_external_vlan
+        # external_vlan: 1112
+
+  osd<<count>>.mcp-offline-vxlan.local:
+    reclass_storage_name: ceph_osd_rack01
+    roles:
+    - ceph_osd
+    - linux_system_codename_xenial
+    interfaces:
+      one1:
+        role: single_mgm
+      ten1:
+        role: bond0_ab_vlan_ceph_public_backend
+      ten2:
+        role: bond0_ab_vlan_ceph_public_backend
+
+  gtw01.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_gateway_node01
+    roles:
+    - openstack_gateway_octavia
+    - linux_system_codename_xenial
+    interfaces:
+      one1:
+        role: single_mgm
+        deploy_address: 10.10.0.224
+        deploy_network_netmask: 255.255.0.0
+        deploy_network_gateway: 10.10.0.1
+      ten1:
+        role: bond0_ab_dvr_vxlan_ctl_mesh_external_vlan
+        # role: bond0_ab_dvr_vxlan_ctl_mesh_external
+        # role: bond0_ab_dvr_vxlan_ctl_mesh_floating
+        single_address: 10.11.0.224
+        tenant_address: 10.12.0.224
+        external_address: 10.16.0.224
+        external_network_netmask: 255.255.0.0
+        external_vlan: 1112
+      ten2:
+        role: bond0_ab_dvr_vxlan_ctl_mesh_external_vlan
+        # role: bond0_ab_dvr_vxlan_ctl_mesh_external
+        # role: bond0_ab_dvr_vxlan_ctl_mesh_floating
+        single_address: 10.11.0.224
+        tenant_address: 10.12.0.224
+        external_address: 10.16.0.224
+        external_network_netmask: 255.255.0.0
+        external_vlan: 1112
+
+  gtw02.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_gateway_node02
+    roles:
+    - openstack_gateway
+    - linux_system_codename_xenial
+    interfaces:
+      one1:
+        role: single_mgm
+        deploy_address: 10.10.0.225
+        deploy_network_netmask: 255.255.0.0
+        deploy_network_gateway: 10.10.0.1
+      ten1:
+        role: bond0_ab_dvr_vxlan_ctl_mesh_external_vlan
+        # role: bond0_ab_dvr_vxlan_ctl_mesh_floating
+        # role: bond0_ab_dvr_vxlan_ctl_mesh_external
+        single_address: 10.11.0.225
+        tenant_address: 10.12.0.225
+        external_address: 10.16.0.225
+        external_network_netmask: 255.255.0.0
+        external_vlan: 1112
+      ten2:
+        role: bond0_ab_dvr_vxlan_ctl_mesh_external_vlan
+        # role: bond0_ab_dvr_vxlan_ctl_mesh_floating
+        # role: bond0_ab_dvr_vxlan_ctl_mesh_external
+        single_address: 10.11.0.225
+        tenant_address: 10.12.0.225
+        external_address: 10.16.0.225
+        external_network_netmask: 255.255.0.0
+        external_vlan: 1112
+
+  gtw03.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_gateway_node03
+    roles:
+    - openstack_gateway
+    - linux_system_codename_xenial
+    interfaces:
+      one1:
+        role: single_mgm
+        deploy_address: 10.10.0.226
+        deploy_network_netmask: 255.255.0.0
+        deploy_network_gateway: 10.10.0.1
+      ten1:
+        role: bond0_ab_dvr_vxlan_ctl_mesh_external_vlan
+        # role: bond0_ab_dvr_vxlan_ctl_mesh_floating
+        # role: bond0_ab_dvr_vxlan_ctl_mesh_external
+        single_address: 10.11.0.226
+        tenant_address: 10.12.0.226
+        external_address: 10.16.0.226
+        external_network_netmask: 255.255.0.0
+        external_vlan: 1112
+      ten2:
+        role: bond0_ab_dvr_vxlan_ctl_mesh_external_vlan
+        # role: bond0_ab_dvr_vxlan_ctl_mesh_floating
+        # role: bond0_ab_dvr_vxlan_ctl_mesh_external
+        single_address: 10.11.0.226
+        tenant_address: 10.12.0.226
+        external_address: 10.16.0.226
+        external_network_netmask: 255.255.0.0
+        external_vlan: 1112
+
+
+  ######
+  ######
+  #
+  # VCP Nodes
+  #
+  ######
+  ######
+
+  ctl01.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_control_node01
+    roles:
+    - openstack_control_leader
+    - linux_system_codename_xenial
+    - features_control_external_vlan
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+      # ens2:
+        # deploy_address: 10.10.0.11
+        # deploy_network_netmask: 255.255.0.0
+        # deploy_network_gateway: 10.10.0.1
+        # keepalived_vip_priority: 103
+
+  ctl02.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_control_node02
+    roles:
+    - openstack_control
+    - linux_system_codename_xenial
+    - features_control_external_vlan
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+      # ens2:
+        # deploy_address: 10.10.0.12
+        # deploy_network_netmask: 255.255.0.0
+        # deploy_network_gateway: 10.10.0.1
+        # keepalived_vip_priority: 102
+
+  ctl03.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_control_node03
+    roles:
+    - openstack_control
+    - linux_system_codename_xenial
+    - features_control_external_vlan
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+      # ens2:
+        # deploy_address: 10.10.0.13
+        # deploy_network_netmask: 255.255.0.0
+        # deploy_network_gateway: 10.10.0.1
+        # keepalived_vip_priority: 101
+
+  # gtw01.mcp-offline-vxlan.local:
+  #   reclass_storage_name: openstack_gateway_node01
+  #   roles:
+  #   - openstack_gateway
+  #   - linux_system_codename_xenial
+  #   interfaces:
+  #     ens2:
+  #       role: single_mgm
+  #     ens3:
+  #       role: single_ctl
+  #     ens4:
+  #       role: bond0_ab_ovs_vxlan_mesh
+  #     ens5:
+  #       role: bond0_ab_ovs_vxlan_mesh
+
+  # gtw02.mcp-offline-vxlan.local:
+  #   reclass_storage_name: openstack_gateway_node02
+  #   roles:
+  #   - openstack_gateway
+  #   - linux_system_codename_xenial
+  #   interfaces:
+  #     ens2:
+  #       role: single_mgm
+  #     ens3:
+  #       role: single_ctl
+  #     ens4:
+  #       role: bond0_ab_ovs_vxlan_mesh
+  #     ens5:
+  #       role: bond0_ab_ovs_vxlan_mesh
+
+  # gtw03.mcp-offline-vxlan.local:
+  #   reclass_storage_name: openstack_gateway_node03
+  #   roles:
+  #   - openstack_gateway
+  #   - linux_system_codename_xenial
+  #   interfaces:
+  #     ens2:
+  #       role: single_mgm
+  #     ens3:
+  #       role: single_ctl
+  #     ens4:
+  #       role: bond0_ab_ovs_vxlan_mesh
+  #     ens5:
+  #       role: bond0_ab_ovs_vxlan_mesh
+
+  cid01.mcp-offline-vxlan.local:
+    reclass_storage_name: cicd_control_node01
+    roles:
+    - cicd_control_leader
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+      # ens2:
+        # deploy_address: 10.10.0.91
+        # deploy_network_netmask: 255.255.0.0
+        # deploy_network_gateway: 10.10.0.1
+
+  cid02.mcp-offline-vxlan.local:
+    reclass_storage_name: cicd_control_node02
+    roles:
+    - cicd_control_manager
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+      # ens2:
+        # deploy_address: 10.10.0.92
+        # deploy_network_netmask: 255.255.0.0
+        # deploy_network_gateway: 10.10.0.1
+
+  cid03.mcp-offline-vxlan.local:
+    reclass_storage_name: cicd_control_node03
+    roles:
+    - cicd_control_manager
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+      # ens2:
+        # deploy_address: 10.10.0.93
+        # deploy_network_netmask: 255.255.0.0
+        # deploy_network_gateway: 10.10.0.1
+
+  dbs01.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_database_node01
+    roles:
+    - openstack_database_leader
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+      # ens2:
+        # deploy_address: 10.10.0.51
+        # deploy_network_netmask: 255.255.0.0
+        # deploy_network_gateway: 10.10.0.1
+
+  dbs02.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_database_node02
+    roles:
+    - openstack_database
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+      # ens2:
+        # deploy_address: 10.10.0.52
+        # deploy_network_netmask: 255.255.0.0
+        # deploy_network_gateway: 10.10.0.1
+
+  dbs03.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_database_node03
+    roles:
+    - openstack_database
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+      # ens2:
+        # deploy_address: 10.10.0.53
+        # deploy_network_netmask: 255.255.0.0
+        # deploy_network_gateway: 10.10.0.1
+
+  msg01.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_message_queue_node01
+    roles:
+    - openstack_message_queue
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  msg02.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_message_queue_node02
+    roles:
+    - openstack_message_queue
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  msg03.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_message_queue_node03
+    roles:
+    - openstack_message_queue
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  prx01.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_proxy_node01
+    roles:
+    - openstack_proxy
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  prx02.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_proxy_node02
+    roles:
+    - openstack_proxy
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  dns01.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_dns_node01
+    roles:
+    - openstack_dns
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  dns02.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_dns_node02
+    roles:
+    - openstack_dns
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  mon01.mcp-offline-vxlan.local:
+    reclass_storage_name: stacklight_server_node01
+    roles:
+    - stacklightv2_server_leader
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  mon02.mcp-offline-vxlan.local:
+    reclass_storage_name: stacklight_server_node02
+    roles:
+    - stacklightv2_server
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  mon03.mcp-offline-vxlan.local:
+    reclass_storage_name: stacklight_server_node03
+    roles:
+    - stacklightv2_server
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  log01.mcp-offline-vxlan.local:
+    reclass_storage_name: stacklight_log_node01
+    roles:
+    - stacklight_log_leader_v2
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  log02.mcp-offline-vxlan.local:
+    reclass_storage_name: stacklight_log_node02
+    roles:
+    - stacklight_log
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  log03.mcp-offline-vxlan.local:
+    reclass_storage_name: stacklight_log_node03
+    roles:
+    - stacklight_log
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  mtr01.mcp-offline-vxlan.local:
+    reclass_storage_name: stacklight_telemetry_node01
+    roles:
+    - stacklight_telemetry_leader
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  mtr02.mcp-offline-vxlan.local:
+    reclass_storage_name: stacklight_telemetry_node02
+    roles:
+    - stacklight_telemetry
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  mtr03.mcp-offline-vxlan.local:
+    reclass_storage_name: stacklight_telemetry_node03
+    roles:
+    - stacklight_telemetry
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+
+  cmn01.mcp-offline-vxlan.local:
+    reclass_storage_name: ceph_mon_node01
+    roles:
+    - ceph_mon
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  cmn02.mcp-offline-vxlan.local:
+    reclass_storage_name: ceph_mon_node02
+    roles:
+    - ceph_mon
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  cmn03.mcp-offline-vxlan.local:
+    reclass_storage_name: ceph_mon_node03
+    roles:
+    - ceph_mon
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  rgw01.mcp-offline-vxlan.local:
+    reclass_storage_name: ceph_rgw_node01
+    roles:
+    - ceph_rgw
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  rgw02.mcp-offline-vxlan.local:
+    reclass_storage_name: ceph_rgw_node02
+    roles:
+    - ceph_rgw
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  rgw03.mcp-offline-vxlan.local:
+    reclass_storage_name: ceph_rgw_node03
+    roles:
+    - ceph_rgw
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  mdb01.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_telemetry_node01
+    roles:
+    - openstack_telemetry
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  mdb02.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_telemetry_node02
+    roles:
+    - openstack_telemetry
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  mdb03.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_telemetry_node03
+    roles:
+    - openstack_telemetry
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  kmn01.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_barbican_node01
+    roles:
+    - openstack_barbican
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  kmn02.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_barbican_node02
+    roles:
+    - openstack_barbican
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
+  kmn03.mcp-offline-vxlan.local:
+    reclass_storage_name: openstack_barbican_node03
+    roles:
+    - openstack_barbican
+    - linux_system_codename_xenial
+    interfaces:
+      ens2:
+        role: single_mgm
+      ens3:
+        role: single_ctl
+
diff --git a/tcp_tests/templates/physical-mcp-offline-vxlan/core.yaml b/tcp_tests/templates/physical-mcp-offline-vxlan/core.yaml
new file mode 100644
index 0000000..c5e528a
--- /dev/null
+++ b/tcp_tests/templates/physical-mcp-offline-vxlan/core.yaml
@@ -0,0 +1,8 @@
+{% from 'physical-mcp-offline-vxlan/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+- description: Install jq to parse json output
+  cmd: apt install -y jq
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 15}
+  skip_fail: false
+
diff --git a/tcp_tests/templates/physical-mcp-offline-vxlan/salt.yaml b/tcp_tests/templates/physical-mcp-offline-vxlan/salt.yaml
new file mode 100644
index 0000000..98307e3
--- /dev/null
+++ b/tcp_tests/templates/physical-mcp-offline-vxlan/salt.yaml
@@ -0,0 +1,47 @@
+{%- set test_public_key = config.underlay.ssh_keys[0]["public"] %}
+
+{% from 'physical-mcp-offline-vxlan/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'physical-mcp-offline-vxlan/underlay.yaml' import ETH0_IP_ADDRESS_CFG01 with context %}
+
+{% import 'shared-maas.yaml' as SHARED_MAAS with context %}
+
+- description: Wait while a salt-minion is started
+  cmd: systemctl is-active salt-minion
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 10, delay: 10}
+  skip_fail: false
+
+- description: Refresh pillars on master node
+  cmd: sleep 90; salt-call --hard-crash --state-output=mixed --state-verbose=False saltutil.refresh_pillar
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Sync all salt resources on master node
+  cmd: sleep 60; salt-call --hard-crash --state-output=mixed --state-verbose=False saltutil.sync_all && sleep 5
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Setup NTP
+  cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False state.sls ntp.server
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Wait until salt is up
+  cmd: sleep 60
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{{ SHARED_MAAS.AUTH_IN_MAAS() }}
+{{ SHARED_MAAS.SET_LOCAL_DNS() }}
+{{ SHARED_MAAS.ADD_ROOT_SSH_KEY_TO_NODES() }}
+{{ SHARED_MAAS.ADD_TEST_SSH_KEY_TO_NODES(test_public_key) }}
+{{ SHARED_MAAS.ADD_ROOT_PUB_SSH_TO_MASS() }}
+{{ SHARED_MAAS.COMMISSION_VMS() }}
+{{ SHARED_MAAS.DEPLOY_VMS() }}
+{{ SHARED_MAAS.CONFIG_MACHINES(target='I@salt:control or I@nova:compute or I@ceph:osd or I@neutron:gateway') }}
+{{ SHARED_MAAS.ADD_ROOT_SSH_KEY_TO_NODES() }}
+{{ SHARED_MAAS.ADD_TEST_SSH_KEY_TO_NODES(test_public_key) }}
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--meta-data.yaml b/tcp_tests/templates/physical-mcp-offline-vxlan/underlay--meta-data.yaml
similarity index 100%
rename from tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--meta-data.yaml
rename to tcp_tests/templates/physical-mcp-offline-vxlan/underlay--meta-data.yaml
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-apt01.yaml b/tcp_tests/templates/physical-mcp-offline-vxlan/underlay--user-data-apt01.yaml
similarity index 100%
rename from tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-apt01.yaml
rename to tcp_tests/templates/physical-mcp-offline-vxlan/underlay--user-data-apt01.yaml
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/physical-mcp-offline-vxlan/underlay--user-data-cfg01.yaml
similarity index 79%
rename from tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-cfg01.yaml
rename to tcp_tests/templates/physical-mcp-offline-vxlan/underlay--user-data-cfg01.yaml
index 8e4d506..39297d4 100644
--- a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/physical-mcp-offline-vxlan/underlay--user-data-cfg01.yaml
@@ -38,12 +38,18 @@
    - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
    # Run user data script from config drive
+   - iptables -I OUTPUT -p tcp --dport 53 -j DROP
+   - iptables -I OUTPUT -p udp --dport 53 -j DROP
    - ifdown --force ens3; ifconfig ens3 down; ip a flush dev ens3; rm -f /var/run/network/ifstate.ens3
    - ifdown --force ens4; ifconfig ens4 down; ip a flush dev ens4; rm -f /var/run/network/ifstate.ens4
    - ifdown --force ens5; ifconfig ens5 down; ip a flush dev ens5; rm -f /var/run/network/ifstate.ens5
-   - cp /root/config-drive/user-data /root/config.sh && chmod 664 /root/config.sh
-   - sed -i '/^reboot$/d' /root/config.sh
-   - cd /root && /bin/bash -xe ./config.sh
+   - ifdown --force ens6; ifconfig ens6 down; ip a flush dev ens6; rm -f /var/run/network/ifstate.ens6
+   - cd /root/config-drive && cloud-init --file /root/config-drive/vendor-data --debug modules
+   # - [bash, -cex, *master_config]
+   # - cp /root/config-drive/user-data /root/config.sh && chmod 664 /root/config.sh
+   # - sed -i '/^reboot$/d' /root/config.sh
+   # - cd /root && /bin/bash -xe ./config.sh
+
 
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
@@ -69,6 +75,7 @@
             UserKnownHostsFile /dev/null
 
   ssh_pwauth: True
+  disable_root: false
   users:
    - name: root
      sudo: ALL=(ALL) NOPASSWD:ALL
@@ -77,3 +84,8 @@
      {% for key in config.underlay.ssh_keys %}
       - ssh-rsa {{ key['public'] }}
      {% endfor %}
+
+  chpasswd:
+     list: |
+      root:r00tme
+     expire: False
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data.yaml b/tcp_tests/templates/physical-mcp-offline-vxlan/underlay--user-data.yaml
similarity index 100%
rename from tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data.yaml
rename to tcp_tests/templates/physical-mcp-offline-vxlan/underlay--user-data.yaml
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay.yaml b/tcp_tests/templates/physical-mcp-offline-vxlan/underlay.yaml
similarity index 91%
rename from tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay.yaml
rename to tcp_tests/templates/physical-mcp-offline-vxlan/underlay.yaml
index 0228ee8..343a1bd 100644
--- a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay.yaml
+++ b/tcp_tests/templates/physical-mcp-offline-vxlan/underlay.yaml
@@ -1,15 +1,15 @@
 # Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
 {% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
 
-{% import 'physical-mcp-ocata-offline-ovs/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'physical-mcp-ocata-offline-ovs/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'physical-mcp-offline-vxlan/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'physical-mcp-offline-vxlan/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
 
 
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'physical-mcp-ocata-offline-ovs') %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'mcp-offline-vxlan') %}
 {# set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' #}
-{% set DOMAIN_NAME = os_env('DOMAIN_NAME', 'offline-ocata-vxlan.local') %}
-{% set HOSTNAME_APT = os_env('HOSTNAME_CFG01', 'apt.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CFG01 = os_env('HOSTNAME_APT01', 'cfg01.' + DOMAIN_NAME) %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', 'mcp-offline-vxlan.local') %}
+{% set HOSTNAME_APT = os_env('HOSTNAME_APT', 'apt.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
 
 {% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
@@ -159,7 +159,7 @@
 
 template:
   devops_settings:
-    env_name: {{ os_env('ENV_NAME', 'phy-mcp-ocata-offline-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+    env_name: {{ os_env('ENV_NAME', 'phy-mcp-offline-vxlan_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
 
     address_pools:
       managment-pool01:
@@ -278,6 +278,16 @@
             l2_network_device: +1
             default_{{ HOSTNAME_CFG01 }}: {{ ETH2_IP_ADDRESS_CFG01 }}
 
+      external-pool01:
+        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.13.0.0/16:16') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+          ip_ranges:
+              dhcp: [+130, +230]
+
 
     groups:
 
@@ -296,6 +306,7 @@
           admin: admin-pool01
           managment: managment-pool01
           public: public-pool01
+          external: external-pool01
 
         l2_network_devices:
           # Ironic management interface
@@ -323,6 +334,15 @@
             parent_iface:
               phys_dev: !os_env PUBLIC_BRIDGE
 
+          external:
+            address_pool: external-pool01
+            dhcp: false
+            forward:
+              mode: bridge
+            parent_iface:
+              phys_dev: !os_env EXTERNAL_BRIDGE
+
+
           #admin:
           #  address_pool: admin-pool01
           #  dhcp: true
@@ -336,30 +356,30 @@
           - name: {{ HOSTNAME_CFG01 }}
             role: salt_master
             params:
-              vcpu: !os_env SLAVE_NODE_CPU, 4
-              memory: !os_env SLAVE_NODE_MEMORY, 8192
+              vcpu: !os_env SLAVE_NODE_CPU, 8
+              memory: !os_env SLAVE_NODE_MEMORY, 16384
               boot:
                 - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
+              # cloud_init_volume_name: iso
+              # cloud_init_iface_up: ens3
               volumes:
                 - name: system
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   shared_backing_store_name: !os_env CFG01_VOLUME_NAME
                   format: qcow2
-                - name: config
+                # - name: iso  # Volume with name 'iso' will be used
+                #              # for store image with cloud-init metadata.
+                #   capacity: 1
+                #   format: raw
+                #   device: cdrom
+                #   bus: ide
+                #   cloudinit_meta_data: *cloudinit_meta_data
+                #   cloudinit_user_data: *cloudinit_user_data_cfg01
+                - name: iso
                   format: raw
                   device: cdrom
                   bus: ide
                   source_image: !os_env CFG01_CONFIG_PATH
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_cfg01
 
 
               interfaces:
@@ -375,9 +395,9 @@
                   l2_network_device: public
                   interface_model: *interface_model
 
-                #- label: ens6
-                #  l2_network_device: admin
-                #  interface_model: *interface_model
+                - label: ens6
+                  l2_network_device: external
+                  interface_model: *interface_model
 
 
               network_config:
@@ -390,9 +410,9 @@
                 ens5:
                   networks:
                     - public
-                #ens6:
-                #  networks:
-                #    - admin
+                ens6:
+                  networks:
+                    - external
 
           - name: {{ HOSTNAME_APT }}
             role: salt_minion
diff --git a/tcp_tests/templates/physical-mcp-offline-vxlan/user-data-cfg01.yaml b/tcp_tests/templates/physical-mcp-offline-vxlan/user-data-cfg01.yaml
new file mode 100644
index 0000000..f3d0eeb
--- /dev/null
+++ b/tcp_tests/templates/physical-mcp-offline-vxlan/user-data-cfg01.yaml
@@ -0,0 +1,28 @@
+#cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+vendor_data:
+    enabled: True
+
+ssh_pwauth: True
+users:
+  - name: root
+    sudo: ALL=(ALL) NOPASSWD:ALL
+    shell: /bin/bash
+    ssh_pwauth: True
+
+
+disable_root: false
+chpasswd:
+  list: |
+    root:r00tme
+  expire: False
+
+bootcmd:
+ # Block access to SSH while node is preparing
+ # - cloud-init-per once sudo echo 'sshd:ALL' >> /etc/hosts.deny
+ # Enable root access
+ - touch /is_cloud_init_started
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+
+merge_how: "dict(recurse_array)+list(append)"
diff --git a/tcp_tests/templates/shared-maas.yaml b/tcp_tests/templates/shared-maas.yaml
new file mode 100644
index 0000000..0becf80
--- /dev/null
+++ b/tcp_tests/templates/shared-maas.yaml
@@ -0,0 +1,195 @@
+{# Collection of common macros shared across MaaS #}
+
+{%- macro AUTH_IN_MAAS() %}
+
+- description: MaaS auth
+  cmd: bash -x /var/lib/maas/.maas_login.sh
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{%- endmacro %}
+
+{%- macro SET_LOCAL_DNS() %}
+
+- description: Set upstream dns in MaaS
+  cmd: sleep 30; maas mirantis maas set-config name=upstream_dns value='{{ ETH0_IP_ADDRESS_CFG01 }} 8.8.8.8 8.8.4.4'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{%- endmacro %}
+
+{%- macro ADD_TEST_SSH_KEY_TO_NODES(key) %}
+
+- description: Add key to root user on salt minions
+  cmd: salt "*" ssh.set_auth_key root "{{ key }}"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Add key to ubuntu user on salt minions
+  cmd: salt "*" ssh.set_auth_key ubuntu "{{ key }}"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+{%- endmacro %}
+
+{%- macro ADD_ROOT_SSH_KEY_TO_NODES() %}
+
+- description: Add root key to root user on salt minions
+  cmd: salt "*" ssh.set_auth_key root "$(ssh-keygen -y -f ~/.ssh/id_rsa | cut -d " " -f 2)"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Add root key to ubuntu user on salt minions
+  cmd: salt "*" ssh.set_auth_key ubuntu "$(ssh-keygen -y -f ~/.ssh/id_rsa | cut -d " " -f 2)"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{%- endmacro %}
+
+{%- macro ADD_ROOT_PUB_SSH_TO_MASS() %}
+
+- description: Generate public key from root private key
+  cmd: ssh-keygen -y -f ~root/.ssh/id_rsa > ~root/.ssh/id_rsa.pub
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Add root public key to MaaS user
+  cmd: maas mirantis sshkeys create key="$(cat ~root/.ssh/id_rsa.pub)"
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{%- endmacro %}
+
+{%- macro COMMISSION_VMS() %}
+
+- description: Wait dhcpd server
+  cmd: timeout 90s /bin/bash -c 'while ! pidof dhcpd; do echo "dhcpd still isnt running"; sleep 10; done'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Add machines and commission
+  cmd: salt-call state.sls maas.machines
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Wait finish of commissioning machines
+  cmd: salt-call state.sls maas.machines.wait_for_ready
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{%- endmacro %}
+
+{%- macro DEPLOY_VMS() %}
+
+- description: Show salt keys before deploy
+  cmd: salt-key
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Assign IPs to machines
+  cmd: salt-call state.sls maas.machines.assign_ip
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Assign drive partitions to machines
+  cmd: salt-call state.sls maas.machines.storage
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Deploy machines
+  cmd: salt-call state.sls maas.machines.deploy
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Wait finish of deploying machines
+  cmd: salt-call state.sls maas.machines.wait_for_deployed
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Show salt keys after deploy
+  cmd: salt-key
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Accept salt keys
+  cmd: salt-key -A -y --include-denied --include-rejected
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Show salt keys after accepting
+  cmd: salt-key
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{%- endmacro %}
+
+{%- macro CONFIG_MACHINES(target) %}
+
+- description: Refresh pillars
+  cmd: salt '*' saltutil.refresh_pillar
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Sync all
+  cmd: salt '*' saltutil.sync_all
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Show top of reclass
+  cmd: reclass-salt --top
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Set rebooted flag
+  cmd: salt -C '{{ target }}' cmd.run 'touch /run/is_rebooted'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Config machines and reboot them
+  cmd: salt --async -C '{{ target }}' cmd.run 'salt-call state.sls linux.system.repo,linux.system.user,openssh,linux.network;reboot'
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Wait 10 minutes for machines reboot
+  cmd: sleep 600
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Check salt minions
+  cmd: salt -C '{{ target }}' test.ping
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 5, delay: 30}
+  skip_fail: false
+
+- description: Check reboot status of machines
+  cmd: |
+    ! salt -C '{{ target }}' cmd.run '[ -f "/run/is_rebooted" ] && echo "Failed" || echo "Rebooted"' | grep -q Failed
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{%- endmacro %}
diff --git a/tcp_tests/tests/system/test_offline.py b/tcp_tests/tests/system/test_offline.py
index 6c083cb..4d73e84 100644
--- a/tcp_tests/tests/system/test_offline.py
+++ b/tcp_tests/tests/system/test_offline.py
@@ -13,8 +13,8 @@
 #    under the License.
 import pytest
 import time
-
-from collections import Counter
+import socket
+import urlparse
 
 from tcp_tests import logger
 from tcp_tests.managers.jenkins.client import JenkinsClient
@@ -103,6 +103,12 @@
         LOG.info(r)
         LOG.info(f)
 
+        r, f = day1_cfg_config.salt.enforce_state(
+            'cfg01*',
+            'maas.machines.storage')
+        LOG.info(r)
+        LOG.info(f)
+
         # show_step(9)
         underlay.check_call(
             node_name=cfg_node, verbose=verbose,
@@ -169,28 +175,21 @@
 
         LOG.info("*************** DONE **************")
 
-    def test_deploy_day1(self, show_step, config, underlay, hardware,
-                         core_deployed, salt_deployed):
+    def test_deploy_day1_pike(self, show_step, config, underlay, hardware,
+                              core_deployed, salt_deployed, tempest_actions):
         """Test for deploying an mcp from day01 images
 
         Scenario:
             1. Wait salt master
-            2. Addition config of MaaS
-            3. Wait dhcpd server
-            4. Start comissioning node via MaaS
-            5. Wait of comissioning node by MaaS
-            6. Start deploing node via MaaS
-            7. Wait of deploing node by
-            8. Accept all keys
-            9. Configure and baremetal nodes after MaaS deployment
-            10. Run deploy OS job
+            2. Run deploy OS job
+            3. Add test and root keys
+            4. Run deploy CVP sanity job
 
         """
         # group = hardware._get_default_node_group()
         nodes = underlay.node_names()
         LOG.info("Nodes - {}".format(nodes))
-        cfg_node = 'cfg01.offline-ocata-vxlan.local'
-        tempest_node = 'gtw01.offline-ocata-vxlan.local'
+        cfg_node = 'cfg01.mcp-offline-vxlan.local'
         verbose = True
         ssh_test_key = config.underlay.ssh_keys[0]['public']
 
@@ -201,203 +200,33 @@
             cmd="""timeout 300s /bin/bash -c """
                 """'while ! salt-call test.ping; do """
                 """echo "salt master still isnt running"; sleep 10; done'"""
-        )  # noqa
+        )
 
         show_step(2)
-        underlay.check_call(
-            node_name=cfg_node,
-            verbose=verbose,
-            cmd='salt-call saltutil.sync_all')
-
-        underlay.check_call(
-            node_name=cfg_node,
-            verbose=verbose,
-            cmd="salt '*' ssh.set_auth_key root '{}'".format(ssh_test_key))
-        underlay.check_call(
-            node_name=cfg_node,
-            verbose=verbose,
-            cmd='salt "*" ssh.set_auth_key root '
-                '"$(ssh-keygen -y -f ~/.ssh/id_rsa | cut -d " " -f 2)"')
-        underlay.check_call(
-            node_name=cfg_node,
-            verbose=verbose,
-            cmd="salt '*' ssh.set_auth_key ubuntu '{}'".format(ssh_test_key))
-        underlay.check_call(
-            node_name=cfg_node,
-            verbose=verbose,
-            cmd='salt "*" ssh.set_auth_key ubuntu '
-                '"$(ssh-keygen -y -f ~/.ssh/id_rsa | cut -d " " -f 2)"')
-
-        # underlay.check_call(
-        #     node_name=cfg_node,
-        #     verbose=verbose,
-        #     cmd='salt-call state.sls maas.region')
-        # underlay.check_call(
-        #     node_name=cfg_node,
-        #     verbose=verbose,
-        #     cmd='maas logout mirantis && '
-        #     'maas login mirantis '
-        #     'http://localhost:5240/MAAS/api/2.0/ '
-        #     'FTvqwe7ybBp68gPar2:5mcctTAXVL8mns4ef4:zrA9LZwu2tMc8BAZpsPUfwWwTyQnAtDN'  # noqa
-        # )
-
-        # underlay.check_call(
-        #     node_name=cfg_node,
-        #     verbose=verbose,
-        #     cmd="maas mirantis maas set-config "
-        #         "name=upstream_dns value='10.10.0.15 8.8.8.8 8.8.4.4'")
-
-        # underlay.check_call(
-        #     node_name=cfg_node,
-        #     verbose=verbose,
-        #     cmd="maas mirantis ipranges create "
-        #         "type=dynamic start_ip=10.10.191.255 end_ip=10.10.255.254 "
-        #         "subnet=$(maas mirantis subnets read | jq '.[] | "
-        #         "select(.name==\"10.10.0.0/16\") | .id')")
-
-        # underlay.check_call(
-        #     node_name=cfg_node,
-        #     verbose=verbose,
-        #     cmd="maas mirantis vlan update "
-        #         "$(maas mirantis subnets read | jq '.[] | "
-        #         "select(.name==\"10.10.0.0/16\") | .vlan.fabric_id') "
-        #         "0 dhcp_on=True primary_rack='cfg01'")
-
-        underlay.check_call(
-            node_name=cfg_node,
-            verbose=verbose,
-            cmd="ssh-keygen -y -f ~root/.ssh/id_rsa > ~root/.ssh/id_rsa.pub")
-        underlay.check_call(
-            node_name=cfg_node,
-            verbose=verbose,
-            cmd='maas mirantis sshkeys create '
-                'key="$(cat ~root/.ssh/id_rsa.pub)"')
-
-        show_step(3)
-        underlay.check_call(
-            node_name=cfg_node,
-            verbose=verbose,
-            cmd="""timeout 90s /bin/bash -c 'while ! pidof dhcpd; do """
-                """echo "dhcpd still isnt running"; sleep 10; done'""")
-
-        show_step(4)
-        underlay.check_call(
-            node_name=cfg_node,
-            verbose=verbose,
-            cmd='salt-call state.sls maas.machines')
-        show_step(5)
-        # cmd = """   timeout 1200s bash -c 'hosts=$(maas mirantis nodes read | jq -r ".[] | select(.node_type_name==\\"Machine\\") | select(.status_name==\\"Ready\\") | .hostname "); while ! [ $(echo "$hosts" | wc -w) -eq 10 ]; do echo "Ready hosts:\n$hosts"; sleep 30; hosts=$(maas mirantis nodes read | jq -r ".[] | select(.node_type_name==\\"Machine\\") | select(.status_name==\\"Ready\\") | .hostname "); done '   """  # noqa
-        cmd = """salt-call state.sls maas.machines.wait_for_ready"""
-        underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
-        underlay.check_call(
-            node_name=cfg_node, verbose=verbose, cmd='salt-key')
-        underlay.check_call(
-            node_name=cfg_node,
-            verbose=verbose,
-            cmd='salt-call state.sls maas.machines.assign_ip')
-        show_step(6)
-        underlay.check_call(
-            node_name=cfg_node,
-            verbose=verbose,
-            cmd='salt-call state.sls maas.machines.deploy')
-        show_step(7)
-        underlay.check_call(
-            node_name=cfg_node,
-            verbose=verbose,
-            cmd='salt-call state.sls maas.machines.wait_for_deployed')
-        underlay.check_call(
-            node_name=cfg_node, verbose=verbose, cmd='salt-key')
-
-        show_step(8)
-        underlay.check_call(
-            node_name=cfg_node,
-            verbose=verbose,
-            expected=[0, 1],
-            cmd='salt-key -A -y --include-denied --include-rejected')
-        underlay.check_call(
-            node_name=cfg_node, verbose=verbose, cmd='salt-key')
-
-        show_step(9)
-        cmd = "salt '*' saltutil.refresh_pillar"
-        underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
-        cmd = "salt '*' saltutil.sync_all"
-        underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
-
-        underlay.check_call(
-            node_name=cfg_node, verbose=verbose, cmd="reclass-salt --top")
-
-        cmd = "salt -C " \
-              "'I@salt:control or I@nova:compute or I@ceph:osd' " \
-              "cmd.run 'touch /run/is_rebooted'"
-        underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
-
-        cmd = "salt --async -C " \
-              "'I@salt:control' cmd.run 'salt-call state.sls " \
-              "linux.system.user,openssh,linux.network;reboot'"
-        underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
-
-        cmd = "salt --async -C " \
-              "'I@nova:compute' cmd.run 'salt-call state.sls " \
-              "linux.system.user,openssh,linux.network;reboot'"
-        underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
-
-        cmd = "salt --async -C " \
-              "'I@ceph:osd' cmd.run 'salt-call state.sls " \
-              "linux.system.user,openssh,linux.network;reboot'"
-        underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
-
-        time.sleep(360)  # TODO: Add ssh waiter
-
-        cmd = "salt -C " \
-              "'I@salt:control or I@nova:compute or I@ceph:osd'" \
-              " test.ping"
-        underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
-
-        cmd = """salt -C """ \
-              """'I@salt:control or I@nova:compute or I@ceph:osd' """ \
-              """cmd.run '[ -f "/run/is_rebooted" ] && """ \
-              """echo "Has not been rebooted!" || echo "Rebooted"' """
-        ret = underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
-        count = Counter(ret['stdout_str'].split())
-
-        assert count['Rebooted'] == 13, "Should be rebooted 13 baremetal nodes"
-
-        underlay.check_call(
-            node_name=cfg_node,
-            verbose=verbose,
-            cmd="salt '*' ssh.set_auth_key root '{}'".format(ssh_test_key))
-        underlay.check_call(
-            node_name=cfg_node,
-            verbose=verbose,
-            cmd='salt "*" ssh.set_auth_key root '
-                '"$(ssh-keygen -y -f ~/.ssh/id_rsa | cut -d " " -f 2)"')
-        underlay.check_call(
-            node_name=cfg_node,
-            verbose=verbose,
-            cmd="salt '*' ssh.set_auth_key ubuntu '{}'".format(ssh_test_key))
-        underlay.check_call(
-            node_name=cfg_node,
-            verbose=verbose,
-            cmd='salt "*" ssh.set_auth_key ubuntu '
-                '"$(ssh-keygen -y -f ~/.ssh/id_rsa | cut -d " " -f 2)"')
 
         salt_api = \
             salt_deployed.get_pillar(cfg_node, '_param:jenkins_salt_api_url')
         salt_api = salt_api[0].get(cfg_node)
 
-        show_step(10)
         jenkins = JenkinsClient(
             host='http://172.16.44.33:8081',
             username='admin',
             password='r00tme')
         params = jenkins.make_defults_params('deploy_openstack')
         params['SALT_MASTER_URL'] = salt_api
-        params['STACK_INSTALL'] = \
-            'core,kvm,ceph,cicd,openstack,stacklight,finalize'
+        if settings.STACK_INSTALL:
+            params['STACK_INSTALL'] = settings.STACK_INSTALL
+        else:
+            params['STACK_INSTALL'] = \
+                'core,kvm,ceph,cicd,ovs,openstack,stacklight,finalize'
+        params['STATIC_MGMT_NETWORK'] = 'true'
         build = jenkins.run_build('deploy_openstack', params)
 
+        LOG.info("Take a look deploy progress here - %s. Build #%s",
+                 "http://172.16.44.33:8081/job/deploy_openstack/", build[1])
+
         jenkins.wait_end_of_build(
-            name=build[0], build_id=build[1], timeout=60 * 60 * 4)
+            name=build[0], build_id=build[1], timeout=60 * 60 * 5)
 
         with open("{path}/cfg01_jenkins_deploy_openstack_console.log".format(
                 path=settings.LOGS_DIR), 'w') as f:
@@ -411,6 +240,7 @@
                 name=build[0], build_id=build[1])['result'] == 'SUCCESS', \
             "Deploy openstack was failed"
 
+        show_step(3)
         underlay.check_call(
             node_name=cfg_node,
             verbose=verbose,
@@ -432,15 +262,54 @@
 
         salt_deployed.update_ssh_data_from_minions()
 
+        show_step(4)
+        params = jenkins.make_defults_params('cvp-sanity')
+        params['TESTS_SETTINGS'] = 'drivetrain_version=proposed'
+        build = jenkins.run_build('cvp-sanity', params)
+        LOG.info("Take a look test progress here - %s. Build #%s",
+                 "http://172.16.44.33:8081/job/cvp-sanity/", build[1])
+
+        jenkins.wait_end_of_build(
+            name=build[0], build_id=build[1], timeout=60 * 60 * 5)
+
+        assert \
+            jenkins.build_info(
+                name=build[0], build_id=build[1])['result'] == 'SUCCESS', \
+            "CVP sanity was failed"
+
         time.sleep(120)  # debug sleep
         cmd = "salt '*' test.ping"
         underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
 
-        openstack = managers.openstack_manager.OpenstackManager(
-            config, underlay, hardware, salt_deployed)
-
         if settings.RUN_TEMPEST:
-            openstack.run_tempest(
-                pattern=settings.PATTERN,
-                node_name=tempest_node)
-            openstack.download_tempest_report()
+            pillar = tempest_actions.runtest_pillar
+            params = [
+                'glance_image_cirros_location',
+                'glance_image_fedora_location',
+                'glance_image_manila_location']
+
+            urls = []
+            for p in params:
+                url = pillar.get('parameters', {}).get('_param', {}).get(p)
+                if url:
+                    urls.append(url)
+
+            LOG.info("Found url with images - %s", urls)
+
+            hosts = set()
+            hosts.add(settings.TEMPEST_IMAGE.split('/')[0])
+            for u in urls:
+                host = urlparse.urlparse(u)
+                hosts.add(host.netloc.split(':')[0])  # drop port if exists
+
+            records = []
+            for h in hosts:
+                ip = socket.gethostbyname(h)
+                records.append((ip, h))
+
+            for ip, h in records:
+                salt_deployed.local(cfg_node, 'hosts.add_host', args=(ip, h))
+
+            tempest_actions.prepare_and_run_tempest()
+
+    test_deploy_day1_queens = test_deploy_day1_pike