Add small cloud with Ironic

Change-Id: Ibd099366cb1c8de045e37bc299eaf6a7e932df7c
diff --git a/jobs/pipelines/deploy-without-drivetrain-and-run-tests.groovy b/jobs/pipelines/deploy-without-drivetrain-and-run-tests.groovy
new file mode 100644
index 0000000..96ddf76
--- /dev/null
+++ b/jobs/pipelines/deploy-without-drivetrain-and-run-tests.groovy
@@ -0,0 +1,105 @@
+@Library('tcp-qa')_
+
+def common = new com.mirantis.mk.Common()
+def shared = new com.mirantis.system_qa.SharedPipeline()
+def steps = "hardware,create_model,salt," + env.DRIVETRAIN_STACK_INSTALL + "," + env.PLATFORM_STACK_INSTALL
+
+currentBuild.description = "${NODE_NAME}:${ENV_NAME}"
+
+def deploy(shared, common, steps) {
+    def report_text = ''
+    try {
+
+        stage("Clean the environment and clone tcp-qa") {
+            shared.prepare_working_dir()
+        }
+
+        stage("Create environment, generate model, bootstrap the salt-cluster") {
+            // steps: "hardware,create_model,salt"
+            shared.swarm_bootstrap_salt_cluster_devops()
+        }
+
+        stage("Deploy platform components from day01 Jenkins") {
+            // steps: env.PLATFORM_STACK_INSTALL
+            shared.swarm_deploy_platform_non_cicd(env.PLATFORM_STACK_INSTALL, env.PLATFORM_STACK_INSTALL_TIMEOUT)
+        }
+
+        currentBuild.result = 'SUCCESS'
+
+    } catch (e) {
+        common.printMsg("Deploy is failed: " + e.message , "purple")
+        report_text = e.message
+        def snapshot_name = "deploy_failed"
+        shared.run_cmd("""\
+            dos.py suspend ${ENV_NAME} || true
+            dos.py snapshot ${ENV_NAME} ${snapshot_name} || true
+        """)
+        if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
+            shared.run_cmd("""\
+                dos.py resume ${ENV_NAME} || true
+            """)
+        }
+        shared.devops_snapshot_info(snapshot_name)
+        throw e
+    } finally {
+        shared.create_deploy_result_report(steps, currentBuild.result, report_text)
+    }
+}
+
+def test(shared, common, steps) {
+    try {
+        stage("Run tests") {
+            shared.swarm_run_pytest(steps)
+        }
+
+    } catch (e) {
+        common.printMsg("Tests are failed: " + e.message, "purple")
+        def snapshot_name = "tests_failed"
+        shared.run_cmd("""\
+            dos.py suspend ${ENV_NAME} || true
+            dos.py snapshot ${ENV_NAME} ${snapshot_name} || true
+        """)
+        if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
+            shared.run_cmd("""\
+                dos.py resume ${ENV_NAME} || true
+            """)
+        }
+        shared.devops_snapshot_info(snapshot_name)
+        throw e
+    }
+}
+
+// main
+// Temporarily disable throttling to check how the job runs without it
+//throttle(['fuel_devops_environment']) {
+  node ("${NODE_NAME}") {
+    try {
+        // run deploy stages
+        deploy(shared, common, steps)
+        // run test stages
+        test(shared, common, steps)
+    } catch (e) {
+        common.printMsg("Job is failed: " + e.message, "purple")
+        throw e
+    } finally {
+        // shutdown the environment if required
+        if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+            shared.run_cmd("""\
+                dos.py destroy ${ENV_NAME} || true
+            """)
+        }
+
+        stage("Archive all xml reports") {
+            archiveArtifacts artifacts: "**/*.xml,**/*.ini,**/*.log,**/*.tar.gz"
+        }
+        stage("report results to testrail") {
+            shared.swarm_testrail_report(steps)
+        }
+        stage("Store TestRail reports to job description") {
+            def description = readFile("description.txt")
+            currentBuild.description += "\n${description}"
+        }
+
+    }
+  }
+//}
\ No newline at end of file
diff --git a/jobs/pipelines/swarm-deploy-platform-without-cicd.groovy b/jobs/pipelines/swarm-deploy-platform-without-cicd.groovy
new file mode 100644
index 0000000..8d3eb22
--- /dev/null
+++ b/jobs/pipelines/swarm-deploy-platform-without-cicd.groovy
@@ -0,0 +1,80 @@
+/**
+ *
+ * Deploy the product cluster using the Jenkins master on the day01 node (without a CICD cluster)
+ *
+ * Expected parameters:
+ *
+ *   PARENT_NODE_NAME              Name of the Jenkins slave where the environment is created
+ *   PARENT_WORKSPACE              Path to the workspace of the parent job with the tcp-qa repo
+ *   ENV_NAME                      Fuel-devops environment name
+ *   STACK_INSTALL                 Stacks to install using Jenkins on the day01 node: "openstack,stacklight"
+ *   STACK_INSTALL_TIMEOUT         Stacks installation timeout
+ *   TCP_QA_REFS                   Reference to the tcp-qa change on review.gerrithub.io, like refs/changes/46/418546/41
+ *   SHUTDOWN_ENV_ON_TEARDOWN      Optional; shut down the fuel-devops environment at the end of the job
+ *
+ */
+
+@Library('tcp-qa')_
+
+common = new com.mirantis.mk.Common()
+shared = new com.mirantis.system_qa.SharedPipeline()
+
+if (! env.PARENT_NODE_NAME) {
+    error "'PARENT_NODE_NAME' must be set from the parent deployment job!"
+}
+
+currentBuild.description = "${PARENT_NODE_NAME}:${ENV_NAME}"
+
+def install_timeout = env.STACK_INSTALL_TIMEOUT.toInteger()
+
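+// Reserve 600 extra seconds on top of the installation timeout for the auxiliary stages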
+timeout(time: install_timeout + 600, unit: 'SECONDS') {
+
+    node ("${PARENT_NODE_NAME}") {
+        if (! fileExists("${PARENT_WORKSPACE}")) {
+            error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
+        }
+        dir("${PARENT_WORKSPACE}") {
+
+            if (! env.STACK_INSTALL) {
+                error "'STACK_INSTALL' must contain one or more comma separated stack names for [deploy_openstack] pipeline"
+            }
+
+            if (env.TCP_QA_REFS) {
+                stage("Update working dir to patch ${TCP_QA_REFS}") {
+                    shared.update_working_dir()
+                }
+            }
+
+            try {
+                // Install the cluster
+                stage("Run Jenkins job on day01 [deploy_openstack:${env.STACK_INSTALL}]") {
+                    shared.run_job_on_day01_node(env.STACK_INSTALL, install_timeout)
+                }
+
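+                // Sanity-check and snapshot each stack from the comma-separated list,
+                // e.g. STACK_INSTALL="openstack,ironic" (example value)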
+                for (stack in "${env.STACK_INSTALL}".split(",")) {
+                    stage("Sanity check the deployed component [${stack}]") {
+                        shared.sanity_check_component(stack)
+                    }
+                    stage("Make environment snapshot [${stack}_deployed]") {
+                        shared.devops_snapshot(stack)
+                    }
+                } // for
+
+            } catch (e) {
+                common.printMsg("Job is failed", "purple")
+                shared.download_logs("deploy_platform_${ENV_NAME}")
+                throw e
+            } finally {
+                // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+                // and report appropriate data to TestRail
+                // TODO(ddmitriev): add checks for the installed stacks
+                if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+                    shared.run_cmd("""\
+                        dos.py destroy ${ENV_NAME}
+                    """)
+                }
+            }
+
+        } // dir
+    } // node
+}
\ No newline at end of file
diff --git a/src/com/mirantis/system_qa/SharedPipeline.groovy b/src/com/mirantis/system_qa/SharedPipeline.groovy
index 9e3aadd..0483965 100644
--- a/src/com/mirantis/system_qa/SharedPipeline.groovy
+++ b/src/com/mirantis/system_qa/SharedPipeline.groovy
@@ -227,6 +227,22 @@
         build_pipeline_job('swarm-deploy-platform', parameters)
 }
 
+def swarm_deploy_platform_non_cicd(String stack_to_install, String install_timeout) {
+        // Run the deploy_openstack job on the day01 Jenkins for the specified stacks
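+        // Example call (hypothetical values): swarm_deploy_platform_non_cicd('openstack,ironic', '3600')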
+        def common = new com.mirantis.mk.Common()
+        def tcp_qa_refs = env.TCP_QA_REFS ?: ''
+        def parameters = [
+                string(name: 'PARENT_NODE_NAME', value: "${NODE_NAME}"),
+                string(name: 'PARENT_WORKSPACE', value: pwd()),
+                string(name: 'ENV_NAME', value: "${ENV_NAME}"),
+                string(name: 'STACK_INSTALL', value: stack_to_install),
+                string(name: 'STACK_INSTALL_TIMEOUT', value: install_timeout),
+                string(name: 'TCP_QA_REFS', value: "${tcp_qa_refs}"),
+                booleanParam(name: 'SHUTDOWN_ENV_ON_TEARDOWN', value: false),
+            ]
+        build_pipeline_job('swarm-deploy-platform-without-cicd', parameters)
+}
+
 def swarm_run_pytest(String passed_steps) {
         // Run pytest tests
         def common = new com.mirantis.mk.Common()
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-small-mcp-ironic.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-small-mcp-ironic.yaml
new file mode 100644
index 0000000..c32d229
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-small-mcp-ironic.yaml
@@ -0,0 +1,66 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-small-mcp-ironic' %}
+# Name of the context file (without the fixed '.yaml' extension) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-small-mcp-ironic.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment-context.yaml', 'cookiecutter-context-small-mcp-ironic.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
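+# Install all available salt formulas ('\*' is escaped so it is not expanded by the shell)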
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+  cmd: |
+    set -e;
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
+
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+#- description: "Workaround for using glusterfs on single node"
+#  cmd: |
+#    set -e;
+#    find /srv/salt/reclass/classes/system/glusterfs/ -type f -exec sed -i 's/replica: .*//g' {} +
+#    find /srv/salt/reclass/classes/system/glusterfs/ -type f -exec sed -i 's/.*cluster_node02_address.*//g' {} +
+#    find /srv/salt/reclass/classes/system/glusterfs/ -type f -exec sed -i 's/.*cluster_node03_address.*//g' {} +
+#    find /srv/salt/reclass/classes/system/glusterfs/ -type f -exec sed -i 's/opts: .*/opts: \"defaults,backup-volfile-servers=${_param:glusterfs_node01_address}\"/g' {} +
+#
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 5}
+#  skip_fail: false
+
+#- description: "Workaround for changing services to single mode"
+#  cmd: |
+#    set -e;
+#    sed -i 's/- system.keystone.server.cluster/- system.keystone.server.single/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+#    sed -i 's/- system.rabbitmq.server.cluster/- system.rabbitmq.server.single/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/message_queue.yml;
+#
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 5}
+#  skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-small-mcp-ironic/cookiecutter-context-small-mcp-ironic.yaml b/tcp_tests/templates/cookied-small-mcp-ironic/cookiecutter-context-small-mcp-ironic.yaml
new file mode 100644
index 0000000..1ca5bd3
--- /dev/null
+++ b/tcp_tests/templates/cookied-small-mcp-ironic/cookiecutter-context-small-mcp-ironic.yaml
@@ -0,0 +1,324 @@
+default_context:
+  alertmanager_notification_email_enabled: 'False'
+  auditd_enabled: 'False'
+  backend_network_netmask: ''
+  backup_private_key: '-----BEGIN RSA PRIVATE KEY-----
+
+    MIIEpAIBAAKCAQEA0YjCIcsHq6Jmai5de19wOERdbMMP1kvaHAK2j5tCiywJrmsN
+
+    Y+nzGzFKqUiuW/HlvHCY6UNdlDyz6H7WdhzmRoFgVIzWGWR1rJk70D2JdN+QrKhA
+
+    BqAhJ/zIOn0btx3y30VaIHAR6V9P6sezYLtaQAP9LLuHV+9tJH0dY3sisYDd9PdS
+
+    Hn2o0CYpe2Ojt1vaKhhIbtzxlaXM4XelxVbaahlIyzW6ooXYL5zwccG+g11T0R9z
+
+    6YNym+pI1arFjOTobeEiCkDUX+oFd/ICm3ntt6LP69gZKcGSOFB/8n17nBZfmqQf
+
+    puSwZKqwp6pca4VaT2uJx0jW9mBbMgyhoyKuPwIDAQABAoIBAQCAxfCxPgrfw7X3
+
+    ablP+i6IlhNopVTVWdaYwl4aUcwKpizpAafAEOa0fo2pDh8CKN8M+ZIwQZOAZ/IV
+
+    X+ZDvq0TBa4DKG7oOiJLyfzFlRwmNMPAKML4j27xGVyg/YSL/J7n8sJaDyYi6828
+
+    t7CZMWtczlbJKBMcyuujsjTej62ZskAz9S9LC3A8ppLYe2/8WUZueLehXVLfy3rO
+
+    c/7LU+zQ9kcP/nEuFgMYzcLxI8bJligI5JdtXL5baK3DX/9UsWzDouHePCCYo07k
+
+    xInodc9WCHKeAriV6qCctOm6TIhB30hDNQl+rnF2c+Ead5hyP1UneUW+8D8RSxe7
+
+    CT27o3IpAoGBAP8e4N+dbUxAAlRx+20Dgad7/g/zBb+HJyUIsmpxF5RebJNLp03o
+
+    8bOS1we/lS1HIQFolveR/pcoWowJUpDkPJLXC8Wnfjs5NvKRsqV5OLp+m9DynQ1y
+
+    xZmTfHJV4aluZvUd5Azw0lOdAgNu97fLS4IYS6hRtuEncSwWbDHIN9GlAoGBANJB
+
+    p2Z4h49XJ0YigUw7S/VyuU9vSA6nLQwehBMyAl6hmvnCg3DB8sNuOQYZcqr+aUyc
+
+    gicyiEwvwW8Qbm837eqv/8CJSkfQq/8JVg6F4vRweNI5eYv2N/ZInmSn1opYzqEd
+
+    J4TFalwwgUqbLer+dhCjfcn6mdkRyrnW1GepvXMTAoGBAJcJIdpg6gcdUgPKYy5b
+
+    yBNuna+1kW6dRfhv2KiZgXsuF5twS4EdapKEWVdV/awLkyexUscIoK++jTSghEgR
+
+    RycrtuVyTpIjQjuDND8wr/wA3qBqMb53dzJ/lUpfO7TCDqQI6S0cGXi02C9OL8uY
+
+    yIAhgBELJ3jOj/qo367tONadAoGAKz8l14XjHDCo+1wNjEiGDy5Rv2z8PUdVlLCS
+
+    KgCXTC+hWM4RixfZfykkwYRqeZFqxz9J5hYWwtTvzJBspqOyZBtfV5LlnG/ncMXS
+
+    1ZnkXLLlpxf7UDaMvDOjoMCE+F/b4HfGsSCKB/xSG65fe35renCmZu0MyAFI2YC+
+
+    n7PiK9ECgYAZ/9KBk0FQmOpc13+oqHyMPOUHPnfFkmVVJgaJQkLWst3x6+Mx0uQW
+
+    KFddR3UNh8V+oOP/WujT85WtueM2E3M4/C+koeVlaDFh8g4qglnpSuT4CTqLpmPb
+
+    KYWKD0IElw7/4ny4VTTMe6KbnDV0A154tFNvsTX6ELvy4V8OFuPfnQ==
+
+    -----END RSA PRIVATE KEY-----'
+  backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDRiMIhyweromZqLl17X3A4RF1sww/WS9ocAraPm0KLLAmuaw1j6fMbMUqpSK5b8eW8cJjpQ12UPLPoftZ2HOZGgWBUjNYZZHWsmTvQPYl035CsqEAGoCEn/Mg6fRu3HfLfRVogcBHpX0/qx7Ngu1pAA/0su4dX720kfR1jeyKxgN3091IefajQJil7Y6O3W9oqGEhu3PGVpczhd6XFVtpqGUjLNbqihdgvnPBxwb6DXVPRH3Ppg3Kb6kjVqsWM5Oht4SIKQNRf6gV38gKbee23os/r2BkpwZI4UH/yfXucFl+apB+m5LBkqrCnqlxrhVpPa4nHSNb2YFsyDKGjIq4/
+  barbican_backend: dogtag
+  barbican_enabled: 'False'
+  bmk_enabled: 'False'
+  ceph_enabled: 'False'
+  cfg_failsafe_ssh_public_key: '1'
+  cicd_enabled: 'False'
+  cicd_private_key: '-----BEGIN RSA PRIVATE KEY-----
+
+    MIIEpAIBAAKCAQEA0EnXqmZqVb+/ebPURO9wb3A8ntUfQmQOC1WNJv/hU7XathNA
+
+    kDmDSMCn9a7m7WbuANpVhZkmstebNgHFoCtGSL4JJYRrBB5QKN0QrpFKiGaetCE2
+
+    eGDAmvvFx9hgbQUwtuJvAhUg4sSq6WY0yblcID3fLX4YKuDVvS8fisB6i1xrQ/tQ
+
+    qEF8S5rsoXUigYnZZwpnkRgSuRtECY8OFSXH4sv+JPEVhrx5GD7CtwVuYliIg1mw
+
+    nF7J5X9wPNmNnm8g4Nz07zpA2FMoF5+QuPZrV7t3Xm2hcSmMZbsGG4zy2pqbwDvR
+
+    5FgQLfaPBYcqrrIr/kCnCajDzpZdBxIjdXPK9QIDAQABAoIBAQCEiVhIP58OLVND
+
+    t+AsdUfQQVTxpJ+deVlOcQO+ezgAMkmXbiy5GT+mvaCivsaG7eYoeMXOmyN6zaMf
+
+    /ISqZJ72jqX3T4lhN4C+X9zLl/wbS2FVMYUdmEM221qAzfidpp3W4cLNSnCAm7A9
+
+    kCGq8t3iTjyDECeEsUiQdznU6qGPpvqRC9E2wlatbUowYT8VSbtc9aDGkZNMnZAP
+
+    ypBzGQOhIIIs3V3m0XqF5dsqxq+IjZmBjkJ8TBEyRre+Hu25r4ksQk42Qw8Lj1yI
+
+    W/+XTJiI04XLbCubeknQuTy3baku1i58gEVuJcYdeC3pCF4nu1PvBRxgVE1TU1xq
+
+    mIL2rBrJAoGBAPjSOvM/XmBfwW3znJ7xknDoLFq2yoI5bAr0ehr0/VLbplAybY1b
+
+    6mWcpiWcNPnCwAUXUjI8FRklQwMrCNvdXBlixZM5Au1Bsg1JjuYrQScc8dRFDWH5
+
+    8YDFxrR9ijFkYmhTHOMfm2vk5BxaOgIvAwv5XN43Li1nKAjlwU3euPZTAoGBANZM
+
+    PbSHJ3Y8llOWNwFqqYC9VGVAC5K+kKKmJmUKBluZpHyYYwdGqF+ItM8LzLilW/jj
+
+    CZF9KU4lwovbTHDsvOvYPJFO+nVfXYpiqFG95X5w9L1qnfLhWNfbNjp1JgzOadGb
+
+    RIPKktjjJEde9ShNPRfHWRzgxAvj+57moq3v64CXAoGAAlyMbq6VSLjf7xk2zVNh
+
+    nmy1rw65EU9WNSxo1ESq+tRW2cAAxiyvJtO7x/OZlR1CEUpNX2iukpSF9Eu+Q8fw
+
+    DdWgJmuOGY5cEEA4ePrEHYjqyqb1H47tudkmr6PZYeqf/Hl9drJgGUAM4jABCPBF
+
+    SSHOvdUsPQYTnTIBCaopez0CgYEAsj9YVADXYhGjOIOJ3TPLKbpRqKZM+hJoW+G3
+
+    rfNYtnhlyP034aV0B4K+Yjl+X1er2KmAG/Pvl4DxAUO3oXZI7iM+vd7jlR1twN2v
+
+    A87gRPvPln5IQu4N5/3+bUFkFOLcQezUYIru+lh0pKjnYk5cspquFMcgSoOnl9Rf
+
+    HC6jxKECgYBxh0hH+vmudh36zuhqghZSI+DLd6WHpzpnnQeSkI2sHPB7EGTZ8+Ql
+
+    Rykt8XGiZvkuc9geH5Sc6aIKJr7WWTxgwmhj7T6iBHLFLyGfcAvUAGcLMOnjNFcL
+
+    qEaNVOfzXB9ZBN1h8wRbxoKx+o2c78agrQyetEyiz7wkYFQKj8xq4Q==
+
+    -----END RSA PRIVATE KEY-----'
+  cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDQSdeqZmpVv795s9RE73BvcDye1R9CZA4LVY0m/+FTtdq2E0CQOYNIwKf1rubtZu4A2lWFmSay15s2AcWgK0ZIvgklhGsEHlAo3RCukUqIZp60ITZ4YMCa+8XH2GBtBTC24m8CFSDixKrpZjTJuVwgPd8tfhgq4NW9Lx+KwHqLXGtD+1CoQXxLmuyhdSKBidlnCmeRGBK5G0QJjw4VJcfiy/4k8RWGvHkYPsK3BW5iWIiDWbCcXsnlf3A82Y2ebyDg3PTvOkDYUygXn5C49mtXu3debaFxKYxluwYbjPLampvAO9HkWBAt9o8Fhyqusiv+QKcJqMPOll0HEiN1c8r1
+  cluster_domain: small_cloud.local
+  cluster_name: small_cloud
+  compute_bond_mode: active-backup
+  compute_padding_with_zeros: 'True'
+  compute_primary_first_nic: eth1
+  compute_primary_second_nic: eth2
+  context_seed: 88zA67wwzN74hI8Vzpy7CCEDXPGfKGUv37965C5bKeIZM8436V73PhAT9yaLDUYV0xj8zpidxbmh0FMN83dzNWAA
+  control_network_netmask: 255.255.255.0
+  control_network_subnet: 10.60.0.0/24
+  control_vlan: '10'
+  cookiecutter_template_branch: ''
+  cookiecutter_template_credentials: gerrit
+  cookiecutter_template_url: ssh://gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+  deploy_network_gateway: 10.70.0.1
+  deploy_network_netmask: 255.255.255.0
+  deploy_network_subnet: 10.70.0.0/24
+  deployment_type: physical
+  designate_backend: powerdns
+  designate_enabled: 'False'
+  openstack_create_public_network: 'True'
+  openstack_public_neutron_subnet_gateway: 172.17.41.129
+  openstack_public_neutron_subnet_cidr: 172.17.41.128/26
+  openstack_public_neutron_subnet_allocation_start: 172.17.41.170
+  openstack_public_neutron_subnet_allocation_end: 172.17.41.190
+  dns_server01: 172.18.176.6
+  dns_server02: 172.18.208.44
+  email_address: sgudz@mirantis.com
+  gainsight_service_enabled: 'False'
+  galera_ssl_enabled: 'False'
+  gateway_primary_first_nic: eth1
+  gateway_primary_second_nic: eth2
+  gnocchi_aggregation_storage: file
+  infra_bond_mode: active-backup
+  infra_deploy_nic: eth0
+  infra_kvm01_control_address: 10.60.0.11
+  infra_kvm01_hostname: kvm01
+  infra_kvm02_control_address: 10.60.0.12
+  infra_kvm02_hostname: kvm02
+  infra_kvm03_control_address: 10.60.0.13
+  infra_kvm03_hostname: kvm03
+  infra_kvm_vip_address: 10.60.0.10
+  infra_primary_first_nic: eth1
+  infra_primary_second_nic: eth2
+  internal_proxy_enabled: 'False'
+  ironic_enabled: 'True'
+  kubernetes_ctl_on_kvm: 'False'
+  kubernetes_enabled: 'False'
+  local_repositories: 'False'
+  maas_enabled: 'False'
+  manila_enabled: 'False'
+  manila_lvm_devices: /dev/sdb,/dev/sdc
+  manila_lvm_volume_name: manila-volume
+  manila_share_backend: lvm
+  mcp_common_scripts_branch: ''
+  mcp_version: 2019.2.0
+  motd_company_name: MirantisTestDeployment
+  no_platform: 'False'
+  nova_vnc_tls_enabled: 'False'
+  octavia_private_key: '-----BEGIN RSA PRIVATE KEY-----
+
+    MIIEpAIBAAKCAQEAuiHjkWRkhIWdhiRPqdE/rYIbcYsMkDSIZrlu0yzicGBxyNOH
+
+    qwzW48zQr5y/q4HaaPC5HB4LGylnEnyM4vSII+kiT9E8T1sr/XNi9GqYonPVEDUu
+
+    aE8EmZ1RnVppTqpkbGpRnF6GmBtGSdthM3pYt97/UeaviFJye3G3tz47pYwXND4j
+
+    6maElcSoUEntpHkJ5esBy+G3yr2Y9mF9EOV6ZNxN1jIc2ufxFTQruhqxx8ug1EWF
+
+    9JlEByfML6gwHq3FgSz2MHWX+to+LRJALv5KY4UAworAPzafY4/557c6ggqvvakk
+
+    wqju59z5QWqBV8Vu+30VTdbQd8xsnYlPdAUziwIDAQABAoIBAAkF5YtBy6Gk/Irp
+
+    Lbd0vlqB6SSq8fP3p/ka2iWAkoWSF1H99yCyoaQvZJpuxdfhlp3ptX+IcU9ixW1e
+
+    /lFuOz9KaPBnmz3ZlJtXwZ9Jn5aYeOatPU+vLPDpcHBpVpuKpQ2gZc5nwqyS/ehM
+
+    qzycnLplJAlrXm0EWipjy90lNi0DNV57hpaKKH5Ti/UGQnGTAfpFXf/YMu4PVVFj
+
+    GHx9VyTAX7dM1yRHMIF4/3qFAnbig8erCeRAEeUhLjMs6T1KidgBltyr7dz4mYoX
+
+    3MdgwTL4rnBCFFopN8vwcK+7bkj/6D1g2a6RnExa6ZB4QJBf28iBfn+i7qoNvt+1
+
+    iwgo9DECgYEA3IpFCAOMYiHagRrv3jTT0qKgCYBdt1La2Sjj7hCZUYJBp1it5zjr
+
+    VbT2J4xH38fr8OflEwcdpJnOVPKQA3AgRrixF8wG5WxIaDxBruOYE6QoqWSwnNYJ
+
+    egcKIDHbZH0C9y+P45IGbHEUEKJIdtSHo6uJYEQ+JSJdlR/BuTQDCFkCgYEA2A9Z
+
+    oxu7gwc1y01Y0FskU0guLm1XyDQEUf7uyFOMu7Fou/Nb4mKLVH5WmM5MQSBt77ow
+
+    yBOZliT7ucSzQYkZ7xENAJ4QNzLAsQ9nBOgAjpnSCM6KZXdklntdl0yc5vy3CrkK
+
+    QxcRURJdvrCNh+yGON7gh2tN3hmHldz9mIe5noMCgYBZL5WBCyZPXQ9nHISaYNS0
+
+    ns2HaiD8DQaxwUaDtLS78KgqS8Nv+WW6MEdHcQRz6/5vWugZjMwhD44QblNtAIAc
+
+    +X2sePbk+qhBdvS9DA8pCj7jWfPOSQRmE8i1glQNzDxRyCsxRZFRc11A6M/TNllw
+
+    B+OzSYW0MLSVpNUL14vOaQKBgQCx8IJsOKzHjqqZEsxwXH+gLzQJfHftJBnixcg2
+
+    J8kh00xkblpgKbSCE8KR+mUGxgSEiJ0gSjQVWcQPDJZtBNLc9vf0aDkGnL/hksPP
+
+    YJsE1l4Kbr/ALQIkhNlhf/FAsyS0qgxvkJHsaOnX4GPPa9ZnA/d6z77DidcGb4hT
+
+    lIQkrQKBgQCMKuX59yLIOEHgiaBNnM1/nunSyinlTbEU7mJuYKPdScJ8qi/CouN8
+
+    UBeSH9OEKUDqxj1V8BeHKCPcHrLRwTsYONpR+6VQ2n0Y7UUHwz4ZY+ljAUHhD/4B
+
+    d6GOUmhxa7PCcr2g4RwsGyDwvwm7fyQb0cCPW0aLeiLeVsdUBd5gbg==
+
+    -----END RSA PRIVATE KEY-----'
+  octavia_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC6IeORZGSEhZ2GJE+p0T+tghtxiwyQNIhmuW7TLOJwYHHI04erDNbjzNCvnL+rgdpo8LkcHgsbKWcSfIzi9Igj6SJP0TxPWyv9c2L0apiic9UQNS5oTwSZnVGdWmlOqmRsalGcXoaYG0ZJ22Ezeli33v9R5q+IUnJ7cbe3PjuljBc0PiPqZoSVxKhQSe2keQnl6wHL4bfKvZj2YX0Q5Xpk3E3WMhza5/EVNCu6GrHHy6DURYX0mUQHJ8wvqDAercWBLPYwdZf62j4tEkAu/kpjhQDCisA/Np9jj/nntzqCCq+9qSTCqO7n3PlBaoFXxW77fRVN1tB3zGydiU90BTOL
+  offline_deployment: 'False'
+  opencontrail_compute_iface_mask: '24'
+  opencontrail_enabled: 'False'
+  openscap_enabled: 'False'
+  openssh_groups: ''
+  openstack_benchmark_node01_address: 10.60.0.95
+  openstack_benchmark_node01_hostname: bmk01
+  openstack_cluster_size: compact
+  openstack_compute_control_address_ranges: 10.60.0.101-10.60.0.200
+  openstack_compute_count: '100'
+  openstack_compute_rack01_hostname: cmp
+  openstack_compute_tenant_address_ranges: 10.80.0.101-10.80.0.200
+  openstack_control_address: 10.60.0.10
+  openstack_control_hostname: ctl
+  openstack_control_node01_address: 10.60.0.11
+  openstack_control_node01_hostname: ctl01
+  openstack_control_node02_address: 10.60.0.12
+  openstack_control_node02_hostname: ctl02
+  openstack_control_node03_address: 10.60.0.13
+  openstack_control_node03_hostname: ctl03
+  openstack_database_address: 10.60.0.10
+  openstack_database_hostname: ctl
+  openstack_database_node01_address: 10.60.0.11
+  openstack_database_node01_hostname: ctl01
+  openstack_database_node02_address: 10.60.0.12
+  openstack_database_node02_hostname: ctl02
+  openstack_database_node03_address: 10.60.0.13
+  openstack_database_node03_hostname: ctl03
+  openstack_enabled: 'True'
+  openstack_gateway_node01_address: 10.60.0.224
+  openstack_gateway_node01_hostname: gtw01
+  openstack_gateway_node01_tenant_address: 10.80.0.6
+  openstack_gateway_node02_address: 10.60.0.225
+  openstack_gateway_node02_hostname: gtw02
+  openstack_gateway_node02_tenant_address: 10.80.0.7
+  openstack_gateway_node03_address: 10.60.0.226
+  openstack_gateway_node03_hostname: gtw03
+  openstack_gateway_node03_tenant_address: 10.80.0.8
+  openstack_internal_protocol: http
+  openstack_memcache_security_enabled: 'False'
+  openstack_message_queue_address: 10.60.0.10
+  openstack_message_queue_hostname: ctl
+  openstack_message_queue_node01_address: 10.60.0.11
+  openstack_message_queue_node01_hostname: ctl01
+  openstack_message_queue_node02_address: 10.60.0.12
+  openstack_message_queue_node02_hostname: ctl02
+  openstack_message_queue_node03_address: 10.60.0.13
+  openstack_message_queue_node03_hostname: ctl03
+  openstack_network_engine: ovs
+  openstack_neutron_bgp_vpn: 'False'
+  openstack_neutron_bgp_vpn_driver: bagpipe
+  openstack_neutron_qos: 'False'
+  openstack_neutron_vlan_aware_vms: 'False'
+  openstack_nfv_dpdk_enabled: 'False'
+  openstack_nfv_sriov_enabled: 'False'
+  openstack_nova_compute_nfv_req_enabled: 'False'
+  openstack_nova_compute_reserved_host_memory_mb: '900'
+  openstack_octavia_enabled: 'False'
+  openstack_ovs_dvr_enabled: 'False'
+  openstack_ovs_encapsulation_type: vxlan
+  openstack_proxy_address: 10.60.0.80
+  openstack_proxy_hostname: prx
+  openstack_proxy_node01_address: 10.60.0.81
+  openstack_proxy_node01_hostname: prx01
+  openstack_proxy_node02_address: 10.60.0.82
+  openstack_proxy_node02_hostname: prx02
+  openstack_upgrade_node01_address: 10.60.0.19
+  openstack_version: queens
+  platform: openstack_enabled
+  publication_method: email
+  rabbitmq_ssl_enabled: 'False'
+  reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+  salt_api_password: 8UWALgLbADCp18pAj8hCIzJE2ZWPXgK7E23n8W44ji0A
+  salt_api_password_hash: $6$FaHYNcaGIBJF$n4hvLCOhR0/IrbPVgWCfddYilsmXF8T1hj38VJ2auL5Y8DdY2TG2/wc6KNCivYe8uQ68L0keoDfcu1eio.WbS1
+  salt_master_address: 10.60.0.15
+  salt_master_hostname: cfg01
+  salt_master_management_address: 10.70.0.15
+  secrets_encryption_enabled: 'False'
+  secrets_encryption_private_key: '1'
+  sf_notifications_enabled: 'False'
+  shared_reclass_branch: ''
+  shared_reclass_url: https://github.com/Mirantis/reclass-system-salt-model.git
+  sriov_network_subnet: 10.55.0.0/16
+  stacklight_enabled: 'False'
+  static_ips_on_deploy_network_enabled: 'False'
+  tenant_network_gateway: 10.80.0.1
+  tenant_network_netmask: 255.255.255.0
+  tenant_network_subnet: 10.80.0.0/24
+  tenant_telemetry_enabled: 'False'
+  tenant_vlan: '20'
+  upstream_proxy_enabled: 'False'
+  use_default_network_scheme: 'True'
+  vnf_onboarding_enabled: 'False'
+  xtrabackup_client_throttle: '0'
diff --git a/tcp_tests/templates/cookied-small-mcp-ironic/environment-context.yaml b/tcp_tests/templates/cookied-small-mcp-ironic/environment-context.yaml
new file mode 100644
index 0000000..fb103d7
--- /dev/null
+++ b/tcp_tests/templates/cookied-small-mcp-ironic/environment-context.yaml
@@ -0,0 +1,105 @@
+nodes:
+    cfg01:
+      reclass_storage_name: infra_config_node01
+      roles:
+      - infra_config
+      - linux_system_codename_xenial
+      - features_runtest
+      classes:
+      - environment.cookied-small-mcp-ironic.override_ntp_virtual
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_static_ctl
+
+    ctl01:
+      reclass_storage_name: openstack_control_node01
+      roles:
+      - infra_kvm
+      - openstack_control_leader
+      - openstack_database_leader
+      - openstack_message_queue
+      - features_lvm_backend_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ctl02:
+      reclass_storage_name: openstack_control_node02
+      roles:
+      - infra_kvm
+      - openstack_control
+      - openstack_database
+      - openstack_message_queue
+      - features_lvm_backend_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    ctl03:
+      reclass_storage_name: openstack_control_node03
+      roles:
+      - infra_kvm
+      - openstack_control
+      - openstack_database
+      - openstack_message_queue
+      - features_lvm_backend_control
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+
+    prx01:
+      reclass_storage_name: openstack_proxy_node01
+      roles:
+      - openstack_proxy  # uses a separate VIP interface
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+        ens5:
+          role: single_external
+          external_address: 172.17.16.121
+          external_network_netmask: 255.255.255.0
+
+    gtw01:
+      reclass_storage_name: openstack_gateway_node01
+      roles:
+      - linux_system_codename_xenial
+      classes:
+      - system.linux.system.repo.mcp.apt_mirantis.docker
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+        ens5:
+          role: bond0_ab_ovs_vxlan_mesh
+        ens6:
+          role: bond1_ab_ovs_floating
+        ens7:
+          role: single_ovs_br_baremetal
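+          # ens7 is attached to the 'baremetal' network (see underlay.yaml) used for Ironic provisioning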
+
+    bmt01:
+      reclass_storage_name: openstack_baremetal_node01
+      roles:
+      - openstack_baremetal
+      - linux_system_codename_xenial
+      interfaces:
+        ens3:
+          role: single_dhcp
+        ens4:
+          role: single_ctl
+        ens7:
+          role: single_baremetal
diff --git a/tcp_tests/templates/cookied-small-mcp-ironic/salt.yaml b/tcp_tests/templates/cookied-small-mcp-ironic/salt.yaml
new file mode 100644
index 0000000..b82e26c
--- /dev/null
+++ b/tcp_tests/templates/cookied-small-mcp-ironic/salt.yaml
@@ -0,0 +1,26 @@
+{% from 'cookied-small-mcp-ironic/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-small-mcp-ironic/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-small-mcp-ironic/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.com/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for the other salt model repository parameters
+
+#- description: "Workaround for using glusterfs on single node"
+#  cmd: |
+#    set -e;
+#    find /srv/salt/reclass/classes/system/glusterfs/ -type f -exec sed -i 's/replica: .*//g' {} +
+#    find /srv/salt/reclass/classes/system/glusterfs/ -type f -exec sed -i 's/.*cluster_node02_address.*//g' {} +
+#    find /srv/salt/reclass/classes/system/glusterfs/ -type f -exec sed -i 's/.*cluster_node03_address.*//g' {} +
+#    find /srv/salt/reclass/classes/system/glusterfs/ -type f -exec sed -i 's/opts: .*/opts: \"defaults,backup-volfile-servers=${_param:glusterfs_node01_address}\"/g' {} +
+#
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 5}
+#  skip_fail: false
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/cookied-small-mcp-ironic/underlay--meta-data.yaml b/tcp_tests/templates/cookied-small-mcp-ironic/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/cookied-small-mcp-ironic/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+  instance-id: iid-local1
+  hostname: {hostname}
+  local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-small-mcp-ironic/underlay--user-data1604-swp.yaml b/tcp_tests/templates/cookied-small-mcp-ironic/underlay--user-data1604-swp.yaml
new file mode 100644
index 0000000..81936a4
--- /dev/null
+++ b/tcp_tests/templates/cookied-small-mcp-ironic/underlay--user-data1604-swp.yaml
@@ -0,0 +1,84 @@
+| # All the data below will be stored as a string object
+  #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+  ssh_pwauth: True
+  users:
+   - name: root
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     {% for key in config.underlay.ssh_keys %}
+      - ssh-rsa {{ key['public'] }}
+     {% endfor %}
+
+  disable_root: false
+  chpasswd:
+   list: |
+    root:r00tme
+   expire: False
+
+  bootcmd:
+   # Enable root access
+   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+   - service sshd restart
+  output:
+    all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+  runcmd:
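+   # Grow the LVM layout when the image uses volume group 'vg0'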
+   - if lvs vg0; then pvresize /dev/vda3; fi
+   - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
+   - export TERM=linux
+   - export LANG=C
+   # Configure dhclient
+   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+   - sudo resolvconf -u
+
+   # Enable grub menu using updated config below
+   - update-grub
+
+   # Prepare network connection
+   - sudo ifup ens3
+   #- sudo route add default gw {gateway} {interface_name}
+
+   # Create swap
+   - fallocate -l 16G /swapfile
+   - chmod 600 /swapfile
+   - mkswap /swapfile
+   - swapon /swapfile
+   - echo "/swapfile   none    swap    defaults   0   0" >> /etc/fstab
+
+  write_files:
+   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+     content: |
+         GRUB_RECORDFAIL_TIMEOUT=30
+         GRUB_TIMEOUT=3
+         GRUB_TIMEOUT_STYLE=menu
+
+   - path: /etc/network/interfaces
+     content: |
+          auto ens3
+          iface ens3 inet dhcp
+
+   - path: /usr/share/growlvm/image-layout.yml
+     content: |
+       root:
+         size: '30%VG'
+       home:
+         size: '1G'
+       var_log:
+         size: '11%VG'
+       var_log_audit:
+         size: '5G'
+       var_tmp:
+         size: '11%VG'
+       tmp:
+         size: '5G'
+     owner: root:root
+
+  growpart:
+    mode: auto
+    devices:
+      - '/'
+      - '/dev/vda3'
+    ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/cookied-small-mcp-ironic/underlay.yaml b/tcp_tests/templates/cookied-small-mcp-ironic/underlay.yaml
new file mode 100644
index 0000000..0311867
--- /dev/null
+++ b/tcp_tests/templates/cookied-small-mcp-ironic/underlay.yaml
@@ -0,0 +1,397 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% import 'cookied-small-mcp-ironic/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-small-mcp-ironic/underlay--user-data1604-swp.yaml' as CLOUDINIT_USER_DATA_1604_SWP with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_1604_swp {{ CLOUDINIT_USER_DATA_1604_SWP }}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-small-mcp-ironic') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_BMT01 = os_env('HOSTNAME_BMT01', 'bmt01.' + DOMAIN_NAME) %}
+
+template:
+  devops_settings:
+    env_name: {{ os_env('ENV_NAME', 'cookied-small-mcp-ironic_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+    address_pools:
+      private-pool01:
+        net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
+        params:
+          ip_reserved:
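+            # Offsets like '+15' are relative to the network address of the pool
+            # (e.g. cfg01 gets 10.60.0.15 in the default private pool)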
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+            default_{{ HOSTNAME_BMT01 }}: +76
+          ip_ranges:
+            dhcp: [+90, -10]
+
+      admin-pool01:
+        net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+            default_{{ HOSTNAME_BMT01 }}: +76
+          ip_ranges:
+            dhcp: [+90, -10]
+
+      tenant-pool01:
+        net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+10, -10]
+
+      external-pool01:
+        net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+        params:
+          ip_reserved:
+            gateway: +1
+            l2_network_device: +1
+            default_{{ HOSTNAME_CFG01 }}: +15
+            default_{{ HOSTNAME_CTL01 }}: +101
+            default_{{ HOSTNAME_CTL02 }}: +102
+            default_{{ HOSTNAME_CTL03 }}: +103
+            default_{{ HOSTNAME_GTW01 }}: +110
+            default_{{ HOSTNAME_PRX01 }}: +121
+          ip_ranges:
+            dhcp: [+180, +220]
+
+    groups:
+      - name: default
+        driver:
+          name: devops.driver.libvirt
+          params:
+            connection_string: !os_env CONNECTION_STRING, qemu:///system
+            storage_pool_name: !os_env STORAGE_POOL_NAME, default
+            stp: False
+            hpet: False
+            enable_acpi: true
+            use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+            use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+
+        network_pools:
+          admin: admin-pool01
+          private: private-pool01
+          tenant: tenant-pool01
+          external: external-pool01
+
+        l2_network_devices:
+          private:
+            address_pool: private-pool01
+            dhcp: false
+            forward:
+              mode: route
+
+          admin:
+            address_pool: admin-pool01
+            dhcp: true
+            forward:
+              mode: nat
+
+          tenant:
+            address_pool: tenant-pool01
+            dhcp: false
+
+          external:
+            address_pool: external-pool01
+            dhcp: false
+            forward:
+              mode: route
+
+          baremetal:
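+            # Attach the 'baremetal' network to the host interface from IFACE_IRONIC,
+            # so Ironic nodes can reach the provisioning network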
+            parent_iface:
+              phys_dev: {{ os_env('IFACE_IRONIC', 'veth-contdpdk1') }}
+
+
+        group_volumes:
+         - name: cloudimage1604    # This name is used as the 'backing_store' option for node volumes.
+           source_image: {{ os_env('MCP_IMAGE_PATH1604') }}  # http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcpproposed.qcow2
+           format: qcow2
+         - name: cfg01_day01_image               # Pre-configured day01 image
+           source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+           format: qcow2
+         - name: mcp_ubuntu_1604_image           # Pre-configured image for control plane
+           source_image: !os_env MCP_IMAGE_PATH1604
+           format: qcow2
+
+        nodes:
+          - name: {{ HOSTNAME_CFG01 }}
+            role: salt_master
+            params:
+              vcpu: {{ os_env('CFG_NODE_CPU', 3) }}
+              memory: {{ os_env('CFG_NODE_MEMORY', 8192) }}
+              boot:
+                - hd
+              volumes:
+                - name: system
+                  capacity: {{ os_env('CFG_NODE_VOLUME_SIZE', 150) }}
+                  backing_store: cfg01_day01_image
+                  format: qcow2
+                - name: config
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  # source_image: !os_env CFG01_CONFIG_PATH # No source image is required;
+                                                            # it is uploaded after the config drive is generated.
+              interfaces:
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config:
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+
+          - name: {{ HOSTNAME_CTL01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' stores the image
+                             # with the cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: &interfaces
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+              network_config: &network_config
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+
+          - name: {{ HOSTNAME_CTL02 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' stores the image
+                             # with the cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_CTL03 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 12288
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' stores the image
+                             # with the cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *interfaces
+              network_config: *network_config
+
+          - name: {{ HOSTNAME_PRX01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 1
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: mcp_ubuntu_1604_image
+                  format: qcow2
+                - name: iso  # The volume named 'iso' stores the image
+                             # with the cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces:
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+                - label: ens5
+                  l2_network_device: external
+                  interface_model: *interface_model
+              network_config:
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+                ens5:
+                  networks:
+                    - external
+
+          - name: {{ HOSTNAME_GTW01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 2048
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # The volume named 'iso' stores the image
+                             # with the cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: &all_gtw_interfaces
+                - label: ens3
+                  l2_network_device: admin
+                  interface_model: *interface_model
+                - label: ens4
+                  l2_network_device: private
+                  interface_model: *interface_model
+                - label: ens5
+                  l2_network_device: tenant
+                  interface_model: *interface_model
+                - label: ens6
+                  l2_network_device: external
+                  interface_model: *interface_model
+                - label: ens7
+                  l2_network_device: baremetal
+                  interface_model: *interface_model
+              network_config: &all_gtw_network_config
+                ens3:
+                  networks:
+                    - admin
+                ens4:
+                  networks:
+                    - private
+                ens5:
+                  networks:
+                    - tenant
+                ens6:
+                  networks:
+                    - external
+                ens7:
+                  networks:
+                    - baremetal
+
+          - name: {{ HOSTNAME_BMT01 }}
+            role: salt_minion
+            params:
+              vcpu: !os_env SLAVE_NODE_CPU, 2
+              memory: !os_env SLAVE_NODE_MEMORY, 4096
+              boot:
+                - hd
+              cloud_init_volume_name: iso
+              cloud_init_iface_up: ens3
+              volumes:
+                - name: system
+                  capacity: !os_env NODE_VOLUME_SIZE, 150
+                  backing_store: cloudimage1604
+                  format: qcow2
+                - name: iso  # The volume named 'iso' stores the image
+                             # with the cloud-init metadata.
+                  capacity: 1
+                  format: raw
+                  device: cdrom
+                  bus: ide
+                  cloudinit_meta_data: *cloudinit_meta_data
+                  cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+              interfaces: *all_gtw_interfaces
+              network_config: *all_gtw_network_config