Merge "Remove stacklight_long_term_storage_type from cookiecutter template"
diff --git a/jobs/pipelines/deploy-cicd-and-run-tests.groovy b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
index 55dda48..8d356e4 100644
--- a/jobs/pipelines/deploy-cicd-and-run-tests.groovy
+++ b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
@@ -92,8 +92,18 @@
                 dos.py destroy ${ENV_NAME} || true
             """)
         }
-        // report results to testrail
-        shared.swarm_testrail_report(steps)
+
+        stage("Archive all xml reports") {
+            archiveArtifacts artifacts: "**/*.xml,**/*.ini,**/*.log,**/*.tar.gz"
+        }
+        stage("report results to testrail") {
+            shared.swarm_testrail_report(steps)
+        }
+        stage("Store TestRail reports to job description") {
+            def String description = readFile("description.txt")
+            currentBuild.description += "\n${description}"
+        }
+
     }
   }
 }
\ No newline at end of file
diff --git a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
index 64c8783..570f47f 100644
--- a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
+++ b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
@@ -38,54 +38,54 @@
         error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
     }
     dir("${PARENT_WORKSPACE}") {
+        stage("Cleanup: erase ${ENV_NAME} and remove config drive") {
+            println "Remove environment ${ENV_NAME}"
+            shared.run_cmd("""\
+                dos.py erase ${ENV_NAME} || true
+            """)
+            println "Remove config drive ISO"
+            shared.run_cmd("""\
+                rm /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} || true
+            """)
+        }
+
+        if (env.TCP_QA_REFS) {
+            stage("Update working dir to patch ${TCP_QA_REFS}") {
+                shared.update_working_dir()
+            }
+        }
+
+        stage("Create an environment ${ENV_NAME} in disabled state") {
+            // deploy_hardware.xml
+            shared.run_cmd("""\
+                export ENV_NAME=${ENV_NAME}
+                export LAB_CONFIG_NAME=${LAB_CONFIG_NAME}
+                export MANAGER=devops
+                export PYTHONIOENCODING=UTF-8
+                export REPOSITORY_SUITE=${MCP_VERSION}
+                export TEST_GROUP=test_create_environment
+                py.test -vvv -s -p no:django -p no:ipdb --junit-xml=deploy_hardware.xml -k \${TEST_GROUP}
+            """)
+        }
+
+        stage("Generate the model") {
+            shared.generate_cookied_model()
+        }
+
+        stage("Generate config drive ISO") {
+            shared.generate_configdrive_iso()
+        }
+
+        stage("Upload generated config drive ISO into volume on cfg01 node") {
+            shared.run_cmd("""\
+                # Get SALT_MASTER_HOSTNAME to determine the volume name
+                . ./tcp_tests/utils/env_salt
+                virsh vol-upload ${ENV_NAME}_\${SALT_MASTER_HOSTNAME}_config /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} --pool default
+                virsh pool-refresh --pool default
+            """)
+        }
+
         try {
-            stage("Cleanup: erase ${ENV_NAME} and remove config drive") {
-                println "Remove environment ${ENV_NAME}"
-                shared.run_cmd("""\
-                    dos.py erase ${ENV_NAME} || true
-                """)
-                println "Remove config drive ISO"
-                shared.run_cmd("""\
-                    rm /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} || true
-                """)
-            }
-
-            if (env.TCP_QA_REFS) {
-                stage("Update working dir to patch ${TCP_QA_REFS}") {
-                    shared.update_working_dir()
-                }
-            }
-
-            stage("Create an environment ${ENV_NAME} in disabled state") {
-                // deploy_hardware.xml
-                shared.run_cmd("""\
-                    export ENV_NAME=${ENV_NAME}
-                    export LAB_CONFIG_NAME=${LAB_CONFIG_NAME}
-                    export MANAGER=devops
-                    export PYTHONIOENCODING=UTF-8
-                    export REPOSITORY_SUITE=${MCP_VERSION}
-                    export TEST_GROUP=test_create_environment
-                    py.test -vvv -s -p no:django -p no:ipdb --junit-xml=deploy_hardware.xml -k \${TEST_GROUP}
-                """)
-            }
-
-            stage("Generate the model") {
-                shared.generate_cookied_model()
-            }
-
-            stage("Generate config drive ISO") {
-                shared.generate_configdrive_iso()
-            }
-
-            stage("Upload generated config drive ISO into volume on cfg01 node") {
-                shared.run_cmd("""\
-                    # Get SALT_MASTER_HOSTNAME to determine the volume name
-                    . ./tcp_tests/utils/env_salt
-                    virsh vol-upload ${ENV_NAME}_\${SALT_MASTER_HOSTNAME}_config /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} --pool default
-                    virsh pool-refresh --pool default
-                """)
-            }
-
             stage("Run the 'underlay' and 'salt-deployed' fixtures to bootstrap salt cluster") {
                 // deploy_salt.xml
                 shared.run_cmd("""\
@@ -103,7 +103,8 @@
             }
 
           } catch (e) {
-              common.printMsg("Job is failed", "purple")
+              common.printMsg("Saltstack cluster deploy is failed", "purple")
+              shared.download_logs("deploy_salt")
               throw e
           } finally {
             // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
diff --git a/jobs/pipelines/swarm-deploy-cicd.groovy b/jobs/pipelines/swarm-deploy-cicd.groovy
index 5ace2ca..7d7fd63 100644
--- a/jobs/pipelines/swarm-deploy-cicd.groovy
+++ b/jobs/pipelines/swarm-deploy-cicd.groovy
@@ -29,29 +29,30 @@
         error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
     }
     dir("${PARENT_WORKSPACE}") {
-        try {
 
-            if (! env.STACK_INSTALL) {
-                error "'STACK_INSTALL' must contain one or more comma separated stack names for [deploy_openstack] pipeline"
+        if (! env.STACK_INSTALL) {
+            error "'STACK_INSTALL' must contain one or more comma separated stack names for [deploy_openstack] pipeline"
+        }
+
+        if (env.TCP_QA_REFS) {
+            stage("Update working dir to patch ${TCP_QA_REFS}") {
+                shared.update_working_dir()
+            }
+        }
+
+        // Install core and cicd
+        def stack
+        def timeout
+
+        for (element in "${env.STACK_INSTALL}".split(",")) {
+            if (element.contains(':')) {
+                (stack, timeout) = element.split(':')
+            } else {
+                stack = element
+                timeout = '1800'
             }
 
-            if (env.TCP_QA_REFS) {
-                stage("Update working dir to patch ${TCP_QA_REFS}") {
-                    shared.update_working_dir()
-                }
-            }
-
-            // Install core and cicd
-            def stack
-            def timeout
-
-            for (element in "${env.STACK_INSTALL}".split(",")) {
-                if (element.contains(':')) {
-                    (stack, timeout) = element.split(':')
-                } else {
-                    stack = element
-                    timeout = '1800'
-                }
+            try {
                 stage("Run Jenkins job on salt-master [deploy_openstack:${stack}]") {
                     shared.run_job_on_day01_node(stack, timeout)
                 }
@@ -60,23 +61,25 @@
                     shared.sanity_check_component(stack)
                 }
 
-                stage("Make environment snapshot [${stack}_deployed]") {
-                    shared.devops_snapshot(stack)
+            } catch (e) {
+                common.printMsg("Job is failed", "purple")
+                shared.download_logs("deploy_${stack}")
+                throw e
+            } finally {
+                // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+                // and report appropriate data to TestRail
+                // TODO(ddmitriev): add checks for cicd cluster
+                if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+                    shared.run_cmd("""\
+                        dos.py destroy ${ENV_NAME}
+                    """)
                 }
             }
 
-        } catch (e) {
-            common.printMsg("Job is failed", "purple")
-            throw e
-        } finally {
-            // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
-            // and report appropriate data to TestRail
-            // TODO(ddmitriev): add checks for cicd cluster
-            if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
-                shared.run_cmd("""\
-                    dos.py destroy ${ENV_NAME}
-                """)
+            stage("Make environment snapshot [${stack}_deployed]") {
+                shared.devops_snapshot(stack)
             }
-        }
-    }
-}
+
+        } // for
+    } // dir
+} // node
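
Both swarm-deploy-cicd.groovy (above) and swarm-deploy-platform.groovy (below) now wrap each stack in its own try/catch/finally and iterate over STACK_INSTALL entries given as "name" or "name:timeout", defaulting the timeout to 1800. A minimal Python sketch of that parsing convention, for illustration only (the helper name parse_stack_install is hypothetical and not part of the patch):

    # Sketch of the STACK_INSTALL parsing used by the loops in these pipelines.
    def parse_stack_install(value, default_timeout='1800'):
        stacks = []
        for element in value.split(','):
            if ':' in element:
                stack, timeout = element.split(':', 1)
            else:
                stack, timeout = element, default_timeout
            stacks.append((stack, timeout))
        return stacks

    # Example: parse_stack_install("core,cicd:3600") -> [('core', '1800'), ('cicd', '3600')]
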
diff --git a/jobs/pipelines/swarm-deploy-platform.groovy b/jobs/pipelines/swarm-deploy-platform.groovy
index 9a6b1d1..42ebc7e 100644
--- a/jobs/pipelines/swarm-deploy-platform.groovy
+++ b/jobs/pipelines/swarm-deploy-platform.groovy
@@ -29,29 +29,31 @@
         error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
     }
     dir("${PARENT_WORKSPACE}") {
-        try {
 
-            if (! env.STACK_INSTALL) {
-                error "'STACK_INSTALL' must contain one or more comma separated stack names for [deploy_openstack] pipeline"
+        if (! env.STACK_INSTALL) {
+            error "'STACK_INSTALL' must contain one or more comma separated stack names for [deploy_openstack] pipeline"
+        }
+
+        if (env.TCP_QA_REFS) {
+            stage("Update working dir to patch ${TCP_QA_REFS}") {
+                shared.update_working_dir()
+            }
+        }
+
+        // Install the cluster
+        def stack
+        def timeout
+
+        for (element in "${STACK_INSTALL}".split(",")) {
+            if (element.contains(':')) {
+                (stack, timeout) = element.split(':')
+            } else {
+                stack = element
+                timeout = '1800'
             }
 
-            if (env.TCP_QA_REFS) {
-                stage("Update working dir to patch ${TCP_QA_REFS}") {
-                    shared.update_working_dir()
-                }
-            }
+            try {
 
-            // Install the cluster
-            def stack
-            def timeout
-
-            for (element in "${STACK_INSTALL}".split(",")) {
-                if (element.contains(':')) {
-                    (stack, timeout) = element.split(':')
-                } else {
-                    stack = element
-                    timeout = '1800'
-                }
                 stage("Run Jenkins job on CICD [deploy_openstack:${stack}]") {
                     shared.run_job_on_cicd_nodes(stack, timeout)
                 }
@@ -60,23 +62,25 @@
                     shared.sanity_check_component(stack)
                 }
 
-                stage("Make environment snapshot [${stack}_deployed]") {
-                    shared.devops_snapshot(stack)
+            } catch (e) {
+                common.printMsg("Job is failed", "purple")
+                shared.download_logs("deploy_${stack}")
+                throw e
+            } finally {
+                // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+                // and report appropriate data to TestRail
+                // TODO(ddmitriev): add checks for the installed stacks
+                if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+                    shared.run_cmd("""\
+                        dos.py destroy ${ENV_NAME}
+                    """)
                 }
             }
 
-        } catch (e) {
-            common.printMsg("Job is failed", "purple")
-            throw e
-        } finally {
-            // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
-            // and report appropriate data to TestRail
-            // TODO(ddmitriev): add checks for the installed stacks
-            if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
-                shared.run_cmd("""\
-                    dos.py destroy ${ENV_NAME}
-                """)
+            stage("Make environment snapshot [${stack}_deployed]") {
+                shared.devops_snapshot(stack)
             }
-        }
-    }
-}
+
+        } // for
+    } // dir
+} // node
diff --git a/jobs/pipelines/swarm-run-pytest.groovy b/jobs/pipelines/swarm-run-pytest.groovy
index 0dd2d7a..bc411f7 100644
--- a/jobs/pipelines/swarm-run-pytest.groovy
+++ b/jobs/pipelines/swarm-run-pytest.groovy
@@ -72,6 +72,7 @@
                     """)
 
                 def snapshot_name = "test_completed"
+                shared.download_logs("test_completed")
                 shared.run_cmd("""\
                     dos.py suspend ${ENV_NAME}
                     dos.py snapshot ${ENV_NAME} ${snapshot_name}
@@ -86,6 +87,9 @@
 
         } catch (e) {
             common.printMsg("Job is failed", "purple")
+            // Downloading logs is usually not needed here,
+            // because tests should use the @pytest.mark.grab_versions decorator.
+            // shared.download_logs("test_failed")
             throw e
         } finally {
             // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
diff --git a/jobs/pipelines/swarm-testrail-report.groovy b/jobs/pipelines/swarm-testrail-report.groovy
index c43b3bb..42027f0 100644
--- a/jobs/pipelines/swarm-testrail-report.groovy
+++ b/jobs/pipelines/swarm-testrail-report.groovy
@@ -30,6 +30,7 @@
         error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
     }
     dir("${PARENT_WORKSPACE}") {
+        def description = ''
         try {
 
             if (env.TCP_QA_REFS) {
@@ -44,9 +45,8 @@
             def testrail_name_template = ''
             def reporter_extra_options = []
 
-            stage("Archive all xml reports") {
-                archiveArtifacts artifacts: "**/*.xml"
-            }
+            def report_result = ''
+            def report_url = ''
 
             def deployment_report_name = sh(script: "find ${PARENT_WORKSPACE} -name \"deployment_${ENV_NAME}.xml\"", returnStdout: true)
             def tcpqa_report_name = sh(script: "find ${PARENT_WORKSPACE} -name \"nosetests.xml\"", returnStdout: true)
@@ -71,7 +71,14 @@
                       "--testrail-case-custom-fields {\\\"custom_qa_team\\\":\\\"9\\\"}",
                       "--testrail-case-section-name \'All\'",
                     ]
-                    shared.upload_results_to_testrail(deployment_report_name, testSuiteName, methodname, testrail_name_template, reporter_extra_options)
+                    report_result = shared.upload_results_to_testrail(deployment_report_name, testSuiteName, methodname, testrail_name_template, reporter_extra_options)
+                    common.printMsg(report_result, "blue")
+                    report_url = report_result.split("\n").each {
+                        if (it.contains("[TestRun URL]")) {
+                            common.printMsg("Found report URL: " + it.trim().split().last(), "blue")
+                            description += "\n<a href=" + it.trim().split().last() + ">${testSuiteName}</a>"
+                        }
+                    }
                 }
             }
 
@@ -86,7 +93,14 @@
                       "--testrail-case-custom-fields {\\\"custom_qa_team\\\":\\\"9\\\"}",
                       "--testrail-case-section-name \'All\'",
                     ]
-                    shared.upload_results_to_testrail(tcpqa_report_name, testSuiteName, methodname, testrail_name_template, reporter_extra_options)
+                    report_result = shared.upload_results_to_testrail(tcpqa_report_name, testSuiteName, methodname, testrail_name_template, reporter_extra_options)
+                    common.printMsg(report_result, "blue")
+                    report_url = report_result.split("\n").each {
+                        if (it.contains("[TestRun URL]")) {
+                            common.printMsg("Found report URL: " + it.trim().split().last(), "blue")
+                            description += "\n<a href=" + it.trim().split().last() + ">${testSuiteName}</a>"
+                        }
+                    }
                 }
             }
 
@@ -96,7 +110,14 @@
                     testSuiteName = "[MCP1.1_PIKE]Tempest"
                     methodname = "{classname}.{methodname}"
                     testrail_name_template = "{title}"
-                    shared.upload_results_to_testrail(tempest_report_name, testSuiteName, methodname, testrail_name_template)
+                    report_result = shared.upload_results_to_testrail(tempest_report_name, testSuiteName, methodname, testrail_name_template)
+                    common.printMsg(report_result, "blue")
+                    report_url = report_result.split("\n").each {
+                        if (it.contains("[TestRun URL]")) {
+                            common.printMsg("Found report URL: " + it.trim().split().last(), "blue")
+                            description += "\n<a href=" + it.trim().split().last() + ">${testSuiteName}</a>"
+                        }
+                    }
                 }
             }
 
@@ -118,7 +139,14 @@
                       "--testrail-case-custom-fields {\\\"custom_qa_team\\\":\\\"9\\\"}",
                       "--testrail-case-section-name \'Conformance\'",
                     ]
-                    shared.upload_results_to_testrail(k8s_conformance_report_name, testSuiteName, methodname, testrail_name_template, reporter_extra_options)
+                    report_result = shared.upload_results_to_testrail(k8s_conformance_report_name, testSuiteName, methodname, testrail_name_template, reporter_extra_options)
+                    common.printMsg(report_result, "blue")
+                    report_url = report_result.split("\n").each {
+                        if (it.contains("[TestRun URL]")) {
+                            common.printMsg("Found report URL: " + it.trim().split().last(), "blue")
+                            description += "\n<a href=" + it.trim().split().last() + ">${testSuiteName}</a>"
+                        }
+                    }
                 }
             }
 
@@ -128,7 +156,14 @@
                     testSuiteName = "LMA2.0_Automated"
                     methodname = "{methodname}"
                     testrail_name_template = "{title}"
-                    shared.upload_results_to_testrail(stacklight_report_name, testSuiteName, methodname, testrail_name_template)
+                    report_result = shared.upload_results_to_testrail(stacklight_report_name, testSuiteName, methodname, testrail_name_template)
+                    common.printMsg(report_result, "blue")
+                    report_url = report_result.split("\n").each {
+                        if (it.contains("[TestRun URL]")) {
+                            common.printMsg("Found report URL: " + it.trim().split().last(), "blue")
+                            description += "\n<a href=" + it.trim().split().last() + ">${testSuiteName}</a>"
+                        }
+                    }
                 }
             }
 
@@ -137,6 +172,7 @@
             throw e
         } finally {
             // reporting is failed for some reason
+            writeFile(file: "description.txt", text: description, encoding: "UTF-8")
         }
     }
 }
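
Each reporting block above follows the same pattern: upload_results_to_testrail() now returns the reporter output (see run_cmd_stdout below), the pipeline scans it for a line containing "[TestRun URL]", takes the last whitespace-separated token as the URL, and appends an HTML link to the description that is finally written to description.txt. A Python sketch of that extraction, for illustration only (extract_testrail_link is a hypothetical name):

    # Sketch of the "[TestRun URL]" extraction done in each reporting block.
    def extract_testrail_link(report_output, suite_name):
        description = ''
        for line in report_output.splitlines():
            if '[TestRun URL]' in line:
                url = line.strip().split()[-1]
                description += '\n<a href={0}>{1}</a>'.format(url, suite_name)
        return description
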
diff --git a/src/com/mirantis/system_qa/SharedPipeline.groovy b/src/com/mirantis/system_qa/SharedPipeline.groovy
index dcf05da..9b5621d 100644
--- a/src/com/mirantis/system_qa/SharedPipeline.groovy
+++ b/src/com/mirantis/system_qa/SharedPipeline.groovy
@@ -396,6 +396,17 @@
     }
 }
 
+def download_logs(archive_name_prefix) {
+    // Archive and download logs and debug info from salt nodes in the lab
+    // Do not fail on errors here, so that the original error from the parent exception is not lost.
+    def common = new com.mirantis.mk.Common()
+    common.printMsg("Downloading nodes logs by ${archive_name_prefix}", "blue")
+    run_cmd("""\
+        export TESTS_CONFIGS=\$(pwd)/${ENV_NAME}_salt_deployed.ini
+        ./tcp_tests/utils/get_logs.py --archive-name-prefix ${archive_name_prefix} || true
+    """)
+}
+
 def devops_snapshot_info(snapshot_name) {
     // Print helper message after snapshot
     def common = new com.mirantis.mk.Common()
@@ -477,6 +488,7 @@
   def testrailProject = "Mirantis Cloud Platform"
   def testPlanName = "[MCP-Q2]System-${MCP_VERSION}-${new Date().format('yyyy-MM-dd')}"
   def testrailMilestone = "MCP1.1"
+  def testrailCaseMaxNameLenght = 250
   def jobURL = env.BUILD_URL
 
   def reporterOptions = [
@@ -493,6 +505,7 @@
     "--xunit-name-template \"${methodname}\"",
     "--testrail-name-template \"${testrail_name_template}\"",
     "--test-results-link \"${jobURL}\"",
+    "--testrail-case-max-name-lenght ${testrailCaseMaxNameLenght}",
   ] + reporter_extra_options
 
   def script = """
@@ -509,7 +522,7 @@
              passwordVariable: 'TESTRAIL_PASSWORD',
              usernameVariable: 'TESTRAIL_USER']
   ]) {
-    return run_cmd(script)
+    return run_cmd_stdout(script)
   }
 }
 
diff --git a/tcp_tests/fixtures/day1_fixtures.py b/tcp_tests/fixtures/day1_fixtures.py
index ff3a0b5..e223a2b 100644
--- a/tcp_tests/fixtures/day1_fixtures.py
+++ b/tcp_tests/fixtures/day1_fixtures.py
@@ -71,10 +71,6 @@
                     "region": {
                         "machines": macs}}}}
 
-        if not config.day1_underlay.lvm:
-            underlay.enable_lvm(hardware.lvm_storages())
-            config.day1_underlay.lvm = underlay.config_lvm
-
         hardware.create_snapshot(ext.SNAPSHOT.day1_underlay)
 
     else:
diff --git a/tcp_tests/fixtures/underlay_fixtures.py b/tcp_tests/fixtures/underlay_fixtures.py
index 65677a9..a3bcea4 100644
--- a/tcp_tests/fixtures/underlay_fixtures.py
+++ b/tcp_tests/fixtures/underlay_fixtures.py
@@ -200,10 +200,6 @@
         LOG.info("Config - {}".format(config))
         underlay_actions.add_config_ssh(config.underlay.ssh)
 
-        if not config.underlay.lvm:
-            underlay_actions.enable_lvm(hardware.lvm_storages())
-            config.underlay.lvm = underlay_actions.config_lvm
-
         hardware.create_snapshot(ext.SNAPSHOT.underlay)
 
         return underlay_actions
@@ -234,10 +230,6 @@
                     "region": {
                         "machines": macs}}}}
 
-        if not config.underlay.lvm:
-            underlay_actions.enable_lvm(hardware.lvm_storages())
-            config.underlay.lvm = underlay_actions.config_lvm
-
         for node in hardware.slave_nodes:
             # For correct commissioning by MaaS, nodes should be powered off
             node.destroy()
diff --git a/tcp_tests/helpers/netchecker.py b/tcp_tests/helpers/netchecker.py
index dc58d9c..d3ba466 100644
--- a/tcp_tests/helpers/netchecker.py
+++ b/tcp_tests/helpers/netchecker.py
@@ -66,7 +66,7 @@
         else:
             assert self.get_connectivity_status().status_code == 400
 
-    def wait_check_network(self, works, timeout=60, interval=10):
+    def wait_check_network(self, works, timeout=600, interval=10):
         helpers.wait_pass(
             lambda: self.check_network(works=works),
             timeout=timeout,
diff --git a/tcp_tests/helpers/utils.py b/tcp_tests/helpers/utils.py
index f2311d4..480a646 100644
--- a/tcp_tests/helpers/utils.py
+++ b/tcp_tests/helpers/utils.py
@@ -356,6 +356,12 @@
 
         return var
 
+    def basename(path):
+        return os.path.basename(path)
+
+    def dirname(path):
+        return os.path.dirname(path)
+
     if options is None:
         options = {}
     options.update({'os_env': os_env, })
@@ -366,6 +372,9 @@
     environment = jinja2.Environment(
         loader=jinja2.FileSystemLoader([path, os.path.dirname(path)],
                                        followlinks=True))
+    environment.filters['basename'] = basename
+    environment.filters['dirname'] = dirname
+
     template = environment.get_template(filename).render(options)
 
     if required_env_vars and log_env_vars:
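
The hunk above registers basename and dirname as Jinja2 filters so templates can derive file names and directories from full paths. A standalone usage sketch (assumes only jinja2 and os; the example path is hypothetical):

    import os
    import jinja2

    env = jinja2.Environment()
    env.filters['basename'] = os.path.basename
    env.filters['dirname'] = os.path.dirname

    template = env.from_string("{{ path | basename }} in {{ path | dirname }}")
    print(template.render(path="/srv/salt/reclass/nodes/cfg01.yml"))
    # -> cfg01.yml in /srv/salt/reclass/nodes
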
diff --git a/tcp_tests/managers/envmanager_devops.py b/tcp_tests/managers/envmanager_devops.py
index 4cd6c93..f776221 100644
--- a/tcp_tests/managers/envmanager_devops.py
+++ b/tcp_tests/managers/envmanager_devops.py
@@ -94,39 +94,6 @@
             )
         self.__devops_config = conf
 
-    def lvm_storages(self):
-        """Returns a dict object of lvm storages in current environment
-
-        returned data example:
-            {
-                "master": {
-                    "id": "virtio-bff72959d1a54cb19d08"
-                },
-                "slave-0": {
-                    "id": "virtio-5e33affc8fe44503839f"
-                },
-                "slave-1": {
-                    "id": "virtio-10b6a262f1ec4341a1ba"
-                },
-            }
-
-        :rtype: dict
-        """
-        result = {}
-        for node in self.__env.get_nodes(role__in=ext.UNDERLAY_NODE_ROLES):
-            lvm = filter(lambda x: x.volume.name == 'lvm', node.disk_devices)
-            if len(lvm) == 0:
-                continue
-            lvm = lvm[0]
-            result[node.name] = {}
-            result_node = result[node.name]
-            result_node['id'] = "{bus}-{serial}".format(
-                bus=lvm.bus,
-                serial=lvm.volume.serial[:20])
-            LOG.info("Got disk-id '{}' for node '{}'".format(
-                result_node['id'], node.name))
-        return result
-
     @property
     def _d_env_name(self):
         """Get environment name from fuel devops config
diff --git a/tcp_tests/managers/envmanager_empty.py b/tcp_tests/managers/envmanager_empty.py
index c4bb57e..39fd126 100644
--- a/tcp_tests/managers/envmanager_empty.py
+++ b/tcp_tests/managers/envmanager_empty.py
@@ -30,26 +30,6 @@
         """
         self.__config = config
 
-    def lvm_storages(self):
-        """Returns data of lvm_storages on nodes in environment
-
-        It's expected that data of self.__config.lvm_storages will be
-        like this:
-            {
-                "node1": {
-                    "device": "vdb"
-                },
-                "node2": {
-                    "device": "vdb"
-                },
-                "node3": {
-                    "device": "vdb"
-                },
-            }
-        :rtype: dict
-        """
-        return self.__config.underlay.lvm
-
     def get_ssh_data(self, roles=None):
         raise Exception("EnvironmentManagerEmpty doesn't have SSH details. "
                         "Please provide SSH details in config.underlay.ssh")
diff --git a/tcp_tests/managers/runtestmanager.py b/tcp_tests/managers/runtestmanager.py
index 8fafe01..50f934e 100644
--- a/tcp_tests/managers/runtestmanager.py
+++ b/tcp_tests/managers/runtestmanager.py
@@ -15,9 +15,12 @@
 import json
 import os
 
+from devops.helpers import helpers
+
 from tcp_tests import logger
 from tcp_tests import settings
 
+
 LOG = logger.logger
 
 TEMPEST_CFG_DIR = '/tmp/test'
@@ -31,6 +34,7 @@
     container_name = 'run-tempest-ci'
     master_host = "cfg01"
     control_host = "ctl01"
+    compute_host = "cmp"
     class_name = "runtest"
     run_cmd = '/bin/bash -c "run-tempest"'
 
@@ -51,6 +55,8 @@
             self.master_host)[0]
         self.control_name = self.underlay.get_target_node_names(
             self.control_host)[0]
+        self.compute_name = self.underlay.get_target_node_names(
+            self.compute_host)[0]
 
     @property
     def salt_api(self):
@@ -108,6 +114,9 @@
                                 '${_param:runtest_tempest_public_net}'
                             }
                         },
+                        'heat_plugin': {
+                            'build_timeout': '600'
+                        },
                         'share': {
                             'capability_snapshot_support': True,
                             'run_driver_assisted_migration_tests': False,
@@ -172,14 +181,22 @@
                                                indent=4, sort_keys=True)
                 f.write(container_inspect)
 
-    def prepare(self, dpdk=None):
+    def prepare(self):
         self.store_runtest_model()
         cirros_pillar = ("salt-call --out=newline_values_only "
                          "pillar.get "
                          "glance:client:identity:"
                          "admin_identity:image:cirros:location")
+        dpdk_pillar = "linux:network:dpdk:enabled"
         salt_cmd = "salt -l info --hard-crash --state-output=mixed "
         salt_call_cmd = "salt-call -l info --hard-crash --state-output=mixed "
+
+        result = self.__salt_api.get_pillar(tgt=self.compute_name,
+                                            pillar=dpdk_pillar)
+
+        dpdk = result[0].get(self.compute_name, False)
+        LOG.info("DPDK enabled: {}".format(bool(dpdk)))
+
         commands = [
             {
                 'description': "Sync salt objects for runtest model",
@@ -247,8 +264,10 @@
                 'cmd': ("set -ex;" +
                         salt_call_cmd + " cmd.run "
                         " '. /root/keystonercv3;"
+                        "  openstack flavor set m1.extra_tiny_test"
+                        "  --property hw:mem_page_size=any;"
                         "  openstack flavor set m1.tiny_test"
-                        "  --property hw:mem_page_size=small'")},
+                        "  --property hw:mem_page_size=any'")},
             )
 
         self.__salt_api.execute_commands(commands=commands,
@@ -259,13 +278,14 @@
         image_nameversion = "{}:{}".format(self.image_name, self.image_version)
 
         docker_args = (
+            " -t "
             " --name {container_name} "
             " -e ARGS=\"-r {tempest_pattern} -w {tempest_threads}\""
             " -v {cfg_dir}/tempest.conf:/etc/tempest/tempest.conf"
             " -v /tmp/:/tmp/"
             " -v {cfg_dir}:/root/tempest"
             " -v /etc/ssl/certs/:/etc/ssl/certs/"
-            " --rm"
+            " -d "
             " {image_nameversion} {run_cmd}"
             .format(
                 container_name=self.container_name,
@@ -292,6 +312,25 @@
         self.__salt_api.execute_commands(commands=commands,
                                          label="Run Tempest tests")
 
+        def wait_status(s):
+            inspect_res = self.salt_api.local(tgt,
+                                              'dockerng.inspect',
+                                              self.container_name)
+            if 'return' in inspect_res:
+                inspect = inspect_res['return']
+                inspect = inspect[0]
+                inspect = next(inspect.iteritems())[1]
+                status = inspect['State']['Status']
+
+                return status.lower() == s.lower()
+
+            return False
+
+        helpers.wait(lambda: wait_status('exited'),
+                     timeout=timeout,
+                     timeout_msg=('Tempest run did not finish '
+                                  'in {}'.format(timeout)))
+
         inspect_res = self.salt_api.local(tgt,
                                           'dockerng.inspect',
                                           self.container_name)
@@ -310,12 +349,12 @@
         return {'inspect': inspect,
                 'logs': logs}
 
-    def prepare_and_run_tempest(self, username='root', dpdk=None):
+    def prepare_and_run_tempest(self, username='root'):
         """
         Run tempest tests
         """
         tempest_timeout = settings.TEMPEST_TIMEOUT
-        self.prepare(dpdk=dpdk)
+        self.prepare()
         test_res = self.run_tempest(tempest_timeout)
         self.fetch_arficats(username=username)
         self.save_runtime_logs(**test_res)
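
With the tempest container now started detached ("-d" instead of "--rm"), run_tempest() polls dockerng.inspect via helpers.wait until the container state is "exited" before inspecting it and fetching artifacts; prepare() also reads the linux:network:dpdk:enabled pillar from the compute node instead of taking a dpdk argument. A minimal Python sketch of the polling pattern, assuming a check_status() callable that returns the container state string:

    import time

    # check_status() is assumed to return the Docker container state,
    # e.g. the State.Status field from dockerng.inspect.
    def wait_until_exited(check_status, timeout, interval=30):
        deadline = time.time() + timeout
        while time.time() < deadline:
            if check_status().lower() == 'exited':
                return
            time.sleep(interval)
        raise RuntimeError('Tempest run did not finish in {}s'.format(timeout))
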
diff --git a/tcp_tests/managers/underlay_ssh_manager.py b/tcp_tests/managers/underlay_ssh_manager.py
index ee23654..0bfb463 100644
--- a/tcp_tests/managers/underlay_ssh_manager.py
+++ b/tcp_tests/managers/underlay_ssh_manager.py
@@ -75,7 +75,6 @@
     """
     __config = None
     config_ssh = None
-    config_lvm = None
 
     def __init__(self, config):
         """Read config.underlay.ssh object
@@ -86,9 +85,6 @@
         if self.config_ssh is None:
             self.config_ssh = []
 
-        if self.config_lvm is None:
-            self.config_lvm = {}
-
         self.add_config_ssh(self.__config.underlay.ssh)
 
     def add_config_ssh(self, config_ssh):
@@ -191,42 +187,6 @@
                 names.append(ssh['node_name'])
         return names
 
-    def enable_lvm(self, lvmconfig):
-        """Method for enabling lvm oh hosts in environment
-
-        :param lvmconfig: dict with ids or device' names of lvm storage
-        :raises: devops.error.DevopsCalledProcessError,
-        devops.error.TimeoutError, AssertionError, ValueError
-        """
-        def get_actions(lvm_id):
-            return [
-                "systemctl enable lvm2-lvmetad.service",
-                "systemctl enable lvm2-lvmetad.socket",
-                "systemctl start lvm2-lvmetad.service",
-                "systemctl start lvm2-lvmetad.socket",
-                "pvcreate {} && pvs".format(lvm_id),
-                "vgcreate default {} && vgs".format(lvm_id),
-                "lvcreate -L 1G -T default/pool && lvs",
-            ]
-        lvmpackages = ["lvm2", "liblvm2-dev", "thin-provisioning-tools"]
-        for node_name in self.node_names():
-            lvm = lvmconfig.get(node_name, None)
-            if not lvm:
-                continue
-            if 'id' in lvm:
-                lvmdevice = '/dev/disk/by-id/{}'.format(lvm['id'])
-            elif 'device' in lvm:
-                lvmdevice = '/dev/{}'.format(lvm['device'])
-            else:
-                raise ValueError("Unknown LVM device type")
-            if lvmdevice:
-                self.apt_install_package(
-                    packages=lvmpackages, node_name=node_name, verbose=True)
-                for command in get_actions(lvmdevice):
-                    self.sudo_check_call(command, node_name=node_name,
-                                         verbose=True)
-        self.config_lvm = dict(lvmconfig)
-
     def host_by_node_name(self, node_name, address_pool=None):
         ssh_data = self.__ssh_data(node_name=node_name,
                                    address_pool=address_pool)
@@ -438,12 +398,12 @@
             "docker ps > /root/\$(hostname -f)/dump_docker_ps.txt;"
             "docker service ls > "
             "  /root/\$(hostname -f)/dump_docker_services_ls.txt;"
-            "for SERVICE in \$(docker service ls | awk '{ print $2 }'); "
+            "for SERVICE in \$(docker service ls | awk '{ print \$2 }'); "
             "  do docker service ps --no-trunc 2>&1 \$SERVICE >> "
             "    /root/\$(hostname -f)/dump_docker_service_ps.txt;"
             "  done;"
-            "for SERVICE in \$(docker service ls | awk '{ print $2 }'); "
-            "  do docker service logs 2>&1 \$SERVICE > "
+            "for SERVICE in \$(docker service ls | awk '{ print \$2 }'); "
+            "  do timeout 30 docker service logs --no-trunc 2>&1 \$SERVICE > "
             "    /root/\$(hostname -f)/dump_docker_service_\${SERVICE}_logs;"
             "  done;"
             "vgdisplay > /root/\$(hostname -f)/dump_vgdisplay.txt;"
diff --git a/tcp_tests/settings.py b/tcp_tests/settings.py
index 9ebdf22..fca6a6d 100644
--- a/tcp_tests/settings.py
+++ b/tcp_tests/settings.py
@@ -77,7 +77,7 @@
     'docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest')  # noqa
 TEMPEST_IMAGE_VERSION = os.environ.get('TEMPEST_IMAGE_VERSION', 'pike')
 TEMPEST_PATTERN = os.environ.get('TEMPEST_PATTERN', 'tempest')
-TEMPEST_TIMEOUT = int(os.environ.get('TEMPEST_TIMEOUT', 60 * 60 * 5))
+TEMPEST_TIMEOUT = int(os.environ.get('TEMPEST_TIMEOUT', 60 * 60 * 6))
 TEMPEST_THREADS = int(os.environ.get('TEMPEST_THREADS', 2))
 TEMPEST_TARGET = os.environ.get('TEMPEST_TARGET', 'gtw01')
 SALT_VERSION = os.environ.get('SALT_VERSION', '2017.7')
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
index 0361835..b3a3013 100644
--- a/tcp_tests/settings_oslo.py
+++ b/tcp_tests/settings_oslo.py
@@ -118,8 +118,6 @@
     ct.Cfg('upstream_dns_servers', ct.JSONList(),
            help="IP addresses of upstream DNS servers (dnsmasq)",
            default=[]),
-    ct.Cfg('lvm', ct.JSONDict(),
-           help="LVM settings for Underlay", default={}),
     ct.Cfg('address_pools', ct.JSONDict(),
            help="""Address pools (dynamically) allocated for the environment.
                    May be used to determine CIDR for a specific network from
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/salt.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/salt.yaml
index 67833da..51dfc5d 100644
--- a/tcp_tests/templates/cookied-bm-contrail-maas/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/salt.yaml
@@ -42,9 +42,9 @@
     set -e;
     # Remove rack01 key
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml;
     # Add openstack_compute_node definition from system
-    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml --merge;
+    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml --merge;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-contrail-maas/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-contrail-maas/underlay--user-data-cfg01.yaml
index 1e318a5..6617855 100644
--- a/tcp_tests/templates/cookied-bm-contrail-maas/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail-maas/underlay--user-data-cfg01.yaml
@@ -52,6 +52,13 @@
    - export MAAS_DHCP_POOL_END={{ os_env('MAAS_DHCP_POOL_END', '172.16.49.119') }}
    - ifconfig $MAAS_PXE_INTERFACE_NAME $MAAS_PXE_INTERFACE_ADDRESS/$MAAS_DHCP_POOL_NETMASK_PREFIX
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/salt.yaml b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/salt.yaml
index 3b508ce..31e9736 100644
--- a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/salt.yaml
@@ -42,9 +42,9 @@
     set -e;
     # Remove rack01 key
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml;
     # Add openstack_compute_node definition from system
-    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml --merge;
+    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml --merge;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/underlay--user-data-cfg01.yaml
index 1e318a5..6617855 100644
--- a/tcp_tests/templates/cookied-bm-contrail-nfv-maas/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail-nfv-maas/underlay--user-data-cfg01.yaml
@@ -52,6 +52,13 @@
    - export MAAS_DHCP_POOL_END={{ os_env('MAAS_DHCP_POOL_END', '172.16.49.119') }}
    - ifconfig $MAAS_PXE_INTERFACE_NAME $MAAS_PXE_INTERFACE_ADDRESS/$MAAS_DHCP_POOL_NETMASK_PREFIX
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt.yaml
index f8a1d9a..801adfb 100644
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/salt.yaml
@@ -32,9 +32,9 @@
     set -e;
     # Remove rack01 key
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml;
     # Add openstack_compute_node definition from system
-    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml --merge;
+    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml --merge;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data-cfg01.yaml
index 6c9e48f..cc69c64 100644
--- a/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40-nfv/underlay--user-data-cfg01.yaml
@@ -47,37 +47,13 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   #   - echo "Preparing base OS"
-
    - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
-   # Configure Ubuntu mirrors
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   #   - apt-get clean
-   #   - apt-get update
-
-   # Install common packages
-   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   # Install salt-minion and stop it until it is configured
-   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
-   ########################################################
-   # Node is ready, allow SSH access
-   #   - echo "Allow SSH access ..."
-   #   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
 
  write_files:
   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
diff --git a/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail-ocata.yaml b/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail-ocata.yaml
index c4470e6..db9b61b 100644
--- a/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail-ocata.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/salt-context-cookiecutter-contrail-ocata.yaml
@@ -82,7 +82,7 @@
   control_vlan: '2422'
   cookiecutter_template_branch: ''
   cookiecutter_template_credentials: gerrit
-  cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+  cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
   deploy_network_gateway: 172.16.49.65
   deploy_network_netmask: 255.255.255.192
   deploy_network_subnet: 172.16.49.64/26
@@ -207,7 +207,7 @@
   salt_master_hostname: cfg01
   salt_master_management_address: 172.16.49.66
   shared_reclass_branch: ''
-  shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+  shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
   stacklight_enabled: 'True'
   stacklight_log_address: 10.167.8.60
   stacklight_log_hostname: log
diff --git a/tcp_tests/templates/cookied-bm-contrail40/salt.yaml b/tcp_tests/templates/cookied-bm-contrail40/salt.yaml
index 9319634..4c9f4a7 100644
--- a/tcp_tests/templates/cookied-bm-contrail40/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/salt.yaml
@@ -32,9 +32,9 @@
     set -e;
     # Remove rack01 key
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml;
     # Add openstack_compute_node definition from system
-    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml --merge;
+    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml --merge;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-contrail40/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-contrail40/underlay--user-data-cfg01.yaml
index 6c9e48f..6b6ec9f 100644
--- a/tcp_tests/templates/cookied-bm-contrail40/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/underlay--user-data-cfg01.yaml
@@ -47,37 +47,14 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   #   - echo "Preparing base OS"
-
    - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
-   # Configure Ubuntu mirrors
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
 
-   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   #   - apt-get clean
-   #   - apt-get update
-
-   # Install common packages
-   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   # Install salt-minion and stop it until it is configured
-   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
-   ########################################################
-   # Node is ready, allow SSH access
-   #   - echo "Allow SSH access ..."
-   #   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
 
  write_files:
   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-cookiecutter-openstack_ovs_dpdk.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
index 99ad264..ce13598 100644
--- a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
@@ -123,7 +123,6 @@
   openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
   openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
   openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
   openstack_control_address: 10.167.11.10
   openstack_control_hostname: ctl
   openstack_control_node01_address: 10.167.11.11
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data-cfg01.yaml
index 3f4f128..b77550a 100644
--- a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   #- cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -51,16 +49,13 @@
    - echo "Preparing base OS"
 
    - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
 
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree sshuttle
-
-   ########################################################
-   # Node is ready, allow SSH access
-   #- echo "Allow SSH access ..."
-   #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
 
   write_files:
    - path: /etc/network/interfaces
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/core.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/core.yaml
index 6dc4829..bf6c2da 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/core.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/core.yaml
@@ -1,124 +1,10 @@
 {% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
 
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Setup glusterfs on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install nginx on prx nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nginx:server' state.sls nginx
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
\ No newline at end of file
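Note: the explicit keepalived/glusterfs/rabbitmq/galera/haproxy/memcached/VIP steps removed above are now pulled in from shared-core.yaml. A minimal sketch of how one of those macros is presumably defined there, simply wrapping the same Salt steps that were deleted from this template (step text and retry values are taken from the removed lines, not from the real shared-core.yaml):

{#- Sketch only: assumes shared-core.yaml wraps the former inline steps like this -#}
{%- macro MACRO_INSTALL_KEEPALIVED() %}
- description: Install keepalived on ctl01
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@keepalived:cluster and *01*' state.sls keepalived
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: true

- description: Install keepalived
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@keepalived:cluster' state.sls keepalived
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 10}
  skip_fail: true
{%- endmacro %}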
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
index 7148d00..1d8cbbf 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
@@ -2,20 +2,13 @@
 {% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_CTL01 with context %}
 {% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_GTW01 with context %}
 {% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_GTW02 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
 {% import 'shared-salt.yaml' as SHARED with context %}
 {% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
 
 # Install OpenStack control services
-
 {{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
 {{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
 {{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
 {{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=false) }}
 
 - description: Install cinder volume
@@ -26,10 +19,7 @@
   skip_fail: false
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON(INSTALL_GATEWAY=true) }}
-
 {{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
 {{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE() }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
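Note: MACRO_INSTALL_COMPUTE is now called with CELL_MAPPING=true. The flag presumably makes the shared macro run the Nova cells-v2 host mapping once the computes have registered; a sketch of the kind of guarded step it would add, in the step format used throughout these templates (the actual contents of shared-openstack.yaml are not part of this change):

{#- Sketch only: assumes CELL_MAPPING=true guards a step roughly like this -#}
{%- if CELL_MAPPING %}
- description: Map new compute hosts into Nova cells (cells v2)
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@nova:controller and *01*' cmd.run 'nova-manage cell_v2 discover_hosts --verbose'
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 30}
  skip_fail: false
{%- endif %}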
 
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
index cb97e5e..7585c41 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
@@ -1,7 +1,7 @@
 default_context:
   mcp_version: proposed
   ceph_enabled: 'False'
-  cicd_enabled: 'True'
+  cicd_enabled: 'False'
   cicd_control_node01_address: 10.167.4.91
   cicd_control_node01_hostname: cid01
   cicd_control_node02_address: 10.167.4.92
@@ -65,7 +65,6 @@
   openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
   openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
   openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
   openstack_compute_node01_hostname: cmp01
   openstack_compute_node02_hostname: cmp02
   openstack_compute_node01_address: 10.167.4.3
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-lab03-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-lab03-environment.yaml
index def5353..692cf19 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-lab03-environment.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-lab03-environment.yaml
@@ -44,34 +44,29 @@
         enp9s0f1:
           role: bond0_ab_ovs_vlan_ctl
 
-    cmp01.cookied-bm-mcp-dvr-vxlan.local:
+    cmp001.cookied-bm-mcp-dvr-vxlan.local:
       reclass_storage_name: openstack_compute_node01
       roles:
       - openstack_compute
-      - features_lvm_backend
+      - features_lvm_backend_volume_sdb
       - linux_system_codename_xenial
       interfaces:
         enp9s0f0:
           role: single_mgm_dhcp
         enp9s0f1:
           role: bond0_ab_dvr_vxlan_ctl_mesh_floating
-          single_address: 10.167.4.105
-          tenant_address: 10.167.6.105
 
-
-    cmp02.cookied-bm-mcp-dvr-vxlan.local:
+    cmp002.cookied-bm-mcp-dvr-vxlan.local:
       reclass_storage_name: openstack_compute_node02
       roles:
       - openstack_compute
-      - features_lvm_backend
+      - features_lvm_backend_volume_sdb
       - linux_system_codename_xenial
       interfaces:
         enp9s0f0:
           role: single_mgm_dhcp
         enp9s0f1:
           role: bond0_ab_dvr_vxlan_ctl_mesh_floating
-          single_address: 10.167.4.106
-          tenant_address: 10.167.6.106
 
     gtw01.cookied-bm-mcp-dvr-vxlan.local:
       reclass_storage_name: openstack_gateway_node01
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-vcp-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-vcp-environment.yaml
index 37d0b14..6cace03 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-vcp-environment.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-vcp-environment.yaml
@@ -4,6 +4,7 @@
       roles:
       - openstack_control_leader
       - linux_system_codename_xenial
+      - features_lvm_backend_control
       interfaces:
         ens2:
           role: single_dhcp
@@ -15,6 +16,7 @@
       roles:
       - openstack_control
       - linux_system_codename_xenial
+      - features_lvm_backend_control
       interfaces:
         ens2:
           role: single_dhcp
@@ -26,6 +28,7 @@
       roles:
       - openstack_control
       - linux_system_codename_xenial
+      - features_lvm_backend_control
       interfaces:
         ens2:
           role: single_dhcp
@@ -119,57 +122,3 @@
           role: single_dhcp
         ens3:
           role: single_ctl
-
-    cid01.cookied-bm-mcp-dvr-vxlan.local:
-      reclass_storage_name: cicd_control_node01
-      roles:
-      - cicd_control_leader
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    cid02.cookied-bm-mcp-dvr-vxlan.local:
-      reclass_storage_name: cicd_control_node02
-      roles:
-      - cicd_control_manager
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    cid03.cookied-bm-mcp-dvr-vxlan.local:
-      reclass_storage_name: cicd_control_node03
-      roles:
-      - cicd_control_manager
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-#    mon01.cookied-bm-mcp-dvr-vxlan.local:
-#      reclass_storage_name: stacklight_server_node01
-#      roles:
-#      - stacklightv2_server_leader
-#      - linux_system_codename_xenial
-#      interfaces:
-#        ens3:
-#          role: single_ctl
-#
-#    mon02.cookied-bm-mcp-dvr-vxlan.local:
-#      reclass_storage_name: stacklight_server_node02
-#      roles:
-#      - stacklightv2_server
-#      - linux_system_codename_xenial
-#      interfaces:
-#        ens3:
-#         role: single_ctl
-#
-#    mon03.cookied-bm-mcp-dvr-vxlan.local:
-#      reclass_storage_name: stacklight_server_node03
-#      roles:
-#      - stacklightv2_server
-#      - linux_system_codename_xenial
-#      interfaces:
-#        ens3:
-#          role: single_ctl
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
index 81d9096..8804721 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
@@ -12,53 +12,19 @@
 {% import 'shared-salt.yaml' as SHARED with context %}
 
 {{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
 {{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-- description: "WR for changing VCP images path to internal storage"
-  cmd: |
-    set -e;
-    apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
-    [[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
-    . /root/venv-reclass-tools/bin/activate;
-    pip install git+https://github.com/dis-xcom/reclass-tools;
-    reclass-tools add-key parameters._param.salt_control_xenial_image 'https://apt.mcp.mirantis.net/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-
 {{ SHARED.MACRO_GENERATE_INVENTORY() }}
 {{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
 
-- description: Temporary workaround for removing cinder-volume from CTL nodes
-  cmd: |
-    sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
-    sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Temporary workaround for removing virtual gtw nodes
-  cmd: |
-    sed -i 's/\-\ system\.salt\.control\.sizes\.ovs\.compact//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
-    sed -i 's/\-\ system\.salt\.control\.placement\.ovs\.compact//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
 - description: Rerun openssh after env model is generated
   cmd: |
     salt-call state.sls openssh
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
-  
 {{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
 
 ########################################
@@ -129,4 +95,3 @@
 {{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
 
 {{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml
index 3f4f128..b77550a 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   #- cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -51,16 +49,13 @@
    - echo "Preparing base OS"
 
    - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
 
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree sshuttle
-
-   ########################################################
-   # Node is ready, allow SSH access
-   #- echo "Allow SSH access ..."
-   #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
 
   write_files:
    - path: /etc/network/interfaces
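Note: this and the analogous underlay--user-data-cfg01.yaml changes below stop installing helper packages at boot and instead assume salt-master/salt-minion are already baked into the cfg01 image; cloud-init only prepares the reclass nodes directory, enables the services and waits for the local minion to answer. A minimal stand-alone sketch of that pattern (assuming a runcmd-style section and a preinstalled Salt, as the change implies):

#cloud-config
runcmd:
 - mkdir -p /srv/salt/reclass/nodes              # directory reclass expects for generated node definitions
 - systemctl enable salt-master
 - systemctl enable salt-minion
 - systemctl start salt-master
 - systemctl start salt-minion
 - salt-call -l info --timeout=120 test.ping     # returns only once the local minion can reach the master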
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
index a7308e9..8d2bf09 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
@@ -6,8 +6,8 @@
 {% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
 {% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) %}
 {% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.164.2') %}
@@ -15,8 +15,8 @@
 {% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.164.11') %}
 {% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.164.12') %}
 {% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.16.164.13') %}
-{% set ETH0_IP_ADDRESS_CMP01 = os_env('ETH0_IP_ADDRESS_CMP01', '172.16.164.3') %}
-{% set ETH0_IP_ADDRESS_CMP02 = os_env('ETH0_IP_ADDRESS_CMP02', '172.16.164.31') %}
+{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '172.16.164.3') %}
+{% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '172.16.164.31') %}
 {% set ETH0_IP_ADDRESS_GTW01 = os_env('ETH0_IP_ADDRESS_GTW01', '172.16.164.4') %}
 {% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '172.16.164.5') %}
 
@@ -48,8 +48,8 @@
             default_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
             default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
             default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
-            default_{{ HOSTNAME_CMP01 }}: {{ ETH0_IP_ADDRESS_CMP01 }}
-            default_{{ HOSTNAME_CMP02 }}: {{ ETH0_IP_ADDRESS_CMP02 }}
+            default_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
+            default_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
             default_{{ HOSTNAME_GTW01 }}: {{ ETH0_IP_ADDRESS_GTW01 }}
             default_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
           ip_ranges:
@@ -311,14 +311,13 @@
                   parents:
                    - enp9s0f1
 
-
-          - name: {{ HOSTNAME_CMP01 }}
+          - name: {{ HOSTNAME_CMP001 }}
             role: salt_minion
             params:
               ipmi_user: !os_env IPMI_USER
               ipmi_password: !os_env IPMI_PASSWORD
               ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_CMP01  # hostname or IP address
+              ipmi_host: !os_env IPMI_HOST_CMP001  # hostname or IP address
               ipmi_lan_interface: lanplus
               ipmi_port: 623
 
@@ -344,9 +343,9 @@
               interfaces:
                 - label: enp9s0f0
                   l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP01
+                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP001
                 - label: enp9s0f1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP01
+                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP001
               network_config:
                 enp9s0f0:
                   networks:
@@ -359,15 +358,13 @@
                    - enp9s0f0
                    - enp9s0f1
 
-
-
-          - name: {{ HOSTNAME_CMP02 }}
+          - name: {{ HOSTNAME_CMP002 }}
             role: salt_minion
             params:
               ipmi_user: !os_env IPMI_USER
               ipmi_password: !os_env IPMI_PASSWORD
               ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_CMP02  # hostname or IP address
+              ipmi_host: !os_env IPMI_HOST_CMP002  # hostname or IP address
               ipmi_lan_interface: lanplus
               ipmi_port: 623
 
@@ -393,9 +390,9 @@
               interfaces:
                 - label: enp9s0f0
                   l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP02
+                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP002
                 - label: enp9s0f1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP02
+                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP002
               network_config:
                 enp9s0f0:
                   networks:
@@ -408,7 +405,6 @@
                    - enp9s0f0
                    - enp9s0f1
 
-
           - name: {{ HOSTNAME_GTW01 }}
             role: salt_minion
             params:
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt.yaml
index afec74c..9c6d2bc 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/salt.yaml
@@ -33,9 +33,9 @@
     set -e;
     # Remove rack01 key
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml;
     # Add openstack_compute_node definition from system
-    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml --merge;
+    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml --merge;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
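Note: the reclass-tools calls now edit infra/config/init.yml instead of infra/config.yml, matching cluster models where infra/config is a directory. A sketch of the keys those two commands touch in that file (structure inferred from the commands themselves; a real model carries many more classes and parameters):

# infra/config/init.yml (sketch)
classes:
- system.reclass.storage.system.openstack_compute_multi   # appended by "reclass-tools add-key ... --merge"
parameters:
  reclass:
    storage:
      node: {}   # the openstack_compute_rack01 entry is removed by "reclass-tools del-key ..."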
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data-cfg01.yaml
index 7677268..59a799e 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/underlay--user-data-cfg01.yaml
@@ -47,43 +47,14 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   #   - echo "Preparing base OS"
-
    - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
-   # Configure Ubuntu mirrors
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
 
-   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   #   - apt-get clean
-   #   - apt-get update
-
-   # Install common packages
-   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   # Install salt-minion and stop it until it is configured
-   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
-   ########################################################
-   # Node is ready, allow SSH access
-   #   - echo "Allow SSH access ..."
-   #   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree sshuttle
-
-   # Use sshuttle to allow SSH access to the model-related control network 10.167.4.0/24 on baremetal/VM nodes from cfg01
-   - sshuttle -r {{ ETH0_IP_ADDRESS_KVM01 }} 10.167.8.0/24 -D
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
 
  write_files:
   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
index 1adfd90..d8c6a67 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
@@ -34,9 +34,9 @@
     set -e;
     # Remove rack01 key
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml;
     # Add openstack_compute_node definition from system
-    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml --merge;
+    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml --merge;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data-cfg01.yaml
index 6c9e48f..cde8295 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
   expire: False
 
  bootcmd:
-   #   # Block access to SSH while node is preparing
-   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
   # Enable root access
   - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
   - service sshd restart
@@ -47,38 +45,15 @@
   - swapon /swapfile
   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   #   - echo "Preparing base OS"
-
   - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
 
-   # Configure Ubuntu mirrors
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
 
-   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   #   - apt-get clean
-   #   - apt-get update
-
-   # Install common packages
-   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   # Install salt-minion and stop it until it is configured
-   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
-   ########################################################
-   # Node is ready, allow SSH access
-   #   - echo "Allow SSH access ..."
-   #   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
-
  write_files:
   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
     content: |
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-cookiecutter-openstack_ovs_dpdk.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
index 484b776..c137d12 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
@@ -123,7 +123,6 @@
   openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
   openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
   openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
   openstack_control_address: 10.167.11.10
   openstack_control_hostname: ctl
   openstack_control_node01_address: 10.167.11.11
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data-cfg01.yaml
index 3f4f128..48bf712 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   #- cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -46,21 +44,16 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
    - echo "Preparing base OS"
 
    - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
-   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
 
-   # Install common packages
-   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree sshuttle
-
-   ########################################################
-   # Node is ready, allow SSH access
-   #- echo "Allow SSH access ..."
-   #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
 
   write_files:
    - path: /etc/network/interfaces
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml
index 7ebda02..0e60fd0 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt.yaml
@@ -35,7 +35,7 @@
     set -e;
     # Remove rack01 key
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.kubernetes_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.kubernetes_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/init.yml;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data-cfg01.yaml
index 646af7a..16bd9f6 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   #   # Block access to SSH while node is preparing
-   #   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - sed -i -e '/^PasswordAuthentication/s/^.*$/PasswordAuthentication yes/' /etc/ssh/sshd_config
@@ -48,37 +46,14 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
-   ############## TCP Cloud cfg01 node ##################
-   #- sleep 120
-   #   - echo "Preparing base OS"
-
    - echo "nameserver 172.17.41.2" > /etc/resolv.conf;
-   #   - which wget >/dev/null || (apt-get update; apt-get install -y wget);
 
-   # Configure Ubuntu mirrors
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
-   #   - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
-   #   - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
-   #   - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
-   #   - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
-   #   - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
-   #   - apt-get clean
-   #   - apt-get update
-
-   # Install common packages
-   #   - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
-   # Install salt-minion and stop it until it is configured
-   #   - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
-   ########################################################
-   # Node is ready, allow SSH access
-   #   - echo "Allow SSH access ..."
-   #   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
 
   write_files:
    - path: /etc/default/grub.d/97-enable-grub-menu.cfg
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt.yaml
index d39ca10..fa2d723 100644
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt.yaml
@@ -48,9 +48,9 @@
     set -e;
     # Remove rack01 key
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
     # Add openstack_compute_node definition from system
-    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml --merge;
+    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml --merge;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data-cfg01.yaml
index 8faced7..3839f93 100644
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay--user-data-cfg01.yaml
@@ -45,6 +45,13 @@
   - swapon /swapfile
   - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
   - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
 
  write_files:
   - path: /etc/default/grub.d/97-enable-grub-menu.cfg
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml
index 5799a2d..c0ceb3d 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml
@@ -403,9 +403,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -432,9 +429,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -461,9 +455,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml
index d9dcdaf..fe1970f 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml
@@ -403,9 +403,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -432,9 +429,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -461,9 +455,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
diff --git a/tcp_tests/templates/cookied-cicd-k8s-genie/underlay.yaml b/tcp_tests/templates/cookied-cicd-k8s-genie/underlay.yaml
index 2fa2aaa..bda254b 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-genie/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-genie/underlay.yaml
@@ -380,9 +380,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -409,9 +406,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -438,9 +432,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
diff --git a/tcp_tests/templates/cookied-cicd-pike-dpdk/cookiecutter-context-pike-ovs-dpdk.yaml b/tcp_tests/templates/cookied-cicd-pike-dpdk/cookiecutter-context-pike-ovs-dpdk.yaml
index fdef434..e4c5fbd 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dpdk/cookiecutter-context-pike-ovs-dpdk.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dpdk/cookiecutter-context-pike-ovs-dpdk.yaml
@@ -95,12 +95,11 @@
   openstack_cluster_size: compact
   openstack_compute_count: '2'
   openstack_compute_rack01_hostname: cmp
-  openstack_compute_rack01_single_subnet: 172.16.10
-  openstack_compute_rack01_tenant_subnet: 10.1.0
-  openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
-  openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
-  openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
+  openstack_compute_rack01_single_subnet: 10.167.4
+  openstack_compute_rack01_tenant_subnet: 10.167.6
+  openstack_compute_single_address_ranges: 10.167.4.105-10.167.4.106
+  openstack_compute_deploy_address_ranges: 10.167.5.105-10.167.5.106
+  openstack_compute_tenant_address_ranges: 10.167.6.105-10.167.6.106
   openstack_control_address: 10.167.4.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 10.167.4.101
@@ -142,7 +141,7 @@
   openstack_nfv_sriov_enabled: 'False'
   openstack_nova_compute_hugepages_count: '2048'
   openstack_nova_compute_nfv_req_enabled: 'False'
-  openstack_nova_cpu_pinning: '3'
+  openstack_nova_cpu_pinning: '4,5,8,9,10,11'
   openstack_ovs_dvr_enabled: 'False'
   openstack_ovs_encapsulation_type: vxlan
   openstack_proxy_address: 10.167.4.80
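Note: openstack_nova_cpu_pinning now names an explicit set of host cores rather than a single core, which fits a DPDK compute profile where the remaining cores are left to OVS-DPDK PMD threads and the host. A sketch of the nova pillar this cookiecutter value presumably renders into (the pillar path is an assumption; only the value comes from this change):

# Sketch only; assumed pillar shape
nova:
  compute:
    vcpu_pin_set: "4,5,8,9,10,11"   # guest vCPUs are restricted to these host cores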
diff --git a/tcp_tests/templates/cookied-cicd-pike-dpdk/environment-context.yaml b/tcp_tests/templates/cookied-cicd-pike-dpdk/environment-context.yaml
index 53f5dd0..467680e 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dpdk/environment-context.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dpdk/environment-context.yaml
@@ -80,6 +80,7 @@
       reclass_storage_name: openstack_control_node01
       roles:
       - openstack_control_leader
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -91,6 +92,7 @@
       reclass_storage_name: openstack_control_node02
       roles:
       - openstack_control
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -102,6 +104,7 @@
       reclass_storage_name: openstack_control_node03
       roles:
       - openstack_control
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -191,7 +194,7 @@
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
-      - features_lvm_backend
+      - features_lvm_backend_volume_vdb
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -218,7 +221,10 @@
         ens4:
           role: single_ctl
         ens5:
-          role: single_ovs_br_prv
-          mtu: 1500
+          role: bond0_ab_ovs_vxlan_mesh_no_tag
+        ens6:
+          role: bond0_ab_ovs_vxlan_mesh_no_tag
         ens7:
-          role: bond1_ab_ovs_floating
+          role: single_ovs_br_floating
+          external_address: 10.90.0.110
+          external_network_netmask: 255.255.255.0
diff --git a/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay.yaml b/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay.yaml
index 71c1914..d4e5d4d 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dpdk/underlay.yaml
@@ -505,9 +505,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/cookiecutter-context-pike-dvr-sl.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/cookiecutter-context-pike-dvr-sl.yaml
index 7acd05a..298fd81 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/cookiecutter-context-pike-dvr-sl.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/cookiecutter-context-pike-dvr-sl.yaml
@@ -95,12 +95,11 @@
   openstack_cluster_size: compact
   openstack_compute_count: '2'
   openstack_compute_rack01_hostname: cmp
-  openstack_compute_rack01_single_subnet: 172.16.10
-  openstack_compute_rack01_tenant_subnet: 10.1.0
-  openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
-  openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
-  openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
+  openstack_compute_rack01_single_subnet: 10.167.4
+  openstack_compute_rack01_tenant_subnet: 10.167.6
+  openstack_compute_single_address_ranges: 10.167.4.105-10.167.4.106
+  openstack_compute_deploy_address_ranges: 10.167.5.105-10.167.5.106
+  openstack_compute_tenant_address_ranges: 10.167.6.105-10.167.6.106
   openstack_control_address: 10.167.4.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 10.167.4.101
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/environment_context.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/environment_context.yaml
index 5ed6d36..24c36e5 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/environment_context.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/environment_context.yaml
@@ -84,6 +84,7 @@
       - openstack_control_leader
       - openstack_database_leader
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -97,6 +98,7 @@
       - openstack_control
       - openstack_database
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -110,6 +112,7 @@
       - openstack_control
       - openstack_database
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -232,6 +235,7 @@
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
+      - features_lvm_backend_volume_vdb
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -246,7 +250,6 @@
     gtw01:
       reclass_storage_name: openstack_gateway_node01
       roles:
-      - openstack_gateway
       - linux_system_codename_xenial
       interfaces:
         ens3:
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay.yaml
index 25af0b6..5b871d7 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay.yaml
@@ -264,9 +264,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -305,9 +302,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -334,9 +328,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -597,9 +588,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -627,6 +615,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -678,6 +669,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
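Note: across these underlay.yaml files the 50 GB cinder disk moves from the controller VMs to the compute VMs, in line with the new features_lvm_backend_volume_vdb role (the extra disk presumably shows up in the guest as vdb and backs the cinder-volume LVM group). A sketch of the resulting compute-node volume list, assembled from the added lines (the 'system' volume name and the iso format are assumptions based on the surrounding templates):

volumes:
  - name: system
    capacity: !os_env NODE_VOLUME_SIZE, 150
    backing_store: cloudimage1604
    format: qcow2
  - name: cinder        # new: consumed by the features_lvm_backend_volume_vdb role
    capacity: 50
    format: qcow2
  - name: iso           # image with cloud-init metadata
    capacity: 1
    format: raw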
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/cookiecutter-context-pike-ovs-sl.yaml b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/cookiecutter-context-pike-ovs-sl.yaml
index f9d1f15..7917d5b 100644
--- a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/cookiecutter-context-pike-ovs-sl.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/cookiecutter-context-pike-ovs-sl.yaml
@@ -95,12 +95,11 @@
   openstack_cluster_size: compact
   openstack_compute_count: '2'
   openstack_compute_rack01_hostname: cmp
-  openstack_compute_rack01_single_subnet: 172.16.10
-  openstack_compute_rack01_tenant_subnet: 10.1.0
-  openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
-  openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
-  openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
+  openstack_compute_rack01_single_subnet: 10.167.4
+  openstack_compute_rack01_tenant_subnet: 10.167.6
+  openstack_compute_single_address_ranges: 10.167.4.105-10.167.4.106
+  openstack_compute_deploy_address_ranges: 10.167.5.105-10.167.5.106
+  openstack_compute_tenant_address_ranges: 10.167.6.105-10.167.6.106
   openstack_control_address: 10.167.4.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 10.167.4.101
@@ -232,3 +231,4 @@
   tenant_vlan: '20'
   upstream_proxy_enabled: 'False'
   use_default_network_scheme: 'False'
+  openstack_octavia_enabled: 'True'
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/environment-context.yaml b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/environment-context.yaml
index 1791477..adcfe0c 100644
--- a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/environment-context.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/environment-context.yaml
@@ -82,6 +82,7 @@
       - openstack_control_leader
       - openstack_database_leader
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -95,6 +96,7 @@
       - openstack_control
       - openstack_database
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -108,6 +110,7 @@
       - openstack_control
       - openstack_database
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -230,6 +233,7 @@
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
+      - features_lvm_backend_volume_vdb
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -244,7 +248,6 @@
     gtw01:
       reclass_storage_name: openstack_gateway_node01
       roles:
-      - openstack_gateway
       - linux_system_codename_xenial
       interfaces:
         ens3:
diff --git a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay.yaml b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay.yaml
index 78ef271..b1efeb1 100644
--- a/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-ovs-sl/underlay.yaml
@@ -265,9 +265,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -306,9 +303,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -335,9 +329,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -598,9 +589,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -627,6 +615,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -678,6 +669,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-cookiecutter-mcp-mitaka-dvr.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-cookiecutter-mcp-mitaka-dvr.yaml
index 8b320f4..58281a4 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-cookiecutter-mcp-mitaka-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-cookiecutter-mcp-mitaka-dvr.yaml
@@ -54,7 +54,6 @@
   openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
   openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
   openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
   openstack_control_address: 172.16.10.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 172.16.10.101
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-environment.yaml
index ca8114b..65f4131 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr/_context-environment.yaml
@@ -20,6 +20,7 @@
       - features_designate_pool_manager_database
       - features_designate_pool_manager
       - features_designate_pool_manager_keystone
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -36,6 +37,7 @@
       - openstack_message_queue
       - features_designate_pool_manager_database
       - features_designate_pool_manager
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -52,6 +54,7 @@
       - openstack_message_queue
       - features_designate_pool_manager_database
       - features_designate_pool_manager
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -115,6 +118,7 @@
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
+      - features_lvm_backend_volume_vdb
       - linux_system_codename_xenial
       interfaces:
         ens3:
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/salt.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/salt.yaml
index b965d0f..25ada5f 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr/salt.yaml
@@ -24,7 +24,7 @@
 
 {%- if OVERRIDES != '' %}
 {%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':') %}
+{%- set key, value = param.replace(' ','').split(':', 1) %}
 - description: Override cluster parameters
   cmd: |
     salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
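
The salt.yaml change above (repeated in each of the cookied-mcp templates below) makes the OVERRIDES parser split each override line on the first colon only, so a value that itself contains a colon, such as a URL, survives intact instead of breaking the two-name unpacking. A minimal Python sketch of the difference; Jinja calls Python's `str.split` here, and the override lines shown are hypothetical:

```python
# Why the templates now split on the first colon only: override values that
# contain ':' would otherwise break the two-name unpacking in the template.
param = "cluster_domain:deploy-name.local"          # hypothetical override line
url_param = "apt_mk_version:http://mirror:8085"     # value itself contains ':'

key, value = param.replace(' ', '').split(':', 1)
print(key, value)                                   # cluster_domain deploy-name.local

try:
    # old form: split(':') with no limit yields three pieces here and the
    # unpacking raises "too many values to unpack"
    key, value = url_param.replace(' ', '').split(':')
except ValueError as exc:
    print(exc)

# new form: everything after the first colon stays in the value
key, value = url_param.replace(' ', '').split(':', 1)
print(key, value)                                   # apt_mk_version http://mirror:8085
```
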
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay--user-data-cfg01.yaml
index a73ca23..d75dab1 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay--user-data-cfg01.yaml
@@ -47,6 +47,13 @@
    # Enable grub menu using updated config below
    - update-grub
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/default/grub.d/97-enable-grub-menu.cfg
      content: |
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay.yaml
index 7225c6d..9572556 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-dvr/underlay.yaml
@@ -232,9 +232,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -273,9 +270,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -302,9 +296,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -409,9 +400,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -439,6 +427,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -490,6 +481,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-cookiecutter-mcp-mitaka-ovs.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-cookiecutter-mcp-mitaka-ovs.yaml
index 5f2ede6..ecc8054 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-cookiecutter-mcp-mitaka-ovs.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-cookiecutter-mcp-mitaka-ovs.yaml
@@ -54,7 +54,6 @@
   openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
   openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
   openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
   openstack_control_address: 172.16.10.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 172.16.10.101
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-environment.yaml
index ca8114b..65f4131 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-ovs/_context-environment.yaml
@@ -20,6 +20,7 @@
       - features_designate_pool_manager_database
       - features_designate_pool_manager
       - features_designate_pool_manager_keystone
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -36,6 +37,7 @@
       - openstack_message_queue
       - features_designate_pool_manager_database
       - features_designate_pool_manager
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -52,6 +54,7 @@
       - openstack_message_queue
       - features_designate_pool_manager_database
       - features_designate_pool_manager
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -115,6 +118,7 @@
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
+      - features_lvm_backend_volume_vdb
       - linux_system_codename_xenial
       interfaces:
         ens3:
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/salt.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/salt.yaml
index 3f3def3..e71451b 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-ovs/salt.yaml
@@ -24,7 +24,7 @@
 
 {%- if OVERRIDES != '' %}
 {%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':') %}
+{%- set key, value = param.replace(' ','').split(':', 1) %}
 - description: Override cluster parameters
   cmd: |
     salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay.yaml
index c02624c..6fefafb 100644
--- a/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-mitaka-ovs/underlay.yaml
@@ -230,9 +230,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -271,9 +268,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -300,9 +294,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -407,9 +398,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -436,6 +424,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -487,6 +478,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/_context-cookiecutter-mcp-newton-dvr.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/_context-cookiecutter-mcp-newton-dvr.yaml
index 5773e62..9cb3979 100644
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/_context-cookiecutter-mcp-newton-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-dvr/_context-cookiecutter-mcp-newton-dvr.yaml
@@ -54,7 +54,6 @@
   openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
   openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
   openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
   openstack_control_address: 172.16.10.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 172.16.10.101
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/_context-environment.yaml
index 803068e..6afe16e 100644
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-dvr/_context-environment.yaml
@@ -20,6 +20,7 @@
       - features_designate_pool_manager_database
       - features_designate_pool_manager
       - features_designate_pool_manager_keystone
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -36,6 +37,7 @@
       - openstack_message_queue
       - features_designate_pool_manager_database
       - features_designate_pool_manager
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -52,6 +54,7 @@
       - openstack_message_queue
       - features_designate_pool_manager_database
       - features_designate_pool_manager
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -115,6 +118,7 @@
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
+      - features_lvm_backend_volume_vdb
       - linux_system_codename_xenial
       interfaces:
         ens3:
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/openstack.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/openstack.yaml
index 590d125..e298fcf 100644
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-dvr/openstack.yaml
@@ -195,3 +195,9 @@
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 30}
   skip_fail: false
+
+- description: WORKAROUND PROD-23354
+  cmd: sed -n 's/max_microversion = 2.42/max_microversion = 2.38/;w /var/log/lvm_mcp_newton.conf' /var/log/lvm_mcp.conf
+  node_name: {{ HOSTNAME_GTW01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
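
The WORKAROUND PROD-23354 step above (also added for the newton-ovs template further down) prepares a Newton-specific tempest config on the gateway node: with `-n` and a `w` command, sed writes every processed line to `/var/log/lvm_mcp_newton.conf`, replacing `max_microversion = 2.42` with `2.38`, the highest compute microversion available in Newton. A rough Python equivalent of that copy-with-substitution, using the paths from the step above:

```python
# Rough equivalent of the sed-based workaround: copy the tempest config,
# capping max_microversion at 2.38 for the Newton deployments.
# Paths come from the deploy step above.
SRC = "/var/log/lvm_mcp.conf"
DST = "/var/log/lvm_mcp_newton.conf"

with open(SRC) as fin, open(DST, "w") as fout:
    for line in fin:
        fout.write(line.replace("max_microversion = 2.42",
                                "max_microversion = 2.38"))
```
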
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/salt.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/salt.yaml
index d869571..8592287 100644
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-dvr/salt.yaml
@@ -24,7 +24,7 @@
 
 {%- if OVERRIDES != '' %}
 {%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':') %}
+{%- set key, value = param.replace(' ','').split(':', 1) %}
 - description: Override cluster parameters
   cmd: |
     salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/underlay--user-data-cfg01.yaml
index a73ca23..d75dab1 100644
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-dvr/underlay--user-data-cfg01.yaml
@@ -47,6 +47,13 @@
    # Enable grub menu using updated config below
    - update-grub
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/default/grub.d/97-enable-grub-menu.cfg
      content: |
diff --git a/tcp_tests/templates/cookied-mcp-newton-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-newton-dvr/underlay.yaml
index df0515f..62072c6 100644
--- a/tcp_tests/templates/cookied-mcp-newton-dvr/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-dvr/underlay.yaml
@@ -232,9 +232,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -273,9 +270,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -302,9 +296,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -409,9 +400,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -439,6 +427,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -490,6 +481,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/_context-cookiecutter-mcp-newton-ovs.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/_context-cookiecutter-mcp-newton-ovs.yaml
index 1786959..8049430 100644
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/_context-cookiecutter-mcp-newton-ovs.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-ovs/_context-cookiecutter-mcp-newton-ovs.yaml
@@ -54,7 +54,6 @@
   openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
   openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
   openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
   openstack_control_address: 172.16.10.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 172.16.10.101
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/_context-environment.yaml
index 3e05cf0..7baf03e 100644
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-ovs/_context-environment.yaml
@@ -21,6 +21,7 @@
       - features_designate_bind9_dns
       - features_designate_bind9
       - features_designate_bind9_keystone
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -38,6 +39,7 @@
       - features_designate_bind9_database
       - features_designate_bind9_dns
       - features_designate_bind9
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -54,6 +56,7 @@
       - openstack_message_queue
       - features_designate_bind9_database
       - features_designate_bind9
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -117,6 +120,7 @@
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
+      - features_lvm_backend_volume_vdb
       - linux_system_codename_xenial
       interfaces:
         ens3:
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/openstack.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/openstack.yaml
index ac225a5..52c0fdb 100644
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-ovs/openstack.yaml
@@ -217,3 +217,9 @@
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 30}
   skip_fail: false
+
+- description: WORKAROUND PROD-23354
+  cmd: sed -n 's/max_microversion = 2.42/max_microversion = 2.38/;w /var/log/lvm_mcp_newton.conf' /var/log/lvm_mcp.conf
+  node_name: {{ HOSTNAME_GTW01 }}
+  retry: {count: 1, delay: 30}
+  skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/salt.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/salt.yaml
index 496da5b..4d41eda 100644
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-ovs/salt.yaml
@@ -24,7 +24,7 @@
 
 {%- if OVERRIDES != '' %}
 {%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':') %}
+{%- set key, value = param.replace(' ','').split(':', 1) %}
 - description: Override cluster parameters
   cmd: |
     salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-ovs/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/cookied-mcp-newton-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-newton-ovs/underlay.yaml
index e84c25a..b21f928 100644
--- a/tcp_tests/templates/cookied-mcp-newton-ovs/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-newton-ovs/underlay.yaml
@@ -222,9 +222,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -263,9 +260,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -292,9 +286,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -399,9 +390,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -428,6 +416,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -479,6 +470,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml
index 807b66a..725ff1c 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-cookiecutter-pike-ovs-dpdk.yaml
@@ -54,7 +54,6 @@
   openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
   openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
   openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
   openstack_control_address: 172.16.10.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 172.16.10.101
@@ -96,7 +95,7 @@
   openstack_nfv_sriov_enabled: 'False'
   openstack_nova_compute_hugepages_count: '2048'
   openstack_nova_compute_nfv_req_enabled: 'False'
-  openstack_nova_cpu_pinning: '3'
+  openstack_nova_cpu_pinning: '4,5,8,9,10,11'
   openstack_ovs_dvr_enabled: 'False'
   openstack_ovs_encapsulation_type: vxlan
   openstack_proxy_address: 172.16.10.80
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-environment.yaml
index bd516f9..0cd60ba 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/_context-environment.yaml
@@ -15,6 +15,7 @@
       roles:
       - infra_kvm
       - openstack_control_leader
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -27,6 +28,7 @@
       roles:
       - infra_kvm
       - openstack_control
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -39,6 +41,7 @@
       roles:
       - infra_kvm
       - openstack_control
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -128,7 +131,7 @@
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
-      - features_lvm_backend
+      - features_lvm_backend_volume_vdb
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -155,7 +158,10 @@
         ens4:
           role: single_ctl
         ens5:
-          role: single_ovs_br_prv
-          mtu: 1500
+          role: bond0_ab_ovs_vxlan_mesh_no_tag
+        ens6:
+          role: bond0_ab_ovs_vxlan_mesh_no_tag
         ens7:
-          role: bond1_ab_ovs_floating
+          role: single_ovs_br_floating
+          external_address: 10.90.0.110
+          external_network_netmask: 255.255.255.0
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay.yaml
index a9f0722..1dba85e 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dpdk/underlay.yaml
@@ -489,9 +489,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
index cbc0f81..7df1f81 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
@@ -136,7 +136,6 @@
   openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
   openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
   openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
   openstack_control_address: 10.167.4.10
   openstack_control_hostname: ctl
   openstack_control_node01_address: 10.167.4.11
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay.yaml
index 621d9b4..a140743 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ceph/underlay.yaml
@@ -240,9 +240,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -281,9 +278,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -310,9 +304,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -417,10 +408,10 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
-                - name: cinder
+                - name: ceph_osd
                   capacity: 50
                   format: qcow2
-                - name: ceph
+                - name: ceph_journal
                   capacity: 50
                   format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
@@ -449,10 +440,10 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
-                - name: cinder
+                - name: ceph_osd
                   capacity: 50
                   format: qcow2
-                - name: ceph
+                - name: ceph_journal
                   capacity: 50
                   format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-environment.yaml
index 327788e..2e68b53 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/_context-environment.yaml
@@ -20,6 +20,7 @@
       - features_designate_pool_manager_database
       - features_designate_pool_manager
       - features_designate_pool_manager_keystone
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -36,6 +37,7 @@
       - openstack_message_queue
       - features_designate_pool_manager_database
       - features_designate_pool_manager
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -52,6 +54,7 @@
       - openstack_message_queue
       - features_designate_pool_manager_database
       - features_designate_pool_manager
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -115,6 +118,7 @@
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
+      - features_lvm_backend_volume_vdb
       - linux_system_codename_xenial
       interfaces:
         ens3:
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/openstack.yaml
index a779e5d..679763c 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/openstack.yaml
@@ -188,79 +188,6 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
-# Configure cinder-volume salt-call PROD-13167
-- description: Set disks 01
-  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-  node_name: {{ HOSTNAME_CTL01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Set disks 02
-  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Set disks 03
-  cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-  node_name: {{ HOSTNAME_CTL03 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create partitions 01
-  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-  node_name: {{ HOSTNAME_CTL01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create partitions 02
-  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Create partitions 03
-  cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-  node_name: {{ HOSTNAME_CTL03 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: create volume_group
-  cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Install cinder-volume
-  cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Install crudini
-  cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Temporary WR set enabled backends value 01
-  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-  node_name: {{ HOSTNAME_CTL01 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Temporary WR set enabled backends value 02
-  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-  node_name: {{ HOSTNAME_CTL02 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
-- description: Temporary WR set enabled backends value 03
-  cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-  node_name: {{ HOSTNAME_CTL03 }}
-  retry: {count: 1, delay: 30}
-  skip_fail: false
-
 - description: Install docker.io on gtw
   cmd: salt-call cmd.run 'apt-get install docker.io -y'
   node_name: {{ HOSTNAME_GTW01 }}
@@ -273,13 +200,6 @@
   retry: {count: 1, delay: 30}
   skip_fail: false
 
-- description: Restart cinder volume
-  cmd: |
-    salt -C 'I@cinder:controller' service.restart cinder-volume;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
 - description: create rc file on cfg
   cmd: scp ctl01:/root/keystonercv3 /root
   node_name: {{ HOSTNAME_CFG01 }}
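
The block removed above was the manual cinder-volume workaround (PROD-13167): partition /dev/vdb on each controller, create the `cinder-volumes` LVM group, install cinder-volume, force `enabled_backends = lvm` via crudini, then restart the service. That provisioning is now expressed through the `features_lvm_backend_control` and `features_lvm_backend_volume_vdb` roles added in the environment contexts. For reference, a condensed sketch of what the deleted steps did, assuming root on a disposable node with the spare disk at /dev/vdb (the original ran equivalent commands through salt-call on ctl01..ctl03):

```python
# Condensed sketch of the removed manual LVM/cinder preparation (PROD-13167).
# Assumes root on a disposable VM with the spare disk at /dev/vdb; the real
# flow ran equivalent commands through salt-call on each ctl node.
import subprocess

def sh(cmd):
    """Run a shell command and fail loudly, mirroring skip_fail: false."""
    subprocess.run(cmd, shell=True, check=True)

sh("echo -e 'n\\np\\n\\n\\n\\nw' | fdisk /dev/vdb")   # create one primary partition
sh("pvcreate /dev/vdb1")                              # LVM physical volume
sh("vgcreate cinder-volumes /dev/vdb1")               # volume group cinder expects
sh("apt-get install -y cinder-volume crudini")
sh("crudini --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm")
sh("service cinder-volume restart")
```
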
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay.yaml
index 4c8efd8..2e5ceaa 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr-ssl/underlay.yaml
@@ -219,9 +219,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -260,9 +257,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -289,9 +283,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -396,9 +387,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -426,6 +414,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -477,6 +468,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
index 111520b..15c8c9b 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
@@ -54,7 +54,6 @@
   openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
   openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
   openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
   openstack_control_address: 172.16.10.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 172.16.10.101
@@ -104,6 +103,10 @@
   openstack_proxy_node02_address: 172.16.10.122
   openstack_proxy_node02_hostname: prx02
   openstack_upgrade_node01_address: 172.16.10.19
+  designate_backend: bind
+  designate_enabled: 'True'
+  openstack_dns_node01_address: 172.16.10.113
+  openstack_dns_node02_address: 172.16.10.114
   openstack_version: pike
   oss_enabled: 'False'
   oss_node03_address: ${_param:stacklight_monitor_node03_address}
@@ -188,17 +191,11 @@
   manila_enabled: 'True'
   manila_share_backend: 'lvm'
   manila_lvm_volume_name: 'manila-volume'
-  manila_lvm_devices: '/dev/loop1'
-  openstack_share_address: 172.16.10.68
-  openstack_share_node01_address: 172.16.10.69
-  openstack_share_node02_address: 172.16.10.70
-  openstack_share_node03_address: 172.16.10.71
-  openstack_share_node01_deploy_address: 192.168.10.69
-  openstack_share_node02_deploy_address: 192.168.10.70
-  openstack_share_node03_deploy_address: 192.168.10.71
+  manila_lvm_devices: '/dev/vdc'
+  openstack_share_address: 172.16.10.203
+  openstack_share_node01_address: 172.16.10.204
+  openstack_share_node01_deploy_address: 192.168.10.204
   openstack_share_hostname: share
   openstack_share_node01_hostname: share01
-  openstack_share_node02_hostname: share02
-  openstack_share_node03_hostname: share03
-  designate_backend: bind
-  designate_enabled: 'True'
+  openstack_octavia_enabled: 'True'
+
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/_context-environment.yaml
index 04c1e4c..f1ba914 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/_context-environment.yaml
@@ -17,6 +17,7 @@
       - openstack_control_leader
       - openstack_database_leader
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -31,6 +32,7 @@
       - openstack_control
       - openstack_database
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -45,6 +47,7 @@
       - openstack_control
       - openstack_database
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -167,6 +170,7 @@
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
+      - features_lvm_backend_volume_vdb
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -181,7 +185,6 @@
     gtw01.mcp-pike-dvr.local:
       reclass_storage_name: openstack_gateway_node01
       roles:
-      - openstack_gateway
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -204,28 +207,6 @@
         ens4:
           role: single_ctl
 
-    share02.mcp-pike-dvr.local:
-      reclass_storage_name: openstack_share_node02
-      roles:
-      - openstack_share
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    share03.mcp-pike-dvr.local:
-      reclass_storage_name: openstack_share_node03
-      roles:
-      - openstack_share
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
     dns01.mcp-pike-dvr.local:
       reclass_storage_name: openstack_dns_node01
       roles:
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml
index 000f4f9..59e85e3 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml
@@ -56,3 +56,8 @@
 {{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_MANILA() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_OCTAVIA_API() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_OCTAVIA_MANAGER() }}
+
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay.yaml
index 698c854..32ec67d 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay.yaml
@@ -33,8 +33,6 @@
 {% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_SHARE02 = os_env('HOSTNAME_SHARE01', 'share02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_SHARE03 = os_env('HOSTNAME_SHARE01', 'share03.' + DOMAIN_NAME) %}
 {% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
 
 template:
@@ -64,12 +62,10 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_DNS01 }}: +113
+            default_{{ HOSTNAME_DNS02 }}: +114
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+90, -10]
 
@@ -95,12 +91,10 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_DNS01 }}: +113
+            default_{{ HOSTNAME_DNS02 }}: +114
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+90, -10]
 
@@ -126,12 +120,10 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_DNS01 }}: +113
+            default_{{ HOSTNAME_DNS02 }}: +114
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+10, -10]
 
@@ -157,12 +149,10 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_DNS01 }}: +113
+            default_{{ HOSTNAME_DNS02 }}: +114
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+10, -10]
 
@@ -274,9 +264,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -315,9 +302,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -344,9 +328,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -607,9 +588,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -637,6 +615,12 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: manila
+                  capacity: 20
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -688,6 +672,12 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: manila
+                  capacity: 20
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -752,58 +742,6 @@
               interfaces: *all_interfaces
               network_config: *all_network_config
 
-          - name: {{ HOSTNAME_SHARE02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_SHARE03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
           - name: {{ HOSTNAME_DNS01 }}
             role: salt_minion
             params:
@@ -854,4 +792,4 @@
                   cloudinit_user_data: *cloudinit_user_data_1604
 
               interfaces: *all_interfaces
-              network_config: *all_network_config
\ No newline at end of file
+              network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml
index 131d153..80ca7f6 100644
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml
@@ -54,7 +54,6 @@
   openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
   openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
   openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
   openstack_control_address: 172.16.10.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 172.16.10.101
@@ -104,6 +103,10 @@
   openstack_proxy_node02_address: 172.16.10.122
   openstack_proxy_node02_hostname: prx02
   openstack_upgrade_node01_address: 172.16.10.19
+  designate_backend: powerdns
+  designate_enabled: 'True'
+  openstack_dns_node01_address: 172.16.10.113
+  openstack_dns_node02_address: 172.16.10.114
   openstack_version: pike
   oss_enabled: 'False'
   oss_node03_address: ${_param:stacklight_monitor_node03_address}
@@ -188,17 +191,9 @@
   manila_enabled: 'True'
   manila_share_backend: 'lvm'
   manila_lvm_volume_name: 'manila-volume'
-  manila_lvm_devices: '/dev/loop1'
-  openstack_share_address: 172.16.10.68
-  openstack_share_node01_address: 172.16.10.69
-  openstack_share_node02_address: 172.16.10.70
-  openstack_share_node03_address: 172.16.10.71
-  openstack_share_node01_deploy_address: 192.168.10.69
-  openstack_share_node02_deploy_address: 192.168.10.70
-  openstack_share_node03_deploy_address: 192.168.10.71
+  manila_lvm_devices: '/dev/vdc'
+  openstack_share_address: 172.16.10.203
+  openstack_share_node01_address: 172.16.10.204
+  openstack_share_node01_deploy_address: 192.168.10.204
   openstack_share_hostname: share
   openstack_share_node01_hostname: share01
-  openstack_share_node02_hostname: share02
-  openstack_share_node03_hostname: share03
-  designate_backend: powerdns
-  designate_enabled: 'True'
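
Taken together with the matching hunks for the queens labs below, this context change collapses the Manila share plane from three loop-device-backed share nodes to a single share01 on a dedicated virtual disk, moves DNS/Designate onto explicit addresses, and drops the unused openstack_compute_backend_address_ranges key. Reconstructed from the hunks above, the resulting share block in this context should read roughly:

  manila_enabled: 'True'
  manila_share_backend: 'lvm'
  manila_lvm_volume_name: 'manila-volume'
  manila_lvm_devices: '/dev/vdc'            # dedicated disk instead of the former /dev/loop1
  openstack_share_address: 172.16.10.203
  openstack_share_node01_address: 172.16.10.204
  openstack_share_node01_deploy_address: 192.168.10.204
  openstack_share_hostname: share
  openstack_share_node01_hostname: share01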
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/_context-environment.yaml
index 2fd6295..d57ceaf 100644
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/_context-environment.yaml
@@ -17,6 +17,7 @@
       - openstack_control_leader
       - openstack_database_leader
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -31,6 +32,7 @@
       - openstack_control
       - openstack_database
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -45,6 +47,7 @@
       - openstack_control
       - openstack_database
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -167,6 +170,7 @@
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
+      - features_lvm_backend_volume_vdb
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -225,25 +229,3 @@
           role: single_dhcp
         ens4:
           role: single_ctl
-
-    share02.mcp-pike-ovs.local:
-      reclass_storage_name: openstack_share_node02
-      roles:
-      - openstack_share
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    share03.mcp-pike-ovs.local:
-      reclass_storage_name: openstack_share_node03
-      roles:
-      - openstack_share
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
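
The environment model follows suit: the three controller entries gain the features_lvm_backend_control role, the compute rack gains features_lvm_backend_volume_vdb, and the dedicated share02/share03 nodes disappear. A compute definition after the patch would look roughly like this (the cmp<<count>> key is illustrative and the vdb mapping is inferred from the role name; only the roles themselves come from the hunks above):

    cmp<<count>>:  # illustrative node key
      reclass_storage_name: openstack_compute_rack01
      roles:
      - openstack_compute
      - features_lvm_backend_volume_vdb   # LVM volume backend on the compute's second disk
      - linux_system_codename_xenial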
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
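
The cfg01 user-data now bootstraps Salt before any deployment scenario runs. Annotated for clarity (the comments are editorial; the commands are exactly the ones added above):

   # prepare the directory that generated reclass node definitions are written into
   - mkdir -p /srv/salt/reclass/nodes
   # enable both services so they survive a reboot, then start them immediately
   - systemctl enable salt-master
   - systemctl enable salt-minion
   - systemctl start salt-master
   - systemctl start salt-minion
   # sanity check: run test.ping through the local minion (waits up to 120 s) before continuing
   - salt-call -l info --timeout=120 test.ping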
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay.yaml
index 95ffa33..d1c83dd 100644
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay.yaml
@@ -34,8 +34,6 @@
 {% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_SHARE02 = os_env('HOSTNAME_SHARE01', 'share02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_SHARE03 = os_env('HOSTNAME_SHARE01', 'share03.' + DOMAIN_NAME) %}
 
 template:
   devops_settings:
@@ -64,12 +62,10 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_DNS01 }}: +113
+            default_{{ HOSTNAME_DNS02 }}: +114
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+90, -10]
 
@@ -95,12 +91,10 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_DNS01 }}: +113
+            default_{{ HOSTNAME_DNS02 }}: +114
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +711
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+90, -10]
 
@@ -126,12 +120,10 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_DNS01 }}: +113
+            default_{{ HOSTNAME_DNS02 }}: +114
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+10, -10]
 
@@ -157,12 +149,10 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_DNS01 }}: +113
+            default_{{ HOSTNAME_DNS02 }}: +114
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+10, -10]
 
@@ -274,9 +264,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -315,9 +302,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -344,9 +328,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -607,9 +588,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -636,6 +614,12 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: manila
+                  capacity: 20
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -687,6 +671,12 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: manila
+                  capacity: 20
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -802,55 +792,3 @@
 
               interfaces: *all_interfaces
               network_config: *all_network_config
-
-          - name: {{ HOSTNAME_SHARE02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_SHARE03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
\ No newline at end of file
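
In the underlay the address plan mirrors the context: dns01/dns02 move to +113/+114 and share01 to +204 in every address pool, while the standalone 50 GB cinder disks are dropped from the control-plane VMs and reappear on the compute VMs together with a new 20 GB manila disk. Reconstructed from the hunks above, a compute volume set now looks like this (the vdb/vdc guest device names in the comments are an assumption based on features_lvm_backend_volume_vdb and manila_lvm_devices: '/dev/vdc'):

              volumes:
                - name: system
                  capacity: !os_env NODE_VOLUME_SIZE, 150
                  backing_store: cloudimage1604
                  format: qcow2
                - name: cinder   # second guest disk (presumably vdb), backing the LVM cinder volumes
                  capacity: 50
                  format: qcow2
                - name: manila   # third guest disk (presumably vdc), used as manila_lvm_devices
                  capacity: 20
                  format: qcow2
                - name: iso      # cloud-init config drive, unchanged
                  capacity: 1
                  format: raw
                  device: cdrom
                  bus: ide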
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/cookiecutter-context-dvr-ceph.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
index 135a2c4..39099ee 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/cookiecutter-context-dvr-ceph.yaml
@@ -135,7 +135,6 @@
   openstack_compute_single_address_ranges: 10.167.4.105-10.167.4.106
   openstack_compute_deploy_address_ranges: 10.167.5.105-10.167.5.106
   openstack_compute_tenant_address_ranges: 10.167.6.105-10.167.6.106
-  openstack_compute_backend_address_ranges: 10.167.6.105-10.167.6.106
   openstack_dns_hostname: dns
   openstack_dns_node01_address: 10.167.4.111
   openstack_dns_node01_hostname: dns01
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay.yaml
index c898837..727758e 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ceph/underlay.yaml
@@ -240,9 +240,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -281,9 +278,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -310,9 +304,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -417,10 +408,10 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
-                - name: cinder
+                - name: ceph_osd
                   capacity: 50
                   format: qcow2
-                - name: ceph
+                - name: ceph_journal
                   capacity: 50
                   format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
@@ -449,10 +440,10 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
-                - name: cinder
+                - name: ceph_osd
                   capacity: 50
                   format: qcow2
-                - name: ceph
+                - name: ceph_journal
                   capacity: 50
                   format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
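
Besides dropping the cinder disks from the control-plane VMs, the ceph underlay renames the extra disks on the osd VMs: cinder becomes ceph_osd and ceph becomes ceph_journal, presumably to reflect what each disk is used for (OSD data versus journal). Sizes stay at 50 GB:

                - name: ceph_osd       # OSD data disk
                  capacity: 50
                  format: qcow2
                - name: ceph_journal   # OSD journal disk
                  capacity: 50
                  format: qcow2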
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-cookiecutter-mcp-queens-dvr-ssl.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-cookiecutter-mcp-queens-dvr-ssl.yaml
index 35587d1..8c55d51 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-cookiecutter-mcp-queens-dvr-ssl.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-cookiecutter-mcp-queens-dvr-ssl.yaml
@@ -58,7 +58,6 @@
   openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
   openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
   openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
   openstack_control_address: 172.16.10.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 172.16.10.101
@@ -109,9 +108,9 @@
   openstack_proxy_node02_hostname: prx02
   openstack_upgrade_node01_address: 172.16.10.19
   openstack_dns_hostname: dns
-  openstack_dns_node01_address: 172.16.10.111
+  openstack_dns_node01_address: 172.16.10.113
   openstack_dns_node01_hostname: dns01
-  openstack_dns_node02_address: 172.16.10.112
+  openstack_dns_node02_address: 172.16.10.114
   openstack_dns_node02_hostname: dns02
   openstack_version: queens
   oss_enabled: 'False'
@@ -197,7 +196,7 @@
   rsync_fernet_rotation: 'True'
   compute_padding_with_zeros: False
   designate_backend: bind
-  designate_enabled: 'True'
+  designate_enabled: 'False'
   nova_vnc_tls_enabled: 'True'
   galera_ssl_enabled: 'True'
   openstack_mysql_x509_enabled: 'True'
@@ -206,18 +205,12 @@
   openstack_internal_protocol: 'https'
   tenant_telemetry_enabled: 'True'
   gnocchi_aggregation_storage: file
-  manila_enabled: 'True'
+  manila_enabled: 'False'
   manila_share_backend: 'lvm'
   manila_lvm_volume_name: 'manila-volume'
-  manila_lvm_devices: '/dev/loop1'
-  openstack_share_address: 172.16.10.68
-  openstack_share_node01_address: 172.16.10.69
-  openstack_share_node02_address: 172.16.10.70
-  openstack_share_node03_address: 172.16.10.71
-  openstack_share_node01_deploy_address: 192.168.10.69
-  openstack_share_node02_deploy_address: 192.168.10.70
-  openstack_share_node03_deploy_address: 192.168.10.71
+  manila_lvm_devices: '/dev/vdc'
+  openstack_share_address: 172.16.10.203
+  openstack_share_node01_address: 172.16.10.204
+  openstack_share_node01_deploy_address: 192.168.10.204
   openstack_share_hostname: share
-  openstack_share_node01_hostname: share01
-  openstack_share_node02_hostname: share02
-  openstack_share_node03_hostname: share03
\ No newline at end of file
+  openstack_share_node01_hostname: share01
\ No newline at end of file
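
Unlike the other contexts in this patch, the SSL variant turns Designate and Manila off entirely rather than reworking them; the dns and share nodes and the corresponding install macros are removed further down (see the _context-environment.yaml and openstack.yaml hunks below). The two toggles that drive this:

  designate_enabled: 'False'   # dns01/dns02 node definitions and MACRO_INSTALL_DESIGNATE are dropped below
  manila_enabled: 'False'      # share01..share03 node definitions and MACRO_INSTALL_MANILA are dropped below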
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-environment.yaml
index 4ff3212..cc6140a 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-environment.yaml
@@ -17,6 +17,7 @@
       - openstack_control_leader
       - openstack_database_leader
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -31,6 +32,7 @@
       - openstack_control
       - openstack_database
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -45,6 +47,7 @@
       - openstack_control
       - openstack_database
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -167,6 +170,7 @@
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
+      - features_lvm_backend_volume_vdb
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -192,58 +196,3 @@
           role: bond0_ab_ovs_vxlan_mesh
         ens6:
           role: bond1_ab_ovs_floating
-
-    share01.mcp-queens-dvr-ssl.local:
-      reclass_storage_name: openstack_share_node01
-      roles:
-      - openstack_share
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    share02.mcp-queens-dvr-ssl.local:
-      reclass_storage_name: openstack_share_node02
-      roles:
-      - openstack_share
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    share03.mcp-queens-dvr-ssl.local:
-      reclass_storage_name: openstack_share_node03
-      roles:
-      - openstack_share
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    dns01.mcp-queens-dvr-ssl.local:
-      reclass_storage_name: openstack_dns_node01
-      roles:
-      - openstack_dns
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    dns02.mcp-queens-dvr-ssl.local:
-      reclass_storage_name: openstack_dns_node02
-      roles:
-      - openstack_dns
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/openstack.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/openstack.yaml
index a768afe..64a3ce0 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/openstack.yaml
@@ -27,10 +27,6 @@
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
 
-{{ SHARED_OPENSTACK.MACRO_INSTALL_DESIGNATE(INSTALL_BIND=true) }}
-
 {{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_MANILA() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay.yaml
index 895945a..f39e2a3 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/underlay.yaml
@@ -32,8 +32,6 @@
 {% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_SHARE02 = os_env('HOSTNAME_SHARE01', 'share02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_SHARE03 = os_env('HOSTNAME_SHARE01', 'share03.' + DOMAIN_NAME) %}
 
 template:
   devops_settings:
@@ -63,9 +61,7 @@
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+90, -10]
 
@@ -92,9 +88,7 @@
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+90, -10]
 
@@ -121,9 +115,7 @@
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+10, -10]
 
@@ -150,9 +142,7 @@
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+130, +220]
 
@@ -264,9 +254,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -305,9 +292,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -334,9 +318,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -597,9 +578,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -627,6 +605,12 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: manila
+                  capacity: 20
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -678,6 +662,12 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: manila
+                  capacity: 20
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -741,55 +731,3 @@
 
               interfaces: *all_interfaces
               network_config: *all_network_config
-
-          - name: {{ HOSTNAME_SHARE02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_SHARE03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/_context-cookiecutter-mcp-queens-dvr.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/_context-cookiecutter-mcp-queens-dvr.yaml
index a37d4e2..15f8d68 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/_context-cookiecutter-mcp-queens-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/_context-cookiecutter-mcp-queens-dvr.yaml
@@ -58,7 +58,6 @@
   openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
   openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
   openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
   openstack_control_address: 172.16.10.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 172.16.10.101
@@ -109,9 +108,9 @@
   openstack_proxy_node02_hostname: prx02
   openstack_upgrade_node01_address: 172.16.10.19
   openstack_dns_hostname: dns
-  openstack_dns_node01_address: 172.16.10.111
+  openstack_dns_node01_address: 172.16.10.113
   openstack_dns_node01_hostname: dns01
-  openstack_dns_node02_address: 172.16.10.112
+  openstack_dns_node02_address: 172.16.10.114
   openstack_dns_node02_hostname: dns02
   openstack_version: queens
   oss_enabled: 'False'
@@ -208,15 +207,9 @@
   manila_enabled: 'True'
   manila_share_backend: 'lvm'
   manila_lvm_volume_name: 'manila-volume'
-  manila_lvm_devices: '/dev/loop1'
-  openstack_share_address: 172.16.10.68
-  openstack_share_node01_address: 172.16.10.69
-  openstack_share_node02_address: 172.16.10.70
-  openstack_share_node03_address: 172.16.10.71
-  openstack_share_node01_deploy_address: 192.168.10.69
-  openstack_share_node02_deploy_address: 192.168.10.70
-  openstack_share_node03_deploy_address: 192.168.10.71
+  manila_lvm_devices: '/dev/vdc'
+  openstack_share_address: 172.16.10.203
+  openstack_share_node01_address: 172.16.10.204
+  openstack_share_node01_deploy_address: 192.168.10.204
   openstack_share_hostname: share
   openstack_share_node01_hostname: share01
-  openstack_share_node02_hostname: share02
-  openstack_share_node03_hostname: share03
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/_context-environment.yaml
index 42d9589..081c51d 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/_context-environment.yaml
@@ -17,6 +17,7 @@
       - openstack_control_leader
       - openstack_database_leader
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -31,6 +32,7 @@
       - openstack_control
       - openstack_database
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -45,6 +47,7 @@
       - openstack_control
       - openstack_database
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -167,6 +170,7 @@
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
+      - features_lvm_backend_volume_vdb
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -204,28 +208,6 @@
         ens4:
           role: single_ctl
 
-    share02.mcp-queens-dvr.local:
-      reclass_storage_name: openstack_share_node02
-      roles:
-      - openstack_share
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    share03.mcp-queens-dvr.local:
-      reclass_storage_name: openstack_share_node03
-      roles:
-      - openstack_share
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
     dns01.mcp-queens-dvr.local:
       reclass_storage_name: openstack_dns_node01
       roles:
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/underlay.yaml
index 18ea180..f6d9b98 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/underlay.yaml
@@ -34,8 +34,6 @@
 {% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_SHARE02 = os_env('HOSTNAME_SHARE01', 'share02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_SHARE03 = os_env('HOSTNAME_SHARE01', 'share03.' + DOMAIN_NAME) %}
 
 template:
   devops_settings:
@@ -64,12 +62,10 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_DNS01 }}: +113
+            default_{{ HOSTNAME_DNS02 }}: +114
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+90, -10]
 
@@ -95,12 +91,10 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_DNS01 }}: +113
+            default_{{ HOSTNAME_DNS02 }}: +114
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+90, -10]
 
@@ -127,11 +121,9 @@
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_DNS01 }}: +113
+            default_{{ HOSTNAME_DNS02 }}: +114
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+10, -10]
 
@@ -157,12 +149,10 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_DNS01 }}: +113
+            default_{{ HOSTNAME_DNS02 }}: +114
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+130, +220]
 
@@ -274,9 +264,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -315,9 +302,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -344,9 +328,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -607,9 +588,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -637,6 +615,12 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: manila
+                  capacity: 20
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -688,6 +672,12 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: manila
+                  capacity: 20
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -752,58 +742,6 @@
               interfaces: *all_interfaces
               network_config: *all_network_config
 
-          - name: {{ HOSTNAME_SHARE02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_SHARE03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
           - name: {{ HOSTNAME_DNS01 }}
             role: salt_minion
             params:
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/_context-cookiecutter-mcp-queens-ovs.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/_context-cookiecutter-mcp-queens-ovs.yaml
index a3707d1..18a8beb 100644
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/_context-cookiecutter-mcp-queens-ovs.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/_context-cookiecutter-mcp-queens-ovs.yaml
@@ -58,7 +58,6 @@
   openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
   openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
   openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
   openstack_control_address: 172.16.10.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 172.16.10.101
@@ -109,9 +108,9 @@
   openstack_proxy_node02_hostname: prx02
   openstack_upgrade_node01_address: 172.16.10.19
   openstack_dns_hostname: dns
-  openstack_dns_node01_address: 172.16.10.111
+  openstack_dns_node01_address: 172.16.10.113
   openstack_dns_node01_hostname: dns01
-  openstack_dns_node02_address: 172.16.10.112
+  openstack_dns_node02_address: 172.16.10.114
   openstack_dns_node02_hostname: dns02
   openstack_version: queens
   oss_enabled: 'False'
@@ -208,15 +207,9 @@
   manila_enabled: 'True'
   manila_share_backend: 'lvm'
   manila_lvm_volume_name: 'manila-volume'
-  manila_lvm_devices: '/dev/loop1'
-  openstack_share_address: 172.16.10.68
-  openstack_share_node01_address: 172.16.10.69
-  openstack_share_node02_address: 172.16.10.70
-  openstack_share_node03_address: 172.16.10.71
-  openstack_share_node01_deploy_address: 192.168.10.69
-  openstack_share_node02_deploy_address: 192.168.10.70
-  openstack_share_node03_deploy_address: 192.168.10.71
+  manila_lvm_devices: '/dev/vdc'
+  openstack_share_address: 172.16.10.203
+  openstack_share_node01_address: 172.16.10.204
+  openstack_share_node01_deploy_address: 192.168.10.204
   openstack_share_hostname: share
   openstack_share_node01_hostname: share01
-  openstack_share_node02_hostname: share02
-  openstack_share_node03_hostname: share03
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/_context-environment.yaml
index 2be8edf..1593d43 100644
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/_context-environment.yaml
@@ -17,6 +17,7 @@
       - openstack_control_leader
       - openstack_database_leader
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -31,6 +32,7 @@
       - openstack_control
       - openstack_database
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -45,6 +47,7 @@
       - openstack_control
       - openstack_database
       - openstack_message_queue
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -167,6 +170,7 @@
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
+      - features_lvm_backend_volume_vdb
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -225,25 +229,3 @@
           role: single_dhcp
         ens4:
           role: single_ctl
-
-    share02.mcp-queens-ovs.local:
-      reclass_storage_name: openstack_share_node02
-      roles:
-      - openstack_share
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
-
-    share03.mcp-queens-ovs.local:
-      reclass_storage_name: openstack_share_node03
-      roles:
-      - openstack_share
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_dhcp
-        ens4:
-          role: single_ctl
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/underlay.yaml
index ac187d1..6ea4098 100644
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/underlay.yaml
@@ -34,8 +34,6 @@
 {% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_SHARE02 = os_env('HOSTNAME_SHARE01', 'share02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_SHARE03 = os_env('HOSTNAME_SHARE01', 'share03.' + DOMAIN_NAME) %}
 template:
   devops_settings:
     env_name: {{ os_env('ENV_NAME', 'cookied-mcp-queens-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
@@ -63,12 +61,10 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_DNS01 }}: +113
+            default_{{ HOSTNAME_DNS02 }}: +114
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+90, -10]
 
@@ -94,12 +90,10 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_DNS01 }}: +113
+            default_{{ HOSTNAME_DNS02 }}: +114
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+90, -10]
 
@@ -125,12 +119,10 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_DNS01 }}: +113
+            default_{{ HOSTNAME_DNS02 }}: +114
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+10, -10]
 
@@ -156,12 +148,10 @@
             default_{{ HOSTNAME_MTR02 }}: +87
             default_{{ HOSTNAME_MTR03 }}: +88
             default_{{ HOSTNAME_GTW01 }}: +110
-            default_{{ HOSTNAME_DNS01 }}: +111
-            default_{{ HOSTNAME_DNS02 }}: +112
+            default_{{ HOSTNAME_DNS01 }}: +113
+            default_{{ HOSTNAME_DNS02 }}: +114
             default_{{ HOSTNAME_PRX01 }}: +121
-            default_{{ HOSTNAME_SHARE01 }}: +69
-            default_{{ HOSTNAME_SHARE02 }}: +70
-            default_{{ HOSTNAME_SHARE03 }}: +71
+            default_{{ HOSTNAME_SHARE01 }}: +204
           ip_ranges:
             dhcp: [+130, +220]
 
@@ -273,9 +263,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -314,9 +301,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -343,9 +327,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -606,9 +587,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -635,6 +613,12 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: manila
+                  capacity: 20
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -686,6 +670,12 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
+                - name: manila
+                  capacity: 20
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -801,55 +791,3 @@
 
               interfaces: *all_interfaces
               network_config: *all_network_config
-
-          - name: {{ HOSTNAME_SHARE02 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
-
-          - name: {{ HOSTNAME_SHARE03 }}
-            role: salt_minion
-            params:
-              vcpu: !os_env SLAVE_NODE_CPU, 2
-              memory: !os_env SLAVE_NODE_MEMORY, 4096
-              boot:
-                - hd
-              cloud_init_volume_name: iso
-              cloud_init_iface_up: ens3
-              volumes:
-                - name: system
-                  capacity: !os_env NODE_VOLUME_SIZE, 150
-                  backing_store: mcp_ubuntu_1604_image
-                  format: qcow2
-                - name: iso  # Volume with name 'iso' will be used
-                             # for store image with cloud-init metadata.
-                  capacity: 1
-                  format: raw
-                  device: cdrom
-                  bus: ide
-                  cloudinit_meta_data: *cloudinit_meta_data
-                  cloudinit_user_data: *cloudinit_user_data_1604
-
-              interfaces: *all_interfaces
-              network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml
index c5d17cf..eb26bae 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml
@@ -29,6 +29,14 @@
   retry: {count: 1, delay: 5}
   skip_fail: true
 
+- description: Temporary workaround for removing virtual gtw nodes
+  cmd: |
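+    # Drop the OVS sizes/placement classes from infra/kvm.yml so the generated model contains no virtual gtw nodes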
+    sed -i 's/\-\ system\.salt\.control\.sizes\.ovs\.compact//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    sed -i 's/\-\ system\.salt\.control\.placement\.ovs\.compact//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
 - description: Temporary WR for correct bridge name according to envoronment templates
   cmd: |
     sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
@@ -39,5 +47,15 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
+- description: "WR for PROD-24311"
+  cmd: |
+    set -e;
+    . /root/venv-reclass-tools/bin/activate;
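+    # Pin the salt_control xenial/trusty image URLs to the selected REPOSITORY_SUITE (workaround for PROD-24311)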
+    reclass-tools add-key parameters._param.salt_control_xenial_image 'https://apt.mcp.mirantis.net/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+    reclass-tools add-key parameters._param.salt_control_trusty_image 'https://apt.mcp.mirantis.net/images/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
 {{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
 
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ocata-contrail.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ocata-contrail.yaml
index 62da3ec..9970edd 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ocata-contrail.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ocata-contrail.yaml
@@ -32,12 +32,12 @@
     set -e;
     . /root/venv-reclass-tools/bin/activate;
     # Remove rack01 key
-    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
     # Add openstack_compute_node definition from system
-    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml --merge;
+    reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml --merge;
     # Workaround for compute nodes addresses
-    reclass-tools add-key parameters._param.openstack_compute_node01_address '${_param:openstack_compute_rack01_single_subnet}'.105 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools add-key parameters._param.openstack_compute_node02_address '${_param:openstack_compute_rack01_single_subnet}'.106 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools add-key parameters._param.openstack_compute_node01_address '${_param:openstack_compute_rack01_single_subnet}'.105 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools add-key parameters._param.openstack_compute_node02_address '${_param:openstack_compute_rack01_single_subnet}'.106 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dpdk.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dpdk.yaml
index 6d36cfd..e31a230 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dpdk.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dpdk.yaml
@@ -21,23 +21,31 @@
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     set -e;
-    sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
     # set wider cpu mask for DPDK
-    salt-call reclass.cluster_meta_set name='compute_ovs_dpdk_lcore_mask' value='"0xF"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+    salt-call reclass.cluster_meta_set name='compute_ovs_dpdk_lcore_mask' value='"0x41"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+    salt-call reclass.cluster_meta_set name='compute_ovs_pmd_cpu_mask' value='"0xe"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+    salt-call reclass.cluster_meta_set name='compute_ovs_dpdk_socket_mem' value='"512,512"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
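+    # 0x41 selects cores 0 and 6 for the DPDK lcore mask, 0xe pins PMD threads to cores 1-3, and 512 MB of hugepage memory is reserved per NUMA socket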
     salt-call reclass.cluster_meta_set name='compute_hugepages_size' value='"2M"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
     salt-call reclass.cluster_meta_set name='compute_hugepages_mount' value='"/mnt/hugepages_2M"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-    # set virtual disks for compute
-    sed -i 's/cinder_lvm_devices: \[ "\/dev\/sdb" \]/cinder_lvm_devices: \[ "\/dev\/vdb" \]/g' /srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/lvm_backend/init.yml
 
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
 
+- description: "Temporary workaround: remove cinder-volume from CTL nodes"
+  cmd: |
+    sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+    sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     cat << 'EOF' >> /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/dpdk.yml
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dvr-sl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dvr-sl.yaml
index 2b2a2e3..55d6a8b 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dvr-sl.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-dvr-sl.yaml
@@ -21,10 +21,10 @@
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     set -e;
-    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
     # salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
     # salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
@@ -32,25 +32,24 @@
     # salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
 
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
 
-- description: Temporary workaround for removing cinder-volume from CTL nodes
+- description: "Temporary workaround: remove cinder-volume from CTL nodes"
   cmd: |
     sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
     sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: true
-  
 
 {{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-ovs-sl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-ovs-sl.yaml
index d189cce..c9961c2 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-ovs-sl.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-pike-ovs-sl.yaml
@@ -21,26 +21,34 @@
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     set -e;
-    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
     # Bind9 services are placed on the first two ctl nodes
     # salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='${_param:openstack_control_node01_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
     # salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='${_param:openstack_control_node02_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
 
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
 
+- description: "Temporary workaround: remove cinder-volume from CTL nodes"
+  cmd: |
+    sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+    sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
 {{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr.yaml
index 8a1031a..094cf0b 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-dvr.yaml
@@ -57,11 +57,6 @@
     reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # Add cinder volume on cmp nodes. PROD-20945
-    reclass-tools add-key 'classes' 'system.cinder.volume.single' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.cinder.volume.backend.lvm' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.linux.storage.loopback' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key parameters._param.loopback_device_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-ovs.yaml
index 91eafe9..d0f96f8 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-ovs.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-mitaka-ovs.yaml
@@ -55,11 +55,6 @@
     reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # Add cinder volume on cmp nodes. PROD-20945
-    reclass-tools add-key 'classes' 'system.cinder.volume.single' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.cinder.volume.backend.lvm' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.linux.storage.loopback' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key parameters._param.loopback_device_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-dvr.yaml
index 3a0f213..439071c 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-dvr.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-dvr.yaml
@@ -47,11 +47,6 @@
     reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # Add cinder volume on cmp nodes. PROD-20945
-    reclass-tools add-key 'classes' 'system.cinder.volume.single' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.cinder.volume.backend.lvm' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.linux.storage.loopback' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key parameters._param.loopback_device_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-ovs.yaml
index a9da336..92b9782 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-ovs.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-newton-ovs.yaml
@@ -46,11 +46,6 @@
     reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # Add cinder volume on cmp nodes. PROD-20945
-    reclass-tools add-key 'classes' 'system.cinder.volume.single' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.cinder.volume.backend.lvm' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.linux.storage.loopback' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key parameters._param.loopback_device_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml
index b92f638..ef98b6e 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml
@@ -47,11 +47,6 @@
     reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # Add cinder volume on cmp nodes. PROD-20945
-    reclass-tools add-key 'classes' 'system.cinder.volume.single' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.cinder.volume.backend.lvm' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.linux.storage.loopback' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key parameters._param.loopback_device_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml
index 8a984f8..f21b46a 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml
@@ -46,11 +46,6 @@
     reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
     reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # Add cinder volume on cmp nodes. PROD-20945
-    reclass-tools add-key 'classes' 'system.cinder.volume.single' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.cinder.volume.backend.lvm' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.linux.storage.loopback' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key parameters._param.loopback_device_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dpdk.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dpdk.yaml
index 4bae834..b59248a 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dpdk.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dpdk.yaml
@@ -21,27 +21,35 @@
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     set -e;
-    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
     # set wider cpu mask for DPDK
-    salt-call reclass.cluster_meta_set name='compute_ovs_dpdk_lcore_mask' value='"0xF"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+    salt-call reclass.cluster_meta_set name='compute_ovs_dpdk_lcore_mask' value='"0x41"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+    salt-call reclass.cluster_meta_set name='compute_ovs_pmd_cpu_mask' value='"0xe"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+    salt-call reclass.cluster_meta_set name='compute_ovs_dpdk_socket_mem' value='"512,512"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
     salt-call reclass.cluster_meta_set name='compute_hugepages_size' value='"2M"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
     salt-call reclass.cluster_meta_set name='compute_hugepages_mount' value='"/mnt/hugepages_2M"' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-    # set virtual disks for compute
-    sed -i 's/cinder_lvm_devices: \[ "\/dev\/sdb" \]/cinder_lvm_devices: \[ "\/dev\/vdb" \]/g' /srv/salt/reclass/classes/environment/{{ SHARED.CLUSTER_NAME }}/features/lvm_backend/init.yml
 
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
 
+- description: "Temporary workaround: remove cinder-volume from CTL nodes"
+  cmd: |
+    sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+    sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     cat << 'EOF' >> /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/dpdk.yml
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ceph.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ceph.yaml
index a726b96..d4377b7 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ceph.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr-ceph.yaml
@@ -20,21 +20,21 @@
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     set -e;
-    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
     sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
 
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml
index 31beafa..2f19cd5 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml
@@ -21,38 +21,24 @@
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     set -e;
-    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
     sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
 
-    # salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-    # salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-    # Workaround of missing reclass.system for dns role
-    # salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # Add cinder volume on cmp nodes. PROD-20945
-    reclass-tools add-key 'classes' 'system.cinder.volume.single' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.cinder.volume.backend.lvm' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.linux.storage.loopback' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key parameters._param.loopback_device_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
-    # Add for manila
-    reclass-tools add-key parameters._param.loopback_device1_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
-    reclass-tools add-key 'classes' 'system.linux.storage.loopback_manila' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml
index afbbab8..ed3a6c9 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml
@@ -21,37 +21,24 @@
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     set -e;
-    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
     sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
-    # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-
-    # Bind9 services are placed on the first two ctl nodes
-    # salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='${_param:openstack_control_node01_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
-    # salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='${_param:openstack_control_node02_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+    # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # Add cinder volume on cmp nodes. PROD-20945
-    reclass-tools add-key 'classes' 'system.cinder.volume.single' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.cinder.volume.backend.lvm' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.linux.storage.loopback' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key parameters._param.loopback_device_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
-
-    # Add for manila
-    reclass-tools add-key parameters._param.loopback_device1_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
-    reclass-tools add-key 'classes' 'system.linux.storage.loopback_manila' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ceph.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ceph.yaml
index b7f1c59..657e7c2 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ceph.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ceph.yaml
@@ -20,21 +20,21 @@
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     set -e;
-    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
     sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
 
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ssl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ssl.yaml
index 7457eeb..5ed474e 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ssl.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr-ssl.yaml
@@ -21,29 +21,20 @@
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     set -e;
-    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
     sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # Add cinder volume on cmp nodes. PROD-20945
-    reclass-tools add-key 'classes' 'system.cinder.volume.single' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.cinder.volume.backend.lvm' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.linux.storage.loopback' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key parameters._param.loopback_device_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
-    # Add for manila
-    reclass-tools add-key parameters._param.loopback_device1_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
-    reclass-tools add-key 'classes' 'system.linux.storage.loopback_manila' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr.yaml
index 4732a9a..1e50429 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr.yaml
@@ -21,29 +21,20 @@
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     set -e;
-    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
     sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # Add cinder volume on cmp nodes. PROD-20945
-    reclass-tools add-key 'classes' 'system.cinder.volume.single' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.cinder.volume.backend.lvm' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.linux.storage.loopback' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key parameters._param.loopback_device_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
-    # Add for manila
-    reclass-tools add-key parameters._param.loopback_device1_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
-    reclass-tools add-key 'classes' 'system.linux.storage.loopback_manila' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-ovs.yaml
index 6434349..7e2d2de 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-ovs.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-ovs.yaml
@@ -21,29 +21,20 @@
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     set -e;
-    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
     sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    # Add cinder volume on cmp nodes. PROD-20945
-    reclass-tools add-key 'classes' 'system.cinder.volume.single' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.cinder.volume.backend.lvm' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key 'classes' 'system.linux.storage.loopback' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-    reclass-tools add-key parameters._param.loopback_device_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
-    # Add for manila
-    reclass-tools add-key parameters._param.loopback_device1_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
-    reclass-tools add-key 'classes' 'system.linux.storage.loopback_manila' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
-
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
diff --git a/tcp_tests/templates/cookied-model-generator/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-model-generator/underlay--user-data-cfg01.yaml
index 7bd7a02..695e537 100644
--- a/tcp_tests/templates/cookied-model-generator/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-model-generator/underlay--user-data-cfg01.yaml
@@ -18,10 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   #- cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
-   - cloud-init-per once sudo ifdown ens4
-
    # Enable root access
    - cloud-init-per once sudo sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - cloud-init-per once sudo service sshd restart
@@ -32,26 +28,21 @@
   runcmd:
    # Prepare network connection
    - sudo ifdown ens3
+   - sudo ifdown ens4
+   - sudo ip r d default || true  # remove existing default route to get it from dhcp
    - sudo ifup ens3
+   - sudo ifup ens4
    #- sudo route add default gw {gateway} {interface_name}
 
-   # Purge the unattended-upgrades package (Workaround for PROD-17904, PROD-18736)"
-   - echo "APT::Periodic::Update-Package-Lists 0;" > /etc/apt/apt.conf.d/99dont_update_package_list-salt
-   - echo "APT::Periodic::Download-Upgradeable-Packages 0;" > /etc/apt/apt.conf.d/99dont_update_download_upg_packages-salt
-   - echo "APT::Periodic::Unattended-Upgrade 0;" > /etc/apt/apt.conf.d/99disable_unattended_upgrade-salt
-   - apt-get -y purge unattended-upgrades
-   # Stop currently running apt-daily service, source: https://unix.stackexchange.com/a/315517
-   - systemctl stop apt-daily.service
-   - systemctl kill --kill-who=all apt-daily.service
-   - while ! (systemctl list-units --all apt-daily.service | fgrep -q dead); do sleep 1; done
-
-   # Configure dhclient
-   - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
-   - sudo resolvconf -u
-
    # Enable grub menu using updated config below
    - update-grub
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
    # Create swap
    #- fallocate -l 16G /swapfile
    #- chmod 600 /swapfile
@@ -62,26 +53,10 @@
    ############## TCP Cloud cfg01 node ##################
    - echo "Preparing base OS"
 
-   - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
-
-   - apt-get clean
-   - apt-get update
+   - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
 
    # Ensure that the salt-master service is ready to receive requests
    - salt-key -y -D
-   - service salt-master restart
-   - service salt-minion restart
-   - apt-get install -y salt-formula-*
-   - for f in $(ls -1 /usr/share/salt-formulas/reclass/service); do ln -s /usr/share/salt-formulas/reclass/service/$f /srv/salt/reclass/classes/service/ || true; done
-   - salt-call --timeout=180 test.ping
-
-   ########################################################
-   # Node is ready, allow SSH access
-   #- echo "Allow SSH access ..."
-   #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   - sudo ifup ens4
-   ########################################################
-
 
   write_files:
    - path: /etc/default/grub.d/97-enable-grub-menu.cfg
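Note: the cfg01 user-data above (and the same block added to the other underlay--user-data-cfg01.yaml templates further down) drops the old "block SSH while the node prepares" workaround in favour of a plain systemd bootstrap: enable and start salt-master and salt-minion, then confirm the local minion answers with "salt-call -l info --timeout=120 test.ping". Below is a minimal sketch of that readiness check as a standalone script; this is my own illustration, not part of the change, and it assumes salt-call is available on PATH.

    import subprocess

    # Same check as the last runcmd step above: ask the local salt-minion
    # to answer test.ping, giving it up to 120 seconds.
    result = subprocess.run(
        ["salt-call", "-l", "info", "--timeout=120", "test.ping"],
        capture_output=True,
        text=True,
    )
    if result.returncode != 0:
        raise SystemExit("salt-minion did not respond:\n" + result.stderr)
    print(result.stdout)
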
diff --git a/tcp_tests/templates/k8s-ha-calico/underlay--user-data-cfg01.yaml b/tcp_tests/templates/k8s-ha-calico/underlay--user-data-cfg01.yaml
index 7f8b8ec..dc9f8cd 100644
--- a/tcp_tests/templates/k8s-ha-calico/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/k8s-ha-calico/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/k8s-ha-calico/underlay.yaml b/tcp_tests/templates/k8s-ha-calico/underlay.yaml
index eef7bb8..ac11a62 100644
--- a/tcp_tests/templates/k8s-ha-calico/underlay.yaml
+++ b/tcp_tests/templates/k8s-ha-calico/underlay.yaml
@@ -145,9 +145,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -186,9 +183,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -215,9 +209,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
diff --git a/tcp_tests/templates/k8s-ha-contrail/underlay--user-data-cfg01.yaml b/tcp_tests/templates/k8s-ha-contrail/underlay--user-data-cfg01.yaml
index 48577ab..6076ffa 100644
--- a/tcp_tests/templates/k8s-ha-contrail/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/k8s-ha-contrail/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -65,11 +63,12 @@
    # Install common packages
    - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
 
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
 
   write_files:
    - path: /etc/network/interfaces
diff --git a/tcp_tests/templates/k8s-ha-contrail/underlay.yaml b/tcp_tests/templates/k8s-ha-contrail/underlay.yaml
index 2551d12..b87f888 100644
--- a/tcp_tests/templates/k8s-ha-contrail/underlay.yaml
+++ b/tcp_tests/templates/k8s-ha-contrail/underlay.yaml
@@ -200,9 +200,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 20
                   backing_store: cloudimage1604
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -247,9 +244,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 20
                   backing_store: cloudimage1604
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -276,9 +270,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 20
                   backing_store: cloudimage1604
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
diff --git a/tcp_tests/templates/runtest.yml b/tcp_tests/templates/runtest.yml
index cceaf8b..263dbb0 100644
--- a/tcp_tests/templates/runtest.yml
+++ b/tcp_tests/templates/runtest.yml
@@ -1,10 +1,11 @@
 classes:
 - service.runtest.tempest
+- service.runtest.tempest.services.manila.glance
 parameters:
   _param:
-    runtest_tempest_cfg_dir: /root/test/
+    runtest_tempest_cfg_dir: /tmp/test/
     runtest_tempest_cfg_name: tempest.conf
-    runtest_tempest_public_net: net04_ext
+    runtest_tempest_public_net: public
     tempest_test_target: gtw01*
   neutron:
     client:
@@ -19,25 +20,10 @@
       convert_to_uuid:
         network:
           public_network_id: ${_param:runtest_tempest_public_net}
-      network:
-          floating_network_name: ${_param:runtest_tempest_public_net}
       DEFAULT:
         log_file: tempest.log
-      heat_plugin:
-        floating_network_name: ${_param:runtest_tempest_public_net}
       compute:
-        build_timeout: 600
-        min_microversion: 2.1
-        max_microversion: 2.53
         min_compute_nodes: 2
-        volume_device_name: 'vdc'
-      dns_feature_enabled:
-        api_admin: false
-        api_v1: false
-        api_v2: true
-        api_v2_quotas: true
-        api_v2_root_recordsets: true
-        bug_1573141_fixed: true
       share:
         capability_snapshot_support: True
         run_driver_assisted_migration_tests: False
diff --git a/tcp_tests/templates/shared-openstack.yaml b/tcp_tests/templates/shared-openstack.yaml
index 0461358..ededd1d 100644
--- a/tcp_tests/templates/shared-openstack.yaml
+++ b/tcp_tests/templates/shared-openstack.yaml
@@ -290,13 +290,6 @@
   retry: {count: 1, delay: 5}
   skip_fail: false
 
-- description: Install manila-api on other nodes
-  cmd: |
-    salt -C 'I@manila:api and not *01*' state.sls manila.api;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
 - description: Install manila-scheduler
   cmd: |
     salt -C 'I@manila:scheduler' state.sls manila.scheduler;
@@ -337,7 +330,51 @@
 {%- endmacro %}
 
 {%- macro MACRO_INSTALL_OCTAVIA_API() %}
-# TO DO
+- description: Install octavia api service on primary node
+  cmd: salt -C 'I@octavia:api:role:primary' state.sls octavia.api
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install octavia api service
+  cmd: salt -C 'I@octavia:api' state.sls octavia.api
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_OCTAVIA_MANAGER() %}
+- description: Update mine
+  cmd: salt -C 'I@neutron:client' mine.update && sleep 60
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Install octavia manager
+  cmd: salt -C 'I@octavia:manager' state.sls octavia.manager
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Execute octavia ca
+  cmd: salt -C 'I@octavia:manager' state.sls salt.minion.ca
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Execute octavia cert
+  cmd: salt -C 'I@octavia:manager' state.sls salt.minion.cert
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+- description: Execute octavia client
+  cmd: salt -C 'I@octavia:client' state.sls octavia.client
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
 {%- endmacro %}
 
 {%- macro MACRO_INSTALL_DOGTAG() %}
@@ -450,4 +487,4 @@
     salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
     salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
   node_name: {{ HOSTNAME_CFG01 }}
-{%- endmacro %}
\ No newline at end of file
+{%- endmacro %}
diff --git a/tcp_tests/templates/shared-salt.yaml b/tcp_tests/templates/shared-salt.yaml
index 1f4ced5..ab5feb5 100644
--- a/tcp_tests/templates/shared-salt.yaml
+++ b/tcp_tests/templates/shared-salt.yaml
@@ -9,7 +9,9 @@
 {% set SALT_MODELS_SYSTEM_COMMIT = os_env('SALT_MODELS_SYSTEM_COMMIT','') %}
 {% set SALT_MODELS_SYSTEM_REF_CHANGE = os_env('SALT_MODELS_SYSTEM_REF_CHANGE','') %}
 {% set SALT_MODELS_SYSTEM_TAG = os_env('SALT_MODELS_SYSTEM_TAG','') %}
-{% set COOKIECUTTER_TEMPLATES_REPOSITORY = os_env('COOKIECUTTER_TEMPLATES_REPOSITORY','https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates') %}
+{% set COOKIECUTTER_TEMPLATES_REPOSITORY_USER = os_env('COOKIECUTTER_TEMPLATES_REPOSITORY_USER','mcp-gerrit') %}
+{% set COOKIECUTTER_TEMPLATES_REPOSITORY_KEY_PATH = os_env('COOKIECUTTER_TEMPLATES_REPOSITORY_KEY_PATH','') %}
+{% set COOKIECUTTER_TEMPLATES_REPOSITORY = os_env('COOKIECUTTER_TEMPLATES_REPOSITORY','ssh://' + COOKIECUTTER_TEMPLATES_REPOSITORY_USER +'@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates') %}
 {% set COOKIECUTTER_REF_CHANGE = os_env('COOKIECUTTER_REF_CHANGE','') %}
 {% set COOKIECUTTER_TAG = os_env('COOKIECUTTER_TAG','') %}
 {% set COOKIECUTTER_TEMPLATE_COMMIT = os_env('COOKIECUTTER_TEMPLATE_COMMIT','') %}
@@ -365,12 +367,23 @@
   retry: {count: 1, delay: 1}
   skip_fail: false
 
+- description: "Upload {{ COOKIECUTTER_TEMPLATES_REPOSITORY_USER }} key"
+  upload:
+    local_path: {{ COOKIECUTTER_TEMPLATES_REPOSITORY_KEY_PATH | dirname }}/
+    local_filename: {{ COOKIECUTTER_TEMPLATES_REPOSITORY_KEY_PATH | basename }}
+    remote_path: /tmp/
+  node_name: {{ HOSTNAME_CFG01 }}
+
 - description: Create cluster model from cookiecutter templates
   cmd: |
     set -e;
     set -x;
     sudo apt-get install python-setuptools -y
     pip install cookiecutter
+
+    chmod 0600 /tmp/{{ COOKIECUTTER_TEMPLATES_REPOSITORY_KEY_PATH | basename }}
+    eval $(ssh-agent)
+    ssh-add /tmp/{{ COOKIECUTTER_TEMPLATES_REPOSITORY_KEY_PATH | basename }}
     export GIT_SSL_NO_VERIFY=true; git clone {{ COOKIECUTTER_TEMPLATES_REPOSITORY }} /root/cookiecutter-templates
 
     {%- if COOKIECUTTER_REF_CHANGE != '' %}
@@ -761,6 +774,12 @@
 
 {# Prepare salt services and nodes settings #}
 
+- description: '*Workaround* of hardcoded host from day01 grains'
+  cmd: salt-key -d cfg01.mcp-day01.local  -y
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: true
+
 - description: Run 'linux' formula on cfg01
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' state.sls linux;
   node_name: {{ HOSTNAME_CFG01 }}
@@ -777,12 +796,6 @@
   retry: {count: 3, delay: 5}
   skip_fail: false
 
-- description: '*Workaround* of hardcoded host from day01 grains'
-  cmd: salt-key -d cfg01.mcp-day01.local  -y
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 1}
-  skip_fail: true
-
 - description: '*Workaround* of the bug https://mirantis.jira.com/browse/PROD-7962'
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
     '*' cmd.run "echo '    StrictHostKeyChecking no' >> /root/.ssh/config"
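Note: the shared-salt.yaml hunks above switch the cookiecutter-templates clone from anonymous HTTPS to Gerrit SSH: the key referenced by COOKIECUTTER_TEMPLATES_REPOSITORY_KEY_PATH is uploaded to /tmp on cfg01, loaded into an ssh-agent, and the repository URL is composed from the new COOKIECUTTER_TEMPLATES_REPOSITORY_USER variable unless COOKIECUTTER_TEMPLATES_REPOSITORY is set explicitly. A minimal sketch of that fallback logic, using the same environment variable names (Python is used only for illustration; the template itself does this with Jinja os_env defaults):

    import os

    # Mirror of the os_env() defaults introduced above.
    user = os.environ.get("COOKIECUTTER_TEMPLATES_REPOSITORY_USER", "mcp-gerrit")
    repository = os.environ.get(
        "COOKIECUTTER_TEMPLATES_REPOSITORY",
        "ssh://" + user + "@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates",
    )
    key_path = os.environ.get("COOKIECUTTER_TEMPLATES_REPOSITORY_KEY_PATH", "")
    print(repository, key_path)
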
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-cfg01.yaml
index 79adad5..800a0b1 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -48,15 +46,12 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
-   ############## TCP Cloud cfg01 node ##################
-   - echo "Preparing base OS"
-   - sleep 160;
-   # find /etc/apt/ -type f -exec sed -i "s/ubuntu.com/ubuntu.local.test/g" {} +;
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
 
   write_files:
    - path: /etc/network/interfaces
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
index 84c6c06..b860da9 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
@@ -54,7 +54,6 @@
   openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
   openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
   openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
   openstack_control_address: 172.16.10.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 172.16.10.101
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-environment.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-environment.yaml
index 803068e..6afe16e 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-environment.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-environment.yaml
@@ -20,6 +20,7 @@
       - features_designate_pool_manager_database
       - features_designate_pool_manager
       - features_designate_pool_manager_keystone
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -36,6 +37,7 @@
       - openstack_message_queue
       - features_designate_pool_manager_database
       - features_designate_pool_manager
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -52,6 +54,7 @@
       - openstack_message_queue
       - features_designate_pool_manager_database
       - features_designate_pool_manager
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -115,6 +118,7 @@
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
+      - features_lvm_backend_volume_vdb
       - linux_system_codename_xenial
       interfaces:
         ens3:
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml
index e7e32a6..97868d6 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml
@@ -24,7 +24,7 @@
 
 {%- if OVERRIDES != '' %}
 {%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':') %}
+{%- set key, value = param.replace(' ','').split(':', 1) %}
 - description: Override cluster parameters
   cmd: |
     salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
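Note: the only functional change in this salt.yaml (and in the identical hunks of the other salt.yaml files below) is split(':', 1): each OVERRIDES line is now split on the first colon only, so values that themselves contain colons (URLs, host:port pairs) are no longer mangled. A minimal sketch of the difference, using a made-up override line (Python shown because Jinja's split has the same semantics as str.split):

    # Hypothetical override whose value contains extra colons.
    param = "jenkins_master_url: http://172.16.10.254:8081".replace(" ", "")

    # New behaviour: split at the first colon only.
    key, value = param.split(":", 1)
    print(key, value)  # jenkins_master_url http://172.16.10.254:8081

    # Old behaviour: an unbounded split returns four fields here, so the
    # two-name unpacking in the template would fail.
    try:
        key, value = param.split(":")
    except ValueError as exc:
        print("old behaviour fails:", exc)
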
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
index a73ca23..d75dab1 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
@@ -47,6 +47,13 @@
    # Enable grub menu using updated config below
    - update-grub
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/default/grub.d/97-enable-grub-menu.cfg
      content: |
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml
index de5427a..6f69a74 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml
@@ -232,9 +232,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -273,9 +270,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -302,9 +296,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -409,9 +400,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -439,6 +427,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -490,6 +481,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
index 3e27c69..5dffbd3 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
@@ -54,7 +54,6 @@
   openstack_compute_single_address_ranges: 172.16.10.105-172.16.10.106
   openstack_compute_deploy_address_ranges: 192.168.10.105-192.168.10.106
   openstack_compute_tenant_address_ranges: 10.1.0.105-10.1.0.106
-  openstack_compute_backend_address_ranges: 10.1.0.105-10.1.0.106
   openstack_control_address: 172.16.10.100
   openstack_control_hostname: ctl
   openstack_control_node01_address: 172.16.10.101
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml
index 3e05cf0..7baf03e 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml
@@ -21,6 +21,7 @@
       - features_designate_bind9_dns
       - features_designate_bind9
       - features_designate_bind9_keystone
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -38,6 +39,7 @@
       - features_designate_bind9_database
       - features_designate_bind9_dns
       - features_designate_bind9
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -54,6 +56,7 @@
       - openstack_message_queue
       - features_designate_bind9_database
       - features_designate_bind9
+      - features_lvm_backend_control
       - linux_system_codename_xenial
       interfaces:
         ens3:
@@ -117,6 +120,7 @@
       reclass_storage_name: openstack_compute_rack01
       roles:
       - openstack_compute
+      - features_lvm_backend_volume_vdb
       - linux_system_codename_xenial
       interfaces:
         ens3:
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml
index 3a3ed3a..c2bd3d7 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml
@@ -24,7 +24,7 @@
 
 {%- if OVERRIDES != '' %}
 {%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':') %}
+{%- set key, value = param.replace(' ','').split(':', 1) %}
 - description: Override cluster parameters
   cmd: |
     salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml
index 382dba4..c64bdc8 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml
@@ -222,9 +222,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -263,9 +260,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -292,9 +286,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -399,9 +390,6 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: mcp_ubuntu_1604_image
                   format: qcow2
-                - name: cinder
-                  capacity: 50
-                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -428,6 +416,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
@@ -479,6 +470,9 @@
                   capacity: !os_env NODE_VOLUME_SIZE, 150
                   backing_store: cloudimage1604
                   format: qcow2
+                - name: cinder
+                  capacity: 50
+                  format: qcow2
                 - name: iso  # Volume with name 'iso' will be used
                              # for store image with cloud-init metadata.
                   capacity: 1
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--user-data-cfg01.yaml
index be74a88..63fb199 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-maas/underlay--user-data-cfg01.yaml
@@ -46,6 +46,13 @@
 
    - echo "nameserver 172.18.176.6" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-l2gw-bgpvpn/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml b/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml
index 2bf4bb3..a1b2c92 100644
--- a/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml
@@ -21,7 +21,7 @@
 
 {%- if OVERRIDES != '' %}
 {%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':') %}
+{%- set key, value = param.replace(' ','').split(':', 1) %}
 - description: Override cluster parameters
   cmd: |
     salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-sl-os/underlay--user-data-cfg01.yaml
index a8afd05..1018c28 100644
--- a/tcp_tests/templates/virtual-mcp-sl-os/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-sl-os/underlay--user-data-cfg01.yaml
@@ -47,6 +47,13 @@
    # Enable grub menu using updated config below
    - update-grub
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/default/grub.d/97-enable-grub-menu.cfg
      content: |
diff --git a/tcp_tests/templates/virtual-mcp-trusty/salt.yaml b/tcp_tests/templates/virtual-mcp-trusty/salt.yaml
index 7213162..ab04cfb 100644
--- a/tcp_tests/templates/virtual-mcp-trusty/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-trusty/salt.yaml
@@ -22,7 +22,7 @@
 
 {%- if OVERRIDES != '' %}
 {%- for param in OVERRIDES.splitlines() %}
-{%- set key, value = param.replace(' ','').split(':') %}
+{%- set key, value = param.replace(' ','').split(':', 1) %}
 - description: Override cluster parameters
   cmd: |
     salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name='{{OVERRIDES_FILENAME}}'
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--user-data-cfg01.yaml
index 79adad5..800a0b1 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -48,15 +46,12 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
-   ############## TCP Cloud cfg01 node ##################
-   - echo "Preparing base OS"
-   - sleep 160;
-   # find /etc/apt/ -type f -exec sed -i "s/ubuntu.com/ubuntu.local.test/g" {} +;
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
 
   write_files:
    - path: /etc/network/interfaces
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-cfg01.yaml
index 79adad5..800a0b1 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -48,15 +46,12 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
-   ############## TCP Cloud cfg01 node ##################
-   - echo "Preparing base OS"
-   - sleep 160;
-   # find /etc/apt/ -type f -exec sed -i "s/ubuntu.com/ubuntu.local.test/g" {} +;
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
 
   write_files:
    - path: /etc/network/interfaces
diff --git a/tcp_tests/templates/virtual-offline-ssl/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-offline-ssl/underlay--user-data-cfg01.yaml
index 79adad5..800a0b1 100644
--- a/tcp_tests/templates/virtual-offline-ssl/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-offline-ssl/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
    expire: False
 
   bootcmd:
-   # Block access to SSH while node is preparing
-   - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    # Enable root access
    - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
    - service sshd restart
@@ -48,15 +46,12 @@
    - swapon /swapfile
    - echo "/swapfile   none    swap    defaults    0   0" >> /etc/fstab
 
-   ############## TCP Cloud cfg01 node ##################
-   - echo "Preparing base OS"
-   - sleep 160;
-   # find /etc/apt/ -type f -exec sed -i "s/ubuntu.com/ubuntu.local.test/g" {} +;
-   ########################################################
-   # Node is ready, allow SSH access
-   - echo "Allow SSH access ..."
-   - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
-   ########################################################
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
 
   write_files:
    - path: /etc/network/interfaces
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/_salt_generate_cookied-pike-ovs-dpdk.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/_salt_generate_cookied-pike-ovs-dpdk.yaml
index 050c4c4..5da2666 100644
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/_salt_generate_cookied-pike-ovs-dpdk.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/_salt_generate_cookied-pike-ovs-dpdk.yaml
@@ -17,18 +17,18 @@
 - description: "Workaround for combined roles: remove unnecessary classes"
   cmd: |
     set -e;
-    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
     # Start compute node addresses from .105 , as in static models
-    sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
     . /root/venv-reclass-tools/bin/activate;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
+    reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config/init.yml;
 
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data-cfg01.yaml
index da7908d..48562ad 100644
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data-cfg01.yaml
@@ -44,6 +44,13 @@
 
    - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
 
+   - mkdir -p /srv/salt/reclass/nodes
+   - systemctl enable salt-master
+   - systemctl enable salt-minion
+   - systemctl start salt-master
+   - systemctl start salt-minion
+   - salt-call -l info --timeout=120 test.ping
+
   write_files:
    - path: /etc/network/interfaces
      content: |
diff --git a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
index 56efe59..7e726cb 100644
--- a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
+++ b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
@@ -298,7 +298,7 @@
             tgt='*', fun='cmd.run',
             args='service ntp stop; ntpd -gq; service ntp start')
         if settings.RUN_TEMPEST:
-            tempest_actions.prepare_and_run_tempest(dpdk=True)
+            tempest_actions.prepare_and_run_tempest()
 
         LOG.info("*************** DONE **************")
 
diff --git a/tcp_tests/utils/get_logs.py b/tcp_tests/utils/get_logs.py
new file mode 100755
index 0000000..225f9d7
--- /dev/null
+++ b/tcp_tests/utils/get_logs.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+
+import argparse
+import os
+import sys
+import time
+
+sys.path.append(os.getcwd())
+try:
+    from tcp_tests.fixtures import config_fixtures
+    from tcp_tests.managers import underlay_ssh_manager
+except ImportError:
+    print("ImportError: Run the application from the tcp-qa directory or "
+          "set the PYTHONPATH environment variable to directory which contains"
+          " ./tcp_tests")
+    sys.exit(1)
+
+
+def load_params():
+    """
+    Build the CLI argument parser
+
+    Returns: ArgumentParser instance
+    """
+    parser = argparse.ArgumentParser(description=(
+        'Download logs and debug info from salt minions'
+    ))
+    default_name_prefix = 'logs_' + time.strftime("%Y%m%d_%H%M%S")
+    parser.add_argument('--archive-name-prefix',
+                        help='Custom prefix for the created archive name',
+                        default=default_name_prefix,
+                        type=str)
+    return parser
+
+
+def main():
+    parser = load_params()
+    opts = parser.parse_args()
+
+    tests_configs = os.environ.get('TESTS_CONFIGS', None)
+    if not tests_configs or not os.path.isfile(tests_configs):
+        print("Download logs and debug info from salt minions. "
+              "Please set TESTS_CONFIGS environment variable whith"
+              "the path to INI file with lab metadata.")
+        return 11
+
+    config = config_fixtures.config()
+    underlay = underlay_ssh_manager.UnderlaySSHManager(config)
+
+    underlay.get_logs(opts.archive_name_prefix)
+
+
+if __name__ == '__main__':
+    sys.exit(main())
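
# Editor's note: a hedged usage sketch for the new helper, assuming the test run
# has already produced the lab metadata INI; the INI path and prefix below are
# illustrative, not taken from the change itself:
#
#     import os
#     import subprocess
#
#     # Point the helper at the lab metadata produced by the test run (assumed path)
#     os.environ['TESTS_CONFIGS'] = '/path/to/lab_metadata.ini'
#     # Collect logs from all salt minions into an archive with a custom prefix
#     subprocess.check_call(['python', 'tcp_tests/utils/get_logs.py',
#                            '--archive-name-prefix', 'logs_after_failure'])
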
diff --git a/tcp_tests/utils/run_jenkins_job.py b/tcp_tests/utils/run_jenkins_job.py
index b01f366..acc2e9f 100755
--- a/tcp_tests/utils/run_jenkins_job.py
+++ b/tcp_tests/utils/run_jenkins_job.py
@@ -4,7 +4,6 @@
 import os
 import sys
 
-from devops import error
 import json
 
 sys.path.append(os.getcwd())
@@ -140,7 +139,7 @@
             interval=1,
             verbose=opts.verbose,
             job_output_prefix=opts.job_output_prefix)
-    except error.TimeoutError as e:
+    except Exception as e:
         print(str(e))
         raise