Merge "fix for cookied-mcp-queens-dvr-ssl"
diff --git a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
index 64c8783..570f47f 100644
--- a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
+++ b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
@@ -38,54 +38,54 @@
         error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
     }
     dir("${PARENT_WORKSPACE}") {
+        stage("Cleanup: erase ${ENV_NAME} and remove config drive") {
+            println "Remove environment ${ENV_NAME}"
+            shared.run_cmd("""\
+                dos.py erase ${ENV_NAME} || true
+            """)
+            println "Remove config drive ISO"
+            shared.run_cmd("""\
+                rm /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} || true
+            """)
+        }
+
+        if (env.TCP_QA_REFS) {
+            stage("Update working dir to patch ${TCP_QA_REFS}") {
+                shared.update_working_dir()
+            }
+        }
+
+        stage("Create an environment ${ENV_NAME} in disabled state") {
+            // deploy_hardware.xml
+            shared.run_cmd("""\
+                export ENV_NAME=${ENV_NAME}
+                export LAB_CONFIG_NAME=${LAB_CONFIG_NAME}
+                export MANAGER=devops
+                export PYTHONIOENCODING=UTF-8
+                export REPOSITORY_SUITE=${MCP_VERSION}
+                export TEST_GROUP=test_create_environment
+                py.test -vvv -s -p no:django -p no:ipdb --junit-xml=deploy_hardware.xml -k \${TEST_GROUP}
+            """)
+        }
+
+        stage("Generate the model") {
+            shared.generate_cookied_model()
+        }
+
+        stage("Generate config drive ISO") {
+            shared.generate_configdrive_iso()
+        }
+
+        stage("Upload generated config drive ISO into volume on cfg01 node") {
+            shared.run_cmd("""\
+                # Get SALT_MASTER_HOSTNAME to determine the volume name
+                . ./tcp_tests/utils/env_salt
+                virsh vol-upload ${ENV_NAME}_\${SALT_MASTER_HOSTNAME}_config /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} --pool default
+                virsh pool-refresh --pool default
+            """)
+        }
+
         try {
-            stage("Cleanup: erase ${ENV_NAME} and remove config drive") {
-                println "Remove environment ${ENV_NAME}"
-                shared.run_cmd("""\
-                    dos.py erase ${ENV_NAME} || true
-                """)
-                println "Remove config drive ISO"
-                shared.run_cmd("""\
-                    rm /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} || true
-                """)
-            }
-
-            if (env.TCP_QA_REFS) {
-                stage("Update working dir to patch ${TCP_QA_REFS}") {
-                    shared.update_working_dir()
-                }
-            }
-
-            stage("Create an environment ${ENV_NAME} in disabled state") {
-                // deploy_hardware.xml
-                shared.run_cmd("""\
-                    export ENV_NAME=${ENV_NAME}
-                    export LAB_CONFIG_NAME=${LAB_CONFIG_NAME}
-                    export MANAGER=devops
-                    export PYTHONIOENCODING=UTF-8
-                    export REPOSITORY_SUITE=${MCP_VERSION}
-                    export TEST_GROUP=test_create_environment
-                    py.test -vvv -s -p no:django -p no:ipdb --junit-xml=deploy_hardware.xml -k \${TEST_GROUP}
-                """)
-            }
-
-            stage("Generate the model") {
-                shared.generate_cookied_model()
-            }
-
-            stage("Generate config drive ISO") {
-                shared.generate_configdrive_iso()
-            }
-
-            stage("Upload generated config drive ISO into volume on cfg01 node") {
-                shared.run_cmd("""\
-                    # Get SALT_MASTER_HOSTNAME to determine the volume name
-                    . ./tcp_tests/utils/env_salt
-                    virsh vol-upload ${ENV_NAME}_\${SALT_MASTER_HOSTNAME}_config /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} --pool default
-                    virsh pool-refresh --pool default
-                """)
-            }
-
             stage("Run the 'underlay' and 'salt-deployed' fixtures to bootstrap salt cluster") {
                 // deploy_salt.xml
                 shared.run_cmd("""\
@@ -103,7 +103,8 @@
             }
 
           } catch (e) {
-              common.printMsg("Job is failed", "purple")
+              common.printMsg("Saltstack cluster deploy is failed", "purple")
+              shared.download_logs("deploy_salt")
               throw e
           } finally {
             // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
diff --git a/jobs/pipelines/swarm-deploy-cicd.groovy b/jobs/pipelines/swarm-deploy-cicd.groovy
index 5ace2ca..7d7fd63 100644
--- a/jobs/pipelines/swarm-deploy-cicd.groovy
+++ b/jobs/pipelines/swarm-deploy-cicd.groovy
@@ -29,29 +29,30 @@
         error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
     }
     dir("${PARENT_WORKSPACE}") {
-        try {
 
-            if (! env.STACK_INSTALL) {
-                error "'STACK_INSTALL' must contain one or more comma separated stack names for [deploy_openstack] pipeline"
+        if (! env.STACK_INSTALL) {
+            error "'STACK_INSTALL' must contain one or more comma separated stack names for [deploy_openstack] pipeline"
+        }
+
+        if (env.TCP_QA_REFS) {
+            stage("Update working dir to patch ${TCP_QA_REFS}") {
+                shared.update_working_dir()
+            }
+        }
+
+        // Install core and cicd
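+        // STACK_INSTALL is a comma-separated list of stack names, each with an
+        // optional per-stack timeout in seconds: e.g. "core,cicd" or
+        // "core:3600,cicd:1800" (1800 is the default below).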
+        def stack
+        def timeout
+
+        for (element in "${env.STACK_INSTALL}".split(",")) {
+            if (element.contains(':')) {
+                (stack, timeout) = element.split(':')
+            } else {
+                stack = element
+                timeout = '1800'
             }
 
-            if (env.TCP_QA_REFS) {
-                stage("Update working dir to patch ${TCP_QA_REFS}") {
-                    shared.update_working_dir()
-                }
-            }
-
-            // Install core and cicd
-            def stack
-            def timeout
-
-            for (element in "${env.STACK_INSTALL}".split(",")) {
-                if (element.contains(':')) {
-                    (stack, timeout) = element.split(':')
-                } else {
-                    stack = element
-                    timeout = '1800'
-                }
+            try {
                 stage("Run Jenkins job on salt-master [deploy_openstack:${stack}]") {
                     shared.run_job_on_day01_node(stack, timeout)
                 }
@@ -60,23 +61,25 @@
                     shared.sanity_check_component(stack)
                 }
 
-                stage("Make environment snapshot [${stack}_deployed]") {
-                    shared.devops_snapshot(stack)
+            } catch (e) {
+                common.printMsg("Job is failed", "purple")
+                shared.download_logs("deploy_${stack}")
+                throw e
+            } finally {
+                // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+                // and report appropriate data to TestRail
+                // TODO(ddmitriev): add checks for cicd cluster
+                if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+                    shared.run_cmd("""\
+                        dos.py destroy ${ENV_NAME}
+                    """)
                 }
             }
 
-        } catch (e) {
-            common.printMsg("Job is failed", "purple")
-            throw e
-        } finally {
-            // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
-            // and report appropriate data to TestRail
-            // TODO(ddmitriev): add checks for cicd cluster
-            if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
-                shared.run_cmd("""\
-                    dos.py destroy ${ENV_NAME}
-                """)
+            stage("Make environment snapshot [${stack}_deployed]") {
+                shared.devops_snapshot(stack)
             }
-        }
-    }
-}
+
+        } // for
+    } // dir
+} // node
diff --git a/jobs/pipelines/swarm-deploy-platform.groovy b/jobs/pipelines/swarm-deploy-platform.groovy
index 9a6b1d1..42ebc7e 100644
--- a/jobs/pipelines/swarm-deploy-platform.groovy
+++ b/jobs/pipelines/swarm-deploy-platform.groovy
@@ -29,29 +29,31 @@
         error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
     }
     dir("${PARENT_WORKSPACE}") {
-        try {
 
-            if (! env.STACK_INSTALL) {
-                error "'STACK_INSTALL' must contain one or more comma separated stack names for [deploy_openstack] pipeline"
+        if (! env.STACK_INSTALL) {
+            error "'STACK_INSTALL' must contain one or more comma separated stack names for [deploy_openstack] pipeline"
+        }
+
+        if (env.TCP_QA_REFS) {
+            stage("Update working dir to patch ${TCP_QA_REFS}") {
+                shared.update_working_dir()
+            }
+        }
+
+        // Install the cluster
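+        // STACK_INSTALL uses the same "stack[:timeout]" comma-separated format
+        // as swarm-deploy-cicd, with a default timeout of 1800 seconds.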
+        def stack
+        def timeout
+
+        for (element in "${STACK_INSTALL}".split(",")) {
+            if (element.contains(':')) {
+                (stack, timeout) = element.split(':')
+            } else {
+                stack = element
+                timeout = '1800'
             }
 
-            if (env.TCP_QA_REFS) {
-                stage("Update working dir to patch ${TCP_QA_REFS}") {
-                    shared.update_working_dir()
-                }
-            }
+            try {
 
-            // Install the cluster
-            def stack
-            def timeout
-
-            for (element in "${STACK_INSTALL}".split(",")) {
-                if (element.contains(':')) {
-                    (stack, timeout) = element.split(':')
-                } else {
-                    stack = element
-                    timeout = '1800'
-                }
                 stage("Run Jenkins job on CICD [deploy_openstack:${stack}]") {
                     shared.run_job_on_cicd_nodes(stack, timeout)
                 }
@@ -60,23 +62,25 @@
                     shared.sanity_check_component(stack)
                 }
 
-                stage("Make environment snapshot [${stack}_deployed]") {
-                    shared.devops_snapshot(stack)
+            } catch (e) {
+                common.printMsg("Job is failed", "purple")
+                shared.download_logs("deploy_${stack}")
+                throw e
+            } finally {
+                // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+                // and report appropriate data to TestRail
+                // TODO(ddmitriev): add checks for the installed stacks
+                if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+                    shared.run_cmd("""\
+                        dos.py destroy ${ENV_NAME}
+                    """)
                 }
             }
 
-        } catch (e) {
-            common.printMsg("Job is failed", "purple")
-            throw e
-        } finally {
-            // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
-            // and report appropriate data to TestRail
-            // TODO(ddmitriev): add checks for the installed stacks
-            if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
-                shared.run_cmd("""\
-                    dos.py destroy ${ENV_NAME}
-                """)
+            stage("Make environment snapshot [${stack}_deployed]") {
+                shared.devops_snapshot(stack)
             }
-        }
-    }
-}
+
+        } // for
+    } // dir
+} // node
diff --git a/jobs/pipelines/swarm-run-pytest.groovy b/jobs/pipelines/swarm-run-pytest.groovy
index 0dd2d7a..bc411f7 100644
--- a/jobs/pipelines/swarm-run-pytest.groovy
+++ b/jobs/pipelines/swarm-run-pytest.groovy
@@ -72,6 +72,7 @@
                     """)
 
                 def snapshot_name = "test_completed"
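+                // Download the node logs before suspending the env, so the
+                // "test_completed" archive reflects the finished run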
+                shared.download_logs("test_completed")
                 shared.run_cmd("""\
                     dos.py suspend ${ENV_NAME}
                     dos.py snapshot ${ENV_NAME} ${snapshot_name}
@@ -86,6 +87,9 @@
 
         } catch (e) {
             common.printMsg("Job is failed", "purple")
+            // Downloading logs is usually not needed here,
+            // because the tests should use the @pytest.mark.grab_versions decorator.
+            // shared.download_logs("test_failed")
             throw e
         } finally {
             // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
diff --git a/jobs/pipelines/swarm-testrail-report.groovy b/jobs/pipelines/swarm-testrail-report.groovy
index e1fadc7..42027f0 100644
--- a/jobs/pipelines/swarm-testrail-report.groovy
+++ b/jobs/pipelines/swarm-testrail-report.groovy
@@ -76,7 +76,7 @@
                     report_url = report_result.split("\n").each {
                         if (it.contains("[TestRun URL]")) {
                             common.printMsg("Found report URL: " + it.trim().split().last(), "blue")
-                            description += "<a href=" + it.trim().split().last() + ">${testSuiteName}</a>"
+                            description += "\n<a href=" + it.trim().split().last() + ">${testSuiteName}</a>"
                         }
                     }
                 }
@@ -98,7 +98,7 @@
                     report_url = report_result.split("\n").each {
                         if (it.contains("[TestRun URL]")) {
                             common.printMsg("Found report URL: " + it.trim().split().last(), "blue")
-                            description += "<a href=" + it.trim().split().last() + ">${testSuiteName}</a>"
+                            description += "\n<a href=" + it.trim().split().last() + ">${testSuiteName}</a>"
                         }
                     }
                 }
@@ -115,7 +115,7 @@
                     report_url = report_result.split("\n").each {
                         if (it.contains("[TestRun URL]")) {
                             common.printMsg("Found report URL: " + it.trim().split().last(), "blue")
-                            description += "<a href=" + it.trim().split().last() + ">${testSuiteName}</a>"
+                            description += "\n<a href=" + it.trim().split().last() + ">${testSuiteName}</a>"
                         }
                     }
                 }
@@ -144,7 +144,7 @@
                     report_url = report_result.split("\n").each {
                         if (it.contains("[TestRun URL]")) {
                             common.printMsg("Found report URL: " + it.trim().split().last(), "blue")
-                            description += "<a href=" + it.trim().split().last() + ">${testSuiteName}</a>"
+                            description += "\n<a href=" + it.trim().split().last() + ">${testSuiteName}</a>"
                         }
                     }
                 }
@@ -161,7 +161,7 @@
                     report_url = report_result.split("\n").each {
                         if (it.contains("[TestRun URL]")) {
                             common.printMsg("Found report URL: " + it.trim().split().last(), "blue")
-                            description += "<a href=" + it.trim().split().last() + ">${testSuiteName}</a>"
+                            description += "\n<a href=" + it.trim().split().last() + ">${testSuiteName}</a>"
                         }
                     }
                 }
diff --git a/src/com/mirantis/system_qa/SharedPipeline.groovy b/src/com/mirantis/system_qa/SharedPipeline.groovy
index 4a262d2..9b5621d 100644
--- a/src/com/mirantis/system_qa/SharedPipeline.groovy
+++ b/src/com/mirantis/system_qa/SharedPipeline.groovy
@@ -396,6 +396,17 @@
     }
 }
 
+def download_logs(archive_name_prefix) {
+    // Archive and download logs and debug info from the salt nodes in the lab.
+    // Do not fail on error, so that the original exception from the caller is not masked.
+    def common = new com.mirantis.mk.Common()
+    common.printMsg("Downloading node logs, archive prefix: ${archive_name_prefix}", "blue")
+    run_cmd("""\
+        export TESTS_CONFIGS=\$(pwd)/${ENV_NAME}_salt_deployed.ini
+        ./tcp_tests/utils/get_logs.py --archive-name-prefix ${archive_name_prefix} || true
+    """)
+}
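+
+// Usage sketch (an illustration, mirroring the swarm-* pipelines above):
+// call download_logs() from a catch block before re-throwing, so node logs
+// are archived without masking the original failure:
+//     } catch (e) {
+//         shared.download_logs("deploy_salt")
+//         throw e
+//     }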
+
 def devops_snapshot_info(snapshot_name) {
     // Print helper message after snapshot
     def common = new com.mirantis.mk.Common()
diff --git a/tcp_tests/managers/underlay_ssh_manager.py b/tcp_tests/managers/underlay_ssh_manager.py
index 5e7995c..0bfb463 100644
--- a/tcp_tests/managers/underlay_ssh_manager.py
+++ b/tcp_tests/managers/underlay_ssh_manager.py
@@ -398,12 +398,12 @@
             "docker ps > /root/\$(hostname -f)/dump_docker_ps.txt;"
             "docker service ls > "
             "  /root/\$(hostname -f)/dump_docker_services_ls.txt;"
-            "for SERVICE in \$(docker service ls | awk '{ print $2 }'); "
+            "for SERVICE in \$(docker service ls | awk '{ print \$2 }'); "
             "  do docker service ps --no-trunc 2>&1 \$SERVICE >> "
             "    /root/\$(hostname -f)/dump_docker_service_ps.txt;"
             "  done;"
-            "for SERVICE in \$(docker service ls | awk '{ print $2 }'); "
-            "  do docker service logs 2>&1 \$SERVICE > "
+            "for SERVICE in \$(docker service ls | awk '{ print \$2 }'); "
+            "  do timeout 30 docker service logs --no-trunc 2>&1 \$SERVICE > "
             "    /root/\$(hostname -f)/dump_docker_service_\${SERVICE}_logs;"
             "  done;"
             "vgdisplay > /root/\$(hostname -f)/dump_vgdisplay.txt;"
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/core.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/core.yaml
index 6dc4829..bf6c2da 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/core.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/core.yaml
@@ -1,124 +1,10 @@
 {% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
 
-# Install support services
-- description: Install keepalived on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster and *01*' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install keepalived
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@keepalived:cluster' state.sls keepalived
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: true
-
-- description: Install glusterfs
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.service
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Setup glusterfs on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 2, delay: 5}
-  skip_fail: false
-
-- description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ on ctl01
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install RabbitMQ
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' state.sls rabbitmq
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on first server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:master' state.sls galera
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install Galera on other servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:slave' state.sls galera -b 1
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-
-- description: Install haproxy
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' state.sls haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check haproxy status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.status haproxy
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install nginx on prx nodes
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@nginx:server' state.sls nginx
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Restart rsyslog
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@haproxy:proxy' service.restart rsyslog
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-    -C 'I@memcached:server' state.sls memcached
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: false
-
-- description: Check the VIP
-  cmd: |
-    OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
-    echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
-    salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 3, delay: 10}
-  skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
index 7148d00..1d8cbbf 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
@@ -2,20 +2,13 @@
 {% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_CTL01 with context %}
 {% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_GTW01 with context %}
 {% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_GTW02 with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
-{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-
 {% import 'shared-salt.yaml' as SHARED with context %}
 {% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
 
 # Install OpenStack control services
-
 {{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
-
 {{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-
 {{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-
 {{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=false) }}
 
 - description: Install cinder volume
@@ -26,10 +19,7 @@
   skip_fail: false
 
 {{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON(INSTALL_GATEWAY=true) }}
-
 {{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-
 {{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
-
-{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE() }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
 
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
index df9deee..7585c41 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
@@ -1,7 +1,7 @@
 default_context:
   mcp_version: proposed
   ceph_enabled: 'False'
-  cicd_enabled: 'True'
+  cicd_enabled: 'False'
   cicd_control_node01_address: 10.167.4.91
   cicd_control_node01_hostname: cid01
   cicd_control_node02_address: 10.167.4.92
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-lab03-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-lab03-environment.yaml
index def5353..692cf19 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-lab03-environment.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-lab03-environment.yaml
@@ -44,34 +44,29 @@
         enp9s0f1:
           role: bond0_ab_ovs_vlan_ctl
 
-    cmp01.cookied-bm-mcp-dvr-vxlan.local:
+    cmp001.cookied-bm-mcp-dvr-vxlan.local:
       reclass_storage_name: openstack_compute_node01
       roles:
       - openstack_compute
-      - features_lvm_backend
+      - features_lvm_backend_volume_sdb
       - linux_system_codename_xenial
       interfaces:
         enp9s0f0:
           role: single_mgm_dhcp
         enp9s0f1:
           role: bond0_ab_dvr_vxlan_ctl_mesh_floating
-          single_address: 10.167.4.105
-          tenant_address: 10.167.6.105
 
-
-    cmp02.cookied-bm-mcp-dvr-vxlan.local:
+    cmp002.cookied-bm-mcp-dvr-vxlan.local:
       reclass_storage_name: openstack_compute_node02
       roles:
       - openstack_compute
-      - features_lvm_backend
+      - features_lvm_backend_volume_sdb
       - linux_system_codename_xenial
       interfaces:
         enp9s0f0:
           role: single_mgm_dhcp
         enp9s0f1:
           role: bond0_ab_dvr_vxlan_ctl_mesh_floating
-          single_address: 10.167.4.106
-          tenant_address: 10.167.6.106
 
     gtw01.cookied-bm-mcp-dvr-vxlan.local:
       reclass_storage_name: openstack_gateway_node01
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-vcp-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-vcp-environment.yaml
index 37d0b14..6cace03 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-vcp-environment.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-vcp-environment.yaml
@@ -4,6 +4,7 @@
       roles:
       - openstack_control_leader
       - linux_system_codename_xenial
+      - features_lvm_backend_control
       interfaces:
         ens2:
           role: single_dhcp
@@ -15,6 +16,7 @@
       roles:
       - openstack_control
       - linux_system_codename_xenial
+      - features_lvm_backend_control
       interfaces:
         ens2:
           role: single_dhcp
@@ -26,6 +28,7 @@
       roles:
       - openstack_control
       - linux_system_codename_xenial
+      - features_lvm_backend_control
       interfaces:
         ens2:
           role: single_dhcp
@@ -119,57 +122,3 @@
           role: single_dhcp
         ens3:
           role: single_ctl
-
-    cid01.cookied-bm-mcp-dvr-vxlan.local:
-      reclass_storage_name: cicd_control_node01
-      roles:
-      - cicd_control_leader
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    cid02.cookied-bm-mcp-dvr-vxlan.local:
-      reclass_storage_name: cicd_control_node02
-      roles:
-      - cicd_control_manager
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-    cid03.cookied-bm-mcp-dvr-vxlan.local:
-      reclass_storage_name: cicd_control_node03
-      roles:
-      - cicd_control_manager
-      - linux_system_codename_xenial
-      interfaces:
-        ens3:
-          role: single_ctl
-
-#    mon01.cookied-bm-mcp-dvr-vxlan.local:
-#      reclass_storage_name: stacklight_server_node01
-#      roles:
-#      - stacklightv2_server_leader
-#      - linux_system_codename_xenial
-#      interfaces:
-#        ens3:
-#          role: single_ctl
-#
-#    mon02.cookied-bm-mcp-dvr-vxlan.local:
-#      reclass_storage_name: stacklight_server_node02
-#      roles:
-#      - stacklightv2_server
-#      - linux_system_codename_xenial
-#      interfaces:
-#        ens3:
-#         role: single_ctl
-#
-#    mon03.cookied-bm-mcp-dvr-vxlan.local:
-#      reclass_storage_name: stacklight_server_node03
-#      roles:
-#      - stacklightv2_server
-#      - linux_system_codename_xenial
-#      interfaces:
-#        ens3:
-#          role: single_ctl
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
index 81d9096..8804721 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
@@ -12,53 +12,19 @@
 {% import 'shared-salt.yaml' as SHARED with context %}
 
 {{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-
 {{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
-
 {{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-
-- description: "WR for changing VCP images path to internal storage"
-  cmd: |
-    set -e;
-    apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
-    [[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
-    . /root/venv-reclass-tools/bin/activate;
-    pip install git+https://github.com/dis-xcom/reclass-tools;
-    reclass-tools add-key parameters._param.salt_control_xenial_image 'https://apt.mcp.mirantis.net/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 10}
-  skip_fail: false
-
-
 {{ SHARED.MACRO_GENERATE_INVENTORY() }}
 {{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
 
-- description: Temporary workaround for removing cinder-volume from CTL nodes
-  cmd: |
-    sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
-    sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
-- description: Temporary workaround for removing virtual gtw nodes
-  cmd: |
-    sed -i 's/\-\ system\.salt\.control\.sizes\.ovs\.compact//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
-    sed -i 's/\-\ system\.salt\.control\.placement\.ovs\.compact//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
-  node_name: {{ HOSTNAME_CFG01 }}
-  retry: {count: 1, delay: 5}
-  skip_fail: true
-
 - description: Rerun openssh after env model is generated
   cmd: |
     salt-call state.sls openssh
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
   skip_fail: false
-  
 {{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
 
 ########################################
@@ -129,4 +95,3 @@
 {{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
 
 {{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
index a7308e9..8d2bf09 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
@@ -6,8 +6,8 @@
 {% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
 {% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
 {% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
 {% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) %}
 {% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.164.2') %}
@@ -15,8 +15,8 @@
 {% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.164.11') %}
 {% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.164.12') %}
 {% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.16.164.13') %}
-{% set ETH0_IP_ADDRESS_CMP01 = os_env('ETH0_IP_ADDRESS_CMP01', '172.16.164.3') %}
-{% set ETH0_IP_ADDRESS_CMP02 = os_env('ETH0_IP_ADDRESS_CMP02', '172.16.164.31') %}
+{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '172.16.164.3') %}
+{% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '172.16.164.31') %}
 {% set ETH0_IP_ADDRESS_GTW01 = os_env('ETH0_IP_ADDRESS_GTW01', '172.16.164.4') %}
 {% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '172.16.164.5') %}
 
@@ -48,8 +48,8 @@
             default_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
             default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
             default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
-            default_{{ HOSTNAME_CMP01 }}: {{ ETH0_IP_ADDRESS_CMP01 }}
-            default_{{ HOSTNAME_CMP02 }}: {{ ETH0_IP_ADDRESS_CMP02 }}
+            default_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
+            default_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
             default_{{ HOSTNAME_GTW01 }}: {{ ETH0_IP_ADDRESS_GTW01 }}
             default_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
           ip_ranges:
@@ -311,14 +311,13 @@
                   parents:
                    - enp9s0f1
 
-
-          - name: {{ HOSTNAME_CMP01 }}
+          - name: {{ HOSTNAME_CMP001 }}
             role: salt_minion
             params:
               ipmi_user: !os_env IPMI_USER
               ipmi_password: !os_env IPMI_PASSWORD
               ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_CMP01  # hostname or IP address
+              ipmi_host: !os_env IPMI_HOST_CMP001  # hostname or IP address
               ipmi_lan_interface: lanplus
               ipmi_port: 623
 
@@ -344,9 +343,9 @@
               interfaces:
                 - label: enp9s0f0
                   l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP01
+                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP001
                 - label: enp9s0f1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP01
+                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP001
               network_config:
                 enp9s0f0:
                   networks:
@@ -359,15 +358,13 @@
                    - enp9s0f0
                    - enp9s0f1
 
-
-
-          - name: {{ HOSTNAME_CMP02 }}
+          - name: {{ HOSTNAME_CMP002 }}
             role: salt_minion
             params:
               ipmi_user: !os_env IPMI_USER
               ipmi_password: !os_env IPMI_PASSWORD
               ipmi_previlegies: OPERATOR
-              ipmi_host: !os_env IPMI_HOST_CMP02  # hostname or IP address
+              ipmi_host: !os_env IPMI_HOST_CMP002  # hostname or IP address
               ipmi_lan_interface: lanplus
               ipmi_port: 623
 
@@ -393,9 +390,9 @@
               interfaces:
                 - label: enp9s0f0
                   l2_network_device: admin
-                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP02
+                  mac_address: !os_env ETH0_MAC_ADDRESS_CMP002
                 - label: enp9s0f1
-                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP02
+                  mac_address: !os_env ETH1_MAC_ADDRESS_CMP002
               network_config:
                 enp9s0f0:
                   networks:
@@ -408,7 +405,6 @@
                    - enp9s0f0
                    - enp9s0f1
 
-
           - name: {{ HOSTNAME_GTW01 }}
             role: salt_minion
             params:
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml
index c5d17cf..eb26bae 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml
@@ -29,6 +29,14 @@
   retry: {count: 1, delay: 5}
   skip_fail: true
 
+- description: Temporary workaround for removing virtual gtw nodes
+  cmd: |
+    sed -i 's/\-\ system\.salt\.control\.sizes\.ovs\.compact//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    sed -i 's/\-\ system\.salt\.control\.placement\.ovs\.compact//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: true
+
 - description: Temporary WR for correct bridge name according to environment templates
   cmd: |
     sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
@@ -39,5 +47,15 @@
   retry: {count: 1, delay: 10}
   skip_fail: false
 
+- description: "WR for PROD-24311"
+  cmd: |
+    set -e;
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools add-key parameters._param.salt_control_xenial_image 'https://apt.mcp.mirantis.net/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+    reclass-tools add-key parameters._param.salt_control_trusty_image 'https://apt.mcp.mirantis.net/images/ubuntu-14-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
 {{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
 
diff --git a/tcp_tests/templates/runtest.yml b/tcp_tests/templates/runtest.yml
index cceaf8b..263dbb0 100644
--- a/tcp_tests/templates/runtest.yml
+++ b/tcp_tests/templates/runtest.yml
@@ -1,10 +1,11 @@
 classes:
 - service.runtest.tempest
+- service.runtest.tempest.services.manila.glance
 parameters:
   _param:
-    runtest_tempest_cfg_dir: /root/test/
+    runtest_tempest_cfg_dir: /tmp/test/
     runtest_tempest_cfg_name: tempest.conf
-    runtest_tempest_public_net: net04_ext
+    runtest_tempest_public_net: public
     tempest_test_target: gtw01*
   neutron:
     client:
@@ -19,25 +20,10 @@
       convert_to_uuid:
         network:
           public_network_id: ${_param:runtest_tempest_public_net}
-      network:
-          floating_network_name: ${_param:runtest_tempest_public_net}
       DEFAULT:
         log_file: tempest.log
-      heat_plugin:
-        floating_network_name: ${_param:runtest_tempest_public_net}
       compute:
-        build_timeout: 600
-        min_microversion: 2.1
-        max_microversion: 2.53
         min_compute_nodes: 2
-        volume_device_name: 'vdc'
-      dns_feature_enabled:
-        api_admin: false
-        api_v1: false
-        api_v2: true
-        api_v2_quotas: true
-        api_v2_root_recordsets: true
-        bug_1573141_fixed: true
       share:
         capability_snapshot_support: True
         run_driver_assisted_migration_tests: False
diff --git a/tcp_tests/utils/get_logs.py b/tcp_tests/utils/get_logs.py
new file mode 100755
index 0000000..225f9d7
--- /dev/null
+++ b/tcp_tests/utils/get_logs.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+
+import argparse
+import os
+import sys
+import time
+
+sys.path.append(os.getcwd())
+try:
+    from tcp_tests.fixtures import config_fixtures
+    from tcp_tests.managers import underlay_ssh_manager
+except ImportError:
+    print("ImportError: Run the application from the tcp-qa directory or "
+          "set the PYTHONPATH environment variable to directory which contains"
+          " ./tcp_tests")
+    sys.exit(1)
+
+
+def load_params():
+    """
+    Build the CLI argument parser
+
+    Returns: ArgumentParser instance
+    """
+    parser = argparse.ArgumentParser(description=(
+        'Download logs and debug info from salt minions'
+    ))
+    default_name_prefix = 'logs_' + time.strftime("%Y%m%d_%H%M%S")
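+    # e.g. 'logs_20180921_153045'; the pipelines usually pass a stage name
+    # such as 'deploy_salt' instead, to tie each archive to the step it covers.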
+    parser.add_argument('--archive-name-prefix',
+                        help=('Custom prefix for the archive name'),
+                        default=default_name_prefix,
+                        type=str)
+    return parser
+
+
+def main():
+    parser = load_params()
+    opts = parser.parse_args()
+
+    tests_configs = os.environ.get('TESTS_CONFIGS', None)
+    if not tests_configs or not os.path.isfile(tests_configs):
+        print("Download logs and debug info from salt minions. "
+              "Please set TESTS_CONFIGS environment variable whith"
+              "the path to INI file with lab metadata.")
+        return 11
+
+    config = config_fixtures.config()
+    underlay = underlay_ssh_manager.UnderlaySSHManager(config)
+
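+    # Run the dump commands on every node and download the collected logs;
+    # the resulting archive name starts with the given prefix.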
+    underlay.get_logs(opts.archive_name_prefix)
+
+
+if __name__ == '__main__':
+    sys.exit(main())
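+
+# Example invocation (a sketch mirroring SharedPipeline.download_logs; assumes
+# the lab INI file was produced by the 'salt-deployed' fixture):
+#   export TESTS_CONFIGS=$(pwd)/${ENV_NAME}_salt_deployed.ini
+#   ./tcp_tests/utils/get_logs.py --archive-name-prefix deploy_salt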
diff --git a/tcp_tests/utils/run_jenkins_job.py b/tcp_tests/utils/run_jenkins_job.py
index b01f366..acc2e9f 100755
--- a/tcp_tests/utils/run_jenkins_job.py
+++ b/tcp_tests/utils/run_jenkins_job.py
@@ -4,7 +4,6 @@
 import os
 import sys
 
-from devops import error
 import json
 
 sys.path.append(os.getcwd())
@@ -140,7 +139,7 @@
             interval=1,
             verbose=opts.verbose,
             job_output_prefix=opts.job_output_prefix)
-    except error.TimeoutError as e:
+    except Exception as e:
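+        # Broad catch: print any failure from the job run before re-raising.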
         print(str(e))
         raise