Merge "Add pipeline to start long test scenarious"
diff --git a/checklist.yaml b/checklist.yaml
index fd03b80..8091d24 100644
--- a/checklist.yaml
+++ b/checklist.yaml
@@ -34,21 +34,33 @@
     status: ProdFailed
     defects: PROD-35212
 
+  - title: "tempest.api.compute.images.test_images.ImagesTestJSON.test_create_image_from_paused_server[id-71bcb732-0261-11e7-9086-fa163e4fa634]"
+    errors:
+      - "502 DELETE https://10.6.0.80:8774/v2.1/images/"
+    status: ProdFailed
+    defects: PROD-35212
+
   - title: "tempest.api.compute.images.test_images.ImagesTestJSON.test_create_image_from_stopped_server[id-aaacd1d0-55a2-4ce8-818a-b5439df8adc9]"
     errors:
       - "502 GET https://10.6.0.80:8774/v2.1/images/"
     status: ProdFailed
     defects: PROD-35212
 
+  - title: "tempest.api.compute.images.test_images.ImagesTestJSON.test_create_image_from_stopped_server[id-aaacd1d0-55a2-4ce8-818a-b5439df8adc9]"
+    errors:
+      - "502 DELETE https://10.6.0.80:8774/v2.1/images/"
+    status: ProdFailed
+    defects: PROD-35212
+
   - title: "tempest.api.compute.images.test_images.ImagesTestJSON.test_create_image_from_suspended_server[id-8ca07fec-0262-11e7-907e-fa163e4fa634]"
     errors:
-      - "502 GET https://10.6.0.80:8774/v2.1/images/"
+      - "502 DELETE https://10.6.0.80:8774/v2.1/images/"
     status: ProdFailed
     defects: PROD-35212
 
   - title: "tempest.api.compute.images.test_images_oneserver.ImagesOneServerTestJSON.test_create_delete_image[id-3731d080-d4c5-4872-b41a-64d0d0021314]"
     errors:
-      - "502 GET https://10.6.0.80:8774/v2.1/images/"
+      - "502 DELETE https://10.6.0.80:8774/v2.1/images/"
     status: ProdFailed
     defects: PROD-35212
 
@@ -82,6 +94,12 @@
     status: ProdFailed
     defects: PROD-35223
 
+  - title: "tempest.api.compute.images.test_images.ImagesTestJSON.test_create_image_from_paused_server[id-71bcb732-0261-11e7-9086-fa163e4fa634]"
+    errors:
+      - "failed to reach ACTIVE state (current state SAVING) within the required time (600 s)."
+    status: ProdFailed
+    defects: PROD-35223
+
   - title: ".setUpClass (tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON)"
     errors:
       - 'resp, body = self.get("images/%s" % image_id)'
@@ -96,6 +114,13 @@
     status: ProdFailed
     defects: PROD-35212
 
+  - title: ".tearDownClass (tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON)"
+    errors:
+      - "raise testtools.MultipleExceptions(*cleanup_errors)"
+      - "Details: 502, <traceback object at"
+    status: ProdFailed
+    defects: PROD-35212
+
   - title: tempest.api.compute.images.test_images.ImagesTestJSON.test_delete_saving_image[id-aa06b52b-2db5-4807-b218-9441f75d74e3]
     errors:
       - resp, body = self.get("images/%s" % image_id)
@@ -109,6 +134,25 @@
     status: ProdFailed
     defects: PROD-35212
 
+  - title: tempest.api.volume.test_volumes_actions.VolumesActionsTest.test_volume_upload[id-d8f1ca95-3d5b-44a3-b8ca-909691c9532d,image]
+    errors:
+      - 502 GET https://10.6.0.80:9292/v2/images/
+    status: ProdFailed
+    defects: PROD-35212
+
+  - title: tempest.api.compute.servers.test_delete_server.DeleteServersTestJSON.test_delete_server_while_in_shelved_state[id-bb0cb402-09dd-4947-b6e5-5e7e1cfa61ad]
+    errors:
+      - 'tempest.lib.exceptions.TimeoutException: Request timed out'
+      - 'failed to reach SHELVED_OFFLOADED status and task state "None" within the required time (600 s). Current status: SHELVED. Current task state: shelving_offloading'
+    status: ProdFailed
+    defects: PROD-35225
+
+  - title: tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_create_backup[id-b963d4f1-94b3-4c40-9e97-7b583f46e470,image]
+    errors:
+      - failed to reach active state (current state saving) within the required time
+    status: ProdFailed
+    defects: PROD-35222
+
   - title: tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_create_backup[id-b963d4f1-94b3-4c40-9e97-7b583f46e470,image]
     errors:
       - show_loadbalancer provisioning_status updated to an invalid state of ERROR
@@ -158,6 +203,12 @@
     status: ProdFailed
     defects: PROD-34693
 
+  - title: octavia_tempest_plugin.tests.api.v2.test_load_balancer.LoadBalancerAPITest.test_load_balancer_show[id-826ae612-8717-4c64-a8a7-cb9570a85870]
+    errors:
+      - show_loadbalancer provisioning_status updated to an invalid state of ERROR
+    status: ProdFailed
+    defects: PROD-34693
+
   - title: octavia_tempest_plugin.tests.api.v2.test_load_balancer.LoadBalancerAPITest.test_load_balancer_ipv4_create[id-61c6343c-a5d2-4b9f-8c7d-34ea83f0596b]
     errors:
       - show_loadbalancer provisioning_status updated to an invalid state of ERROR
@@ -170,6 +221,12 @@
     status: ProdFailed
     defects: PROD-34693
 
+  - title: octavia_tempest_plugin.tests.api.v2.test_load_balancer.LoadBalancerAPITest.test_load_balancer_failover[id-fc2e07a6-9776-4559-90c9-141170d4c397]
+    errors:
+      - show_loadbalancer provisioning_status updated to an invalid state of ERROR
+    status: ProdFailed
+    defects: PROD-34693
+
 # ---------------------------------------------------
   - title: "tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_get_console_output_server_id_in_shutoff_status[id-5b65d4e7-4ecd-437c-83c0-d6b79d927568]"
     errors:
@@ -312,6 +369,18 @@
     defects: PROD-35958
     status: ProdFailed
 
+  - title: test_prometheus_alert_count[SystemCpuStealTimeWarning]
+    comment: Some instability after deployment. The error goes away after some time
+    errors:
+      - The CPU steal time was above
+    status: MixedSuccess
+
+  - title: test_prometheus_alert_count[NetdevBudgetRanOutsWarning]
+    comment: Some instability after deployment. The error goes away after some time
+    errors:
+      - The rate of net_rx_action loops terminations
+    status: MixedSuccess
+
 ## ==================================================
 #              LMA2.0_Automated
 ## ==================================================
diff --git a/jobs/pipelines/deploy-cicd-and-run-tests.groovy b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
index 7207b84..1e35c9e 100644
--- a/jobs/pipelines/deploy-cicd-and-run-tests.groovy
+++ b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
@@ -1,3 +1,7 @@
+/*
+*   DEPLOY_EMPTY_NODE             Add an extra node to the heat stack (no role, salt-minion only)
+*/
+
 @Library('tcp-qa')_
 
 def common = new com.mirantis.mk.Common()
@@ -28,6 +32,18 @@
             shared.prepare_working_dir(env_manager)
         }
 
+        // Reboot Hardware before the BM deployments
+        if ("$ENV_NAME".contains("bm-")){
+            reboot_hw_nodes = (env.REBOOT_HW_NODES ?: 'false').toBoolean()
+            stage("Reboot HW nodes") {
+                if (reboot_hw_nodes) {
+                    shared.reboot_hardware_nodes()
+                } else {
+                    common.printMsg("REBOOT_HW_NODES is disabled. Skipping this stage...", "blue")
+                }
+            }
+        }
+
         stage("Create environment, generate model, bootstrap the salt-cluster") {
             // steps: "hardware,create_model,salt"
             if (env_manager == 'devops') {
diff --git a/jobs/pipelines/rotation_bm_deployments.groovy b/jobs/pipelines/rotation_bm_deployments.groovy
index 49103a9..222f96a 100644
--- a/jobs/pipelines/rotation_bm_deployments.groovy
+++ b/jobs/pipelines/rotation_bm_deployments.groovy
@@ -61,36 +61,6 @@
         }
     }
 
-    stage("Reboot HW nodes ") {
-        bm_ips = [
-        "185.8.59.227",
-        "185.8.59.229",
-        "5.43.225.88",
-        "5.43.225.112",
-        "5.43.225.208",
-        "5.43.227.118",
-        "185.8.58.248",
-        "185.8.59.222",
-        "5.43.225.228",
-        "5.43.229.28",
-        "5.43.225.23",
-        "185.8.58.9",
-        "185.8.58.246",
-        "185.8.58.243",
-        "185.8.58.244"
-        ]
-
-        withCredentials([
-           [$class          : 'UsernamePasswordMultiBinding',
-           credentialsId   : 'lab_engineer',
-           passwordVariable: 'lab_pass',
-           usernameVariable: 'lab_user']
-        ]) {
-            for (ip in bm_ips) { sh ("ipmitool -H ${ip} -U ${lab_user} -P ${lab_pass} chassis power off")}
-            for (ip in bm_ips) { sh ("ipmitool -H ${ip} -U ${lab_user} -P ${lab_pass} chassis bootdev pxe")}
-        }
-    }
-
     stage("Start deployment") {
       def jobs_map = [:]
       for (stack_name in stacks) {
@@ -106,7 +76,8 @@
       def deploy = build job: "${stack_to_deploy}",
       parameters: [
             string(name: 'PARENT_NODE_NAME', value: env.PARENT_NODE_NAME),
-            string(name: 'OS_CREDENTIALS', value: env.OS_CREDENTIALS)
+            string(name: 'OS_CREDENTIALS', value: env.OS_CREDENTIALS),
+            string(name: 'TCP_QA_REFS', value: env.TCP_QA_REFS)
         ]
     }
   }
diff --git a/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy b/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy
index ba52899..7ae510e 100644
--- a/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy
+++ b/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy
@@ -30,6 +30,7 @@
  *   JENKINS_PIPELINE_BRANCH       Should be set in release/proposed/2019.2.0 when we test non-released version
  *   UPDATE_VERSION                Version of update to deploy
  *   LAB_PARAM_DEFAULTS            Filename placed in tcp_tests/templates/_heat_environments, with default parameters for the heat template
+ *   DEPLOY_EMPTY_NODE             Add an extra node to the heat stack (no role, salt-minion only)
  *
  *   CREATE_JENKINS_NODE_CREDENTIALS   Jenkins username and password with rights to add/delete Jenkins agents
  */
@@ -162,6 +163,7 @@
                             export ENV_NAME=${ENV_NAME}
                             export LAB_CONFIG_NAME=${LAB_CONFIG_NAME}
                             export LAB_PARAM_DEFAULTS=${LAB_PARAM_DEFAULTS}
+                            export DEPLOY_EMPTY_NODE=${DEPLOY_EMPTY_NODE}
                             export LOG_NAME=swarm_test_create_environment.log
                             py.test --cache-clear -vvv -s -p no:django -p no:ipdb --junit-xml=deploy_hardware.xml -k \${TEST_GROUP}
                         """)
diff --git a/jobs/templates/2019.2.0-heat-cicd-pike-dvr-sl.yml b/jobs/templates/2019.2.0-heat-cicd-pike-dvr-sl.yml
index 4a8ebfd..fbd3355 100644
--- a/jobs/templates/2019.2.0-heat-cicd-pike-dvr-sl.yml
+++ b/jobs/templates/2019.2.0-heat-cicd-pike-dvr-sl.yml
@@ -105,6 +105,10 @@
         default: true
         description: ''
         name: SHUTDOWN_ENV_ON_TEARDOWN
+    - bool:
+        default: false
+        description: 'Add an extra node to the heat stack (no role, salt-minion only)'
+        name: DEPLOY_EMPTY_NODE
     - string:
         default: ''
         description: ''
diff --git a/jobs/templates/2019.2.0-heat-cicd-queens-contrail41-sl.yml b/jobs/templates/2019.2.0-heat-cicd-queens-contrail41-sl.yml
index 4da28d4..26c84d9 100644
--- a/jobs/templates/2019.2.0-heat-cicd-queens-contrail41-sl.yml
+++ b/jobs/templates/2019.2.0-heat-cicd-queens-contrail41-sl.yml
@@ -65,6 +65,10 @@
         description: ''
         name: ENV_NAME
         trim: 'false'
+    - bool:
+        default: false
+        description: 'Add an extra node to the heat stack (no role, salt-minion only)'
+        name: DEPLOY_EMPTY_NODE
     - string:
         default: ''
         description: |-
diff --git a/jobs/templates/2019.2.0-heat-cicd-queens-dvr-sl.yml b/jobs/templates/2019.2.0-heat-cicd-queens-dvr-sl.yml
index 2374cb4..91895e8 100644
--- a/jobs/templates/2019.2.0-heat-cicd-queens-dvr-sl.yml
+++ b/jobs/templates/2019.2.0-heat-cicd-queens-dvr-sl.yml
@@ -65,6 +65,10 @@
         description: ''
         name: ENV_NAME
         trim: 'false'
+    - bool:
+        default: false
+        description: 'Add an extra node to the heat stack (no role, salt-minion only)'
+        name: DEPLOY_EMPTY_NODE
     - string:
         default: ''
         description: |-
diff --git a/jobs/templates/bm-cicd-pike-ovs-maas.yml b/jobs/templates/bm-cicd-pike-ovs-maas.yml
index 694630c..6b30a2a 100644
--- a/jobs/templates/bm-cicd-pike-ovs-maas.yml
+++ b/jobs/templates/bm-cicd-pike-ovs-maas.yml
@@ -2,7 +2,6 @@
     project-type: pipeline
     description: '{job-description}'
     concurrent: true
-    description: runs at H(5-15) 00 * * 1
     disabled: false
     name: bm-cicd-pike-ovs-maas
     parameters:
@@ -200,6 +199,10 @@
         default: true
         description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
         name: UPGRADE_SALTSTACK
+    - bool:
+        default: true
+        description: Reboot hardware servers and set boot to PXE before the deployment
+        name: REBOOT_HW_NODES
     pipeline-scm:
       lightweight-checkout: false
       scm:
diff --git a/jobs/templates/bm-cicd-queens-ovs-maas.yml b/jobs/templates/bm-cicd-queens-ovs-maas.yml
index ad40bdf..95b0343 100644
--- a/jobs/templates/bm-cicd-queens-ovs-maas.yml
+++ b/jobs/templates/bm-cicd-queens-ovs-maas.yml
@@ -212,6 +212,10 @@
         default: false
         description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
         name: UPGRADE_SALTSTACK
+    - bool:
+        default: true
+        description: Reboot hardware servers and set boot to PXE before the deployment
+        name: REBOOT_HW_NODES
     pipeline-scm:
       lightweight-checkout: false
       scm:
diff --git a/jobs/templates/heat-bm-cicd-pike-contrail-sl.yml b/jobs/templates/heat-bm-cicd-pike-contrail-sl.yml
index 3d9c175..531b1e9 100644
--- a/jobs/templates/heat-bm-cicd-pike-contrail-sl.yml
+++ b/jobs/templates/heat-bm-cicd-pike-contrail-sl.yml
@@ -225,6 +225,10 @@
         default: true
         description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
         name: UPGRADE_SALTSTACK
+    - bool:
+        default: true
+        description: Reboot hardware servers and set boot to PXE before the deployment
+        name: REBOOT_HW_NODES
     pipeline-scm:
       lightweight-checkout: false
       scm:
diff --git a/jobs/templates/heat-bm-cicd-queens-contrail-sl.yml b/jobs/templates/heat-bm-cicd-queens-contrail-sl.yml
index 2c0de61..09cd0fb 100644
--- a/jobs/templates/heat-bm-cicd-queens-contrail-sl.yml
+++ b/jobs/templates/heat-bm-cicd-queens-contrail-sl.yml
@@ -226,6 +226,10 @@
         default: false
         description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
         name: UPGRADE_SALTSTACK
+    - bool:
+        default: true
+        description: Reboot hardware servers and set boot to PXE before the deployment
+        name: REBOOT_HW_NODES
     pipeline-scm:
       lightweight-checkout: false
       scm:
diff --git a/jobs/templates/heat-cicd-pike-dvr-sl.yml b/jobs/templates/heat-cicd-pike-dvr-sl.yml
index f3baf7e..5128bd5 100644
--- a/jobs/templates/heat-cicd-pike-dvr-sl.yml
+++ b/jobs/templates/heat-cicd-pike-dvr-sl.yml
@@ -70,6 +70,10 @@
         description: ''
         name: ENV_NAME
         trim: 'false'
+    - bool:
+        default: false
+        description: 'Add an extra node to the heat stack (no role, salt-minion only)'
+        name: DEPLOY_EMPTY_NODE
     - string:
         default: ''
         description: |-
diff --git a/jobs/templates/heat-cicd-queens-contrail41-sl.yml b/jobs/templates/heat-cicd-queens-contrail41-sl.yml
index 71f1f6a..029c090 100644
--- a/jobs/templates/heat-cicd-queens-contrail41-sl.yml
+++ b/jobs/templates/heat-cicd-queens-contrail41-sl.yml
@@ -65,6 +65,10 @@
         description: ''
         name: ENV_NAME
         trim: 'false'
+    - bool:
+        default: false
+        description: 'Add an extra node to the heat stack (no role, salt-minion only)'
+        name: DEPLOY_EMPTY_NODE
     - string:
         default: ''
         description: |-
diff --git a/jobs/templates/heat-cicd-queens-dvr-sl.yml b/jobs/templates/heat-cicd-queens-dvr-sl.yml
index b0afc13..d3bb4c6 100644
--- a/jobs/templates/heat-cicd-queens-dvr-sl.yml
+++ b/jobs/templates/heat-cicd-queens-dvr-sl.yml
@@ -64,6 +64,10 @@
         description: ''
         name: ENV_NAME
         trim: 'false'
+    - bool:
+        default: true
+        description: 'Add an extra node to the heat stack (no role, salt-minion only)'
+        name: DEPLOY_EMPTY_NODE
     - string:
         default: ''
         description: |-
diff --git a/jobs/templates/released-heat-cicd-pike-contrail41-sl.yml b/jobs/templates/released-heat-cicd-pike-contrail41-sl.yml
index 2a83f7b..7789c7d 100644
--- a/jobs/templates/released-heat-cicd-pike-contrail41-sl.yml
+++ b/jobs/templates/released-heat-cicd-pike-contrail41-sl.yml
@@ -62,6 +62,10 @@
         description: ''
         name: ENV_NAME
         trim: 'false'
+    - bool:
+        default: false
+        description: 'Add an extra node to the heat stack (no role, salt-minion only)'
+        name: DEPLOY_EMPTY_NODE
     - string:
         default: ''
         description: |-
@@ -94,7 +98,7 @@
     - text:
         default: |-
           --keep-duplicates --maxfail=1 \
-            tcp_tests/tests/system/test_3rdparty_suites.py::Test3rdpartySuites::test_run_tempest \
+            tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_tempest \
             tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_func_sanity \
             tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_stacklight \
              \
@@ -106,7 +110,7 @@
             tcp_tests/tests/system/test_mcp_update.py::TestUpdateMcpCluster::test_update_stacklight \
             tcp_tests/tests/system/test_mcp_update.py::TestUpdateMcpCluster::test_update_ceph \
              \
-            tcp_tests/tests/system/test_3rdparty_suites.py::Test3rdpartySuites::test_run_tempest \
+            tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_tempest \
             tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_func_sanity \
             tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_stacklight
 
diff --git a/jobs/templates/released-heat-cicd-pike-dvr-sl.yml b/jobs/templates/released-heat-cicd-pike-dvr-sl.yml
index 7343f06..be08433 100644
--- a/jobs/templates/released-heat-cicd-pike-dvr-sl.yml
+++ b/jobs/templates/released-heat-cicd-pike-dvr-sl.yml
@@ -62,6 +62,10 @@
         description: ''
         name: ENV_NAME
         trim: 'false'
+    - bool:
+        default: false
+        description: 'Add an extra node to the heat stack (no role, salt-minion only)'
+        name: DEPLOY_EMPTY_NODE
     - string:
         default: ''
         description: |-
@@ -94,7 +98,7 @@
     - text:
         default: |-
           --keep-duplicates --maxfail=1 \
-            tcp_tests/tests/system/test_3rdparty_suites.py::Test3rdpartySuites::test_run_tempest \
+            tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_tempest \
             tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_func_sanity \
             tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_stacklight \
              \
@@ -105,7 +109,7 @@
             tcp_tests/tests/system/test_mcp_update.py::TestUpdateMcpCluster::test_update_stacklight \
             tcp_tests/tests/system/test_mcp_update.py::TestUpdateMcpCluster::test_update_ceph \
              \
-            tcp_tests/tests/system/test_3rdparty_suites.py::Test3rdpartySuites::test_run_tempest \
+            tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_tempest \
             tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_func_sanity \
             tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_stacklight
 
diff --git a/jobs/templates/released-heat-cicd-queens-contrail41-sl.yml b/jobs/templates/released-heat-cicd-queens-contrail41-sl.yml
index 7808d83..60a4372 100644
--- a/jobs/templates/released-heat-cicd-queens-contrail41-sl.yml
+++ b/jobs/templates/released-heat-cicd-queens-contrail41-sl.yml
@@ -62,6 +62,10 @@
         description: ''
         name: ENV_NAME
         trim: 'false'
+    - bool:
+        default: false
+        description: 'Add an extra node to the heat stack (no role, salt-minion only)'
+        name: DEPLOY_EMPTY_NODE
     - string:
         default: ''
         description: |-
@@ -92,7 +96,7 @@
     - text:
         default: |-
           --keep-duplicates --maxfail=1 \
-            tcp_tests/tests/system/test_3rdparty_suites.py::Test3rdpartySuites::test_run_tempest \
+            tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_tempest \
             tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_func_sanity \
             tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_stacklight \
              \
@@ -104,7 +108,7 @@
             tcp_tests/tests/system/test_mcp_update.py::TestUpdateMcpCluster::test_update_stacklight \
             tcp_tests/tests/system/test_mcp_update.py::TestUpdateMcpCluster::test_update_ceph \
              \
-            tcp_tests/tests/system/test_3rdparty_suites.py::Test3rdpartySuites::test_run_tempest \
+            tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_tempest \
             tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_func_sanity \
             tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_stacklight
         description: |-
diff --git a/jobs/templates/released-heat-cicd-queens-dvr-sl.yml b/jobs/templates/released-heat-cicd-queens-dvr-sl.yml
index b27cdd7..0c0cb08 100644
--- a/jobs/templates/released-heat-cicd-queens-dvr-sl.yml
+++ b/jobs/templates/released-heat-cicd-queens-dvr-sl.yml
@@ -62,6 +62,10 @@
         description: ''
         name: ENV_NAME
         trim: 'false'
+    - bool:
+        default: false
+        description: 'Add an extra node to the heat stack (no role, salt-minion only)'
+        name: DEPLOY_EMPTY_NODE
     - string:
         default: ''
         description: |-
@@ -94,7 +98,7 @@
     - text:
         default: |-
           --keep-duplicates --maxfail=1 \
-            tcp_tests/tests/system/test_3rdparty_suites.py::Test3rdpartySuites::test_run_tempest \
+            tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_tempest \
             tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_func_sanity \
             tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_stacklight \
              \
@@ -105,7 +109,7 @@
             tcp_tests/tests/system/test_mcp_update.py::TestUpdateMcpCluster::test_update_stacklight \
             tcp_tests/tests/system/test_mcp_update.py::TestUpdateMcpCluster::test_update_ceph \
              \
-            tcp_tests/tests/system/test_3rdparty_suites.py::Test3rdpartySuites::test_run_tempest \
+            tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_tempest \
             tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_func_sanity \
             tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_stacklight
 
diff --git a/src/com/mirantis/system_qa/SharedPipeline.groovy b/src/com/mirantis/system_qa/SharedPipeline.groovy
index ccb0163..c42d43f 100644
--- a/src/com/mirantis/system_qa/SharedPipeline.groovy
+++ b/src/com/mirantis/system_qa/SharedPipeline.groovy
@@ -275,6 +275,7 @@
         def env_lab_ctl_iface = env.LAB_CONTROL_IFACE ?: ''
         def update_repo_custom_tag = env.UPDATE_REPO_CUSTOM_TAG ?: ''
         def update_version = env.UPDATE_VERSION ?: ''
+        def deploy_empty_node = (env.DEPLOY_EMPTY_NODE ?: 'false').toBoolean()
         def parameters = [
                 string(name: 'PARENT_NODE_NAME', value: "${NODE_NAME}"),
                 string(name: 'JENKINS_SLAVE_NODE_NAME', value: jenkins_slave_node_name),
@@ -309,6 +310,7 @@
                 string(name: 'LAB_PARAM_DEFAULTS', value: "${LAB_PARAM_DEFAULTS}"),
                 string(name: 'JENKINS_PIPELINE_BRANCH', value: "${jenkins_pipelines_branch}"),
                 booleanParam(name: 'SHUTDOWN_ENV_ON_TEARDOWN', value: false),
+                booleanParam(name: 'DEPLOY_EMPTY_NODE', value: deploy_empty_node),
             ]
 
         build_pipeline_job('swarm-bootstrap-salt-cluster-heat', parameters)
@@ -663,7 +665,9 @@
     // 'error' status is assumed as 'Blocker' in TestRail reporter
 
     // Replace '<' and '>' to '&lt;' and '&gt;' to avoid conflicts between xml tags in the message and JUnit report
-    def String text_filtered = text.replaceAll("<","&lt;").
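+    // NOTE: '&' must be escaped first, otherwise the '&' in the '&lt;'/'&gt;' produced below would be escaped twice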
+    def String text_filtered = text.replaceAll("&", "&amp;").
+        replaceAll("<","&lt;").
         replaceAll(">", "&gt;").
         replaceAll("[^\\x{0009}\\x{000a}\\x{000d}\\x{0020}-\\x{D7FF}\\x{E000}-\\x{FFFD}]+", "")
 
@@ -776,4 +779,36 @@
     }
     return ret
   }
+}
+
+def reboot_hardware_nodes() {
+    bm_ips = [
+        "185.8.59.227",
+        "185.8.59.229",
+        "5.43.225.88",
+        "5.43.225.112",
+        "5.43.225.208",
+        "5.43.227.118",
+        "185.8.58.248",
+        "185.8.59.222",
+        "5.43.225.228",
+        "5.43.229.28",
+        "5.43.225.23",
+        "185.8.58.9",
+        "185.8.58.246",
+        "185.8.58.243",
+        "185.8.58.244"
+        ]
+
+    withCredentials([
+       [$class          : 'UsernamePasswordMultiBinding',
+       credentialsId   : 'lab_engineer',
+       passwordVariable: 'lab_pass',
+       usernameVariable: 'lab_user']
+    ]) {
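+        // Power every node off first, then set PXE as the next boot device,
+        // so the servers boot from the network for the upcoming deployment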
+        for (ip in bm_ips) { sh ("ipmitool -H ${ip} -U ${lab_user} -P ${lab_pass} chassis power off")}
+        for (ip in bm_ips) { sh ("ipmitool -H ${ip} -U ${lab_user} -P ${lab_pass} chassis bootdev pxe")}
+    }
 }
\ No newline at end of file
diff --git a/tcp_tests/managers/drivetrain_manager.py b/tcp_tests/managers/drivetrain_manager.py
index 3851e8d..f65892e 100644
--- a/tcp_tests/managers/drivetrain_manager.py
+++ b/tcp_tests/managers/drivetrain_manager.py
@@ -49,11 +49,19 @@
         Method accept any param:
             job_parameters=None,
             job_output_prefix='',
+            jenkins_tgt='I@salt:master' or
+              'I@docker:client:stack:jenkins and cid01*'
             start_timeout=1800,
             build_timeout=3600 * 4,
             verbose=False
 
         :param job_name: string
+        :param jenkins_tgt: string, salt-style target of the node where Jenkins runs
         :return: string, Result of passed job, "SUCCESS"| "FAILED" | "UNSTABLE"
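+
+        Illustrative usage (the job name is only an example):
+            dt.start_job_on_jenkins(
+                job_name='cvp-sanity',
+                jenkins_tgt='I@docker:client:stack:jenkins and cid01*')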
         """
         jenkins_url, jenkins_user, jenkins_pass = self.get_jenkins_creds(
@@ -76,12 +79,13 @@
         LOG.info(description)
         LOG.info('\n'.join(stages))
 
+        job_description = "{description}. \n\n{stages}"\
+            .format(description=description,
+                    stages='\n'.join(stages))
+
         if job_result != 'SUCCESS':
             LOG.warning("{0}\n{1}".format(description, '\n'.join(stages)))
-        return job_result
-
-    def start_job_on_cfg_jenkins(self):
-        pass
+        return job_result, job_description
 
     def get_jenkins_creds(self, tgt):
         """
diff --git a/tcp_tests/managers/envmanager_heat.py b/tcp_tests/managers/envmanager_heat.py
index 30c4850..5fe143a 100644
--- a/tcp_tests/managers/envmanager_heat.py
+++ b/tcp_tests/managers/envmanager_heat.py
@@ -345,6 +345,16 @@
                 if wait_for_delete is True:
                     return
                 raise ex
+            except heat_exceptions.HTTPException as ex:
+                # tolerate HTTP timeouts from Heat
+                if ex.code == 504:
+                    raise exceptions.EnvironmentWrongStatus(
+                        self.__config.hardware.heat_stack_name,
+                        status,
+                        "Heat API Temporary Unavailable"
+                    )
+                else:
+                    raise ex
             if st == status:
                 return
             elif st in BAD_STACK_STATUSES:
@@ -547,6 +557,8 @@
             'parameters': {
                 'mcp_version': mcp_version,
                 'env_name': settings.ENV_NAME,
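+                # coerce to a plain bool so the Heat 'boolean' parameter gets a predictable value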
+                'deploy_empty_node': bool(settings.DEPLOY_EMPTY_NODE)
             }
         }
 
diff --git a/tcp_tests/managers/reclass_manager.py b/tcp_tests/managers/reclass_manager.py
index ab6bd72..56116fc 100644
--- a/tcp_tests/managers/reclass_manager.py
+++ b/tcp_tests/managers/reclass_manager.py
@@ -182,6 +182,33 @@
                 path=short_path
             ))
 
+    def merge_context(self, yaml_context, short_path):
+        """
+        Merge extra yaml context into a reclass model file
+
+        :param yaml_context: string, yaml with extra context
+        :param short_path: string, path to reclass yaml file.
+            It takes into account default path where the reclass locates.
+            May look like cluster/*/cicd/control/leader.yml
+        :return: None
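+
+        Illustrative usage (the context and path are only examples):
+            reclass.merge_context(yaml_context="parameters:\n  _param:\n    foo: bar",
+                                  short_path="cluster/*/cicd/control/leader.yml")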
+        """
+        tmp_file = "/tmp/extra_context.yaml"
+        with open(tmp_file, "w") as f:
+            f.write(yaml_context)
+
+        self.ssh.upload(tmp_file, tmp_file)
+        self.ssh.check_call(
+            "{reclass_tools} merge-context {yaml} \
+            /srv/salt/reclass/classes/{path}".format(
+                reclass_tools=self.reclass_tools_cmd,
+                yaml=tmp_file,
+                path=short_path
+            ))
+
     def commit(self, text_commit):
         self.ssh.check_call(
             "cd /srv/salt/reclass; git add -u && git commit --allow-empty "
diff --git a/tcp_tests/settings.py b/tcp_tests/settings.py
index 5e60b86..18548fb 100644
--- a/tcp_tests/settings.py
+++ b/tcp_tests/settings.py
@@ -33,6 +33,7 @@
 ENV_NAME = os.environ.get("ENV_NAME", None)
 MAKE_SNAPSHOT_STAGES = get_var_as_bool("MAKE_SNAPSHOT_STAGES", True)
 SHUTDOWN_ENV_ON_TEARDOWN = get_var_as_bool('SHUTDOWN_ENV_ON_TEARDOWN', True)
+DEPLOY_EMPTY_NODE = get_var_as_bool('DEPLOY_EMPTY_NODE', False)
 
 LAB_CONFIG_NAME = os.environ.get('LAB_CONFIG_NAME', 'mk22-lab-basic')
 DOMAIN_NAME = os.environ.get('DOMAIN_NAME',
diff --git a/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml b/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
index 9e111d7..60d28a2 100644
--- a/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
+++ b/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt-context-cookiecutter-openstack_ovs.yaml
@@ -14,8 +14,8 @@
   cluster_name: bm-cicd-pike-ovs-maas
   compute_bond_mode: active-backup
   compute_primary_first_nic: eth1
-  compute_primary_second_nic: eth2
   context_seed: zEFbUBMME6LFdiL0rJWFgHMdQGgywnDSE9vFYvHgEBeYHb4QJsDl3HqpdaTgqYlF
+  compute_primary_second_nic: eth2
   control_network_netmask: 255.255.255.0
   control_network_subnet: 10.167.11.0/24
   control_vlan: '2404'
@@ -865,4 +865,9 @@
   secrets_encryption_key_id: 'F5CB2ADC36159B03'
   # Used on CI only.
   secrets_encryption_private_key: ''
-  stacklight_ssl_enabled: 'True'
\ No newline at end of file
+  stacklight_ssl_enabled: 'True'
+
+  # Enable Mirantis repo with CVE fixes for xenial
+  updates_mirantis_login: "root"
+  updates_mirantis_password: "r00tme"
+  updates_mirantis_version: "staging"
\ No newline at end of file
diff --git a/tcp_tests/templates/bm-cicd-pike-ovs-maas/underlay.hot b/tcp_tests/templates/bm-cicd-pike-ovs-maas/underlay.hot
index 9dc313a..0d37b6f 100644
--- a/tcp_tests/templates/bm-cicd-pike-ovs-maas/underlay.hot
+++ b/tcp_tests/templates/bm-cicd-pike-ovs-maas/underlay.hot
@@ -49,6 +49,10 @@
   salt_master_control_ip:
     type: string
     default: 10.167.11.5
+  deploy_empty_node:
+    type: boolean
+    default: False
+
 
 resources:
   subnets:
diff --git a/tcp_tests/templates/bm-cicd-queens-ovs-maas/underlay.hot b/tcp_tests/templates/bm-cicd-queens-ovs-maas/underlay.hot
index 03c5679..eac31bf 100644
--- a/tcp_tests/templates/bm-cicd-queens-ovs-maas/underlay.hot
+++ b/tcp_tests/templates/bm-cicd-queens-ovs-maas/underlay.hot
@@ -49,6 +49,9 @@
   salt_master_control_ip:
     type: string
     default: 10.167.11.5
+  deploy_empty_node:
+    type: boolean
+    default: False
 
 resources:
   subnets:
diff --git a/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/underlay.hot b/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/underlay.hot
index cffb044..e11335f 100644
--- a/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/underlay.hot
+++ b/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/underlay.hot
@@ -51,6 +51,9 @@
   salt_master_control_ip:
     type: string
     default: 10.6.0.15
+  deploy_empty_node:
+    type: boolean
+    default: False
 
 resources:
   subnets:
diff --git a/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/underlay.hot b/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/underlay.hot
index 17bbef5..b02b758 100644
--- a/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/underlay.hot
+++ b/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/underlay.hot
@@ -51,6 +51,9 @@
   salt_master_control_ip:
     type: string
     default: 10.6.0.15
+  deploy_empty_node:
+    type: boolean
+    default: False
 
 resources:
   subnets:
diff --git a/tcp_tests/templates/heat-cicd-pike-dvr-sl/salt.yaml b/tcp_tests/templates/heat-cicd-pike-dvr-sl/salt.yaml
index 8fa87c5..4796bf0 100644
--- a/tcp_tests/templates/heat-cicd-pike-dvr-sl/salt.yaml
+++ b/tcp_tests/templates/heat-cicd-pike-dvr-sl/salt.yaml
@@ -15,6 +15,7 @@
 {{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
 
 {{SHARED.MACRO_IPFLUSH_TENANTS_IFACES()}}
+{{SHARED.DISABLE_EMPTY_NODE()}}
 
 {{SHARED_TEST_TOOLS.MACRO_INSTALL_RECLASS_TOOLS()}}
 
diff --git a/tcp_tests/templates/heat-cicd-pike-dvr-sl/underlay.hot b/tcp_tests/templates/heat-cicd-pike-dvr-sl/underlay.hot
index 4c5e949..c7f6ea6 100644
--- a/tcp_tests/templates/heat-cicd-pike-dvr-sl/underlay.hot
+++ b/tcp_tests/templates/heat-cicd-pike-dvr-sl/underlay.hot
@@ -39,6 +39,9 @@
   salt_master_control_ip:
     type: string
     default: 10.6.0.15
+  deploy_empty_node:
+    type: boolean
+    default: False
 
   key_pair:
     type: string
@@ -1065,6 +1068,36 @@
 
       instance_config_host: { get_attr: [cfg01_node, instance_address] }
 
+  empty_node:
+    type: MCP::SingleInstance2Volumes
+    depends_on: [cfg01_node]
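+    # the resource is created only when the 'deploy_empty_node' parameter is true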
+    condition: { get_param: deploy_empty_node }
+    properties:
+      env_name: { get_param: env_name }
+      mcp_version: { get_param: mcp_version }
+      instance_domain: {get_param: instance_domain}
+      instance_name: xtra
+      role: none
+      instance_flavor: {get_param: osd_flavor}
+      availability_zone: { get_param: vm_availability_zone }
+      underlay_userdata: { get_file: ./underlay-userdata.yaml }
+      control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [subnets, control_net_prefix] }, '205' ]
+      tenant_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [subnets, tenant_net_prefix] }, '205' ]
+      external_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [subnets, external_net_prefix] }, '205' ]
+
+      instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+
 outputs:
   foundation_public_ip:
     description: foundation node IP address (floating) from external network
diff --git a/tcp_tests/templates/heat-cicd-queens-contrail41-sl/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/heat-cicd-queens-contrail41-sl/salt-context-cookiecutter-contrail.yaml
index b2532f0..db3b07b 100644
--- a/tcp_tests/templates/heat-cicd-queens-contrail41-sl/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/heat-cicd-queens-contrail41-sl/salt-context-cookiecutter-contrail.yaml
@@ -327,3 +327,8 @@
     password_regex: "'^[a-zA-Z0-9~!@#%^&\\*_=+]{32,}$$'"
     password_regex_description: "Your password could contains capital letters, lowercase letters, digits, symbols '~ ! @ # % ^ & * _ = +' and have a minimum length of 32 characters"
     change_password_upon_first_use: False
+
+  # Enable Mirantis repo with CVE fixes for xenial
+  updates_mirantis_login: "root"
+  updates_mirantis_password: "r00tme"
+  updates_mirantis_version: "staging"
\ No newline at end of file
diff --git a/tcp_tests/templates/heat-cicd-queens-contrail41-sl/salt.yaml b/tcp_tests/templates/heat-cicd-queens-contrail41-sl/salt.yaml
index b5958c1..8a1000e 100644
--- a/tcp_tests/templates/heat-cicd-queens-contrail41-sl/salt.yaml
+++ b/tcp_tests/templates/heat-cicd-queens-contrail41-sl/salt.yaml
@@ -10,6 +10,7 @@
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
 {{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+{{SHARED.DISABLE_EMPTY_NODE()}}
 
 {{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
 
diff --git a/tcp_tests/templates/heat-cicd-queens-contrail41-sl/underlay.hot b/tcp_tests/templates/heat-cicd-queens-contrail41-sl/underlay.hot
index e00f801..0c92b47 100644
--- a/tcp_tests/templates/heat-cicd-queens-contrail41-sl/underlay.hot
+++ b/tcp_tests/templates/heat-cicd-queens-contrail41-sl/underlay.hot
@@ -39,6 +39,9 @@
   salt_master_control_ip:
     type: string
     default: 10.6.0.15
+  deploy_empty_node:
+    type: boolean
+    default: False
 
   key_pair:
     type: string
@@ -983,6 +986,35 @@
         - [ { get_attr: [subnets, external_net_prefix] }, '220' ]
       instance_config_host: { get_attr: [cfg01_node, instance_address] }
 
+  empty_node:
+    type: MCP::SingleInstance2Volumes
+    depends_on: [cfg01_node]
+    condition: { get_param: deploy_empty_node }
+    properties:
+      env_name: { get_param: env_name }
+      mcp_version: { get_param: mcp_version }
+      instance_domain: {get_param: instance_domain}
+      instance_name: xtra
+      role: none
+      instance_flavor: {get_param: osd_flavor}
+      availability_zone: { get_param: vm_availability_zone }
+      underlay_userdata: { get_file: ./underlay-userdata.yaml }
+      control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [subnets, control_net_prefix] }, '205' ]
+      tenant_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [subnets, tenant_net_prefix] }, '205' ]
+      external_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [subnets, external_net_prefix] }, '205' ]
+
+      instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+
 outputs:
   foundation_public_ip:
     description: foundation node IP address (floating) from external network
diff --git a/tcp_tests/templates/heat-cicd-queens-dvr-sl/underlay.hot b/tcp_tests/templates/heat-cicd-queens-dvr-sl/underlay.hot
index c3e09fa..1d24327 100644
--- a/tcp_tests/templates/heat-cicd-queens-dvr-sl/underlay.hot
+++ b/tcp_tests/templates/heat-cicd-queens-dvr-sl/underlay.hot
@@ -39,6 +39,9 @@
   salt_master_control_ip:
     type: string
     default: 10.6.0.15
+  deploy_empty_node:
+    type: boolean
+    default: False
 
   key_pair:
     type: string
@@ -1068,6 +1071,7 @@
   empty_node:
     type: MCP::SingleInstance2Volumes
     depends_on: [cfg01_node]
+    condition: { get_param: deploy_empty_node }
     properties:
       env_name: { get_param: env_name }
       mcp_version: { get_param: mcp_version }
diff --git a/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/salt.yaml b/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/salt.yaml
index 710f01d..ad307da 100644
--- a/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/salt.yaml
+++ b/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/salt.yaml
@@ -12,5 +12,6 @@
 {{ SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG() }}
 
 {{ SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES() }}
+{{SHARED.DISABLE_EMPTY_NODE()}}
 
 {{ SHARED_TEST_TOOLS.MACRO_INSTALL_RECLASS_TOOLS() }}
diff --git a/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/underlay.hot b/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/underlay.hot
index 789f6a5..8fc50af 100644
--- a/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/underlay.hot
+++ b/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/underlay.hot
@@ -39,6 +39,9 @@
   salt_master_control_ip:
     type: string
     default: 10.6.0.15
+  deploy_empty_node:
+    type: boolean
+    default: False
 
   key_pair:
     type: string
@@ -983,6 +986,35 @@
         - [ { get_attr: [subnets, external_net_prefix] }, '220' ]
       instance_config_host: { get_attr: [cfg01_node, instance_address] }
 
+  empty_node:
+    type: MCP::SingleInstance2Volumes
+    depends_on: [cfg01_node]
+    condition: { get_param: deploy_empty_node }
+    properties:
+      env_name: { get_param: env_name }
+      mcp_version: { get_param: mcp_version }
+      instance_domain: {get_param: instance_domain}
+      instance_name: xtra
+      role: none
+      instance_flavor: {get_param: osd_flavor}
+      availability_zone: { get_param: vm_availability_zone }
+      underlay_userdata: { get_file: ./underlay-userdata.yaml }
+      control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [subnets, control_net_prefix] }, '205' ]
+      tenant_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [subnets, tenant_net_prefix] }, '205' ]
+      external_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [subnets, external_net_prefix] }, '205' ]
+
+      instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+
 outputs:
   foundation_public_ip:
     description: foundation node IP address (floating) from external network
diff --git a/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/salt.yaml b/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/salt.yaml
index 9e648aa..fd3eac2 100644
--- a/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/salt.yaml
+++ b/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/salt.yaml
@@ -15,6 +15,7 @@
 {{ SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES() }}
 
 {{ SHARED.MACRO_IPFLUSH_TENANTS_IFACES() }}
+{{SHARED.DISABLE_EMPTY_NODE()}}
 
 {{ SHARED_TEST_TOOLS.MACRO_INSTALL_RECLASS_TOOLS() }}
 {{ SHARED_WORKAROUNDS.MACRO_CEPH_SET_PGNUM() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/underlay.hot b/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/underlay.hot
index 6d4be47..95fc69e 100644
--- a/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/underlay.hot
+++ b/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/underlay.hot
@@ -39,6 +39,9 @@
   salt_master_control_ip:
     type: string
     default: 10.6.0.15
+  deploy_empty_node:
+    type: boolean
+    default: False
 
   key_pair:
     type: string
@@ -1065,6 +1068,35 @@
 
       instance_config_host: { get_attr: [cfg01_node, instance_address] }
 
+  empty_node:
+    type: MCP::SingleInstance2Volumes
+    depends_on: [cfg01_node]
+    condition: { get_param: deploy_empty_node }
+    properties:
+      env_name: { get_param: env_name }
+      mcp_version: { get_param: mcp_version }
+      instance_domain: {get_param: instance_domain}
+      instance_name: xtra
+      role: none
+      instance_flavor: {get_param: osd_flavor}
+      availability_zone: { get_param: vm_availability_zone }
+      underlay_userdata: { get_file: ./underlay-userdata.yaml }
+      control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [subnets, control_net_prefix] }, '205' ]
+      tenant_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [subnets, tenant_net_prefix] }, '205' ]
+      external_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [subnets, external_net_prefix] }, '205' ]
+
+      instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+
 outputs:
   foundation_public_ip:
     description: foundation node IP address (floating) from external network
diff --git a/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/salt.yaml b/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/salt.yaml
index de5cfac..f68e844 100644
--- a/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/salt.yaml
+++ b/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/salt.yaml
@@ -12,5 +12,6 @@
 {{ SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG() }}
 
 {{ SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES() }}
+{{SHARED.DISABLE_EMPTY_NODE()}}
 
 {{ SHARED_TEST_TOOLS.MACRO_INSTALL_RECLASS_TOOLS() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/underlay.hot b/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/underlay.hot
index e860b6b..d9da3dd 100644
--- a/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/underlay.hot
+++ b/tcp_tests/templates/released-heat-cicd-queens-contrail41-sl/underlay.hot
@@ -39,6 +39,9 @@
   salt_master_control_ip:
     type: string
     default: 10.6.0.15
+  deploy_empty_node:
+    type: boolean
+    default: False
 
   key_pair:
     type: string
@@ -983,6 +986,35 @@
         - [ { get_attr: [subnets, external_net_prefix] }, '220' ]
       instance_config_host: { get_attr: [cfg01_node, instance_address] }
 
+  empty_node:
+    type: MCP::SingleInstance2Volumes
+    depends_on: [cfg01_node]
+    condition: { get_param: deploy_empty_node }
+    properties:
+      env_name: { get_param: env_name }
+      mcp_version: { get_param: mcp_version }
+      instance_domain: {get_param: instance_domain}
+      instance_name: xtra
+      role: none
+      instance_flavor: {get_param: osd_flavor}
+      availability_zone: { get_param: vm_availability_zone }
+      underlay_userdata: { get_file: ./underlay-userdata.yaml }
+      control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [subnets, control_net_prefix] }, '205' ]
+      tenant_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [subnets, tenant_net_prefix] }, '205' ]
+      external_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [subnets, external_net_prefix] }, '205' ]
+
+      instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+
 outputs:
   foundation_public_ip:
     description: foundation node IP address (floating) from external network
diff --git a/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/salt.yaml b/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/salt.yaml
index d4eeaac..2a94e69 100644
--- a/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/salt.yaml
+++ b/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/salt.yaml
@@ -13,5 +13,6 @@
 {{ SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES() }}
 
 {{ SHARED.MACRO_IPFLUSH_TENANTS_IFACES() }}
+{{SHARED.DISABLE_EMPTY_NODE()}}
 
 {{ SHARED_TEST_TOOLS.MACRO_INSTALL_RECLASS_TOOLS() }}
diff --git a/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/underlay.hot b/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/underlay.hot
index 5ecf130..88997d7 100644
--- a/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/underlay.hot
+++ b/tcp_tests/templates/released-heat-cicd-queens-dvr-sl/underlay.hot
@@ -39,6 +39,9 @@
   salt_master_control_ip:
     type: string
     default: 10.6.0.15
+  deploy_empty_node:
+    type: boolean
+    default: False
 
   key_pair:
     type: string
@@ -1065,6 +1068,35 @@
 
       instance_config_host: { get_attr: [cfg01_node, instance_address] }
 
+  empty_node:
+    type: MCP::SingleInstance2Volumes
+    depends_on: [cfg01_node]
+    condition: { get_param: deploy_empty_node }
+    properties:
+      env_name: { get_param: env_name }
+      mcp_version: { get_param: mcp_version }
+      instance_domain: {get_param: instance_domain}
+      instance_name: xtra
+      role: none
+      instance_flavor: {get_param: osd_flavor}
+      availability_zone: { get_param: vm_availability_zone }
+      underlay_userdata: { get_file: ./underlay-userdata.yaml }
+      control_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [subnets, control_net_prefix] }, '205' ]
+      tenant_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [subnets, tenant_net_prefix] }, '205' ]
+      external_net_static_ip:
+        list_join:
+        - '.'
+        - [ { get_attr: [subnets, external_net_prefix] }, '205' ]
+
+      instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+
 outputs:
   foundation_public_ip:
     description: foundation node IP address (floating) from external network
diff --git a/tcp_tests/templates/shared-salt.yaml b/tcp_tests/templates/shared-salt.yaml
index 247e7fd..412c7ca 100644
--- a/tcp_tests/templates/shared-salt.yaml
+++ b/tcp_tests/templates/shared-salt.yaml
@@ -645,7 +645,7 @@
     apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
     [[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
     . /root/venv-reclass-tools/bin/activate;
-    pip install git+https://github.com/dis-xcom/reclass-tools;
+    pip install git+https://gerrit.mcp.mirantis.com/mcp/tcp-qa-reclass-tools;
     reclass-tools del-key parameters.linux.network.interface /srv/salt/reclass/classes/cluster/;
     reclass-tools del-key parameters.linux.network.interface /srv/salt/reclass/classes/system/;
     reclass-tools del-key parameters.linux.network.interface /usr/share/salt-formulas/reclass/;
diff --git a/tcp_tests/templates/shared-test-tools.yaml b/tcp_tests/templates/shared-test-tools.yaml
index 802c222..150c6a9 100644
--- a/tcp_tests/templates/shared-test-tools.yaml
+++ b/tcp_tests/templates/shared-test-tools.yaml
@@ -8,7 +8,7 @@
     apt-get install -y build-essential python-dev virtualenv python-virtualenv;
     virtualenv venv-reclass-tools;
     . venv-reclass-tools/bin/activate;
-    pip install git+git://github.com/dis-xcom/reclass_tools.git
+    pip install git+https://gerrit.mcp.mirantis.com/mcp/tcp-qa-reclass-tools
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 1}
   skip_fail: false
diff --git a/tcp_tests/tests/system/test_backup_restore.py b/tcp_tests/tests/system/test_backup_restore.py
index dddfdcc..c47f8aa 100644
--- a/tcp_tests/tests/system/test_backup_restore.py
+++ b/tcp_tests/tests/system/test_backup_restore.py
@@ -388,16 +388,16 @@
 
         # Execute 'backupninja_backup' pipeline to create a backup
         show_step(1)
-        status = dt.start_job_on_jenkins(
+        job_result, job_description = dt.start_job_on_jenkins(
             job_name=self.BACKUP_JOB_NAME,
             job_parameters=self.BACKUP_JOB_PARAMETERS,
             start_timeout=self.JENKINS_START_TIMEOUT,
             build_timeout=self.JENKINS_BUILD_TIMEOUT
         )
-        assert status == 'SUCCESS', (
+        assert job_result == 'SUCCESS', (
             "'{}' job run status is {} after creating Salt master backup. "
             "Please check the build and executed stages.".format(
-                self.BACKUP_JOB_NAME, status)
+                self.BACKUP_JOB_NAME, job_description)
         )
 
         # Verify that backup is created and all pieces of data are rsynced
@@ -414,16 +414,16 @@
 
         # Restore the backup
         show_step(4)
-        status = dt.start_job_on_jenkins(
+        job_result, job_description = dt.start_job_on_jenkins(
             job_name=self.RESTORE_JOB_NAME,
             job_parameters=self.RESTORE_JOB_PARAMETERS,
             start_timeout=self.JENKINS_START_TIMEOUT,
             build_timeout=self.JENKINS_BUILD_TIMEOUT
         )
-        assert status == 'SUCCESS', (
+        assert job_result == 'SUCCESS', (
             "'{}' job run status is {} after restoring from Salt master "
             "backup. Please check the build and executed stages.".format(
-                self.RESTORE_JOB_NAME, status)
+                self.RESTORE_JOB_NAME, job_description)
         )
 
         # Verify that all pieces of lost/changed data are restored
@@ -659,16 +659,16 @@
 
         # Execute 'backupninja_backup' pipeline to create a backup
         show_step(1)
-        status = dt.start_job_on_jenkins(
+        job_result, job_description = dt.start_job_on_jenkins(
             job_name=self.BACKUP_JOB_NAME,
             job_parameters=self.BACKUP_JOB_PARAMETERS,
             start_timeout=self.JENKINS_START_TIMEOUT,
             build_timeout=self.JENKINS_BUILD_TIMEOUT
         )
-        assert status == 'SUCCESS', (
+        assert job_result == 'SUCCESS', (
             "'{}' job run status is {} after creating MAAS data backup. "
             "Please check the build and executed stages.".format(
-                self.BACKUP_JOB_NAME, status)
+                self.BACKUP_JOB_NAME, job_description)
         )
 
         # Verify that backup is created and all pieces of data are rsynced
@@ -685,16 +685,16 @@
 
         # Restore the backup
         show_step(4)
-        status = dt.start_job_on_jenkins(
+        job_result, job_description = dt.start_job_on_jenkins(
             job_name=self.RESTORE_JOB_NAME,
             job_parameters=self.RESTORE_JOB_PARAMETERS,
             start_timeout=self.JENKINS_START_TIMEOUT,
             build_timeout=self.JENKINS_BUILD_TIMEOUT
         )
-        assert status == 'SUCCESS', (
+        assert job_result == 'SUCCESS', (
             "'{}' job run status is {} after restoring from MAAS "
             "backup. Please check the build and executed stages.".format(
-                self.RESTORE_JOB_NAME, status)
+                self.RESTORE_JOB_NAME, job_description)
         )
 
         # Verify that all pieces of lost/changed data are restored
diff --git a/tcp_tests/tests/system/test_backup_restore_cassandra.py b/tcp_tests/tests/system/test_backup_restore_cassandra.py
index bbbc6f3..5dd695c 100644
--- a/tcp_tests/tests/system/test_backup_restore_cassandra.py
+++ b/tcp_tests/tests/system/test_backup_restore_cassandra.py
@@ -164,12 +164,12 @@
         salt.run_state("I@jenkins:client", "jenkins.client")
         show_step(4)
         job_name = "deploy-cassandra-db-restore"
-        run_cassandra_restore = dt.start_job_on_jenkins(
+        job_result, job_description = dt.start_job_on_jenkins(
             start_timeout=jenkins_start_timeout,
             build_timeout=jenkins_build_timeout,
             job_name=job_name)
 
-        assert run_cassandra_restore == "SUCCESS"
+        assert job_result == "SUCCESS", job_description
         network_presented = self.is_network_restored(
             underlay_actions,
             fixture_network_name,
diff --git a/tcp_tests/tests/system/test_backup_restore_galera.py b/tcp_tests/tests/system/test_backup_restore_galera.py
index 4480f88..49f6234 100644
--- a/tcp_tests/tests/system/test_backup_restore_galera.py
+++ b/tcp_tests/tests/system/test_backup_restore_galera.py
@@ -98,11 +98,11 @@
         job_parameters = {
             'ASK_CONFIRMATION': False
         }
-        backup_galera_pipeline = dt.start_job_on_jenkins(
+        job_result, job_description = dt.start_job_on_jenkins(
             job_name=job_name,
             job_parameters=job_parameters)
 
-        assert backup_galera_pipeline == 'SUCCESS'
+        assert job_result == 'SUCCESS', job_description
 
         # ######################## Run CPV ###########################
         show_step(3)
@@ -122,11 +122,11 @@
                        'test_ceph_status', 'test_prometheus_alert_count',
                        'test_uncommited_changes')
         }
-        run_cvp_sanity = dt.start_job_on_jenkins(
+        job_result, job_description = dt.start_job_on_jenkins(
             job_name=job_name,
             job_parameters=job_cvp_sanity_parameters)
 
-        assert run_cvp_sanity == 'SUCCESS'
+        assert job_result == 'SUCCESS', job_description
 
         # ######################## Run Tempest ###########################
         show_step(4)
@@ -134,11 +134,11 @@
         job_parameters = {
              'TEMPEST_ENDPOINT_TYPE': 'internalURL'
         }
-        run_cvp_tempest = dt.start_job_on_jenkins(
+        job_result, job_description = dt.start_job_on_jenkins(
             job_name=job_name,
             job_parameters=job_parameters)
 
-        assert run_cvp_tempest == 'SUCCESS'
+        assert job_result == 'SUCCESS', job_description
         show_step(5)
         self.create_flavor(underlay_actions, fixture_flavor2, cfg_node)
         # ######################## Run Restore ###########################
@@ -148,11 +148,11 @@
              'RESTORE_TYPE': 'ONLY_RESTORE',
              'ASK_CONFIRMATION': False
         }
-        run_galera_verify_restore = dt.start_job_on_jenkins(
+        job_result, job_description = dt.start_job_on_jenkins(
             job_name=job_name,
             job_parameters=job_parameters)
 
-        assert run_galera_verify_restore == 'SUCCESS'
+        assert job_result == 'SUCCESS', job_description
 
         assert self.is_flavor_restored(underlay_actions,
                                        fixture_flavor1,
@@ -164,20 +164,20 @@
         show_step(7)
 
         job_name = 'cvp-sanity'
-        run_cvp_sanity = dt.start_job_on_jenkins(
+        job_result, job_description = dt.start_job_on_jenkins(
             job_name=job_name,
             job_parameters=job_cvp_sanity_parameters)
 
-        assert run_cvp_sanity == 'SUCCESS'
+        assert job_result == 'SUCCESS', job_description
         # ######################## Run Tempest ###########################
         show_step(8)
         job_name = 'cvp-tempest'
         job_parameters = {
              'TEMPEST_ENDPOINT_TYPE': 'internalURL'
         }
-        run_cvp_tempest = dt.start_job_on_jenkins(
+        job_result, job_description = dt.start_job_on_jenkins(
             job_name=job_name,
             job_parameters=job_parameters)
 
-        assert run_cvp_tempest == 'SUCCESS'
+        assert job_result == 'SUCCESS', job_description
         self.delete_flavor(underlay_actions, fixture_flavor1, cfg_node)
diff --git a/tcp_tests/tests/system/test_backup_restore_zookeeper.py b/tcp_tests/tests/system/test_backup_restore_zookeeper.py
index b06f1f8..cd04eb9 100644
--- a/tcp_tests/tests/system/test_backup_restore_zookeeper.py
+++ b/tcp_tests/tests/system/test_backup_restore_zookeeper.py
@@ -175,11 +175,11 @@
 
         show_step(4)
         job_name = 'deploy-zookeeper-restore'
-        run_zookeeper_restore = dt.start_job_on_jenkins(
+        job_result, job_description = dt.start_job_on_jenkins(
             start_timeout=jenkins_start_timeout,
             build_timeout=jenkins_build_timeout,
             job_name=job_name)
-        assert run_zookeeper_restore == 'SUCCESS'
+        assert job_result == 'SUCCESS', job_description
         network_presented = self.is_network_restored(
             underlay_actions,
             fixture_network_name,
diff --git a/tcp_tests/tests/system/test_ceph_luminous_upgrade.py b/tcp_tests/tests/system/test_ceph_luminous_upgrade.py
new file mode 100644
index 0000000..8e06888
--- /dev/null
+++ b/tcp_tests/tests/system/test_ceph_luminous_upgrade.py
@@ -0,0 +1,66 @@
+import pytest
+
+from tcp_tests import logger
+from tcp_tests import settings
+
+
+LOG = logger.logger
+
+
+class TestCephLuminousUpgrade(object):
+
+    @pytest.mark.grab_versions
+    @pytest.mark.parametrize("_", [settings.ENV_NAME])
+    @pytest.mark.run_mcp_update
+    def test_ceph_luminous_upgrade(self, reclass_actions, salt_actions,
+                                   drivetrain_actions, show_step, _):
+        """ Upgrade Ceph luminous to nautilus
+
+        Scenario:
+            1. Chenge parameters in reclass
+            2. Run Pipeline Ceph - upgrade
+        https://docs.mirantis.com/mcp/master/mcp-operations-guide/
+            update-upgrade/major-upgrade/ceph-upgrade/upgrade-ceph.html
+        """
+        salt = salt_actions
+        reclass = reclass_actions
+        dt = drivetrain_actions
+        # #################### Prepare for upgrade #################
+        show_step(1)
+        reclass.add_key("parameters._param.ceph_version",
+                        "nautilus",
+                        "cluster/*/ceph/init.yml")
+        reclass.add_key(
+            "parameters._param.linux_system_repo_update_mcp_ceph_url",
+            "${_param:linux_system_repo_update_url}/ceph-nautilus/",
+            "cluster/*/infra/init.yml")
+        reclass.add_class("system.ceph.mgr.cluster",
+                          "cluster/*/ceph/mon.yml")
+        salt.cmd_run("cfg01*",
+                     "cd /srv/salt/reclass; git add -u && \
+                         git commit --allow-empty \
+                         -m 'updated repositories for Ceph upgrade'")
+        salt.run_state("*", "saltutil.refresh_pillar")
+
+        # #################### Run Ceph-upgrade #################
+        show_step(2)
+        job_parameters = {
+            "ADMIN_HOST": 'cmn01*',
+            "BACKUP_DIR": '/root',
+            "BACKUP_ENABLED": True,
+            "CLUSTER_FLAGS": 'noout',
+            "ORIGIN_RELEASE": 'luminous',
+            "STAGE_FINALIZE": True,
+            "STAGE_UPGRADE_CLIENT": True,
+            "STAGE_UPGRADE_MGR": True,
+            "STAGE_UPGRADE_MON": True,
+            "STAGE_UPGRADE_OSD": True,
+            "STAGE_UPGRADE_RGW": True,
+            "TARGET_RELEASE": 'nautilus',
+            "WAIT_FOR_HEALTHY": True,
+            "ASK_CONFIRMATION": False
+        }
+        job_result, job_description = dt.start_job_on_jenkins(
+            job_name='ceph-upgrade',
+            job_parameters=job_parameters)
+        assert job_result == 'SUCCESS', job_description
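
The test asserts only that the ceph-upgrade pipeline reports SUCCESS. A
follow-up check along these lines could confirm the daemons actually run
Nautilus (14.x); this is a sketch reusing the salt fixture already bound
in the test and assuming the standard `ceph versions` command is
available on the cmn01 node:

    versions = salt.cmd_run("cmn01*", "ceph versions")[0]
    for node, output in versions.items():
        assert "ceph version 14." in output, (
            "{} still reports a pre-Nautilus version:\n{}".format(
                node, output))
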
diff --git a/tcp_tests/tests/system/test_ceph_operations.py b/tcp_tests/tests/system/test_ceph_operations.py
index cfff2b5..a22f64c 100644
--- a/tcp_tests/tests/system/test_ceph_operations.py
+++ b/tcp_tests/tests/system/test_ceph_operations.py
@@ -4,6 +4,40 @@
 
 LOG = logger.logger
 
+add_osd_ceph_init_yml = """
+parameters:
+  _param:
+    ceph_osd_node04_hostname: xtra
+    ceph_osd_node04_address: 10.6.0.205
+    ceph_mon_node04_ceph_public_address: #10.166.49.205
+    ceph_osd_system_codename: xenial
+  linux:
+    network:
+      host:
+        xtra:
+          address: ${_param:ceph_osd_node04_address}
+          names:
+          - ${_param:ceph_osd_node04_hostname}
+          - ${_param:ceph_osd_node04_hostname}.${_param:cluster_domain}
+        """
+
+add_osd_config_init_yml = """
+parameters:
+  reclass:
+    storage:
+      node:
+        ceph_osd_node04:
+          name: ${_param:ceph_osd_node04_hostname}
+          domain: ${_param:cluster_domain}
+          classes:
+          - cluster.${_param:cluster_name}.ceph.osd
+          params:
+            salt_master_host: ${_param:reclass_config_master}
+            linux_system_codename: ${_param:ceph_osd_system_codename}
+            single_address: ${_param:ceph_osd_node04_address}
+            ceph_crush_parent: rack02
+"""
+
 
 @pytest.fixture(scope='module')
 def add_xtra_node_to_salt(salt_actions, underlay_actions, config):
@@ -41,54 +75,18 @@
 
     @pytest.fixture
     def describe_node_in_reclass(self,
-                                 reclass_actions, salt_actions):
+                                 reclass_actions,
+                                 salt_actions):
         LOG.info("Executing pytest SETUP "
                  "from describe_node_in_reclass fixture")
         reclass = reclass_actions
         # ---- cluster/*/ceph/init.yml ---------------
-        path = "cluster/*/ceph/init.yml"
-        reclass.add_key("parameters._param.ceph_osd_node04_hostname",
-                        "xtra",
-                        path)
-        reclass.add_key("parameters._param.ceph_osd_node04_address",
-                        "10.6.0.205",
-                        path)
-        reclass.add_key("parameters._param.ceph_osd_system_codename",
-                        "xenial",
-                        path)
-        reclass.add_key("parameters.linux.network.host.xtra.address",
-                        "${_param:ceph_osd_node04_address}",
-                        path)
-        reclass.add_key(
-            key="parameters.linux.network.host.xtra.names",
-            value="['${_param:ceph_osd_node04_hostname}', "
-            "'${_param:ceph_osd_node04_hostname}.${_param:cluster_domain}']",
-            short_path=path)
+        reclass.merge_context(yaml_context=add_osd_ceph_init_yml,
+                              short_path="cluster/*/ceph/init.yml")
 
         # ------- cluster/infra/config/init.yml -----------
-        path = "cluster/*/infra/config/init.yml"
-        parameter = "parameters.reclass.storage.node.ceph_osd_node04"
-        reclass.add_key(parameter + ".name",
-                        "${_param:ceph_osd_node04_hostname}",
-                        path)
-        reclass.add_key(parameter + ".domain",
-                        "${_param:cluster_domain}",
-                        path)
-        reclass.add_key(parameter + ".classes",
-                        "['cluster.${_param:cluster_name}.ceph.osd']",
-                        path)
-        reclass.add_key(parameter + ".params.salt_master_host",
-                        "${_param:reclass_config_master}",
-                        path)
-        reclass.add_key(parameter + ".params.linux_system_codename",
-                        "${_param:ceph_osd_system_codename}",
-                        path)
-        reclass.add_key(parameter + ".params.single_address",
-                        "${_param:ceph_osd_node04_address}",
-                        path)
-        reclass.add_key(parameter + ".params.ceph_crush_parent",
-                        "rack02",
-                        path)
+        reclass.merge_context(yaml_context=add_osd_config_init_yml,
+                              short_path="cluster/*/infra/config/init.yml")
 
     def test_add_node_process(self, describe_node_in_reclass,
                               drivetrain_actions):
@@ -106,11 +104,11 @@
             'HOST': 'xtra*',
             'HOST_TYPE': 'osd'
             }
-        add_node_pipeline = dt.start_job_on_jenkins(
+        job_result, job_description = dt.start_job_on_jenkins(
             job_name=job_name,
             job_parameters=job_parameters,
             verbose=True)
-        assert add_node_pipeline == 'SUCCESS'
+        assert job_result == 'SUCCESS', job_description
 
     def test_added_node(self):
         # root@osd001:~# ceph osd tree in
@@ -142,11 +140,11 @@
             'HOST': 'xtra*',
             'HOST_TYPE': 'osd'
             }
-        remove_node_pipeline = dt.start_job_on_jenkins(
+        job_result, job_description = dt.start_job_on_jenkins(
             job_name=job_name,
             job_parameters=job_parameters,
             verbose=True)
-        assert remove_node_pipeline == 'SUCCESS'
+        assert job_result == 'SUCCESS', job_description
 
 
 class TestCephMon(object):
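
The describe_node_in_reclass rewrite replaces a dozen add_key calls with
one merge_context call per file, fed by the YAML snippets defined at the
top of the module. A rough sketch of what such a helper could do,
assuming it deep-merges the snippet into the target class file (the real
ReclassActions implementation may differ, e.g. by shelling out to
reclass-tools):

    import yaml

    def merge_context(self, yaml_context, short_path):
        # Path layout and the _read_file/_write_file helpers are
        # assumptions for illustration only
        target = "/srv/salt/reclass/classes/cluster/" + short_path
        # NB: short_path carries a '*' wildcard here; a real helper
        # would expand it (e.g. with glob) and merge into each match

        def deep_merge(dst, src):
            for key, value in src.items():
                if isinstance(value, dict) and isinstance(dst.get(key),
                                                          dict):
                    deep_merge(dst[key], value)
                else:
                    dst[key] = value
            return dst

        current = yaml.safe_load(self._read_file(target)) or {}
        merged = deep_merge(current, yaml.safe_load(yaml_context))
        self._write_file(target,
                         yaml.safe_dump(merged, default_flow_style=False))
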
diff --git a/tcp_tests/tests/system/test_cvp_pipelines.py b/tcp_tests/tests/system/test_cvp_pipelines.py
index 1523c48..41c95fe 100644
--- a/tcp_tests/tests/system/test_cvp_pipelines.py
+++ b/tcp_tests/tests/system/test_cvp_pipelines.py
@@ -152,7 +152,7 @@
             'TEMPEST_ENDPOINT_TYPE': 'internalURL',
             'TEMPEST_TEST_PATTERN': tempest_pattern,
         }
-        cvp_tempest_result = dt.start_job_on_jenkins(
+        job_result, job_description = dt.start_job_on_jenkins(
             job_name,
             jenkins_tgt='I@docker:client:stack:jenkins and I@salt:master',
             start_timeout=jenkins_start_timeout,
@@ -160,7 +160,7 @@
             verbose=True,
             job_parameters=job_parameters,
             job_output_prefix='[ {job_name}/{build_number}:platform {time} ] ')
-        LOG.info('Job {0} result: {1}'.format(job_name, cvp_tempest_result))
+        LOG.info('Job {0} result: {1}'.format(job_name, job_result))
 
         show_step(4)
         tempest_actions.fetch_arficats(
@@ -421,3 +421,73 @@
         except jenkins.NotFoundException:
             raise jenkins.NotFoundException("{0}\n{1}".format(
                 description, '\n'.join(stages)))
+
+    @pytest.mark.grab_versions
+    @pytest.mark.parametrize("_", [settings.ENV_NAME])
+    @pytest.mark.run_cvp_spt
+    def test_run_cvp_spt(self, salt_actions, show_step, _):
+        """Runner for Pipeline CVP - Functional tests
+
+        Scenario:
+            1. Get CICD Jenkins access credentials from salt
+            2. Run job cvp-spt
+            3. Get passed stages from cvp-spt
+        """
+        salt = salt_actions
+        show_step(1)
+
+        tgt = 'I@docker:client:stack:jenkins and cid01*'
+        jenkins_host = salt.get_single_pillar(
+            tgt=tgt, pillar="jenkins:client:master:host")
+        jenkins_port = salt.get_single_pillar(
+            tgt=tgt, pillar="jenkins:client:master:port")
+        jenkins_protocol = salt.get_single_pillar(
+            tgt=tgt, pillar="jenkins:client:master:proto")
+        jenkins_url = '{0}://{1}:{2}'.format(jenkins_protocol,
+                                             jenkins_host,
+                                             jenkins_port)
+        jenkins_user = salt.get_single_pillar(
+            tgt=tgt, pillar="jenkins:client:master:username")
+        jenkins_pass = salt.get_single_pillar(
+            tgt=tgt, pillar="jenkins:client:master:password")
+        jenkins_start_timeout = 60
+        jenkins_build_timeout = 1800
+
+        job_name = 'cvp-spt'
+
+        job_parameters = {
+            'EXTRA_PARAMS': (
+                """
+                envs:
+                  - tests_set=''
+                  - image_name=TestCirros-0.4.0
+                  - networks=10.6.0.0/24
+                  """),
+        }
+
+        show_step(2)
+        cvp_spt_result = run_jenkins_job.run_job(
+            host=jenkins_url,
+            username=jenkins_user,
+            password=jenkins_pass,
+            start_timeout=jenkins_start_timeout,
+            build_timeout=jenkins_build_timeout,
+            verbose=True,
+            job_name=job_name,
+            job_parameters=job_parameters,
+            job_output_prefix='[ cvp-spt/{build_number}:platform {time} ] ')
+
+        show_step(3)
+        (description, stages) = get_jenkins_job_stages.get_deployment_result(
+            host=jenkins_url,
+            username=jenkins_user,
+            password=jenkins_pass,
+            job_name=job_name,
+            build_number='lastBuild')
+
+        LOG.info(description)
+        LOG.info('\n'.join(stages))
+        LOG.info('Job {0} result: {1}'.format(job_name,
+                                              cvp_spt_result))
+        assert cvp_spt_result == 'SUCCESS', "{0}\n{1}".format(
+            description, '\n'.join(stages))
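
Unlike the other tests in this file, test_run_cvp_spt drives Jenkins
through run_jenkins_job directly, rebuilding the URL from pillar data.
For comparison, the same run expressed through the drivetrain_actions
fixture used everywhere else in this change would collapse to the usual
pair-unpacking pattern (sketch; assumes dt resolves the Jenkins
credentials from pillar internally, as the other tests here do):

    job_result, job_description = dt.start_job_on_jenkins(
        job_name='cvp-spt',
        job_parameters=job_parameters,
        start_timeout=60,
        build_timeout=1800,
        verbose=True,
        job_output_prefix='[ cvp-spt/{build_number}:platform {time} ] ')
    assert job_result == 'SUCCESS', job_description
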
diff --git a/tcp_tests/tests/system/test_mcp_update.py b/tcp_tests/tests/system/test_mcp_update.py
index 23cd61f..12d2aa8 100644
--- a/tcp_tests/tests/system/test_mcp_update.py
+++ b/tcp_tests/tests/system/test_mcp_update.py
@@ -98,41 +98,49 @@
 @pytest.fixture
 def wa_for_galera_clustercheck_password_prod35705(reclass_actions,
                                                   salt_actions):
-    reclass_actions.add_key(
-        "parameters._param.galera_clustercheck_password",
-        "a"*32,
-        "cluster/*/infra/secrets.yml")
-    salt_actions.run_state(
-        "I@galera:master or I@galera:slave", "saltutil.refresh_pillar")
-    salt_actions.enforce_state(
-        "I@galera:master or I@galera:slave", "galera")
-    salt_actions.enforce_state(
-        "I@galera:master or I@galera:slave", "haproxy")
-    reclass_actions.commit("[from TCP-QA] Add galera_clustercheck_password")
+    tgt = "I@galera:master or I@galera:slave"
+    if not salt_actions.get_pillar(tgt,
+                                   "_param:galera_clustercheck_password")[0]:
+        reclass_actions.add_key(
+            "parameters._param.galera_clustercheck_password",
+            "a"*32,
+            "cluster/*/infra/secrets.yml")
+        salt_actions.run_state(tgt, "saltutil.refresh_pillar")
+        salt_actions.enforce_state(tgt, "galera")
+        salt_actions.enforce_state(tgt, "haproxy")
+        reclass_actions.commit(
+          "[from TCP-QA] Add galera_clustercheck_password")
+    else:
+        LOG.info("Skipping WA for Galera Clustercheck Password")
 
 
 @pytest.fixture
 def wa_for_alerta_password_prod35958(reclass_actions,
                                      salt_actions):
-    reclass_actions.add_key(
-        "parameters._param.alerta_admin_api_key_generated",
-        "a"*32,
-        "cluster/*/infra/secrets.yml")
-    reclass_actions.add_key(
-        "parameters._param.alerta_admin_key",
-        "${_param:alerta_admin_api_key_generated}",
-        "cluster/*/stacklight/init.yml")
-    reclass_actions.commit("[from TCP-QA] Add alerta_admin_key")
-    salt_actions.run_state(
-        "I@prometheus:alerta or I@prometheus:alertmanager",
-        "saltutil.refresh_pillar")
-    salt_actions.enforce_state(
-        "I@prometheus:alerta", "prometheus.alerta")
-    salt_actions.enforce_state(
-        "I@prometheus:alertmanager", "prometheus.alertmanager")
-    salt_actions.enforce_state(
-        "I@prometheus:alerta or I@prometheus:alertmanager",
-        "docker.client")
+
+    if not salt_actions.get_pillar("I@prometheus:alerta",
+                                   "_param:alerta_admin_api_key_generated")[0]:
+        reclass_actions.add_key(
+            "parameters._param.alerta_admin_api_key_generated",
+            "a"*32,
+            "cluster/*/infra/secrets.yml")
+        reclass_actions.add_key(
+            "parameters._param.alerta_admin_key",
+            "${_param:alerta_admin_api_key_generated}",
+            "cluster/*/stacklight/init.yml")
+        reclass_actions.commit("[from TCP-QA] Add alerta_admin_key")
+        salt_actions.run_state(
+            "I@prometheus:alerta or I@prometheus:alertmanager",
+            "saltutil.refresh_pillar")
+        salt_actions.enforce_state(
+            "I@prometheus:alerta", "prometheus.alerta")
+        salt_actions.enforce_state(
+            "I@prometheus:alertmanager", "prometheus.alertmanager")
+        salt_actions.enforce_state(
+            "I@prometheus:alerta or I@prometheus:alertmanager",
+            "docker.client")
+    else:
+        LOG.info("Skipping WA for Alerta API key")
 
 
 @pytest.fixture(scope='class')
@@ -189,12 +197,12 @@
         job_parameters = {
             'BRANCHES': 'release/proposed/2019.2.0'
         }
-        update_pipelines = dt.start_job_on_jenkins(
+        job_result, job_description = dt.start_job_on_jenkins(
             job_name=job_name,
             job_parameters=job_parameters,
             verbose=True)
 
-        assert update_pipelines == 'SUCCESS'
+        assert job_result == 'SUCCESS', job_description
 
         # ################### Downstream pipeline-library ####################
         show_step(3)
@@ -202,12 +210,12 @@
         job_parameters = {
             'BRANCHES': 'release/proposed/2019.2.0'
         }
-        update_pipeline_library = dt.start_job_on_jenkins(
+        job_result, job_description = dt.start_job_on_jenkins(
             job_name=job_name,
             job_parameters=job_parameters,
             verbose=True)
 
-        assert update_pipeline_library == 'SUCCESS'
+        assert job_result == 'SUCCESS', job_description
 
         # ################### Start 'Deploy - upgrade MCP Drivetrain' job #####
         show_step(4)
@@ -218,13 +226,13 @@
             'MK_PIPELINES_REFSPEC': 'release/proposed/2019.2.0',
             'TARGET_MCP_VERSION': '2019.2.0'
         }
-        update_drivetrain = dt.start_job_on_jenkins(
+        job_result, job_description = dt.start_job_on_jenkins(
             job_name=job_name,
             job_parameters=job_parameters,
             verbose=True,
             build_timeout=3 * 60 * 60)
 
-        assert update_drivetrain == 'SUCCESS'
+        assert job_result == 'SUCCESS', job_description
 
     @pytest.mark.grab_versions
     @pytest.mark.parametrize("_", [settings.ENV_NAME])
@@ -259,11 +267,11 @@
         show_step(3)
         job_name = 'update-glusterfs'
 
-        update_glusterfs = dt.start_job_on_jenkins(
+        job_result, job_description = dt.start_job_on_jenkins(
             job_name=job_name,
             build_timeout=40 * 60)
 
-        assert update_glusterfs == 'SUCCESS'
+        assert job_result == 'SUCCESS', job_description
 
         # ################ Check GlusterFS version for servers ##############
         show_step(4)
@@ -331,12 +339,12 @@
             'INTERACTIVE': 'false'
         }
 
-        update_galera = dt.start_job_on_jenkins(
+        job_result, job_description = dt.start_job_on_jenkins(
             job_name=job_name,
             job_parameters=job_parameters,
             build_timeout=40 * 60)
 
-        assert update_galera == 'SUCCESS'
+        assert job_result == 'SUCCESS', job_description
 
     @pytest.fixture
     def disable_automatic_failover_neutron_for_test(self, salt_actions):
@@ -465,12 +473,12 @@
             'INTERACTIVE': 'false'
         }
 
-        update_rabbit = dt.start_job_on_jenkins(
+        job_result, job_description = dt.start_job_on_jenkins(
             job_name='deploy-upgrade-rabbitmq',
             job_parameters=job_parameters,
             build_timeout=40 * 60
         )
-        assert update_rabbit == 'SUCCESS'
+        assert job_result == 'SUCCESS', job_description
 
     @pytest.mark.grab_versions
     @pytest.mark.parametrize("_", [settings.ENV_NAME])
@@ -496,11 +504,11 @@
         show_step(2)
         job_parameters = {}
 
-        update_ceph = dt.start_job_on_jenkins(
+        job_result, job_description = dt.start_job_on_jenkins(
             job_name='ceph-update',
             job_parameters=job_parameters)
 
-        assert update_ceph == 'SUCCESS'
+        assert job_result == 'SUCCESS', job_description
 
         # ########## Verify Ceph version #####################################
         show_step(3)
@@ -526,11 +534,11 @@
             "STAGE_UPGRADE_ES_KIBANA": True,
             "STAGE_UPGRADE_SYSTEM_PART": True
         }
-        upgrade_control_pipeline = drivetrain.start_job_on_jenkins(
+        job_result, job_description = drivetrain.start_job_on_jenkins(
             job_name="stacklight-upgrade",
             job_parameters=job_parameters)
 
-        assert upgrade_control_pipeline == 'SUCCESS'
+        assert job_result == 'SUCCESS', job_description
 
 
 @pytest.mark.usefixtures("switch_to_proposed_pipelines",
@@ -579,11 +587,11 @@
             "UPGRADE_SALTSTACK": False,
             "OS_UPGRADE": True,
             "INTERACTIVE": False}
-        upgrade_control_pipeline = drivetrain_actions.start_job_on_jenkins(
+        job_result, job_description = drivetrain_actions.start_job_on_jenkins(
             job_name="deploy-upgrade-control",
             job_parameters=job_parameters)
 
-        assert upgrade_control_pipeline == 'SUCCESS'
+        assert job_result == 'SUCCESS', job_description
 
     @pytest.mark.grab_versions
     @pytest.mark.run_mcp_update
@@ -596,11 +604,11 @@
             "OS_DIST_UPGRADE": True,
             "OS_UPGRADE": True,
             "INTERACTIVE": False}
-        upgrade_data_pipeline = drivetrain_actions.start_job_on_jenkins(
+        job_result, job_description = drivetrain_actions.start_job_on_jenkins(
             job_name="deploy-upgrade-ovs-gateway",
             job_parameters=job_parameters)
 
-        assert upgrade_data_pipeline == 'SUCCESS'
+        assert job_result == 'SUCCESS', job_description
 
     @pytest.mark.grab_versions
     @pytest.mark.run_mcp_update
@@ -611,8 +619,8 @@
             "OS_DIST_UPGRADE": True,
             "OS_UPGRADE": True,
             "INTERACTIVE": False}
-        upgrade_compute_pipeline = drivetrain_actions.start_job_on_jenkins(
+        job_result, job_description = drivetrain_actions.start_job_on_jenkins(
             job_name="deploy-upgrade-compute",
             job_parameters=job_parameters)
 
-        assert upgrade_compute_pipeline == 'SUCCESS'
+        assert job_result == 'SUCCESS', job_description
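
Both workaround fixtures above now follow the same guard: query the
pillar first and apply the reclass change only when the key is still
unset, which keeps the fixtures idempotent across repeated runs against
the same environment. The shared shape, distilled into a sketch
(apply_fn stands in for the fixture-specific add_key/enforce_state
sequence):

    def apply_workaround_once(salt_actions, tgt, pillar_key, apply_fn):
        # get_pillar returns a list whose first element is empty or
        # falsy when the key is not yet defined for the targeted minions
        if not salt_actions.get_pillar(tgt, pillar_key)[0]:
            apply_fn()
            salt_actions.run_state(tgt, "saltutil.refresh_pillar")
        else:
            LOG.info("Skipping WA, %s is already set", pillar_key)
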
diff --git a/tcp_tests/tests/system/test_security_updates.py b/tcp_tests/tests/system/test_security_updates.py
index 0e83990..b8242e9 100644
--- a/tcp_tests/tests/system/test_security_updates.py
+++ b/tcp_tests/tests/system/test_security_updates.py
@@ -72,12 +72,13 @@
         :param dt: DrivetrainManager, tcp-qa Drivetrain manager instance
-        :return: str, build execution status of cvp-sanity pipeline
+        :raises AssertionError: if the cvp-sanity build did not succeed
         """
-        return dt.start_job_on_jenkins(
+        job_result, job_description = dt.start_job_on_jenkins(
             job_name=self.SANITY_JOB_NAME,
             job_parameters=self.SANITY_JOB_PARAMETERS,
             start_timeout=self.JENKINS_START_TIMEOUT,
             build_timeout=60 * 15
         )
+        assert job_result == "SUCCESS", job_description
 
     def reboot_hw_node(self, ssh, salt, node):
         """Reboot the given node and wait for it to start back
@@ -128,16 +129,16 @@
         # Execute 'deploy-update-package' pipeline to upgrade packages on nodes
         show_step(2)
         self.UPDATE_JOB_PARAMETERS["TARGET_SERVERS"] = role
-        status = dt.start_job_on_jenkins(
+        job_result, job_description = dt.start_job_on_jenkins(
             job_name=self.UPDATE_JOB_NAME,
             job_parameters=self.UPDATE_JOB_PARAMETERS,
             start_timeout=self.JENKINS_START_TIMEOUT,
             build_timeout=60 * 15
         )
-        assert status == 'SUCCESS', (
+        assert job_result == 'SUCCESS', (
             "'{}' job run status is {} after upgrading packages on {} nodes. "
-            "Please check the build and executed stages.".format(
-                self.UPDATE_JOB_NAME, status, role)
+            "Please check the build and executed stages {}".format(
+                self.UPDATE_JOB_NAME, job_result, role, job_description)
         )
 
         # Collect available package upgrades for nodes again
diff --git a/tcp_tests/tests/system/test_upgrade_contrail.py b/tcp_tests/tests/system/test_upgrade_contrail.py
index dee3148..e853d04 100644
--- a/tcp_tests/tests/system/test_upgrade_contrail.py
+++ b/tcp_tests/tests/system/test_upgrade_contrail.py
@@ -44,7 +44,7 @@
         job_parameters = {
             'ASK_CONFIRMATION': False
         }
-        update_control_vms = dt.start_job_on_jenkins(
+        job_result, job_description = dt.start_job_on_jenkins(
             job_name=job_name,
             job_parameters=job_parameters)
-        assert update_control_vms == 'SUCCESS'
+        assert job_result == 'SUCCESS', job_description
diff --git a/tcp_tests/tests/system/test_upgrade_pike_queens.py b/tcp_tests/tests/system/test_upgrade_pike_queens.py
index 3ea2d23..87616eb 100644
--- a/tcp_tests/tests/system/test_upgrade_pike_queens.py
+++ b/tcp_tests/tests/system/test_upgrade_pike_queens.py
@@ -200,33 +200,33 @@
         }
         # ####### Run job for ctl* ###
         job_parameters["TARGET_SERVERS"] = "ctl*"
-        update_control_vms = dt.start_job_on_jenkins(
+        job_result, job_description = dt.start_job_on_jenkins(
             job_name=job_name,
             job_parameters=job_parameters)
-        assert update_control_vms == 'SUCCESS'
+        assert job_result == 'SUCCESS', job_description
 
         if salt_actions.cmd_run("mdb*", "test.ping")[0].keys():
             # ####### Run job for mdb* ###
             job_parameters["TARGET_SERVERS"] = "mdb*"
-            update_control_vms = dt.start_job_on_jenkins(
+            job_result, job_description = dt.start_job_on_jenkins(
                 job_name=job_name,
                 job_parameters=job_parameters)
-            assert update_control_vms == 'SUCCESS'
+            assert job_result == 'SUCCESS', job_description
 
         if salt_actions.cmd_run("kmn*", "test.ping")[0].keys():
             # ####### Run job for kmn* ###
             job_parameters["TARGET_SERVERS"] = "kmn*"
-            update_control_vms = dt.start_job_on_jenkins(
+            job_result, job_description = dt.start_job_on_jenkins(
                 job_name=job_name,
                 job_parameters=job_parameters)
-            assert update_control_vms == 'SUCCESS'
+            assert job_result == 'SUCCESS', job_description
 
         # ####### Run job for prx* ###
         job_parameters["TARGET_SERVERS"] = "prx*"
-        update_control_vms = dt.start_job_on_jenkins(
+        job_result, job_description = dt.start_job_on_jenkins(
             job_name=job_name,
             job_parameters=job_parameters)
-        assert update_control_vms == 'SUCCESS'
+        assert job_result == 'SUCCESS', job_description
 
         # ########## Upgrade gateway nodes  ###########
         show_step(3)
@@ -239,10 +239,10 @@
                 'OS_UPGRADE': True,
                 'TARGET_SERVERS': "gtw*"
             }
-            update_gateway = dt.start_job_on_jenkins(
+            job_result, job_description = dt.start_job_on_jenkins(
                 job_name=job_name,
                 job_parameters=job_parameters)
-            assert update_gateway == 'SUCCESS'
+            assert job_result == 'SUCCESS', job_description
         else:
             LOG.info("This deployment doesn't have gtw* nodes, \
             so skip this step")
@@ -256,10 +256,10 @@
             'OS_UPGRADE': True,
             'TARGET_SERVERS': "cmp*"
         }
-        update_computes = dt.start_job_on_jenkins(
+        job_result, job_description = dt.start_job_on_jenkins(
             job_name=job_name,
             job_parameters=job_parameters)
-        assert update_computes == 'SUCCESS'
+        assert job_result == 'SUCCESS', job_description
 
         # ############ Perform the post-upgrade activities ##########
         show_step(5)
@@ -296,17 +296,17 @@
                        'test_ceph_status', 'test_prometheus_alert_count',
                        'test_uncommited_changes')
         }
-        run_cvp_sanity = dt.start_job_on_jenkins(
+        job_result, job_description = dt.start_job_on_jenkins(
             job_name=job_name,
             job_parameters=job_parameters)
-        assert run_cvp_sanity == 'SUCCESS'
+        assert job_result == 'SUCCESS', job_description
         # ######################## Run Tempest #######################
         show_step(7)
         job_name = 'cvp-tempest'
         job_parameters = {
              'TEMPEST_ENDPOINT_TYPE': 'internalURL'
         }
-        run_cvp_tempest = dt.start_job_on_jenkins(
+        job_result, job_description = dt.start_job_on_jenkins(
             job_name=job_name,
             job_parameters=job_parameters)
-        assert run_cvp_tempest == 'SUCCESS'
+        assert job_result == 'SUCCESS', job_description
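
The mdb*/kmn* branches above rely on the shape of cmd_run's return
value: a list whose first element maps responding minion IDs to their
output, so an empty dict means no node matches the target. Factored out
as a sketch:

    def has_nodes(salt_actions, tgt):
        # test.ping answers only from minions matching tgt; an empty
        # dict therefore means the deployment has no such nodes
        return bool(salt_actions.cmd_run(tgt, "test.ping")[0])

    if has_nodes(salt_actions, "mdb*"):
        job_parameters["TARGET_SERVERS"] = "mdb*"
        job_result, job_description = dt.start_job_on_jenkins(
            job_name=job_name, job_parameters=job_parameters)
        assert job_result == 'SUCCESS', job_description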