Move the Jenkins job start logic to the DrivetrainManager

Change-Id: Iaf507c2559e9d73d51725c8df3dfad4be35c4cb2
diff --git a/tcp_tests/managers/drivetrain_manager.py b/tcp_tests/managers/drivetrain_manager.py
index 6e2fbda..8ef758d 100644
--- a/tcp_tests/managers/drivetrain_manager.py
+++ b/tcp_tests/managers/drivetrain_manager.py
@@ -13,6 +13,11 @@
 #    under the License.
 
 from tcp_tests.managers.execute_commands import ExecuteCommandsMixin
+from tcp_tests.utils import run_jenkins_job
+from tcp_tests.utils import get_jenkins_job_stages
+from tcp_tests import logger
+
+LOG = logger.logger
 
 
 class DrivetrainManager(ExecuteCommandsMixin):
@@ -32,3 +37,70 @@
         self.execute_commands(commands,
                               label='Install Drivetrain Tools')
         self.__config.drivetrain.drivetrain_installed = True
+
+    def start_job_on_cid_jenkins(self, job_name,
+                                 **kwargs):
+        """
+        Starts a job with the given parameters on the cluster (cid) Jenkins
+
+        The method accepts the following optional keyword arguments:
+            job_parameters=None,
+            job_output_prefix='',
+            start_timeout=1800,
+            build_timeout=3600 * 4,
+            verbose=False
+
+        :param job_name: string, name of the Jenkins job to start
+        :return: string, job result: "SUCCESS" | "FAILED" | "UNSTABLE"
+        """
+        jenkins_url, jenkins_user, jenkins_pass = self.get_jenkins_creds(
+            tgt='I@docker:client:stack:jenkins and cid01*')
+
+        job_result = run_jenkins_job.run_job(
+            host=jenkins_url,
+            username=jenkins_user,
+            password=jenkins_pass,
+            job_name=job_name,
+            **kwargs)
+
+        (description, stages) = get_jenkins_job_stages.get_deployment_result(
+            host=jenkins_url,
+            username=jenkins_user,
+            password=jenkins_pass,
+            job_name=job_name,
+            build_number='lastBuild')
+
+        LOG.info(description)
+        LOG.info('\n'.join(stages))
+
+        if job_result != 'SUCCESS':
+            LOG.warning("{0}\n{1}".format(description, '\n'.join(stages)))
+        return job_result
+
+    def start_job_on_cfg_jenkins(self):
+        pass
+
+    def get_jenkins_creds(self, tgt):
+        """
+        Requests the Jenkins login parameters from pillars of the given node
+
+        :return: tuple (jenkins_url, jenkins_user, jenkins_pass)
+        """
+        jenkins_host = self._salt.get_single_pillar(
+            tgt=tgt, pillar="jenkins:client:master:host")
+        if jenkins_host is None:
+            raise Exception(
+                "Can't find 'jenkins:client:master' pillar on {tgt} node."
+                .format(tgt=tgt))
+        jenkins_port = self._salt.get_single_pillar(
+            tgt=tgt, pillar="jenkins:client:master:port")
+        jenkins_protocol = self._salt.get_single_pillar(
+            tgt=tgt, pillar="jenkins:client:master:proto")
+        jenkins_url = '{0}://{1}:{2}'.format(jenkins_protocol,
+                                             jenkins_host,
+                                             jenkins_port)
+        jenkins_user = self._salt.get_single_pillar(
+            tgt=tgt, pillar="jenkins:client:master:username")
+        jenkins_pass = self._salt.get_single_pillar(
+            tgt=tgt, pillar="jenkins:client:master:password")
+        return jenkins_url, jenkins_user, jenkins_pass
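A minimal usage sketch of the new manager API as seen from a test, mirroring the updated tests below. The drivetrain_actions fixture is assumed to yield a DrivetrainManager instance, and the job name and parameters are illustrative only:

    # Hypothetical test method; optional kwargs are passed straight
    # through to run_jenkins_job.run_job().
    def test_example(self, drivetrain_actions, show_step, _):
        dt = drivetrain_actions

        result = dt.start_job_on_cid_jenkins(
            job_name='cvp-sanity',             # illustrative job name
            job_parameters={'TESTS_SET': ''},  # illustrative parameters
            start_timeout=60,
            build_timeout=1800)

        assert result == 'SUCCESS'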
diff --git a/tcp_tests/managers/saltmanager.py b/tcp_tests/managers/saltmanager.py
index 84e4827..2ca0126 100644
--- a/tcp_tests/managers/saltmanager.py
+++ b/tcp_tests/managers/saltmanager.py
@@ -374,29 +374,6 @@
                         password=jenkins_pass)
             )
 
-    def get_cluster_jenkins_creds(self):
-        """
-        Requests cluster Jenkins's login parameters from pillars
-        :return: dict {'url': jenkins_url,
-                       'user': jenkins_user,
-                       'pass': jenkins_pass}
-        """
-        tgt = 'I@docker:client:stack:jenkins and cid01*'
-        jenkins_host = self.get_single_pillar(
-            tgt=tgt, pillar="jenkins:client:master:host")
-        jenkins_port = self.get_single_pillar(
-            tgt=tgt, pillar="jenkins:client:master:port")
-        jenkins_protocol = self.get_single_pillar(
-            tgt=tgt, pillar="jenkins:client:master:proto")
-        jenkins_url = '{0}://{1}:{2}'.format(jenkins_protocol,
-                                             jenkins_host,
-                                             jenkins_port)
-        jenkins_user = self.get_single_pillar(
-            tgt=tgt, pillar="jenkins:client:master:username")
-        jenkins_pass = self.get_single_pillar(
-            tgt=tgt, pillar="jenkins:client:master:password")
-        return {'url': jenkins_url, 'user': jenkins_user, 'pass': jenkins_pass}
-
     def create_env_jenkins_cicd(self):
         """Creates static utils/env_jenkins_cicd file"""
 
diff --git a/tcp_tests/tests/system/test_backup_restore_galera.py b/tcp_tests/tests/system/test_backup_restore_galera.py
index 9309466..96ea98a 100644
--- a/tcp_tests/tests/system/test_backup_restore_galera.py
+++ b/tcp_tests/tests/system/test_backup_restore_galera.py
@@ -2,8 +2,6 @@
 
 from tcp_tests import logger
 from tcp_tests import settings
-from tcp_tests.utils import run_jenkins_job
-from tcp_tests.utils import get_jenkins_job_stages
 
 LOG = logger.logger
 
@@ -16,29 +14,19 @@
     @pytest.mark.grab_versions
     @pytest.mark.parametrize("_", [settings.ENV_NAME])
     @pytest.mark.run_galera_backup_restore
-    def test_backup_restore_galera(self, salt_actions, show_step, _):
+    def test_backup_restore_galera(self, drivetrain_actions,
+                                   show_step, _):
         """Execute backup/restore for galera
 
         Scenario:
-            1. Get CICD Jenkins access credentials from salt
-            2. Run job galera_backup_database
-            3. Run tests with cvp-sanity job
-            4. Run tests with cvp-tempest job
-            5. Run job galera_verify_restore
-            6. If jobs are passed then start tests with cvp-sanity job
-            7. Run tests with cvp-tempest job
+            1. Run job galera_backup_database
+            2. Run tests with cvp-sanity job
+            3. Run tests with cvp-tempest job
+            4. Run job galera_verify_restore
+            5. If the jobs passed, run tests with the cvp-sanity job
+            6. Run tests with cvp-tempest job
         """
-        salt = salt_actions
-        jenkins_creds = salt.get_cluster_jenkins_creds()
-
-        # ################### Login Jenkins on cid01 node ###################
-        show_step(1)
-
-        jenkins_url = jenkins_creds.get('url')
-        jenkins_user = jenkins_creds.get('user')
-        jenkins_pass = jenkins_creds.get('pass')
-        jenkins_start_timeout = 60
-        jenkins_build_timeout = 1800
+        dt = drivetrain_actions
 
         # ################## Run backup job #########################
         show_step(2)
@@ -46,28 +34,12 @@
         job_parameters = {
             'ASK_CONFIRMATION': False
         }
-        backup_galera_pipeline = run_jenkins_job.run_job(
-            host=jenkins_url,
-            username=jenkins_user,
-            password=jenkins_pass,
-            start_timeout=jenkins_start_timeout,
-            build_timeout=jenkins_build_timeout,
-            verbose=False,
+        backup_galera_pipeline = dt.start_job_on_cid_jenkins(
             job_name=job_name,
             job_parameters=job_parameters)
 
-        (description, stages) = get_jenkins_job_stages.get_deployment_result(
-            host=jenkins_url,
-            username=jenkins_user,
-            password=jenkins_pass,
-            job_name=job_name,
-            build_number='lastBuild')
+        assert backup_galera_pipeline == 'SUCCESS'
 
-        LOG.info(description)
-        LOG.info('\n'.join(stages))
-
-        assert backup_galera_pipeline == 'SUCCESS', "{0}\n{1}".format(
-            description, '\n'.join(stages))
         # ######################## Run CPV ###########################
         show_step(3)
         job_name = 'cvp-sanity'
@@ -86,57 +58,23 @@
                        'test_ceph_status', 'test_prometheus_alert_count',
                        'test_uncommited_changes')
         }
-        job_parameters = job_cvp_sanity_parameters
-        run_cvp_sanity = run_jenkins_job.run_job(
-            host=jenkins_url,
-            username=jenkins_user,
-            password=jenkins_pass,
-            start_timeout=jenkins_start_timeout,
-            build_timeout=jenkins_build_timeout,
-            verbose=False,
+        run_cvp_sanity = dt.start_job_on_cid_jenkins(
             job_name=job_name,
-            job_parameters=job_parameters)
+            job_parameters=job_cvp_sanity_parameters)
 
-        (description, stages) = get_jenkins_job_stages.get_deployment_result(
-            host=jenkins_url,
-            username=jenkins_user,
-            password=jenkins_pass,
-            job_name=job_name,
-            build_number='lastBuild')
+        assert run_cvp_sanity == 'SUCCESS'
 
-        LOG.info(description)
-        LOG.info('\n'.join(stages))
-
-        assert run_cvp_sanity == 'SUCCESS', "{0}\n{1}".format(
-            description, '\n'.join(stages))
         # ######################## Run Tempest ###########################
         show_step(4)
         job_name = 'cvp-tempest'
         job_parameters = {
              'TEMPEST_ENDPOINT_TYPE': 'internalURL'
         }
-        run_cvp_tempest = run_jenkins_job.run_job(
-            host=jenkins_url,
-            username=jenkins_user,
-            password=jenkins_pass,
-            start_timeout=jenkins_start_timeout,
-            build_timeout=jenkins_build_timeout,
-            verbose=False,
+        run_cvp_tempest = dt.start_job_on_cid_jenkins(
             job_name=job_name,
             job_parameters=job_parameters)
 
-        (description, stages) = get_jenkins_job_stages.get_deployment_result(
-            host=jenkins_url,
-            username=jenkins_user,
-            password=jenkins_pass,
-            job_name=job_name,
-            build_number='lastBuild')
-
-        LOG.info(description)
-        LOG.info('\n'.join(stages))
-
-        assert run_cvp_tempest == 'SUCCESS', "{0}\n{1}".format(
-            description, '\n'.join(stages))
+        assert run_cvp_tempest == 'SUCCESS'
         # ######################## Run Restore ###########################
         show_step(5)
         job_name = 'galera_verify_restore'
@@ -144,79 +82,27 @@
              'RESTORE_TYPE': 'ONLY_RESTORE',
              'ASK_CONFIRMATION': False
         }
-        run_galera_verify_restore = run_jenkins_job.run_job(
-            host=jenkins_url,
-            username=jenkins_user,
-            password=jenkins_pass,
-            start_timeout=jenkins_start_timeout,
-            build_timeout=jenkins_build_timeout,
-            verbose=False,
+        run_galera_verify_restore = dt.start_job_on_cid_jenkins(
             job_name=job_name,
             job_parameters=job_parameters)
 
-        (description, stages) = get_jenkins_job_stages.get_deployment_result(
-            host=jenkins_url,
-            username=jenkins_user,
-            password=jenkins_pass,
-            job_name=job_name,
-            build_number='lastBuild')
-
-        LOG.info(description)
-        LOG.info('\n'.join(stages))
-
-        assert run_galera_verify_restore == 'SUCCESS', "{0}\n{1}".format(
-            description, '\n'.join(stages))
+        assert run_galera_verify_restore == 'SUCCESS'
         # ######################## Run CPV ###########################
         show_step(6)
         job_name = 'cvp-sanity'
-        job_parameters = job_cvp_sanity_parameters
-        run_cvp_sanity = run_jenkins_job.run_job(
-            host=jenkins_url,
-            username=jenkins_user,
-            password=jenkins_pass,
-            start_timeout=jenkins_start_timeout,
-            build_timeout=jenkins_build_timeout,
-            verbose=False,
+        run_cvp_sanity = dt.start_job_on_cid_jenkins(
             job_name=job_name,
-            job_parameters=job_parameters)
+            job_parameters=job_cvp_sanity_parameters)
 
-        (description, stages) = get_jenkins_job_stages.get_deployment_result(
-            host=jenkins_url,
-            username=jenkins_user,
-            password=jenkins_pass,
-            job_name=job_name,
-            build_number='lastBuild')
-
-        LOG.info(description)
-        LOG.info('\n'.join(stages))
-
-        assert run_cvp_sanity == 'SUCCESS', "{0}\n{1}".format(
-            description, '\n'.join(stages))
+        assert run_cvp_sanity == 'SUCCESS'
         # ######################## Run Tempest ###########################
         show_step(7)
         job_name = 'cvp-tempest'
         job_parameters = {
              'TEMPEST_ENDPOINT_TYPE': 'internalURL'
         }
-        run_cvp_tempest = run_jenkins_job.run_job(
-            host=jenkins_url,
-            username=jenkins_user,
-            password=jenkins_pass,
-            start_timeout=jenkins_start_timeout,
-            build_timeout=jenkins_build_timeout,
-            verbose=False,
+        run_cvp_tempest = dt.start_job_on_cid_jenkins(
             job_name=job_name,
             job_parameters=job_parameters)
 
-        (description, stages) = get_jenkins_job_stages.get_deployment_result(
-            host=jenkins_url,
-            username=jenkins_user,
-            password=jenkins_pass,
-            job_name=job_name,
-            build_number='lastBuild')
-
-        LOG.info(description)
-        LOG.info('\n'.join(stages))
-
-        assert run_cvp_tempest == 'SUCCESS', "{0}\n{1}".format(
-            description, '\n'.join(stages))
+        assert run_cvp_tempest == 'SUCCESS'
diff --git a/tcp_tests/tests/system/test_failover_ceph.py b/tcp_tests/tests/system/test_failover_ceph.py
index 2493083..6745f80 100644
--- a/tcp_tests/tests/system/test_failover_ceph.py
+++ b/tcp_tests/tests/system/test_failover_ceph.py
@@ -14,10 +14,7 @@
 import pytest
 
 from devops.helpers import helpers
-
 from tcp_tests import logger
-from tcp_tests.utils import get_jenkins_job_stages
-from tcp_tests.utils import run_jenkins_job
 
 LOG = logger.logger
 
@@ -60,45 +57,12 @@
             for node_name in node_names
         }
 
-    def run_jenkins_job(
-            self, creds, name, parameters, start_timeout, build_timeout):
-        """Execute a Jenkins job with provided parameters
-
-        :param creds: dict, Jenkins url and user credentials
-        :param name: string, Jenkins job to execute
-        :param parameters: dict, parameters for Jenkins job
-        :parameter start_timeout: int, timeout to wait until build is started
-        :parameter build_timeout: int, timeout to wait until build is finished
-        :return: tuple, Jenkins job build execution status, high level
-            description of the build and verbose decription of executed job
-            stages
-        """
-        jenkins_url, jenkins_user, jenkins_pass = (
-            creds['url'], creds['user'], creds['pass'])
-        build_status = run_jenkins_job.run_job(
-            host=jenkins_url,
-            username=jenkins_user,
-            password=jenkins_pass,
-            start_timeout=start_timeout,
-            build_timeout=build_timeout,
-            verbose=False,
-            job_name=name,
-            job_parameters=parameters)
-
-        description, stages = get_jenkins_job_stages.get_deployment_result(
-            host=jenkins_url,
-            username=jenkins_user,
-            password=jenkins_pass,
-            job_name=name,
-            build_number='lastBuild')
-
-        return build_status, description, stages
-
     @pytest.mark.grab_versions
     @pytest.mark.restart_osd_node
     def test_restart_osd_node(
             self,
             salt_actions,
+            drivetrain_actions,
             underlay_actions,
             show_step):
         """Verify that Ceph OSD node is not affected by system restart
@@ -118,6 +82,7 @@
         """
         salt = salt_actions
         ssh = underlay_actions
+        dt = drivetrain_actions
 
         # Find Ceph OSD nodes
         show_step(1)
@@ -153,7 +118,7 @@
 
         # Check Ceph cluster health after node restart
         show_step(4)
-        ceph_health = self.get_ceph_health(ssh, osd_hosts) # noqa
+        ceph_health = self.get_ceph_health(ssh, osd_hosts)  # noqa
         # FIXME: uncomment the check once PROD-31374 is fixed
         # status = all(
         #     ["OK" in status for node, status in ceph_health.items()])
@@ -161,35 +126,31 @@
 
         # Run Tempest smoke test suite
         show_step(5)
-        jenkins_creds = salt.get_cluster_jenkins_creds()
-        status, description, stages = self.run_jenkins_job(
-            jenkins_creds,
-            self.TEMPEST_JOB_NAME,
-            self.TEMPEST_JOB_PARAMETERS,
-            self.JENKINS_START_TIMEOUT,
-            self.JENKINS_BUILD_TIMEOUT
+        status = dt.start_job_on_cid_jenkins(
+            job_name=self.TEMPEST_JOB_NAME,
+            job_parameters=self.TEMPEST_JOB_PARAMETERS,
+            start_timeout=self.JENKINS_START_TIMEOUT,
+            build_timeout=self.JENKINS_BUILD_TIMEOUT
         )
+
         assert status == 'SUCCESS', (
             "'{0}' job run status is {1} after executing Tempest smoke "
-            "tests. Please check the build:\n{2}\n\nExecuted build "
-            "stages:\n{3}".format(
-                self.TEMPEST_JOB_NAME, status, description, '\n'.join(stages))
+            "tests".format(
+                self.TEMPEST_JOB_NAME, status)
         )
 
         # Run Sanity test
         show_step(6)
-        status, description, stages = self.run_jenkins_job(
-            jenkins_creds,
-            self.SANITY_JOB_NAME,
-            self.SANITY_JOB_PARAMETERS,
-            self.JENKINS_START_TIMEOUT,
-            self.JENKINS_BUILD_TIMEOUT
+        status = dt.start_job_on_cid_jenkins(
+            job_name=self.SANITY_JOB_NAME,
+            job_parameters=self.SANITY_JOB_PARAMETERS,
+            start_timeout=self.JENKINS_START_TIMEOUT,
+            build_timeout=self.JENKINS_BUILD_TIMEOUT
         )
         assert status == 'SUCCESS', (
             "'{0}' job run status is {1} after executing selected sanity "
-            "tests. Please check the build:\n{2}\n\nExecuted build "
-            "stages:\n{3}".format(
-                self.SANITY_JOB_NAME, status, description, '\n'.join(stages))
+            "tests".format(
+                self.SANITY_JOB_NAME, status)
         )
 
     @pytest.mark.grab_versions
@@ -197,6 +158,7 @@
     def test_restart_cmn_node(
             self,
             salt_actions,
+            drivetrain_actions,
             underlay_actions,
             show_step):
         """Verify that Ceph CMN node is not affected by system restart
@@ -216,6 +178,7 @@
         """
         salt = salt_actions
         ssh = underlay_actions
+        dt = drivetrain_actions
 
         # Find Ceph CMN nodes
         show_step(1)
@@ -259,35 +222,31 @@
 
         # Run Tempest smoke test suite
         show_step(5)
-        jenkins_creds = salt.get_cluster_jenkins_creds()
-        status, description, stages = self.run_jenkins_job(
-            jenkins_creds,
-            self.TEMPEST_JOB_NAME,
-            self.TEMPEST_JOB_PARAMETERS,
-            self.JENKINS_START_TIMEOUT,
-            self.JENKINS_BUILD_TIMEOUT
+        status = dt.start_job_on_cid_jenkins(
+            job_name=self.TEMPEST_JOB_NAME,
+            job_parameters=self.TEMPEST_JOB_PARAMETERS,
+            start_timeout=self.JENKINS_START_TIMEOUT,
+            build_timeout=self.JENKINS_BUILD_TIMEOUT
         )
+
         assert status == 'SUCCESS', (
             "'{0}' job run status is {1} after executing Tempest smoke "
-            "tests. Please check the build:\n{2}\n\nExecuted build "
-            "stages:\n{3}".format(
-                self.TEMPEST_JOB_NAME, status, description, '\n'.join(stages))
+            "tests".format(
+                self.TEMPEST_JOB_NAME, status)
         )
 
         # Run Sanity test
         show_step(6)
-        status, description, stages = self.run_jenkins_job(
-            jenkins_creds,
-            self.SANITY_JOB_NAME,
-            self.SANITY_JOB_PARAMETERS,
-            self.JENKINS_START_TIMEOUT,
-            self.JENKINS_BUILD_TIMEOUT
+        status = dt.start_job_on_cid_jenkins(
+            job_name=self.SANITY_JOB_NAME,
+            job_parameters=self.SANITY_JOB_PARAMETERS,
+            start_timeout=self.JENKINS_START_TIMEOUT,
+            build_timeout=self.JENKINS_BUILD_TIMEOUT
         )
         assert status == 'SUCCESS', (
             "'{0}' job run status is {1} after executing selected sanity "
-            "tests. Please check the build:\n{2}\n\nExecuted build "
-            "stages:\n{3}".format(
-                self.SANITY_JOB_NAME, status, description, '\n'.join(stages))
+            "tests".format(
+                self.SANITY_JOB_NAME, status)
         )
 
     @pytest.mark.grab_versions
@@ -295,6 +254,7 @@
     def test_restart_rgw_node(
             self,
             salt_actions,
+            drivetrain_actions,
             underlay_actions,
             show_step):
         """Verify that Ceph RGW node is not affected by system restart
@@ -313,6 +273,7 @@
         """
         salt = salt_actions
         ssh = underlay_actions
+        dt = drivetrain_actions
 
         # Find Ceph RGW nodes
         show_step(1)
@@ -356,35 +317,31 @@
 
         # Run Tempest smoke test suite
         show_step(5)
-        jenkins_creds = salt.get_cluster_jenkins_creds()
-        status, description, stages = self.run_jenkins_job(
-            jenkins_creds,
-            self.TEMPEST_JOB_NAME,
-            self.TEMPEST_JOB_PARAMETERS,
-            self.JENKINS_START_TIMEOUT,
-            self.JENKINS_BUILD_TIMEOUT
+        status = dt.start_job_on_cid_jenkins(
+            job_name=self.TEMPEST_JOB_NAME,
+            job_parameters=self.TEMPEST_JOB_PARAMETERS,
+            start_timeout=self.JENKINS_START_TIMEOUT,
+            build_timeout=self.JENKINS_BUILD_TIMEOUT
         )
+
         assert status == 'SUCCESS', (
             "'{0}' job run status is {1} after executing Tempest smoke "
-            "tests. Please check the build:\n{2}\n\nExecuted build "
-            "stages:\n{3}".format(
-                self.TEMPEST_JOB_NAME, status, description, '\n'.join(stages))
+            "tests".format(
+                self.TEMPEST_JOB_NAME, status)
         )
 
         # Run Sanity test
         show_step(6)
-        status, description, stages = self.run_jenkins_job(
-            jenkins_creds,
-            self.SANITY_JOB_NAME,
-            self.SANITY_JOB_PARAMETERS,
-            self.JENKINS_START_TIMEOUT,
-            self.JENKINS_BUILD_TIMEOUT
+        status = dt.start_job_on_cid_jenkins(
+            job_name=self.SANITY_JOB_NAME,
+            job_parameters=self.SANITY_JOB_PARAMETERS,
+            start_timeout=self.JENKINS_START_TIMEOUT,
+            build_timeout=self.JENKINS_BUILD_TIMEOUT
         )
         assert status == 'SUCCESS', (
             "'{0}' job run status is {1} after executing selected sanity "
-            "tests. Please check the build:\n{2}\n\nExecuted build "
-            "stages:\n{3}".format(
-                self.SANITY_JOB_NAME, status, description, '\n'.join(stages))
+            "tests".format(
+                self.SANITY_JOB_NAME, status)
         )
 
     # #######################################################################
diff --git a/tcp_tests/tests/system/test_update.py b/tcp_tests/tests/system/test_update.py
index 9796ade..f3834d3 100644
--- a/tcp_tests/tests/system/test_update.py
+++ b/tcp_tests/tests/system/test_update.py
@@ -2,8 +2,6 @@
 
 from tcp_tests import logger
 from tcp_tests import settings
-from tcp_tests.utils import run_jenkins_job
-from tcp_tests.utils import get_jenkins_job_stages
 
 LOG = logger.logger
 
@@ -17,11 +15,12 @@
     @pytest.mark.grab_versions
     @pytest.mark.parametrize("_", [settings.ENV_NAME])
     @pytest.mark.run_mcp_update
-    def test_update_drivetrain(self, salt_actions, show_step, _):
+    def test_update_drivetrain(self, salt_actions, drivetrain_actions,
+                               show_step, _):
         """Updating DriveTrain component to release/proposed/2019.2.0 version
 
         Scenario:
-            1. Get CICD Jenkins access credentials from salt
+            1. Add workaround for PROD-32751
             2. Run job git-mirror-downstream-mk-pipelines
             3. Run job git-mirror-downstream-pipeline-library
             4. If jobs are passed then start 'Deploy - upgrade MCP Drivetrain'
@@ -29,17 +28,11 @@
         Duration: ~35 min
         """
         salt = salt_actions
-        jenkins_creds = salt.get_cluster_jenkins_creds()
+        dt = drivetrain_actions
 
-        # #################### Login Jenkins on cid01 node ###################
+        # #################### Add workaround for PROD-32751 #################
         show_step(1)
 
-        jenkins_url = jenkins_creds.get('url')
-        jenkins_user = jenkins_creds.get('user')
-        jenkins_pass = jenkins_creds.get('pass')
-        jenkins_start_timeout = 60
-        jenkins_build_timeout = 1800
-
         # FIXME: workaround for PROD-32751
         salt.cmd_run("cfg01*", "cd /srv/salt/reclass; git add -u && \
                         git commit --allow-empty -m 'Cluster model update'")
@@ -50,28 +43,11 @@
         job_parameters = {
             'BRANCHES': 'release/proposed/2019.2.0'
         }
-        update_pipelines = run_jenkins_job.run_job(
-            host=jenkins_url,
-            username=jenkins_user,
-            password=jenkins_pass,
-            start_timeout=jenkins_start_timeout,
-            build_timeout=jenkins_build_timeout,
-            verbose=False,
+        update_pipelines = dt.start_job_on_cid_jenkins(
             job_name=job_name,
             job_parameters=job_parameters)
 
-        (description, stages) = get_jenkins_job_stages.get_deployment_result(
-            host=jenkins_url,
-            username=jenkins_user,
-            password=jenkins_pass,
-            job_name=job_name,
-            build_number='lastBuild')
-
-        LOG.info(description)
-        LOG.info('\n'.join(stages))
-
-        assert update_pipelines == 'SUCCESS', "{0}\n{1}".format(
-            description, '\n'.join(stages))
+        assert update_pipelines == 'SUCCESS'
 
         # ################### Downstream pipeline-library ####################
         show_step(3)
@@ -79,66 +55,32 @@
         job_parameters = {
             'BRANCHES': 'release/proposed/2019.2.0'
         }
-        update_pipeline_library = run_jenkins_job.run_job(
-            host=jenkins_url,
-            username=jenkins_user,
-            password=jenkins_pass,
-            start_timeout=jenkins_start_timeout,
-            build_timeout=jenkins_build_timeout,
-            verbose=False,
+        update_pipeline_library = dt.start_job_on_cid_jenkins(
             job_name=job_name,
             job_parameters=job_parameters)
 
-        (description, stages) = get_jenkins_job_stages.get_deployment_result(
-            host=jenkins_url,
-            username=jenkins_user,
-            password=jenkins_pass,
-            job_name=job_name,
-            build_number='lastBuild')
-
-        LOG.info(description)
-        LOG.info('\n'.join(stages))
-
-        assert update_pipeline_library == 'SUCCESS', "{0}\n{1}".format(
-            description, '\n'.join(stages))
+        assert update_pipeline_library == 'SUCCESS'
 
         # ################### Start 'Deploy - upgrade MCP Drivetrain' job #####
         show_step(4)
 
-        jenkins_build_timeout = 3600
         job_name = 'upgrade-mcp-release'
         job_parameters = {
             'MK_PIPELINES_REFSPEC': 'release/proposed/2019.2.0',
             'TARGET_MCP_VERSION': '2019.2.0'
         }
-        update_drivetrain = run_jenkins_job.run_job(
-            host=jenkins_url,
-            username=jenkins_user,
-            password=jenkins_pass,
-            start_timeout=jenkins_start_timeout,
-            build_timeout=jenkins_build_timeout,
-            verbose=False,
+        update_drivetrain = dt.start_job_on_cid_jenkins(
             job_name=job_name,
-            job_parameters=job_parameters)
+            job_parameters=job_parameters,
+            build_timeout=3600)
 
-        (description, stages) = get_jenkins_job_stages.get_deployment_result(
-            host=jenkins_url,
-            username=jenkins_user,
-            password=jenkins_pass,
-            job_name=job_name,
-            build_number='lastBuild')
-
-        LOG.info(description)
-        LOG.info('\n'.join(stages))
-
-        assert update_drivetrain == 'SUCCESS', "{0}\n{1}".format(
-            description, '\n'.join(stages))
+        assert update_drivetrain == 'SUCCESS'
 
     @pytest.mark.grab_versions
     @pytest.mark.parametrize("_", [settings.ENV_NAME])
     @pytest.mark.run_mcp_update
     def test_update_glusterfs(self, salt_actions, reclass_actions,
-                              show_step, _):
+                              drivetrain_actions, show_step, _):
         """ Upgrade GlusterFS
         Scenario:
         1. In infra/init.yml in Reclass, add the glusterfs_version parameter
@@ -150,10 +92,7 @@
         """
         salt = salt_actions
         reclass = reclass_actions
-        jenkins_creds = salt.get_cluster_jenkins_creds()
-        jenkins_url = jenkins_creds.get('url')
-        jenkins_user = jenkins_creds.get('user')
-        jenkins_pass = jenkins_creds.get('pass')
+        dt = drivetrain_actions
 
         def has_only_similar(param_by_nodes):
             """
@@ -178,29 +117,13 @@
 
         # ############## Start deploy-upgrade-galera job #####################
         show_step(3)
-        jenkins_build_timeout = 40 * 60
         job_name = 'update-glusterfs'
 
-        update_glusterfs = run_jenkins_job.run_job(
-            host=jenkins_url,
-            username=jenkins_user,
-            password=jenkins_pass,
-            build_timeout=jenkins_build_timeout,
-            verbose=False,
-            job_name=job_name)
-
-        (description, stages) = get_jenkins_job_stages.get_deployment_result(
-            host=jenkins_url,
-            username=jenkins_user,
-            password=jenkins_pass,
+        update_glusterfs = dt.start_job_on_cid_jenkins(
             job_name=job_name,
-            build_number='lastBuild')
+            build_timeout=40 * 60)
 
-        LOG.info(description)
-        LOG.info('\n'.join(stages))
-
-        assert update_glusterfs == 'SUCCESS', "{0}\n{1}".format(
-            description, '\n'.join(stages))
+        assert update_glusterfs == 'SUCCESS'
 
         # ################ Check GlusterFS version for servers ##############
         show_step(4)
@@ -223,7 +146,8 @@
     @pytest.mark.grab_versions
     @pytest.mark.parametrize("_", [settings.ENV_NAME])
     @pytest.mark.run_mcp_update
-    def test_update_galera(self, salt_actions, reclass_actions, show_step, _):
+    def test_update_galera(self, salt_actions, reclass_actions,
+                           drivetrain_actions, show_step, _):
         """ Upgrade Galera automatically
 
         Scenario:
@@ -236,7 +160,7 @@
         """
         salt = salt_actions
         reclass = reclass_actions
-        jenkins_creds = salt.get_cluster_jenkins_creds()
+        dt = drivetrain_actions
         # ################### Enable pipeline #################################
         show_step(1)
         reclass.add_class(
@@ -262,33 +186,14 @@
         # #################### Login Jenkins on cid01 node ###################
         show_step(6)
 
-        jenkins_url = jenkins_creds.get('url')
-        jenkins_user = jenkins_creds.get('user')
-        jenkins_pass = jenkins_creds.get('pass')
-        jenkins_build_timeout = 40 * 60
         job_name = 'deploy-upgrade-galera'
         job_parameters = {
             'INTERACTIVE': 'false'
         }
 
-        update_galera = run_jenkins_job.run_job(
-            host=jenkins_url,
-            username=jenkins_user,
-            password=jenkins_pass,
-            build_timeout=jenkins_build_timeout,
-            verbose=False,
+        update_galera = dt.start_job_on_cid_jenkins(
             job_name=job_name,
-            job_parameters=job_parameters)
+            job_parameters=job_parameters,
+            build_timeout=40 * 60)
 
-        (description, stages) = get_jenkins_job_stages.get_deployment_result(
-            host=jenkins_url,
-            username=jenkins_user,
-            password=jenkins_pass,
-            job_name=job_name,
-            build_number='lastBuild')
-
-        LOG.info(description)
-        LOG.info('\n'.join(stages))
-
-        assert update_galera == 'SUCCESS', "{0}\n{1}".format(
-            description, '\n'.join(stages))
+        assert update_galera == 'SUCCESS'
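The start_job_on_cfg_jenkins() stub added in drivetrain_manager.py could follow the same pattern as start_job_on_cid_jenkins(). A possible sketch, assuming the cfg Jenkins pillar is reachable via the Salt target shown below; the target string is an assumption and is not part of this change:

    def start_job_on_cfg_jenkins(self, job_name, **kwargs):
        # Assumed target for the cfg01 Jenkins pillar; adjust to the cluster model
        jenkins_url, jenkins_user, jenkins_pass = self.get_jenkins_creds(
            tgt='I@jenkins:client and cfg01*')
        return run_jenkins_job.run_job(
            host=jenkins_url,
            username=jenkins_user,
            password=jenkins_pass,
            job_name=job_name,
            **kwargs)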