Fix restarting Jenkins jobs with known issues on cluster-under-test PROD-37137

Change-Id: Idc52d1001a88137bd93719dcf1ba84abb33af1f2
diff --git a/tcp_tests/utils/run_jenkins_job.py b/tcp_tests/utils/run_jenkins_job.py
index 403cc00..dee3fcb 100755
--- a/tcp_tests/utils/run_jenkins_job.py
+++ b/tcp_tests/utils/run_jenkins_job.py
@@ -128,11 +128,8 @@
         job_params.update(job_parameters)
 
     job_attempts = 2
-    count = 1
-    while (job_attempts != 0):
-        print('Attempt ' + str(count))
-        count += 1
-        job_attempts -= 1
+    for attempt in range(1, job_attempts+1):
+        print('Attempt ' + str(attempt))
         build = jenkins.run_build(job_name,
                                   job_params,
                                   verbose=verbose,
@@ -152,19 +149,24 @@
         except Exception as e:
             print(str(e))
             raise
+        result = jenkins.build_info(name=build[0],
+                                    build_id=build[1])['result']
+        if verbose:
+            print_build_footer(build, result, host)
+
+        if result == "SUCCESS":
+            break
+
         job_log = jenkins.get_build_output(job_name, build[1])
         # Workaround for restart jobs that failed by salt-timeout errors
         # or by sporadic fail in attach disks
-        if ('SaltReqTimeoutError' not in job_log and
-                'not a block device' not in job_log):
-            break
-        else:
+        if ('SaltReqTimeoutError' in job_log or
+                'not a block device' in job_log):
             print('Job returns known infra fail!')
-
-    result = jenkins.build_info(name=build[0],
-                                build_id=build[1])['result']
-    if verbose:
-        print_build_footer(build, result, host)
+            continue
+        else:
+            # do not retry if it's not a known issue
+            break
 
     return result