Consider 'Won't Fix' status when analyzing bugs for known issues.

Apply minor fixes for long lines. Split some lengthy messages into smaller parts.

PRODX-36711

Change-Id: I590d57fc17a7c44553246e5ed915bf8c6a4bfacb
diff --git a/testrail_bot/control/celery_tasks/testrail_pipeline.py b/testrail_bot/control/celery_tasks/testrail_pipeline.py
index ba6f0ff..63f2ace 100644
--- a/testrail_bot/control/celery_tasks/testrail_pipeline.py
+++ b/testrail_bot/control/celery_tasks/testrail_pipeline.py
@@ -100,7 +100,9 @@
             return None, -2.0, run_id
 
         status_code = str(results[0]["status_id"])
-        if status_code not in [StatusEnum.test_failed, StatusEnum.product_failed]:
+        if status_code not in [StatusEnum.test_failed,
+                               StatusEnum.product_failed,
+                               StatusEnum.wont_fix]:
             return None, -3.0, run_id
         comment = apply_filters(results[-1]["comment"], bot_run)
         ratio = difflib.SequenceMatcher(
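
A minimal, runnable sketch of the gate this hunk widens; the numeric
status IDs below are placeholders, not the values the real StatusEnum
carries:

    class StatusEnum:
        # Assumed IDs -- TestRail status codes are project-specific.
        test_failed = "5"
        product_failed = "8"
        wont_fix = "9"

    def is_candidate(status_id):
        # With wont_fix in the list, 'Won't Fix' results are no longer
        # rejected with the -3.0 sentinel and get compared as well.
        return str(status_id) in [StatusEnum.test_failed,
                                  StatusEnum.product_failed,
                                  StatusEnum.wont_fix]
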
@@ -154,12 +156,15 @@
 
 def get_last_comment(case_id: int, test_run: models.TestRailTestRun) -> str:
     """
-    Retrieve the last comment associated with a test case in a TestRail test run.
+    Retrieve the last comment associated with a test case in a TestRail
+    test run.
 
     :param case_id: An integer representing the ID of the test case.
-    :param test_run: An instance of the TestRailTestRun model that the test case is associated with.
+    :param test_run: An instance of the TestRailTestRun model that the test
+        case is associated with.
 
-    :return: A string containing the filtered last comment for the specified test case in the given test run
+    :return: A string containing the filtered last comment for the specified
+        test case in the given test run.
     """
     last_result = test_rail_api.get_result_for_case(
         test_run.run_id, case_id)
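
The hunk above only rewraps the docstring. For context, the similarity
score that the rest of the pipeline branches on comes from the standard
library; a self-contained demo with made-up comments:

    import difflib

    old = "AssertionError: node failed to reach ACTIVE state in 300s"
    new = "AssertionError: node failed to reach ACTIVE state in 600s"

    # ratio() returns a float in [0, 1]; the next hunk treats > 0.9 as
    # a confident match and > 0.7 as "report, but update manually".
    ratio = difflib.SequenceMatcher(None, old, new).ratio()
    print(round(100.0 * ratio, 2))  # near-identical traces score ~98
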
@@ -181,51 +186,48 @@
     """
     sim_result, ratio, run_id = process_plan(
         plan_id, case_id, last_comment, test_run)
-    if sim_result is dict:
+    per = round(100.0 * ratio, 2)
+    run_link = f"<a href=https://mirantis.testrail.com/index.php?/runs/" \
+               f"view/{run_id}>{run_id} </a>"
+    if isinstance(sim_result, dict):
+        prod_link = "<a href=https://mirantis.jira.com/browse/{defect}>" \
+                    "{defect}</a>".format(defect=sim_result["defects"])
+        test_link = "<a href=https://mirantis.testrail.com/index.php?/tests/" \
+                    "view/{test_id}>{test_id}</a>".format(
+                        test_id=sim_result["test_id"])
         if str(sim_result["status_id"]) == StatusEnum.retest:
             update_dict = {
                 "status_id": sim_result["status_id"],
-                "comment": "NOT marked by TestRailBot because it's not a regular fail, "
-                           "similarity with test {} {}%, you can update manualy".format(
-                    sim_result["test_id"], round(100.0 * ratio, 2))
+                "comment": f"NOT marked by TestRailBot because it's not a "
+                           f"regular fail, "
+                           f"similarity with test {sim_result['test_id']} "
+                           f"{per}%, you can update manually"
             }
-            f.write("Found similarity trace on the test <a href=https://mirantis.testrail.com/"
-                    "index.php?/tests/view/{test_id}>{test_id} </a> : \n {dict}\n"
-                    .format(test_id=sim_result["test_id"], dict=update_dict))
+            f.write(f"Found similarity trace on the test {test_link}: \n"
+                    f"{update_dict}\n")
             f.flush()
             return True
         elif ratio > 0.9:
             update_dict = {
                 "status_id": sim_result["status_id"],
-                "comment": "Marked by TestRailBot because "
-                           "of similarity with test {} {}%".format(
-                    sim_result["test_id"], round(100.0 * ratio, 2)),
+                "comment": f"Marked by TestRailBot because of similarity "
+                           f"with test {sim_result['test_id']} {per}%",
                 "defects": sim_result["defects"]
             }
-            f.write("Found similarity defect <a href=https://mirantis.jira.com/browse/"
-                    "{defect}>{defect}</a> on the test <a href=https://mirantis.testrail.com/"
-                    "index.php?/tests/view/{test_id}>{test_id} </a> : \n {dict}\n"
-                    .format(defect=sim_result["defects"],
-                            test_id=sim_result["test_id"],
-                            dict=update_dict))
+            f.write(f"Found similarity defect {prod_link} on the test "
+                    f"{test_link} : \n {update_dict}\n")
             f.flush()
             test_rail_api.add_result(test["id"], update_dict)
             return True
         elif ratio > 0.7:
-            f.write("<b style='color:red;'> Found similarity defect <a href=https://mirantis.jira.com/browse/"
-                    "{defect}>{defect}</a> on the test <a href=https://mirantis.testrail.com/"
-                    "index.php?/tests/view/{test_id}>{test_id} </a>, "
-                    "but NOT marked by TestRailBot because of similarity only"
-                    " {per}%, you can update manually \n </b>"
-                    .format(defect=sim_result["defects"],
-                            test_id=sim_result["test_id"],
-                            per=round(100.0 * ratio, 2)))
+            f.write(f"<b style='color:red;'> Found similarity defect "
+                    f"{prod_link} on the test {test_link}, but NOT marked by "
+                    f"TestRailBot because of similarity only "
+                    f"{per}%, you can update manually \n </b>")
             f.flush()
             return True
-    f.write(
-        "Similarity not found due to similarity:{per}, in run <a href=https://mirantis.testrail.com/"
-        "index.php?/runs/view/{run_id}>{run_id} </a>\n".format(
-            per=round(100.0 * ratio, 2), run_id=run_id))
+    f.write(f"Similarity not found due to similarity:{per}, "
+            f"in run {run_link}\n")
     f.flush()
     return False
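
For reference, the last hunk's refactor computes per, run_link,
prod_link and test_link once up front instead of repeating .format()
calls in every branch. The same pattern in isolation, with made-up IDs
(the URLs are the ones from the diff):

    ratio = 0.9231          # made-up similarity score
    run_id = 12345          # made-up TestRail run ID
    defect = "PRODX-00000"  # made-up Jira key
    test_id = 67890         # made-up TestRail test ID

    per = round(100.0 * ratio, 2)
    run_link = ("<a href=https://mirantis.testrail.com/index.php?/runs/"
                f"view/{run_id}>{run_id} </a>")
    prod_link = f"<a href=https://mirantis.jira.com/browse/{defect}>" \
                f"{defect}</a>"
    test_link = ("<a href=https://mirantis.testrail.com/index.php?/tests/"
                 f"view/{test_id}>{test_id}</a>")

    print(f"Found similarity defect {prod_link} on the test {test_link}, "
          f"similarity {per}%")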