Merge "Implement catching of exceptions in celery tasks"
diff --git a/testrail_bot/control/celery_tasks/filters.py b/testrail_bot/control/celery_tasks/filters.py
index f552dfc..e19b5d5 100644
--- a/testrail_bot/control/celery_tasks/filters.py
+++ b/testrail_bot/control/celery_tasks/filters.py
@@ -1,16 +1,16 @@
import re
-def filter_ip(data):
+def filter_ip(data: str) -> str:
ip_addr_regex = re.compile(r'\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b')
return re.sub(ip_addr_regex, "x.x.x.x", data)
-def filter_uuid(data):
+def filter_uuid(data: str) -> str:
uuid4hex = re.compile(
r'[0-9a-f]{12}4[0-9a-f]{3}[89ab][0-9a-f]{15}\Z', re.I)
return re.sub(uuid4hex, "xxxx", data)
-def last_traceback_filter(data):
+def last_traceback_filter(data: str) -> str:
return data[data.rfind("Traceback"):]
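For context, a minimal sketch of how these filters are meant to chain before comments
are diffed; the sample failure text is invented, and the import path assumes the
package layout shown in this repo:

    # Illustration only: the sample log line below is hypothetical.
    from testrail_bot.control.celery_tasks import filters

    sample = ("Traceback (most recent call last): ...\n"
              "ConnectFail: node 10.20.30.40 unreachable, "
              "request 0123456789ab4def80123456789abcde")

    # filter_ip masks any dotted-quad address; filter_uuid masks a dashless
    # UUID4, but only at the very end of the string because of the \Z anchor.
    masked = filters.filter_uuid(filters.filter_ip(sample))
    print(masked)  # IP becomes "x.x.x.x", the trailing UUID becomes "xxxx"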
diff --git a/testrail_bot/control/celery_tasks/test_rail_api.py b/testrail_bot/control/celery_tasks/test_rail_api.py
index 7a27ac9..37baa81 100644
--- a/testrail_bot/control/celery_tasks/test_rail_api.py
+++ b/testrail_bot/control/celery_tasks/test_rail_api.py
@@ -1,6 +1,6 @@
from testrail_api import TestRailAPI, StatusCodeError
-
from django.conf import settings
+from typing import Optional, List
from .enums import StatusEnum
@@ -10,7 +10,7 @@
settings.TESTRAIL_PASSWORD)
-def get_project_id(project_name):
+def get_project_id(project_name: str) -> Optional[int]:
project = list(filter(
lambda x: x["name"] == project_name,
api.projects.get_projects()['projects']))
@@ -19,33 +19,35 @@
else:
return None
-def get_suite_by_id(suite_id):
+
+def get_suite_by_id(suite_id: int) -> dict:
return api.suites.get_suite(suite_id)
-def get_suite_name_by_id(suite_id):
+def get_suite_name_by_id(suite_id: int) -> str:
return api.suites.get_suite(suite_id)['name']
-def get_suite_test_type(suite_id):
+
+def get_suite_test_type(suite_id: int) -> str:
suite_name = get_suite_name_by_id(suite_id)
return suite_name.split(']')[1]
-def get_plans(project_id, plan_name, **kwargs):
+def get_plans(project_id: int, plan_name: str, **kwargs) -> List[int]:
plans = api.plans.get_plans(project_id, **kwargs)['plans']
return [x["id"] for x in filter(
lambda x: plan_name in x["name"], plans)]
-def get_entries(plan_id):
+def get_entries(plan_id: int) -> List[dict]:
return api.plans.get_plan(plan_id)["entries"]
-def get_run_by_id(run_id):
+def get_run_by_id(run_id: int) -> dict:
return api.runs.get_run(run_id)
-def get_run_id(entries, run_name):
+def get_run_id(entries: List[dict], run_name: str) -> Optional[int]:
entries = list(filter(
lambda x: x["name"] == run_name,
entries))
@@ -54,7 +56,7 @@
return entries[0]["runs"][0]["id"]
-def get_result_for_case(run_id, case_id):
+def get_result_for_case(run_id: int, case_id: int) -> Optional[List[dict]]:
try:
results = api.results.get_results_for_case(run_id, case_id)['results']
except StatusCodeError:
@@ -62,10 +64,10 @@
return results
-def get_failed_tests(last_run_id):
+def get_failed_tests(last_run_id: int) -> List[dict]:
return api.tests.get_tests(
last_run_id, status_id=StatusEnum.failed)['tests']
-def add_result(test_id, update_dict):
+def add_result(test_id: int, update_dict: dict) -> None:
api.results.add_result(test_id, **update_dict)
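The commit subject mentions catching exceptions, and get_result_for_case above is the
visible instance of that pattern. A sketch of the guarded wrapper follows, under the
assumption (suggested by the Optional[...] annotation; the except body itself is
elided by the hunk) that a StatusCodeError yields None; endpoint and credentials are
placeholders:

    from typing import List, Optional
    from testrail_api import TestRailAPI, StatusCodeError

    # Hypothetical endpoint and credentials for illustration.
    api = TestRailAPI("https://example.testrail.io", "user@example.com", "token")

    def get_result_for_case(run_id: int, case_id: int) -> Optional[List[dict]]:
        # A missing run/case surfaces as StatusCodeError; returning None lets
        # the celery task skip this run instead of failing outright.
        try:
            return api.results.get_results_for_case(run_id, case_id)["results"]
        except StatusCodeError:
            return None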
diff --git a/testrail_bot/control/celery_tasks/testrail_pipeline.py b/testrail_bot/control/celery_tasks/testrail_pipeline.py
index 829e054..63f2ace 100644
--- a/testrail_bot/control/celery_tasks/testrail_pipeline.py
+++ b/testrail_bot/control/celery_tasks/testrail_pipeline.py
@@ -1,20 +1,32 @@
+import datetime
import difflib
+from typing import TextIO, List, Tuple, Optional
from . import filters
from .enums import StatusEnum
from . import test_rail_api
-
from .. import models
__all__ = ("process_test_run",)
-def finish_report(report):
+def finish_report(report: models.TestRailReport) -> None:
+ """
+ Adds mark to the TestReport object that report is finished
+ and saves the object to the DB
+ :param report: TestRail Report model
+ """
report.finished = True
report.save()
-def apply_filters(data, test_run):
+def apply_filters(data: str, test_run: models.TestRailTestRun) -> str:
+ """
+ Applies modifiers to text (filtering, masking etc.)
+ :param data: text to modify
+ :param test_run: TestRun object that contains switchers for filters
+ :return: modified text
+ """
if test_run.filter_last_traceback:
data = filters.last_traceback_filter(data)
@@ -29,14 +41,34 @@
data = locals()["custom_filter"](data)
return data
-def get_runs_by_pattern(runs_in_plan, test_pattern, suite_id):
+
+def get_runs_by_pattern(runs_in_plan: List[dict],
+ test_pattern: str,
+ suite_id: int) -> List[int]:
+ """
+ Returns a list of run IDs that are related to a specific Test Suite
+ and have names containing a pattern (test_pattern)
+
+ :param runs_in_plan: A list of runs
+ :param test_pattern: A string pattern to match against Test Runs' names
+ :param suite_id: The ID of the Test Suite to which the tests should be
+ related
+
+ :return: a list of IDs
+ """
run = []
for t_run in runs_in_plan:
if test_pattern in t_run['name'] and t_run['suite_id'] == suite_id:
- run.append( t_run['runs'][0]['id'])
+ run.append(t_run['runs'][0]['id'])
return run
-def process_plan(plan_id, case_id, last_comment, bot_run):
+
+def process_plan(plan_id: int,
+ case_id: int,
+ last_comment: str,
+ bot_run: models.TestRailTestRun) -> Tuple[Optional[dict],
+ float, int]:
"""
This function performs a search for a similar failure within a test plan.
@@ -46,14 +78,16 @@
:param bot_run: TestRailTestRun object for the bot run from the 'Reports' tab
"""
runs = []
- testrail_run = test_rail_api.get_run_by_id(bot_run.run_id)
+ testrail_run = test_rail_api.get_run_by_id(bot_run.run_id)
suite_id = testrail_run['suite_id']
runs_in_plan = test_rail_api.get_entries(plan_id)
test_pattern = bot_run.test_pattern
if test_pattern:
runs = get_runs_by_pattern(runs_in_plan, test_pattern, suite_id)
else:
- runs = [t_run['runs'][0]['id'] for t_run in runs_in_plan if suite_id == t_run['suite_id']]
+ runs = [t_run['runs'][0]['id']
+ for t_run in runs_in_plan
+ if suite_id == t_run['suite_id']]
results = []
@@ -66,7 +100,9 @@
return None, -2.0, run_id
status_code = str(results[0]["status_id"])
- if status_code not in [StatusEnum.test_failed, StatusEnum.product_failed]:
+ if status_code not in [StatusEnum.test_failed,
+ StatusEnum.product_failed,
+ StatusEnum.wont_fix]:
return None, -3.0, run_id
comment = apply_filters(results[-1]["comment"], bot_run)
ratio = difflib.SequenceMatcher(
@@ -77,7 +113,20 @@
return None, ratio, run_id
-def get_project_id(f, test_run, report):
+def get_project_id(f: TextIO,
+ test_run: models.TestRailTestRun,
+ report: models.TestRailReport) -> Optional[int]:
+ """
+ Returns the TestRail Project ID associated with a specific test run
+
+ :param f: A file-like object for writing log messages
+ :param test_run: An instance of the TestRailTestRun model representing
+ the test run
+ :param report: An instance of the TestRailReport model for reporting
+ purposes
+
+ :return: The TestRail Project ID if found; otherwise, returns None
+ """
project_id = test_rail_api.get_project_id(test_run.project_name)
if not project_id:
f.write("Incorrect Project {}. Stopping processing\n".format(
@@ -88,9 +137,15 @@
return project_id
-def get_plans(test_run, run_date, project_id):
+def get_plans(test_run: models.TestRailTestRun,
+ run_date: datetime.datetime,
+ project_id: int) -> List[int]:
"""
Get plans which will be processed
+
+ :param test_run: TestRun django object
+ :param run_date: retrieve plans created before that date
+ :param project_id: project ID
"""
created_by_id = test_run.created_by_id
kw = {"limit": 100, "created_before": int(run_date)}
@@ -99,63 +154,91 @@
return test_rail_api.get_plans(project_id, test_run.plan_name, **kw)
-def get_last_comment(case_id, test_run):
+def get_last_comment(case_id: int, test_run: models.TestRailTestRun) -> str:
+ """
+ Retrieve the last comment associated with a test case in a TestRail
+ test run.
+
+ :param case_id: An integer representing the ID of the test case.
+ :param test_run: An instance of the TestRailTestRun model that the test
+ case is associated with.
+
+ :return: A string containing the filtered last comment for the specified
+ test case in the given test run
+ """
last_result = test_rail_api.get_result_for_case(
test_run.run_id, case_id)
return apply_filters(
last_result[0]["comment"], test_run)
-def process_old_test(f, plan_id, case_id, last_comment, test_run, test):
+
+def process_old_test(f: TextIO,
+ plan_id: int,
+ case_id: int,
+ last_comment: str,
+ test_run: models.TestRailTestRun,
+ test: dict) -> bool:
+ """
+ Writes to report file similarity info about the TestCase under the test
+
+ :return: Returns False if no similarities found
+ """
sim_result, ratio, run_id = process_plan(
plan_id, case_id, last_comment, test_run)
- if sim_result:
+ per = round(100.0 * ratio, 2)
+ run_link = f"<a href=https://mirantis.testrail.com/index.php?/runs/" \
+ f"view/{run_id}>{run_id} </a>"
+ if isinstance(sim_result, dict):
+ prod_link = "<a href=https://mirantis.jira.com/browse/{defect}>" \
+ "{defect}</a>".format(defect=sim_result["defects"])
+ test_link = "<a href=https://mirantis.testrail.com/index.php?/tests/" \
+ "view/{test_id}>{test_id}</a>".format(
+ test_id=sim_result["test_id"])
if str(sim_result["status_id"]) == StatusEnum.retest:
update_dict = {
"status_id": sim_result["status_id"],
- "comment": "NOT marked by TestRailBot because it's not a regular fail, "
- "similarity with test {} {}%, you can update manualy".format(
- sim_result["test_id"], round(100.0 * ratio, 2))
+ "comment": f"NOT marked by TestRailBot because it's not a "
+ f"regular fail, "
+ f"similarity with test {sim_result['test_id']} "
+ f"{per}%, you can update manually"
}
- f.write("Found similarity trace on the test <a href=https://mirantis.testrail.com/"
- "index.php?/tests/view/{test_id}>{test_id} </a> : \n {dict}\n"
- .format(test_id=sim_result["test_id"], dict=update_dict))
+ f.write(f"Found similarity trace on the test {test_link}: \n"
+ f"{update_dict}\n")
f.flush()
return True
elif ratio > 0.9:
update_dict = {
"status_id": sim_result["status_id"],
- "comment": "Marked by TestRailBot because "
- "of similarity with test {} {}%".format(
- sim_result["test_id"], round(100.0 * ratio, 2)),
+ "comment": f"Marked by TestRailBot because of similarity "
+ f"with test {sim_result['test_id']} {per}%",
"defects": sim_result["defects"]
}
- f.write("Found similarity defect <a href=https://mirantis.jira.com/browse/"
- "{defect}>{defect}</a> on the test <a href=https://mirantis.testrail.com/"
- "index.php?/tests/view/{test_id}>{test_id} </a> : \n {dict}\n"
- .format(defect=sim_result["defects"],test_id=sim_result["test_id"], dict=update_dict))
+ f.write(f"Found similarity defect {prod_link} on the test "
+ f"{test_link} : \n {update_dict}\n")
f.flush()
test_rail_api.add_result(test["id"], update_dict)
return True
elif ratio > 0.7:
- f.write("<b style='color:red;'> Found similarity defect <a href=https://mirantis.jira.com/browse/"
- "{defect}>{defect}</a> on the test <a href=https://mirantis.testrail.com/"
- "index.php?/tests/view/{test_id}>{test_id} </a>, "
- "but NOT marked by TestRailBot because of similarity only"
- " {per}%, you can update manually \n </b>"
- .format(defect=sim_result["defects"],test_id=sim_result["test_id"], per=round(100.0 * ratio, 2)))
+ f.write(f"<b style='color:red;'> Found similarity defect "
+ f"{prod_link} on the test {test_link}, but NOT marked by "
+ f"TestRailBot because of similarity only "
+ f"{per}%, you can update manually \n </b>")
f.flush()
return True
- f.write(
- "Similarity not found due to similarity:{per}, in run <a href=https://mirantis.testrail.com/"
- "index.php?/runs/view/{run_id}>{run_id} </a>\n".format(
- per=round(100.0 * ratio, 2), run_id=run_id))
+ f.write(f"Similarity not found due to similarity:{per}, "
+ f"in run {run_link}\n")
f.flush()
return False
-def process_test(f, test, test_run, plans):
-
+def process_test(f: TextIO,
+ test: dict,
+ test_run: models.TestRailTestRun,
+ plans: List[int]) -> None:
+ """
+ Starts processing of the TestCase for each TestPlan
+ """
case_id = test["case_id"]
f.write("<b> Processing test <a href=https://mirantis.testrail.com/"
@@ -171,14 +254,16 @@
if found:
break
else:
- f.write("<b style='color:red;'> Automatic test processing failed. Please process "
- "test manualy <a href=https://mirantis.testrail.com/"
+ f.write("<b style='color:red;'> Automatic test processing failed. "
+ "Please process test manualy "
+ "<a href=https://mirantis.testrail.com/"
"index.php?/tests/view/{test_id}>{test_id}"
"</a></b>\n".format(test_id=test["id"]))
f.flush()
-def process_test_run(bot_run_id, report_id, path, run_date):
+def process_test_run(bot_run_id: int, report_id: int, path: str,
+ run_date: datetime.datetime) -> None:
"""
This function processes a created bot test run. It retrieves a list
of test plans to process, gathers the failed tests from the test run,
@@ -191,8 +276,8 @@
:param run_date: date until which to retrieve test plans
"""
report = models.TestRailReport.objects.get(pk=report_id)
+ bot_test_run = models.TestRailTestRun.objects.get(pk=bot_run_id)
with open(path, "w") as f:
- bot_test_run = models.TestRailTestRun.objects.get(pk=bot_run_id)
test_run = test_rail_api.get_run_by_id(bot_test_run.run_id)
f.write("Start processing <a href=https://mirantis.testrail.com/"
"index.php?/runs/view/{id}>{name}"
diff --git a/testrail_bot/control/views.py b/testrail_bot/control/views.py
index b12457b..24f4987 100644
--- a/testrail_bot/control/views.py
+++ b/testrail_bot/control/views.py
@@ -69,7 +69,7 @@
if not run.run_name:
run_name += testrail_run['name']
if run.test_pattern:
- run_name += "-" + run.test_pattern
+ run_name += "-" + run.test_pattern
report_name = "{}-run_id-{}-date-{}".format(
run_name, run.run_id, datetime.datetime.isoformat(datetime.datetime.now()))
path = os.path.join(models.fs.location, report_name)
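For reference, the report name these lines produce; the run name and ID below are
invented:

    import datetime

    run_name = "ExampleRun-smoke"   # hypothetical run name plus test pattern
    run_id = 1234                   # hypothetical TestRail run ID
    report_name = "{}-run_id-{}-date-{}".format(
        run_name, run_id, datetime.datetime.isoformat(datetime.datetime.now()))
    # e.g. "ExampleRun-smoke-run_id-1234-date-2024-01-01T12:00:00.000000"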