Reworking the process of acquiring test results for a specific test case
Analyzing results with 'failed', 'blocked', and 'retest' statuses and notifying when three or more are found in a row
Analyzing test results with the 'blocked' status in addition to 'failed'
Enabling simple caching for specific requests to the TestRail API
PRODX-36715
Change-Id: I629e5effefdb0f854c58021d56763ece773f6ea3
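
Below is a minimal, self-contained sketch (not part of the patch) of the
two mechanisms this change relies on: IntEnum comparison against the
string status ids that TestRail returns, and lru_cache-based caching of
API lookups. The enum is a trimmed copy of the real StatusEnum; the
suite lookup body is a hypothetical stand-in for the actual TestRail API
call, and the bare @lru_cache form requires Python 3.8+.

    from enum import IntEnum
    from functools import lru_cache

    class StatusEnum(IntEnum):
        passed = 1
        failed = 5

        def __eq__(self, other):
            # TestRail hands status ids back as strings, e.g. "5"
            if isinstance(other, str) and other.isdigit():
                other = int(other)
            return super().__eq__(other)

        # defining __eq__ resets __hash__; restore it explicitly
        __hash__ = IntEnum.__hash__

        def __str__(self):
            return str(self.value)

    assert StatusEnum.failed == "5"       # string ids compare directly
    assert str(StatusEnum.failed) == "5"  # and render as plain numbers

    @lru_cache
    def get_suite_name_by_id(suite_id: int) -> str:
        # hypothetical stand-in for api.suites.get_suite(suite_id)['name']
        print(f"fetching suite {suite_id}")
        return f"suite-{suite_id}"

    get_suite_name_by_id(7)  # hits the "API" once
    get_suite_name_by_id(7)  # repeated call is served from the cache
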
diff --git a/testrail_bot/control/celery_tasks/enums.py b/testrail_bot/control/celery_tasks/enums.py
index f7b7251..30ccbab 100644
--- a/testrail_bot/control/celery_tasks/enums.py
+++ b/testrail_bot/control/celery_tasks/enums.py
@@ -1,13 +1,30 @@
-class StatusEnum:
- passed = "1"
- blocked = "2"
- untested = "3"
- retest = "4"
- failed = "5"
- skipped = "6"
- in_progress = "7"
- product_failed = "8"
- test_failed = "9"
- wont_fix = "10"
- mixes_success = "11"
- wont_test = "12"
+from enum import IntEnum
+
+
+class StatusEnum(IntEnum):
+ passed = 1
+ blocked = 2
+ untested = 3
+ retest = 4
+ failed = 5
+ skipped = 6
+ in_progress = 7
+ product_failed = 8
+ test_failed = 9
+ wont_fix = 10
+ mixes_success = 11
+ wont_test = 12
+
+    def __eq__(self, other):
+        # The TestRail API returns status ids as strings (e.g. "5"),
+        # so allow direct comparison against numeric strings.
+        if isinstance(other, str) and other.isdigit():
+            other = int(other)
+        return super().__eq__(other)
+
+    # Defining __eq__ resets __hash__ to None; restore it so members
+    # stay hashable (usable as dict keys, in sets and with lru_cache).
+    __hash__ = IntEnum.__hash__
+
+    def __str__(self):
+        return str(self.value)
diff --git a/testrail_bot/control/celery_tasks/tasks.py b/testrail_bot/control/celery_tasks/tasks.py
index 32c3d55..3c5149e 100644
--- a/testrail_bot/control/celery_tasks/tasks.py
+++ b/testrail_bot/control/celery_tasks/tasks.py
@@ -1,19 +1,20 @@
from __future__ import absolute_import, unicode_literals
+import traceback
from celery import shared_task
-
from . import jenkins_pipeline
from . import testrail_pipeline
@shared_task
-def process_run(bot_run_id, report_id, path, run_date):
+def process_run(bot_run_id, report_id, path):
try:
- testrail_pipeline.process_test_run(bot_run_id, report_id, path,
- run_date)
+ testrail_pipeline.process_test_run(bot_run_id, report_id, path)
except BaseException as e:
with open(path, 'a') as f:
+ print(f"Caught next exception: {e}")
+ traceback.print_exc()
f.write("<b style='color:red;background-color:pink'>Task "
"completed unsuccessfully</b>\n")
f.flush()
diff --git a/testrail_bot/control/celery_tasks/test_rail_api.py b/testrail_bot/control/celery_tasks/test_rail_api.py
index 37baa81..0afc319 100644
--- a/testrail_bot/control/celery_tasks/test_rail_api.py
+++ b/testrail_bot/control/celery_tasks/test_rail_api.py
@@ -1,7 +1,8 @@
from testrail_api import TestRailAPI, StatusCodeError
from django.conf import settings
-from typing import Optional, List
+from typing import Optional, List, Iterator, Union
+from functools import lru_cache
from .enums import StatusEnum
api = TestRailAPI(
@@ -10,6 +11,7 @@
settings.TESTRAIL_PASSWORD)
+@lru_cache
def get_project_id(project_name: str) -> Optional[int]:
project = list(filter(
lambda x: x["name"] == project_name,
@@ -20,25 +22,29 @@
return None
+@lru_cache
def get_suite_by_id(suite_id: int) -> dict:
return api.suites.get_suite(suite_id)
+@lru_cache
def get_suite_name_by_id(suite_id: int) -> str:
return api.suites.get_suite(suite_id)['name']
+@lru_cache
def get_suite_test_type(suite_id: int) -> str:
suite_name = get_suite_name_by_id(suite_id)
return suite_name.split(']')[1]
-def get_plans(project_id: int, plan_name: str, **kwargs) -> List[int]:
- plans = api.plans.get_plans(project_id, **kwargs)['plans']
- return [x["id"] for x in filter(
- lambda x: plan_name in x["name"], plans)]
+@lru_cache
+def get_plans(project_id: int, **kwargs) -> List[dict]:
+ plans = api.plans.get_plans(project_id=project_id, **kwargs)['plans']
+ return plans
+@lru_cache
def get_entries(plan_id: int) -> List[dict]:
return api.plans.get_plan(plan_id)["entries"]
@@ -47,6 +53,46 @@
return api.runs.get_run(run_id)
+def get_result_history_for_case(case_id: int,
+                                status_id: Optional[Union[int, list]] = None,
+                                project_name: str = "Mirantis Cloud Platform",
+                                plan_name: Optional[str] = None,
+                                run_name: Optional[str] = None,
+                                created_after: Optional[int] = None,
+                                created_before: Optional[int] = None,
+                                created_by: Optional[int] = None
+                                ) -> Iterator[List[dict]]:
+ limit_step = 100
+ suite_id = api.cases.get_case(case_id=case_id)["suite_id"]
+    for offset in range(0, 2000, limit_step):  # cap: at most 2000 plans
+ plans = get_plans(project_id=get_project_id(project_name),
+ limit=limit_step,
+ offset=offset,
+ created_after=created_after,
+ created_before=created_before,
+ created_by=created_by)
+ if not plans:
+ return
+ for plan in plans:
+ if plan_name and plan_name not in plan["name"]:
+ continue
+ entries = get_entries(plan["id"])
+ for entry in entries:
+ for run in entry["runs"]:
+ if run_name and run_name not in run["name"]:
+ continue
+ if suite_id and run["suite_id"] != suite_id:
+ continue
+                    if isinstance(status_id, (list, tuple)):
+                        status_id = ",".join(map(str, status_id))
+
+ results = get_result_for_case(run_id=run["id"],
+ case_id=case_id,
+ status_id=status_id)
+ if results:
+ yield results
+
+
def get_run_id(entries: List[dict], run_name: str) -> Optional[int]:
entries = list(filter(
lambda x: x["name"] == run_name,
@@ -56,17 +102,22 @@
return entries[0]["runs"][0]["id"]
-def get_result_for_case(run_id: int, case_id: int) -> Optional[List[dict]]:
+def get_result_for_case(run_id: int,
+ case_id: int,
+ **kwargs) -> Optional[List[dict]]:
try:
- results = api.results.get_results_for_case(run_id, case_id)['results']
+        results = api.results.get_results_for_case(
+            run_id, case_id, **kwargs)['results']
except StatusCodeError:
return None
return results
def get_failed_tests(last_run_id: int) -> List[dict]:
- return api.tests.get_tests(
- last_run_id, status_id=StatusEnum.failed)['tests']
+ failed_statuses = [StatusEnum.failed,
+ StatusEnum.blocked]
+ status_id = ",".join(map(str, failed_statuses))
+ return api.tests.get_tests(last_run_id, status_id=status_id)['tests']
def add_result(test_id: int, update_dict: dict) -> None:
diff --git a/testrail_bot/control/celery_tasks/testrail_pipeline.py b/testrail_bot/control/celery_tasks/testrail_pipeline.py
index 4d21342..84ed930 100644
--- a/testrail_bot/control/celery_tasks/testrail_pipeline.py
+++ b/testrail_bot/control/celery_tasks/testrail_pipeline.py
@@ -1,7 +1,8 @@
-import datetime
import difflib
-from typing import TextIO, List, Tuple, Optional
+from typing import TextIO, List, Tuple, Optional, Iterator
+from datetime import datetime as dt
+from datetime import timedelta
from . import filters
from .enums import StatusEnum
from . import test_rail_api
@@ -64,53 +65,51 @@
return run
-def process_plan(plan_id: int,
+def find_fail_with_same_comment(
case_id: int,
last_comment: str,
- bot_run: models.TestRailTestRun) -> Tuple[Optional[dict],
- float, int]:
+ bot_run: models.TestRailTestRun) -> \
+ Iterator[Tuple[Optional[dict], float, int]]:
"""
-    This function performs a search for a similar failure within a test plan.
+    This function searches the test results history for a similar failure.
- :param plan_id: id of test plan
:param case_id: id of test case for failed test
:param last_comment: last trace for failed test
-    :param bot_run: number of result reports from tab 'Reports'
+    :param bot_run: the TestRailTestRun database object for this bot run
"""
- runs = []
- testrail_run = test_rail_api.get_run_by_id(bot_run.run_id)
- suite_id = testrail_run['suite_id']
- runs_in_plan = test_rail_api.get_entries(plan_id)
- test_pattern = bot_run.test_pattern
- if test_pattern:
- runs = get_runs_by_pattern(runs_in_plan, test_pattern, suite_id)
- else:
- runs = [t_run['runs'][0]['id']
- for t_run in runs_in_plan
- if suite_id == t_run['suite_id']]
+ end_lookup_date = dt.strptime(
+ f"{bot_run.timestamp} 23:59:59", "%Y-%m-%d %H:%M:%S")
+    start_lookup_date = end_lookup_date - timedelta(days=90)  # ~3 months
+ filters = {
+ "created_by": bot_run.created_by_id,
+ "created_before": int(dt.timestamp(end_lookup_date)),
+ "created_after": int(dt.timestamp(start_lookup_date)),
+ "plan_name": bot_run.plan_name,
+ "run_name": bot_run.run_name,
+ "status_id": [StatusEnum.test_failed,
+ StatusEnum.failed,
+ StatusEnum.blocked,
+ StatusEnum.product_failed,
+ StatusEnum.wont_fix,
+ StatusEnum.retest],
+ }
+    for n, results in enumerate(test_rail_api.get_result_history_for_case(
+            case_id, **filters)):
+        # Cap the scan; a None triple = history exhausted, no good match.
+        if n >= 500:
+            yield None, None, None
+            return
- results = []
- ratio = -1.0
- run_id = 0
-
- for run_id in runs:
- results = test_rail_api.get_result_for_case(run_id, case_id)
- if not results:
- return None, -2.0, run_id
-
- status_code = str(results[0]["status_id"])
- if status_code not in [StatusEnum.test_failed,
- StatusEnum.product_failed,
- StatusEnum.wont_fix]:
- return None, -3.0, run_id
comment = apply_filters(results[-1]["comment"], bot_run)
ratio = difflib.SequenceMatcher(
lambda symbol: symbol in [" ", ",", "\n"],
last_comment, comment, autojunk=False).ratio()
+
if ratio > 0.7:
- return results[0], ratio, run_id
- return None, ratio, run_id
+ run_id = test_rail_api.api.tests.get_test(results[0]["test_id"])[
+ "run_id"]
+ yield results[0], ratio, run_id
def get_project_id(f: TextIO,
@@ -138,8 +137,8 @@
def get_plans(test_run: models.TestRailTestRun,
- run_date: datetime.datetime,
- project_id: int) -> List[int]:
+ run_date: dt,
+ project_id: int) -> List[dict]:
"""
Get plans which will be processed
@@ -151,7 +150,7 @@
kw = {"limit": 100, "created_before": int(run_date)}
if created_by_id:
kw["created_by"] = created_by_id
- return test_rail_api.get_plans(project_id, test_run.plan_name, **kw)
+ return test_rail_api.get_plans(project_id, **kw)
def get_last_comment(case_id: int, test_run: models.TestRailTestRun) -> str:
@@ -174,7 +173,6 @@
def process_old_test(f: TextIO,
- plan_id: int,
case_id: int,
last_comment: str,
test_run: models.TestRailTestRun,
@@ -184,34 +182,47 @@
:return: Returns False if no similarities found
"""
- sim_result, ratio, run_id = process_plan(
- plan_id, case_id, last_comment, test_run)
- per = round(100.0 * ratio, 2)
- run_link = f"<a href=https://mirantis.testrail.com/index.php?/runs/" \
- f"view/{run_id}>{run_id} </a>"
- if type(sim_result) is dict:
+    found_unknown_fail = 0
+    for sim_result, ratio, run_id in find_fail_with_same_comment(case_id,
+                                                                 last_comment,
+                                                                 test_run):
+        # A None result is the generator's sentinel: the history scan
+        # finished without finding a close enough match.
+        if sim_result is None:
+            f.write("No similar failure found in the results history\n")
+            f.flush()
+            return False
+        # Skip hits that come from the run currently being processed.
+        if str(test_run.run_id) == str(run_id):
+            continue
+        per = round(100.0 * ratio, 2)
+
prod_link = "<a href=https://mirantis.jira.com/browse/{defect}>" \
"{defect}</a>".format(defect=sim_result["defects"])
test_link = "<a href=https://mirantis.testrail.com/index.php?/tests/" \
"view/{test_id}>{test_id}</a>".format(
test_id=sim_result["test_id"])
- if str(sim_result["status_id"]) == StatusEnum.retest:
- update_dict = {
- "status_id": sim_result["status_id"],
- "comment": f"NOT marked by TestRailBot because it's not a "
- f"regular fail, "
- f"similarity with test {sim_result['test_id']} "
- f"{per}%, you can update manually"
- }
- f.write(f"Found similarity trace on the test {test_link}: \n"
- f"{update_dict}\n")
+ status_id = int(sim_result['status_id'])
+ if status_id in [StatusEnum.retest, StatusEnum.failed,
+ StatusEnum.blocked]:
+ f.write(f"Found a similar result on the test "
+ f"{test_link} with similarity {per}% and "
+ f"{StatusEnum(status_id).name} status and {prod_link} "
+ f"defect. <i>Continuing...</i>\n")
f.flush()
- return True
+ found_unknown_fail += 1
+            if found_unknown_fail >= 3:
+                f.write("<b style='color:red;'>Detected 3 or more "
+                        "consecutive unknown failures</b>\n")
+ f.flush()
+ return False
+ continue
elif ratio > 0.9:
comment = f"Marked by TestRailBot because of similarity " \
f"with test {sim_result['test_id']} {per}%"
# Copy the original comment if it was not created by this bot
if str(sim_result["status_id"]) == StatusEnum.wont_fix \
+ and sim_result["comment"] \
and "Marked by TestRailBot" not in sim_result["comment"]:
comment = sim_result["comment"]
@@ -220,56 +231,56 @@
"comment": comment,
"defects": sim_result["defects"]
}
- f.write(f"Found similarity defect {prod_link} on the test "
- f"{test_link} : \n {update_dict}\n")
+ f.write(f"Found a similar result on the test "
+ f"{test_link} with similarity {per}% and "
+ f"{StatusEnum(status_id).name} status and {prod_link} "
+ f"defect\n"
+ f"<i>Pushing to TestRail {update_dict}</i>\n\n")
f.flush()
test_rail_api.add_result(test["id"], update_dict)
return True
elif ratio > 0.7:
- f.write(f"<b style='color:red;'> Found similarity defect "
- f"{prod_link} on the test {test_link}, but NOT marked by "
- f"TestRailBot because of similarity only "
- f"{per}%, you can update manually \n </b>")
+ f.write(f"<b style='color:red;'> "
+ f"Found a similar result on the test "
+ f"{test_link} with similarity {per}% and "
+ f"{StatusEnum(status_id).name} status and {prod_link} "
+ f"defect,\n but NOT marked by "
+ f"TestRailBot because of similarity only, "
+ f"you can update manually \n </b>")
f.flush()
return True
- f.write(f"Similarity not found due to similarity:{per}, "
- f"in run {run_link}\n")
- f.flush()
- return False
def process_test(f: TextIO,
test: dict,
- test_run: models.TestRailTestRun,
- plans: List[int]) -> None:
+ test_run: models.TestRailTestRun) -> None:
"""
Starts processing for the TestCase for each TestPlan
"""
case_id = test["case_id"]
- f.write("<b> Processing test <a href=https://mirantis.testrail.com/"
- "index.php?/tests/view/{test_id}>{test_id}"
- "</a></b> \n".format(test_id=test["id"]))
+ f.write("<br><b>Proceeding test {title}<br>with id <a "
+ "href=https://mirantis.testrail.com/"
+ "index.php?/tests/view/{id}>[{id}]"
+ "</a></b>\n".format(id=test['id'], title=test["title"]))
f.flush()
last_comment = get_last_comment(case_id, test_run)
- for plan_id in plans[1:]:
- found = process_old_test(
- f, plan_id, case_id, last_comment, test_run, test)
- if found:
- break
+ found = process_old_test(
+ f, case_id, last_comment, test_run, test)
+ if found:
+ return
else:
- f.write("<b style='color:red;'> Automatic test processing failed. "
+ f.write("<b style='color:red;'>Automatic test processing failed. "
"Please process test manually "
"<a href=https://mirantis.testrail.com/"
"index.php?/tests/view/{test_id}>{test_id}"
- "</a></b>\n".format(test_id=test["id"]))
+ "</a></b>\n\n".format(test_id=test["id"]))
f.flush()
-def process_test_run(bot_run_id: int, report_id: int, path: str,
- run_date: datetime.datetime) -> None:
+def process_test_run(bot_run_id: int, report_id: int, path: str) -> None:
"""
This function processes a created bot test run. It retrieves a list
of test plans to process, gathers the failed tests from the test run,
@@ -279,7 +290,6 @@
-    :param bot_run_id: number of result reports from tab 'Reports'
-    :param report_id: number of run from tab 'Test Run'
+    :param bot_run_id: id of the bot test run from the 'Test Run' tab
+    :param report_id: id of the result report from the 'Reports' tab
:param path: path to report results
- :param run_date: date until which to retrieve test plans
"""
report = models.TestRailReport.objects.get(pk=report_id)
bot_test_run = models.TestRailTestRun.objects.get(pk=bot_run_id)
@@ -294,12 +304,10 @@
if not project_id:
return
- plans = get_plans(bot_test_run, run_date, project_id)
-
# failed_tests: all failed tests in test run
failed_tests = test_rail_api.get_failed_tests(bot_test_run.run_id)
for test in failed_tests:
- process_test(f, test, bot_test_run, plans)
+ process_test(f, test, bot_test_run)
f.write("Test processing finished")
f.flush()
finish_report(report)
diff --git a/testrail_bot/control/views.py b/testrail_bot/control/views.py
index ba5434b..2c9f0b2 100644
--- a/testrail_bot/control/views.py
+++ b/testrail_bot/control/views.py
@@ -86,7 +86,7 @@
report_name=report_name,
path=path)
report.save()
- process_run.delay(run_id, report.id, path, run_date)
+ process_run.delay(run_id, report.id, path)
return redirect("single_report", report.id)
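
For reviewers, a condensed sketch (not part of the patch) of the decision
flow in process_old_test: count matches that carry an "unknown failure"
status and notify once three of them appear in a row; auto-mark only on a
high-similarity match with a triaged status. The analyze() helper and its
(status_id, similarity) pairs are hypothetical simplifications of what
find_fail_with_same_comment yields.

    from enum import IntEnum

    class StatusEnum(IntEnum):
        blocked = 2
        retest = 4
        failed = 5
        wont_fix = 10

    UNKNOWN = {StatusEnum.retest, StatusEnum.failed, StatusEnum.blocked}

    def analyze(matches):
        # matches: (status_id, similarity) pairs, most recent first
        unknown_in_a_row = 0
        for status_id, similarity in matches:
            if StatusEnum(status_id) in UNKNOWN:
                unknown_in_a_row += 1
                if unknown_in_a_row >= 3:
                    return "notify: 3 or more consecutive unknown failures"
                continue
            if similarity > 0.9:
                return "auto-mark the result"
        return "no decision, process manually"

    print(analyze([(10, 0.95)]))                    # auto-mark the result
    print(analyze([(5, 0.8), (2, 0.8), (4, 0.8)]))  # notify: 3 or more ...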