Implement analyzing of a whole test plan
The test_run field can accept a TestPlan ID.
The code will automatically detect which kind of ID was entered: a TestRun's or a TestPlan's.
The list of analyzed tests will be stored in the TestRun model
and will not be processed again when rerunning the TestRun.
PRODX-35956
Change-Id: I91b59a6a6abc932e2026b05428d408a5dec31134
diff --git a/testrail_bot/control/celery_tasks/tasks.py b/testrail_bot/control/celery_tasks/tasks.py
index 3c5149e..71b6783 100644
--- a/testrail_bot/control/celery_tasks/tasks.py
+++ b/testrail_bot/control/celery_tasks/tasks.py
@@ -8,9 +8,10 @@
@shared_task
-def process_run(bot_run_id, report_id, path):
+def process_run(bot_run_id, report_id, path, is_testplan):
try:
- testrail_pipeline.process_test_run(bot_run_id, report_id, path)
+ testrail_pipeline.process_test_run(bot_run_id, report_id, path,
+ is_testplan)
except BaseException as e:
with open(path, 'a') as f:
print(f"Caught next exception: {e}")
diff --git a/testrail_bot/control/celery_tasks/test_rail_api.py b/testrail_bot/control/celery_tasks/test_rail_api.py
index 0afc319..e5932bc 100644
--- a/testrail_bot/control/celery_tasks/test_rail_api.py
+++ b/testrail_bot/control/celery_tasks/test_rail_api.py
@@ -1,8 +1,10 @@
from testrail_api import TestRailAPI, StatusCodeError
+from requests.exceptions import ReadTimeout
from django.conf import settings
from typing import Optional, List, Iterator
from functools import lru_cache
+from retry import retry
from .enums import StatusEnum
api = TestRailAPI(
@@ -53,6 +55,10 @@
return api.runs.get_run(run_id)
+def get_plan_by_id(plan_id: int) -> dict:
+ return api.plans.get_plan(plan_id)
+
+
def get_result_history_for_case(case_id: int,
status_id: int = None,
project_name: str = "Mirantis Cloud Platform",
@@ -102,6 +108,7 @@
return entries[0]["runs"][0]["id"]
+@retry(ReadTimeout, delay=1, jitter=2, tries=3)
def get_result_for_case(run_id: int,
case_id: int,
**kwargs) -> Optional[List[dict]]:
@@ -113,12 +120,30 @@
return results
-def get_failed_tests(last_run_id: int) -> List[dict]:
+def get_failed_tests(last_run_id: int, by_plans=False) -> List[dict]:
failed_statuses = [StatusEnum.failed,
StatusEnum.blocked]
status_id = ",".join(map(str, failed_statuses))
- return api.tests.get_tests(last_run_id, status_id=status_id)['tests']
+ if by_plans:
+ failed_tests = []
+ for entry in get_entries(last_run_id):
+ for run in entry["runs"]:
+ failed_tests += api.tests.get_tests(
+ run_id=run["id"],
+ status_id=status_id)['tests']
+ return failed_tests
+ return api.tests.get_tests(
+ last_run_id, status_id=status_id)['tests']
def add_result(test_id: int, update_dict: dict) -> None:
api.results.add_result(test_id, **update_dict)
+
+
+def is_testplan(plan_id):
+ try:
+ plan = api.plans.get_plan(plan_id)
+ return bool(plan)
+ except StatusCodeError as e:
+ print(f"{e=}")
+ return False
diff --git a/testrail_bot/control/celery_tasks/testrail_pipeline.py b/testrail_bot/control/celery_tasks/testrail_pipeline.py
index 5f7e7c4..175fe74 100644
--- a/testrail_bot/control/celery_tasks/testrail_pipeline.py
+++ b/testrail_bot/control/celery_tasks/testrail_pipeline.py
@@ -21,27 +21,42 @@
report.save()
-def apply_filters(data: str, test_run: models.TestRailTestRun) -> str:
+def apply_filters(data: str,
+ filter_last_traceback: bool,
+ ip_filter: bool,
+ uuid_filter: bool,
+ filter_func: str
+ ) -> str:
"""
- Applies modifiers to text (filtering, masking etc.)
- :param data: text to modify
- :param test_run: TestRun object that contains switchers for filters
- :return: modified text
+ Applies various text modifiers (filtering, masking, etc.) to the input
+ text.
+
+ :param data: The input text to be modified
+ :param filter_last_traceback: A boolean indicating whether to apply the
+ last traceback filter
+ :param ip_filter: A boolean indicating whether to apply the IP filter
+ :param uuid_filter: A boolean indicating whether to apply the UUID filter
+ :param filter_func: A Python function as a string that can be executed
+ to apply custom text filtering
+
+ :return: The modified text after applying the specified filters and
+ functions.
"""
+
if not data:
data = ""
- if test_run.filter_last_traceback:
+ if filter_last_traceback:
data = filters.last_traceback_filter(data)
- if test_run.ip_filter:
+ if ip_filter:
data = filters.filter_ip(data)
- if test_run.uuid_filter:
+ if uuid_filter:
data = filters.filter_uuid(data)
- if test_run.filter_func:
- exec(test_run.filter_func)
+ if filter_func:
+ exec(filter_func)
data = locals()["custom_filter"](data)
return data
@@ -69,26 +84,44 @@
def find_fail_with_same_comment(
- case_id: int,
- last_comment: str,
- bot_run: models.TestRailTestRun) -> \
- Iterator[Tuple[Optional[dict], float, int]]:
+ case_id: int,
+ last_comment: str,
+ plan_name: str,
+ test_pattern: str,
+ created_by_id: int,
+ created_before: int,
+ run_name: str,
+ text_filters: dict,
+) -> Iterator[Tuple[Optional[dict], float, int]]:
"""
- This function performs a search for a similar failure within a test plan.
+ Searches for similar failures within a test plan based on specific
+ criteria.
- :param case_id: id of test case for failed test
- :param last_comment: last trace for failed test
- :param bot_run: number of result reports from tab 'Reports'
- """
+ :param case_id: The ID of the test case for which the failure is
+ being searched
+ :param last_comment: The last comment associated with the failed test
+ :param plan_name: The name of the test plan to search within
+ :param test_pattern: A pattern for filtering test runs
+ :param created_by_id: The ID of the user who created the test plan
+ :param created_before: The date (timestamp) before which the test
+ plan was created
+ :param run_name: The name of the test run
+ :param text_filters: A dictionary of text filters to apply when
+ comparing comments
+
+ :return: An iterator that yields tuples containing information
+ about matching test results, including test result data, similarity
+ ratio, and the associated run ID.
+ """
end_lookup_date = dt.strptime(
- f"{bot_run.timestamp} 23:59:59", "%Y-%m-%d %H:%M:%S")
- start_lookup_date = end_lookup_date + timedelta(days=-3*30)
+ f"{created_before} 23:59:59", "%Y-%m-%d %H:%M:%S")
+ start_lookup_date = end_lookup_date + timedelta(days=-3 * 30)
filters = {
- "created_by": bot_run.created_by_id,
+ "created_by": created_by_id,
"created_before": int(dt.timestamp(end_lookup_date)),
"created_after": int(dt.timestamp(start_lookup_date)),
- "plan_name": bot_run.plan_name,
- "run_name": bot_run.run_name,
+ "plan_name": plan_name,
+ "run_name": run_name,
"status_id": [StatusEnum.test_failed,
StatusEnum.failed,
StatusEnum.blocked,
@@ -104,7 +137,7 @@
yield None, None, None
return
- comment = apply_filters(results[-1]["comment"], bot_run)
+ comment = apply_filters(results[-1]["comment"], **text_filters)
ratio = difflib.SequenceMatcher(
lambda symbol: symbol in [" ", ",", "\n"],
last_comment, comment, autojunk=False).ratio()
@@ -156,44 +189,49 @@
return test_rail_api.get_plans(project_id, **kw)
-def get_last_comment(case_id: int, test_run: models.TestRailTestRun) -> str:
+def get_last_comment(case_id: int, run_id: int, text_filters: dict) -> str:
"""
Retrieve the last comment associated with a test case in a TestRail
test run.
- :param case_id: An integer representing the ID of the test case.
- :param test_run: An instance of the TestRailTestRun model that the test
- case is associated with.
+
+ :param case_id: ID of the test case.
+ :param run_id: ID of the test run for that test case
+ :param text_filters: dictionary with switchers for text filters
:return: A string containing the filtered last comment for the specified
test case in the given test run
"""
last_result = test_rail_api.get_result_for_case(
- test_run.run_id, case_id)
+ run_id, case_id)
return apply_filters(
- last_result[0]["comment"], test_run)
+ last_result[0]["comment"], **text_filters)
def process_old_test(f: TextIO,
case_id: int,
last_comment: str,
- test_run: models.TestRailTestRun,
- test: dict) -> bool:
+ run_id: int,
+ test: dict,
+ testrail_filters: dict,
+ text_filters: dict) -> bool:
"""
Writes to report file similarity info about the TestCase under the test
:return: Returns False if no similarities found
"""
found_unknown_fail = 0
- for sim_result, ratio, run_id in find_fail_with_same_comment(case_id,
- last_comment,
- test_run):
- if str(test_run.run_id) == str(run_id):
+ for sim_result, ratio, old_run_id in find_fail_with_same_comment(
+ case_id,
+ last_comment,
+ text_filters=text_filters,
+ **testrail_filters):
+ if str(run_id) == str(old_run_id):
continue
per = round(100.0 * ratio, 2)
run_link = f"<a href=https://mirantis.testrail.com/index.php?/runs/" \
- f"view/{run_id}>{run_id} </a>"
+ f"view/{old_run_id}>{old_run_id} </a>"
if type(sim_result) is not dict:
f.write(f"Similarity not found due to similarity: {per}, "
f"in run {run_link}\n")
@@ -256,11 +294,13 @@
def process_test(f: TextIO,
test: dict,
- test_run: models.TestRailTestRun) -> None:
+ testrail_filters: dict,
+ text_filters: dict) -> None:
"""
Starts processing for the TestCase for each TestPlan
"""
case_id = test["case_id"]
+ run_id = test["run_id"]
f.write("<br><b>Proceeding test {title}<br>with id <a "
"href=https://mirantis.testrail.com/"
@@ -268,10 +308,10 @@
"</a></b>\n".format(id=test['id'], title=test["title"]))
f.flush()
- last_comment = get_last_comment(case_id, test_run)
+ last_comment = get_last_comment(case_id, run_id, text_filters)
found = process_old_test(
- f, case_id, last_comment, test_run, test)
+ f, case_id, last_comment, run_id, test, testrail_filters, text_filters)
if found:
return
else:
@@ -283,7 +323,8 @@
f.flush()
-def process_test_run(bot_run_id: int, report_id: int, path: str) -> None:
+def process_test_run(bot_run_id: int, report_id: int, path: str,
+ is_testplan: bool) -> None:
"""
This function processes a created bot test run. It retrieves a list
of test plans to process, gathers the failed tests from the test run,
@@ -293,24 +334,41 @@
:param bot_run_id: number of result reports from tab 'Reports'
:param report_id: number of run from tab 'Test Run'
:param path: path to report results
+ :param is_testplan: flag to show that TestPlan will be proceeded instead
+ of TestRun
"""
report = models.TestRailReport.objects.get(pk=report_id)
bot_test_run = models.TestRailTestRun.objects.get(pk=bot_run_id)
with open(path, "w") as f:
- test_run = test_rail_api.get_run_by_id(bot_test_run.run_id)
- f.write("Start processing <a href=https://mirantis.testrail.com/"
- "index.php?/runs/view/{id}>{name}"
- "</a>\n".format(id=test_run['id'], name=test_run['name']))
+ if is_testplan:
+ test_run = test_rail_api.get_plan_by_id(bot_test_run.run_id)
+ run_type = "plan"
+ else:
+ test_run = test_rail_api.get_run_by_id(bot_test_run.run_id)
+ run_type = "run"
+ f.write("Start processing {run_type} <a "
+ "href=https://mirantis.testrail.com/"
+ "index.php?/{run_type}s/view/{id}>{name}"
+ "</a>\n".format(run_type=run_type,
+ id=test_run['id'],
+ name=test_run['name']))
f.flush()
project_id = get_project_id(f, bot_test_run, report)
if not project_id:
return
- # failed_tests: all failed tests in test run
- failed_tests = test_rail_api.get_failed_tests(bot_test_run.run_id)
+ # failed_tests: all failed tests in test run/plan
+ failed_tests = test_rail_api.get_failed_tests(bot_test_run.run_id,
+ by_plans=is_testplan)
for test in failed_tests:
- process_test(f, test, bot_test_run)
+ if test["id"] in bot_test_run.checked_tests:
+ continue
+ process_test(f, test,
+ bot_test_run.testrail_filters,
+ bot_test_run.text_filters)
+ bot_test_run.checked_tests.append(test["id"])
+ bot_test_run.save()
f.write("Test processing finished")
f.flush()
finish_report(report)
diff --git a/testrail_bot/control/migrations/0002_testrailtestrun_checked_tests.py b/testrail_bot/control/migrations/0002_testrailtestrun_checked_tests.py
new file mode 100644
index 0000000..4e62602
--- /dev/null
+++ b/testrail_bot/control/migrations/0002_testrailtestrun_checked_tests.py
@@ -0,0 +1,19 @@
+# Generated by Django 4.2.6 on 2023-11-08 15:28
+
+import control.models
+from django.db import migrations
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('control', '0001_initial'),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name='testrailtestrun',
+ name='checked_tests',
+ field=control.models.IntegerListField(default=[], editable=False),
+ ),
+ ]
diff --git a/testrail_bot/control/models.py b/testrail_bot/control/models.py
index e7bea45..f0e014f 100644
--- a/testrail_bot/control/models.py
+++ b/testrail_bot/control/models.py
@@ -2,15 +2,42 @@
from django.core.files.storage import FileSystemStorage
from django.db import models
-from django import forms
+
+
+class IntegerListField(models.Field):
+ def __init__(self, *args, **kwargs):
+ kwargs['editable'] = False
+ super(IntegerListField, self).__init__(*args, **kwargs)
+
+ def db_type(self, connection):
+ return 'text'
+
+ def from_db_value(self, value, expression, connection):
+ if not value:
+ return []
+ return [int(x) for x in value.split(',')]
+
+ def to_python(self, value):
+ if isinstance(value, list):
+ return value
+ if not value:
+ return []
+ return [int(x) for x in value.split(',')]
+
+ def get_prep_value(self, value):
+ if not value:
+ return ''
+ return ','.join(str(int(x)) for x in value)
class TestRailTestRun(models.Model):
- project_name = models.CharField(max_length=300, default="Mirantis Cloud Platform")
+ project_name = models.CharField(max_length=300,
+ default="Mirantis Cloud Platform")
plan_name = models.CharField(max_length=300, default="[MCP2.0]OSCORE")
run_name = models.CharField(max_length=300, blank=True)
test_pattern = models.CharField(max_length=300, blank=True)
run_id = models.CharField(max_length=300)
+ checked_tests = IntegerListField(default=list())
created_by_id = models.IntegerField(default='109')
filter_func = models.TextField(null=True, blank=True)
ip_filter = models.BooleanField(default=True)
@@ -18,6 +45,25 @@
filter_last_traceback = models.BooleanField(default=False)
timestamp = models.DateField(default=datetime.date.today())
+ @property
+ def text_filters(self):
+ return {
+ "filter_last_traceback": self.filter_last_traceback,
+ "uuid_filter": self.uuid_filter,
+ "ip_filter": self.ip_filter,
+ "filter_func": self.filter_func,
+ }
+
+ @property
+ def testrail_filters(self):
+ return {
+ "created_by_id": self.created_by_id,
+ "created_before": self.timestamp,
+ "run_name": self.run_name,
+ "plan_name": self.plan_name,
+ "test_pattern": self.test_pattern,
+ }
+
fs = FileSystemStorage()
diff --git a/testrail_bot/control/templates/control/update_run.html b/testrail_bot/control/templates/control/update_run.html
index 8ccd94e..5ca7033 100644
--- a/testrail_bot/control/templates/control/update_run.html
+++ b/testrail_bot/control/templates/control/update_run.html
@@ -59,5 +59,10 @@
</div>
{% endbuttons %}
</form>
+ Already checked tests:
+ {% for test_id in checked_tests %}
+ <a href="https://mirantis.testrail.com/index.php?/tests/view/{{ test_id }}">{{ test_id }}</a>
+ {% endfor %}
+
</div>
{% endblock %}
\ No newline at end of file
diff --git a/testrail_bot/control/views.py b/testrail_bot/control/views.py
index 2c9f0b2..847bc3c 100644
--- a/testrail_bot/control/views.py
+++ b/testrail_bot/control/views.py
@@ -30,7 +30,8 @@
form = forms.TestRunForm(instance=run)
return render(request, "control/update_run.html",
- {"form": form, "run_id": run_id})
+ {"form": form, "run_id": run_id, "checked_tests":
+ run.checked_tests})
def create_run(request):
@@ -64,8 +65,13 @@
def submit_run(request, run_id):
run = models.TestRailTestRun.objects.get(pk=run_id)
- testrail_run = test_rail_api.get_run_by_id(run.run_id)
- run_name = ''
+ is_testplan = test_rail_api.is_testplan(run.run_id)
+
+ if is_testplan:
+ testrail_run = test_rail_api.get_plan_by_id(run.run_id)
+ else:
+ testrail_run = test_rail_api.get_run_by_id(run.run_id)
+ run_name = 'Plan-' if is_testplan else "Run-"
if not run.run_name:
run_name += testrail_run['name']
if run.test_pattern:
@@ -75,19 +81,12 @@
path = os.path.join(models.fs.location, report_name)
with open(path, "w"):
pass
- form = forms.TestRunForm(request.POST)
- run_date = None
- if form.is_valid():
- run_date = datetime.datetime.combine(
- form.cleaned_data["timestamp"] + datetime.timedelta(days=1),
- datetime.datetime.min.time()).timestamp()
report = models.TestRailReport(
report_name=report_name,
path=path)
report.save()
- process_run.delay(run_id, report.id, path)
-
+ process_run.delay(run_id, report.id, path, is_testplan)
return redirect("single_report", report.id)
diff --git a/testrail_bot/requirements.txt b/testrail_bot/requirements.txt
index 5762f72..865ce30 100644
--- a/testrail_bot/requirements.txt
+++ b/testrail_bot/requirements.txt
@@ -7,3 +7,4 @@
uWSGI==2.0.19.1
python-jenkins==1.7.0
matplotlib==3.3.2
+retry==0.9.2