Merge "Add a function to remove html tages from traces"
diff --git a/testrail_bot/control/celery_tasks/schedules_pipeline.py b/testrail_bot/control/celery_tasks/schedules_pipeline.py
index 080519a..5a87374 100644
--- a/testrail_bot/control/celery_tasks/schedules_pipeline.py
+++ b/testrail_bot/control/celery_tasks/schedules_pipeline.py
@@ -1,10 +1,14 @@
 import os
+import logging
 from datetime import datetime, timedelta
+import subprocess
 
 from .. import models
 from . import tasks
 from .test_rail_api import get_planid_by_name
 
+LOG = logging.getLogger(__name__)
+
 
 def task_to_check_today_testplan():
     """
@@ -106,8 +110,64 @@
     """
     today = datetime.today()
     border_date = (today - timedelta(days=30 * 2)).strftime("%Y-%m-%d")
+
     reports = models.TestRailReport.objects.order_by("-created_at").all()
     for report in reports:
         if report.created_at.strftime("%Y-%m-%d") < border_date:
+            LOG.info(f"Deleting {report=}")
             report.delete()
-            print(report.created_at.strftime("%Y-%m-%d"))
+
+    test_results = models.TestResult.objects.order_by("-updated_at").all()
+    for result in test_results:
+        if result.updated_at.strftime("%Y-%m-%d") < border_date:
+            LOG.info(f"Deleting {result=}")
+            result.delete()
+
+
+def task_to_analize_testrail_reports(plan_name: str):
+    """
+    Analyze TestRail reports for a single day
+    or for the given plan name
+    """
+    today = datetime.today().strftime("%Y-%m-%d")
+    _plan_name = plan_name or f"[MCP2.0]OSCORE-{today}"
+
+    _url = "https://mirantis.testrail.com"
+    _email = os.environ.get("TESTRAIL_EMAIL")
+    _password = os.environ.get("TESTRAIL_PASSWORD")
+    _project_name = "'Mirantis Cloud Platform'"
+    _jira_user = os.environ.get("JIRA_USER")
+    _jira_password = os.environ.get("JIRA_PASSWORD")
+    _sum_report_options = [
+        "--testrail-host",
+        _url,
+        "--testrail-user",
+        _email,
+        "--testrail-user-key",
+        _password,
+        "--testrail-plan",
+        _plan_name,
+        "--testrail-project",
+        _project_name,
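+        # NOTE: the entries below keep flag and value in one string; that is
+        # safe only because the list is joined into a single shell command.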
+        "--out-type html",
+        "--push-to-testrail",
+        "--sort-by fails",
+        "--jira-host https://mirantis.jira.com/",
+        "--jira-user",
+        _jira_user,
+        "--jira-password",
+        _jira_password,
+    ]
+    _sum_report_options_list = map(str, _sum_report_options)
+    report_script = f"""\
+              export TESTRAIL_URL={_url};
+              export TESTRAIL_USER_EMAIL={_email};
+              export TESTRAIL_USER_KEY={_password};
+              pip install testplan_summary/.;
+              python testplan_summary/report.py \
+              create-report {" ".join(_sum_report_options_list)}
+              """
+    p = subprocess.Popen(
+        report_script, shell=True, stdout=subprocess.PIPE
+    ).stdout.read()
+    return p
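
The new task builds a shell script string and runs it via `subprocess.Popen(..., shell=True)`, reading stdout directly; the exit code and stderr are silently dropped. A minimal hardened sketch of the same invocation, assuming failures should be raised rather than swallowed (an illustration, not part of this change):

    import subprocess

    def run_report_script(report_script: str) -> str:
        # Hypothetical variant of the Popen call above: capture stderr and
        # raise on a non-zero exit code instead of returning partial output.
        proc = subprocess.run(
            report_script,
            shell=True,  # the script exports env vars and chains commands
            capture_output=True,
            text=True,
        )
        if proc.returncode != 0:
            raise RuntimeError(f"report script failed: {proc.stderr}")
        return proc.stdout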
diff --git a/testrail_bot/control/celery_tasks/tasks.py b/testrail_bot/control/celery_tasks/tasks.py
index 3d090c2..dbfb37a 100644
--- a/testrail_bot/control/celery_tasks/tasks.py
+++ b/testrail_bot/control/celery_tasks/tasks.py
@@ -90,3 +90,15 @@
     :return:
     """
     schedules_pipeline.task_to_delete_old_2m_reports()
+
+
+@shared_task
+def analize_testrail_reports(plan_name, *args, **kwargs):
+    """
+    Builds a summary report for the given TestRail test plan
+    and pushes it to the plan description
+    (falls back to today's plan when plan_name is empty)
+    :param plan_name: TestRail test plan name
+    :return:
+    """
+    schedules_pipeline.task_to_analize_testrail_reports(plan_name)
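
The wrapper keeps the same shape as the other scheduled tasks, so it can be registered as a `CronPeriodicTask` or fired manually. A sketch of a manual trigger from a Django shell, assuming the default Celery app is running:

    from control.celery_tasks.tasks import analize_testrail_reports

    # An empty plan_name falls back to today's "[MCP2.0]OSCORE-<date>" plan.
    analize_testrail_reports.delay("")
    # Or target a specific plan by name.
    analize_testrail_reports.delay("[MCP2.0]OSCORE-2024-07-19")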
diff --git a/testrail_bot/control/celery_tasks/testrail_pipeline.py b/testrail_bot/control/celery_tasks/testrail_pipeline.py
index 472046d..f53ef29 100644
--- a/testrail_bot/control/celery_tasks/testrail_pipeline.py
+++ b/testrail_bot/control/celery_tasks/testrail_pipeline.py
@@ -1,6 +1,7 @@
 import datetime
 import difflib
 import json
+import logging
 from datetime import datetime as dt
 from datetime import timedelta
 from itertools import islice
@@ -10,6 +11,7 @@
 from ..jira_manager import JiraIssue
 from . import filters, test_rail_api
 from .enums import StatusEnum
+from ..utils import DBlogger
 
 __all__ = ("process_test_run",)
 
@@ -226,7 +228,7 @@
 
 
 def process_old_test(
-    f: TextIO,
+    logger: logging.Logger,
     case_id: int,
     last_comment: str,
     run_id: int,
@@ -248,11 +250,10 @@
         per = round(100.0 * ratio, 2)
         run_link = test_rail_api.html_link("run", old_run_id, old_run_id)
         if type(sim_result) is not dict:
-            f.write(
+            logger.error(
                 f"Similarity not found due to similarity: {per}, "
                 f"in run {run_link}\n"
             )
-            f.flush()
             return False
 
         prod_link = (
@@ -269,26 +270,27 @@
             StatusEnum.failed,
             StatusEnum.blocked,
         ]:
-            f.write(
+            logger.info(
                 f"Found a similar result on the test "
                 f"{test_link} with similarity {per}% and "
                 f"{StatusEnum(status_id).name} status and {prod_link} "
-                f"defect. <i>Continuing...</i>\n"
+                f"defect. <i>Continuing...</i>"
             )
-            f.flush()
             found_unknown_fail += 1
             if found_unknown_fail >= 10:
-                f.write(
-                    "<b style='color:red;'>"
-                    "Detected 10+ consecutive unknown failures\n </b>"
-                )
-                f.flush()
+                logger.error("Detected 10+ consecutive unknown failures")
                 return False
             continue
         elif ratio > 0.9:
+            testrail_link = (
+                f"[{sim_result['test_id']}]"
+                f"(https://mirantis.testrail.com"
+                f"/index.php?/tests/view/"
+                f"{sim_result['test_id']})"
+            )
             comment = (
                 f"Marked by TestRailBot because of similarity "
-                f"with test {sim_result['test_id']} {per}%"
+                f"with test {testrail_link} {per}%"
             )
             # Copy the original comment if it was not created by this bot
             if (
@@ -303,63 +305,70 @@
                 "comment": comment,
                 "defects": sim_result["defects"],
             }
-            f.write(
+            logger.info(
                 f"Found a similar result on the test "
                 f"{test_link} with similarity {per}% and "
                 f"{StatusEnum(status_id).name} status and {prod_link} "
                 f"defect\n"
                 f"<i style='color:ForestGreen;'>Pushing to TestRail "
                 f"{update_dict}"
-                f"</i>\n\n"
+                f"</i>"
             )
-            f.flush()
             test_rail_api.add_result(test["id"], update_dict)
+            if "Completed" in prod_link:
+                return False
             return True
         elif ratio > 0.7:
-            f.write(
-                f"<b style='color:red;'> "
+            logger.error(
                 f"Found a similar result on the test "
                 f"{test_link} with similarity {per}% and "
                 f"{StatusEnum(status_id).name} status and {prod_link} "
-                f"defect,\n but NOT marked by "
+                f"defect, but NOT marked by "
                 f"TestRailBot because of similarity only, "
-                f"you can update manually \n </b>"
+                f"you can update manually"
             )
-            f.flush()
-            return True
+            return False
 
 
 def process_test(
-    f: TextIO, test: dict, testrail_filters: dict, text_filters: dict
+    test: dict, testrail_filters: dict, text_filters: dict
 ) -> None:
     """
-    Starts processing for the TestCase for each TestPlan
+    Starts processing for the TestResult
     """
+    test_result_report, _ = models.TestResult.objects.get_or_create(
+        result_id=test["id"]
+    )
+    LOG = DBlogger(name=str(test["id"]), storage=test_result_report)
+
     case_id = test["case_id"]
     run_id = test["run_id"]
     run_name = test_rail_api.get_run_name(run_id)
     test_link = test_rail_api.html_link("test", test["id"], test["title"])
     run_link = test_rail_api.html_link("run", run_id, run_name)
 
-    f.write(
-        f"<br><b>Proceeding test {test_link} <br>" f"in {run_link} run</b>\n"
-    )
-    f.flush()
+    LOG.info(f"<b>Proceeding test {test_link} <br> in {run_link} run</b>")
 
     last_comment = get_last_comment(case_id, run_id, text_filters)
 
     found = process_old_test(
-        f, case_id, last_comment, run_id, test, testrail_filters, text_filters
+        logger=LOG,
+        case_id=case_id,
+        last_comment=last_comment,
+        run_id=run_id,
+        test=test,
+        testrail_filters=testrail_filters,
+        text_filters=text_filters,
     )
     if found:
-        return
+        test_result_report.action_needed = False
     else:
-        f.write(
-            f"<b style='color:red;'>Automatic test processing failed. "
-            "Please process test manually "
-            f"{test_link}</b>\n\n"
+        LOG.error(
+            f"<b>Automatic test processing failed. "
+            f"Please process test manually {test_link}</b>"
         )
-        f.flush()
+        test_result_report.action_needed = True
+    test_result_report.save()
 
 
 def process_test_run(
@@ -377,8 +386,12 @@
     :param is_testplan: flag to show that TestPlan will be proceeded instead
     of TestRun
     """
-    report = models.TestRailReport.objects.get(pk=report_id)
-    bot_test_run = models.TestRailTestRun.objects.get(pk=bot_run_id)
+    report: models.TestRailReport = models.TestRailReport.objects.get(
+        pk=report_id
+    )
+    bot_test_run: models.TestRailTestRun = models.TestRailTestRun.objects.get(
+        pk=bot_run_id
+    )
     with open(path, "a") as f:
         if is_testplan:
             test_run = test_rail_api.get_plan_by_id(bot_test_run.run_id)
@@ -406,8 +419,9 @@
                 and test["id"] in bot_test_run.checked_tests
             ):
                 continue
+            report.test_results.append(test["id"])
+            report.save()
             process_test(
-                f,
                 test,
                 bot_test_run.testrail_filters,
                 bot_test_run.text_filters,
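
With this change each analyzed result is persisted as a `TestResult` row instead of being appended to the shared report file; `action_needed` stays `True` unless a similar failure was found and pushed to TestRail. A sketch of querying the remaining backlog for a report (an assumed consumer, not part of the change):

    from control import models

    report = models.TestRailReport.objects.latest("created_at")
    # Results recorded for this report that still need a human look.
    pending = models.TestResult.objects.filter(
        result_id__in=[str(i) for i in report.test_results],
        action_needed=True,
    ).order_by("-updated_at")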
diff --git a/testrail_bot/control/migrations/0014_testresult_testrailreport_test_results.py b/testrail_bot/control/migrations/0014_testresult_testrailreport_test_results.py
new file mode 100644
index 0000000..5555151
--- /dev/null
+++ b/testrail_bot/control/migrations/0014_testresult_testrailreport_test_results.py
@@ -0,0 +1,37 @@
+# Generated by Django 4.2.13 on 2024-07-06 15:35
+
+import control.models
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ("control", "0013_testrailtestrun_created_at_and_more"),
+    ]
+
+    operations = [
+        migrations.CreateModel(
+            name="TestResult",
+            fields=[
+                (
+                    "id",
+                    models.AutoField(
+                        auto_created=True,
+                        primary_key=True,
+                        serialize=False,
+                        verbose_name="ID",
+                    ),
+                ),
+                ("result_id", models.CharField(max_length=50)),
+                ("text", models.CharField(default="", max_length=10000)),
+                ("action_needed", models.BooleanField(default=True)),
+                ("updated_at", models.DateTimeField(auto_now=True)),
+            ],
+        ),
+        migrations.AddField(
+            model_name="testrailreport",
+            name="test_results",
+            field=control.models.IntegerListField(default=[], editable=False),
+        ),
+    ]
diff --git a/testrail_bot/control/migrations/0015_alter_cronperiodictask_task_name.py b/testrail_bot/control/migrations/0015_alter_cronperiodictask_task_name.py
new file mode 100644
index 0000000..5feb92e
--- /dev/null
+++ b/testrail_bot/control/migrations/0015_alter_cronperiodictask_task_name.py
@@ -0,0 +1,43 @@
+# Generated by Django 4.2.7 on 2024-07-19 09:35
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ("control", "0014_testresult_testrailreport_test_results"),
+    ]
+
+    operations = [
+        migrations.AlterField(
+            model_name="cronperiodictask",
+            name="task_name",
+            field=models.CharField(
+                choices=[
+                    (
+                        "control.celery_tasks.tasks.check_today_testplan",
+                        "Check today testplan",
+                    ),
+                    (
+                        "control.celery_tasks.tasks.check_specific_testplan",
+                        "Check specific testplan",
+                    ),
+                    (
+                        "control.celery_tasks.tasks.delete_old_2m_testruns",
+                        "Delete previous 2-month TestRuns(for bot view)",
+                    ),
+                    (
+                        "control.celery_tasks.tasks.delete_old_2m_reports",
+                        "Delete previous 2-month Reports(for bot view)",
+                    ),
+                    (
+                        "control.celery_tasks.tasks.analize_testrail_reports",
+                        "Summary report on one day",
+                    ),
+                ],
+                default="control.celery_tasks.tasks.check_today_testplan",
+                max_length=300,
+            ),
+        ),
+    ]
diff --git a/testrail_bot/control/models.py b/testrail_bot/control/models.py
index f613292..eb5c7cc 100644
--- a/testrail_bot/control/models.py
+++ b/testrail_bot/control/models.py
@@ -82,10 +82,18 @@
 class TestRailReport(models.Model):
     path = models.FileField(storage=fs, null=True, blank=True, max_length=500)
     report_name = models.CharField(max_length=300)
+    test_results = IntegerListField(default=list)
     finished = models.BooleanField(default=False)
     created_at = models.DateTimeField(auto_now_add=True)
 
 
+class TestResult(models.Model):
+    result_id = models.CharField(max_length=50)
+    text = models.CharField(max_length=10000, default="")
+    action_needed = models.BooleanField(default=True)
+    updated_at = models.DateTimeField(auto_now=True)
+
+
 class ActionLog(models.Model):
     name = models.CharField(max_length=500)
     date = models.DateTimeField(null=True)
@@ -166,6 +174,11 @@
         "Delete previous 2-month Reports(for bot view)",
         [],
     ),
+    (
+        "control.celery_tasks.tasks.analize_testrail_reports",
+        "Summary report on one day",
+        ["plan_name"],
+    ),
 ]
 
 TASK_CHOICES = list(map(lambda x: x[:-1], TASKS))
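
Each `TASKS` entry is a `(task_path, label, extra_arg_names)` triple, and `TASK_CHOICES` drops the argument list so the same data backs both the form choices and the scheduler. Concretely, the new entry contributes (values taken from this diff):

    # TASKS[-1] == (
    #     "control.celery_tasks.tasks.analize_testrail_reports",
    #     "Summary report on one day",
    #     ["plan_name"],
    # )
    # TASK_CHOICES[-1] drops the argument names:
    # ("control.celery_tasks.tasks.analize_testrail_reports",
    #  "Summary report on one day")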
diff --git a/testrail_bot/control/templates/control/report.html b/testrail_bot/control/templates/control/report.html
index bbe8b01..f82247d 100644
--- a/testrail_bot/control/templates/control/report.html
+++ b/testrail_bot/control/templates/control/report.html
@@ -23,7 +23,7 @@
 function send(){
     $.ajax({
         type: "post",
-        url: "{% url 'single_report' report_id %}",
+        url: "{% url 'single_report' report_obj.id %}",
         dataType: "json",
         headers: {'X-CSRFToken': csrftoken},
         success: function(data) {
@@ -40,7 +40,9 @@
 };
 window.onload = function() {send()};</script>
 
-<a href="{% url 'delete_report' report_id %}">
+
+
+<a href="{% url 'delete_report' report_obj.id %}">
     <button class="btn btn-danger m-1">
         <svg xmlns="http://www.w3.org/2000/svg" x="0px" y="0px" width="1em"
              height="1em" viewBox="0,0,256,256">
@@ -52,4 +54,31 @@
 </a>
 
 <pre id="data" class="p-4 border">{{ report | safe }}</pre>
+
+
+{% for test_result in test_results|dictsortreversed:"action_needed" %}
+
+<div class="card m-3
+{% if not test_result.action_needed %}
+border-success
+{% endif %}
+" data-width="100%">
+    <div class="card-header
+{% if not test_result.action_needed %} bg-success
+{% endif %}
+" >
+        <a style="text-decoration: none; color:inherit" href="#{{ test_result.result_id }}" id="{{ test_result.result_id }}">{{ test_result.result_id }}</a>
+    </div>
+  <div class="card-body">
+      <pre style="overflow: auto;white-space: pre-wrap;">{{ test_result.text | safe }}</pre>
+
+      <a href="{% url 'update_test_result' report_obj.id  test_result.result_id test_result.action_needed|yesno:'0,1' %}"
+      {% if test_result.action_needed %}
+        class="btn btn-primary">Mark as reviewed</a>
+      {% else %}
+        class="btn btn-primary warning">Move to work</a>
+      {% endif %}
+  </div>
+</div>
+{% endfor %}
 {% endblock %}
\ No newline at end of file
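
The toggle link encodes the target state rather than the current one: Django's `yesno` filter maps the boolean onto the given pair, so a single URL pattern serves both buttons. Roughly:

    # action_needed == True  -> yesno:'0,1' -> "0" -> link marks the result reviewed
    # action_needed == False -> yesno:'0,1' -> "1" -> link moves it back to work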
diff --git a/testrail_bot/control/urls.py b/testrail_bot/control/urls.py
index d0249f7..90f7375 100644
--- a/testrail_bot/control/urls.py
+++ b/testrail_bot/control/urls.py
@@ -17,6 +17,11 @@
         views.delete_report,
         name="delete_report",
     ),
+    path(
+        "reports/<int:report_id>/update/<int:result_id>/<int:action_needed>",
+        views.update_test_result,
+        name="update_test_result",
+    ),
     path("index/", views.index, name="index"),
     path("help/", views.show_help, name="help"),
     path(
diff --git a/testrail_bot/control/utils.py b/testrail_bot/control/utils.py
index 6c41f58..73ea55f 100644
--- a/testrail_bot/control/utils.py
+++ b/testrail_bot/control/utils.py
@@ -1,6 +1,8 @@
+import logging
 from typing import Any, Callable, Dict, List
 
 from django.core.cache import cache
+from django.db.models import Model
 from parse import parse
 
 
@@ -109,5 +111,35 @@
     return decorator
 
 
+class DBHandler(logging.Handler):
+
+    def __init__(self, storage: Model) -> None:
+        logging.Handler.__init__(self)
+        self.storage = storage
+
+    def emit(self, record: logging.LogRecord) -> None:
+        msg = self.format(record)
+        color = "black"
+        if record.levelno == logging.ERROR:
+            color = "red"
+        if record.levelno == logging.DEBUG:
+            color = "grey"
+        self.storage.text += f"<a style='color:{color}'>{msg} </a>\n"
+        self.storage.save()
+
+
+def DBlogger(name: str, storage: Model) -> logging.Logger:
+    _log = logging.getLogger(name)
+    _log.setLevel(logging.DEBUG)
+    formatter = logging.Formatter(
+        "[%(asctime)s] %(message)s", "%d %b %H:%M:%S"
+    )
+    db_h = DBHandler(storage)
+    db_h.setFormatter(formatter)
+    _log.addHandler(db_h)
+    return _log
+
+
 if __name__ == "__main__":
     pass
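
`DBlogger` returns a standard `logging.Logger` whose records are appended by `DBHandler` to the `text` field of the given model row, so callers keep the familiar logging API while output is persisted per result. A minimal usage sketch, mirroring `process_test` above:

    from control import models
    from control.utils import DBlogger

    row, _ = models.TestResult.objects.get_or_create(result_id="12345")
    log = DBlogger(name="12345", storage=row)
    log.info("similarity check started")   # appended in black
    log.error("no similar result found")   # appended in red
    # Every emit() appends a styled <a> element to row.text and saves the row.

Note that `logging.getLogger(name)` returns a process-wide singleton per name, so calling `DBlogger` twice with the same name stacks a second handler; that is a sharp edge of this helper, not something this sketch fixes.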
diff --git a/testrail_bot/control/views.py b/testrail_bot/control/views.py
index 8a362ef..350f9da 100644
--- a/testrail_bot/control/views.py
+++ b/testrail_bot/control/views.py
@@ -63,7 +63,9 @@
 
 
 def single_report(request, report_id):
-    report = models.TestRailReport.objects.get(pk=report_id)
+    report: models.TestRailReport = models.TestRailReport.objects.get(
+        pk=report_id
+    )
     data = report.path.read().decode("utf-8")
     if (
         request.method == "POST"
@@ -74,13 +76,25 @@
             content_type="application/json",
         )
 
+    test_results = [
+        models.TestResult.objects.filter(result_id=test_id)[0]
+        for test_id in report.test_results
+    ]
     return render(
         request,
         "control/report.html",
-        {"report_id": report.id, "report": data, "finished": report.finished},
+        {"report_obj": report, "report": data, "test_results": test_results},
     )
 
 
+def update_test_result(request, report_id, result_id, action_needed):
+    result = models.TestResult.objects.get(result_id=result_id)
+    result.action_needed = bool(action_needed)
+    result.save()
+    return redirect("single_report", report_id)
+
+
 def delete_report(request, report_id):
     report: models.TestRailReport = models.TestRailReport.objects.get(
         pk=report_id
@@ -298,7 +312,6 @@
 
 
 def save_scheduler(request, pk=None):
-    print(f"{request.POST=}")
     minute, hour, day_of_month, month_of_year, day_of_week = request.POST.get(
         "cron", "* * * * *"
     ).split(" ")
diff --git a/testrail_bot/testplan_summary/report.py b/testrail_bot/testplan_summary/report.py
new file mode 100644
index 0000000..4f86ea4
--- /dev/null
+++ b/testrail_bot/testplan_summary/report.py
@@ -0,0 +1,640 @@
+#!/usr/bin/env python
+import datetime
+import sys
+import logging
+from collections import defaultdict, OrderedDict
+import jira
+import ipdb
+import argparse
+from testrail import TestRail
+from testrail.test import Test
+from functools import lru_cache
+
+logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.INFO)
+LOG = logging.getLogger(__name__)
+
+
+def run_cli():
+    cli = argparse.ArgumentParser(
+        prog="Report generator",
+        description="Command line tool for generate summary report",
+    )
+    commands = cli.add_subparsers(title="Operation commands", dest="command")
+    cli_process = commands.add_parser(
+        "create-report",
+        help="Create summary report",
+        description="Create summary report",
+    )
+    cli_process_link = commands.add_parser(
+        "mark-fails",
+        help="Extract linked bugs from previous reports",
+        description="Extract linked bugs from previous reports"
+        " and mark current",
+    )
+    cli_process.add_argument(
+        "-T",
+        "--testrail-host",
+        dest="testrail_host",
+        required=True,
+        help="TestRail hostname",
+    )
+    cli_process.add_argument(
+        "-U",
+        "--testrail-user",
+        dest="testrail_user",
+        required=True,
+        help="TestRail user email",
+    )
+    cli_process.add_argument(
+        "-K",
+        "--testrail-user-key",
+        dest="testrail_user_key",
+        required=True,
+        help="TestRail user key",
+    )
+    cli_process.add_argument(
+        "-R",
+        "--testrail-plan",
+        dest="testrail_plan",
+        required=True,
+        help="TestRail test plan for analize",
+    )
+    cli_process.add_argument(
+        "-P",
+        "--testrail-project",
+        dest="testrail_project",
+        required=True,
+        help="TestRail project name",
+    )
+    cli_process.add_argument(
+        "--testrail-only-run",
+        dest="testrail_only_run",
+        help="Analize only one run in selected plan",
+    )
+    cli_process.add_argument(
+        "--out-type",
+        dest="out_type",
+        choices=["text", "html", "md", "none"],
+        default="none",
+        help="Select output format for report table. "
+        "By default print nothing (none).",
+    )
+    cli_process.add_argument(
+        "--sort-by",
+        dest="sort_by",
+        default="fails",
+        choices=["fails", "blocks", "project", "priority", "status"],
+        help="Select sorting column. By deafult table sort by fails",
+    )
+    cli_process.add_argument(
+        "--push-to-testrail",
+        dest="push_report_flag",
+        action="store_true",
+        default=False,
+        help="Save report in plan description",
+    )
+    cli_process.add_argument(
+        "-j",
+        "--jira-host",
+        dest="jira_host",
+        required=True,
+        help="JIRA hostname",
+    )
+    cli_process.add_argument(
+        "-u",
+        "--jira-user",
+        dest="jira_user_id",
+        required=True,
+        help="JIRA username",
+    )
+    cli_process.add_argument(
+        "-p",
+        "--jira-password",
+        dest="jira_user_password",
+        required=True,
+        help="JIRA user password",
+    )
+    # link fail bugs parameters
+    cli_process_link.add_argument(
+        "-T",
+        "--testrail-host",
+        dest="testrail_host",
+        required=True,
+        help="TestRail hostname",
+    )
+    cli_process_link.add_argument(
+        "-U",
+        "--testrail-user",
+        dest="testrail_user",
+        required=True,
+        help="TestRail user email",
+    )
+    cli_process_link.add_argument(
+        "-K",
+        "--testrail-user-key",
+        dest="testrail_user_key",
+        required=True,
+        help="TestRail user key",
+    )
+    cli_process_link.add_argument(
+        "-R",
+        "--testrail-plan",
+        dest="testrail_plan",
+        required=True,
+        help="TestRail test plan for analize",
+    )
+    cli_process_link.add_argument(
+        "-M",
+        "--testrail-marked-plan",
+        dest="testrail_marked_plan",
+        required=False,
+        help="TestRail test plan for parse",
+    )
+    cli_process_link.add_argument(
+        "-P",
+        "--testrail-project",
+        dest="testrail_project",
+        required=True,
+        help="TestRail project name",
+    )
+    cli_process_link.add_argument(
+        "--testrail-only-run",
+        dest="testrail_only_run",
+        help="Name to update only specified run in selected plan",
+    )
+    cli_process_link.add_argument(
+        "--push-to-testrail",
+        dest="update_report_flag",
+        action="store_true",
+        default=False,
+        help="Save report in plan description",
+    )
+    if len(sys.argv) == 1:
+        cli.print_help()
+        sys.exit(1)
+    return cli.parse_args()
+
+
+def get_runs(t_client, plan_name, run_name):
+    LOG.info("Get runs from plan - {}".format(plan_name))
+    ret = []
+    plan = t_client.plan(plan_name)
+    if plan:
+        for e in plan.entries:
+            for r in e.runs:
+                LOG.info("Run {} #{}".format(r.name, r.id))
+                if run_name is not None and r.name != run_name:
+                    continue
+                ret.append(r)
+    else:
+        LOG.warning("Plan {} is empty".format(plan_name))
+    return ret
+
+
+def get_all_results(t_client, list_of_runs):
+    ret = []
+    for run in list_of_runs:
+        ret.extend(get_results(t_client, run))
+    return ret
+
+
+def get_all_failed_results(t_client, list_of_runs, result_type):
+    """
+    returned result format:
+    [[run(id,name), result(id,status,defects...), test(id,name..)],
+     [run(id,name), result(id,status,defects...), test(id,name..)],
+                                                                ...]
+    """
+    ret = []
+    for run in list_of_runs:
+        ret.extend(get_failed_results(t_client, run, result_type))
+    return ret
+
+
+@lru_cache()
+def fetch_test(api, test_id, run_id):
+    return Test(api.test_with_id(test_id, run_id))
+
+
+def get_results(t_client, run):
+    LOG.info("Get results for run - {}".format(run.name))
+    results = t_client.results(run)
+    ret = [
+        (run.id, r)
+        for r in results
+        if r.raw_data()["status_id"] is not None
+        and r.raw_data()["defects"] is not None
+    ]
+    for r in ret:
+        run_id, result = r
+        test = fetch_test(result.api, result.raw_data()["test_id"], run_id)
+        LOG.info(
+            "Test {} - {} - {}".format(
+                test.title, result.status.name, ",".join(result.defects)
+            )
+        )
+    return ret
+
+
+def get_failed_results(t_client, run, result_type):
+    """
+    returned result format:
+    [run(id,name),
+     result(id,status,defects...),
+     test(id,name..)]
+    """
+    LOG.info("Get results for run - {}".format(run.name))
+    results = t_client.results(run)
+    results_with_test = []
+    if result_type == "5":
+        ret = [
+            (run, r)
+            for r in results
+            if r.raw_data()["status_id"] is int(result_type)
+            and r.raw_data()["defects"] is None
+        ]
+    else:
+        ret = [
+            (run, r)
+            for r in results
+            if r.raw_data()["status_id"] is not None
+            and r.raw_data()["defects"] is not None
+        ]
+    for r in ret:
+        run, result = r
+        test = fetch_test(result.api, result.raw_data()["test_id"], run.id)
+        LOG.info(
+            "Test {} - {} - {} - {}".format(
+                test.title,
+                result.status.name,
+                result.raw_data()["status_id"],
+                ",".join(result.defects),
+            )
+        )
+        results_with_test.append([run, result, test])
+    return results_with_test
+
+
+def mark_failed_results(t_cl, marked_res, failed_res, t_h):
+    """
+    Extract list tests with defect and compare it with tests to be marked,
+    and add defects and result from marked tests
+    Returned result format:
+    [[target_tests_to_update_with_defect, target_run_id],
+     [target_tests_to_update_with_defect, target_run_id],
+                                                         ...]
+    """
+    LOG.info("Extract marked tests and attach to failed")
+
+    def generate_result(t_c, tst, m_r, m_t):
+        link_comment = "{url}/index.php?/tests/view/{uid}".format(
+            url=t_h, uid=m_t.id
+        )
+        tmp_result = t_c.result()
+        tmp_result.test = tst
+        tmp_result.status = m_r.status
+        tmp_result.comment = "Result taked from: " + link_comment
+        tmp_result.defects = [str(m_r.defects[0])]
+        return tmp_result
+
+    ret = []
+    for run, result, test in failed_res:
+        for m_run, m_result, m_test in marked_res:
+            if run.name == m_run.name and test.title == m_test.title:
+                LOG.info(
+                    " MARKED FOUND: Run:{} test: .. {}-{}".format(
+                        run.id, test.title[-72:], m_result.defects[0]
+                    )
+                )
+                ret.append(
+                    [generate_result(t_cl, test, m_result, m_test), run.id]
+                )
+    return ret
+
+
+@lru_cache()
+def get_defect_info(j_client, defect):
+    LOG.info("Get info about issue {}".format(defect))
+    try:
+        issue = j_client.issue(defect)
+    except jira.exceptions.JIRAError as e:
+        if e.status_code == 404:
+            LOG.error("Defect {} wasn't found in Jira".format(defect))
+            return {
+                "id": defect,
+                "title": "Title for #{} not found".format(defect),
+                "project": "Not found",
+                "priority": "Not found",
+                "status": "Not found",
+                "url": "Not found",
+            }
+        else:
+            raise
+    return {
+        "id": issue.key,
+        "title": issue.fields.summary,
+        "project": issue.fields.project.key,
+        "priority": issue.fields.priority.name,
+        "status": issue.fields.status.name,
+        "url": issue.permalink(),
+    }
+
+
+def get_defects_table(jira_client, list_of_results, sort_by):
+    LOG.info("Collect report table")
+    table = defaultdict(dict)
+    for run_id, result in list_of_results:
+        for defect in result.defects:
+            if defect not in table:
+                info = get_defect_info(jira_client, defect)
+                table[defect].update(info)
+                table[defect]["results"] = set([(run_id, result)])
+                if result.status.name.lower() == "blocked":
+                    table[defect]["blocks"] = 1
+                    table[defect]["fails"] = 0
+                else:
+                    table[defect]["fails"] = 1
+                    table[defect]["blocks"] = 0
+            else:
+                table[defect]["results"].add((run_id, result))
+                if result.status.name.lower() == "blocked":
+                    table[defect]["blocks"] += 1
+                else:
+                    table[defect]["fails"] += 1
+    return OrderedDict(
+        sorted(table.items(), key=lambda i: i[1][sort_by], reverse=True)
+    )
+
+
+def get_text_table(table):
+    LOG.info("Generation text table")
+    lines = []
+    line = (
+        "{fails:^5} | {blocks:^5} | {project:^10} | {priority:^15} | "
+        "{status:^15} | {bug:^100} | {tests} "
+    )
+
+    def title_uid(r):
+        run_id, result = r
+        test = fetch_test(result.api, result.raw_data()["test_id"], run_id)
+        return {"title": test.title, "uid": test.id}
+
+    def list_of_defect_tests(results):
+        ret = ["[{title} #{uid}]".format(**title_uid(r)) for r in results]
+        return " ".join(ret)
+
+    lines.append(
+        line.format(
+            fails="FAILS",
+            blocks="BLOCKS",
+            project="PROJECT",
+            priority="PRIORITY",
+            status="STATUS",
+            bug="BUG",
+            tests="TESTS",
+        )
+    )
+    for k in table:
+        one = table[k]
+        data = {
+            "fails": one["fails"],
+            "project": one["project"],
+            "priority": one["priority"],
+            "status": one["status"],
+            "bug": "{uid} {title}".format(uid=one["id"], title=one["title"]),
+            "tests": list_of_defect_tests(one["results"]),
+        }
+        lines.append(line.format(**data))
+    return "\n".join(lines)
+
+
+def get_md_table(table):
+    LOG.info("Generation MD table")
+    lines = []
+    line = "||{fails} | {priority} | " "{status} | {bug} | {tests} |"
+
+    def title_uid_link(r):
+        run_id, result = r
+        test = fetch_test(result.api, result.raw_data()["test_id"], run_id)
+        return {
+            "title": test.title.replace("[", "{").replace("]", "}"),
+            "uid": test.id,
+            "link": "{url}/index.php?/tests/view/{uid}".format(
+                url=test.api._conf()["url"], uid=test.id
+            ),
+        }
+
+    def list_of_defect_tests(results):
+        ret = [
+            "[{title} #{uid}]({link})".format(**title_uid_link(r))
+            for r in results
+        ]
+        return " ".join(ret)
+
+    lines.append(
+        line.format(
+            fails="|:FAILS",
+            project=":PROJECT",
+            priority=":PRIORITY",
+            status=":STATUS",
+            bug=":BUG",
+            tests=":TESTS",
+        )
+    )
+    for k in table:
+        one = table[k]
+        data = {
+            "fails": one["fails"],
+            "project": one["project"],
+            "priority": one["priority"],
+            "status": one["status"],
+            "bug": "[{uid} {title}]({url})".format(
+                uid=one["id"],
+                title=one["title"].replace("[", "{").replace("]", "}"),
+                url=one["url"],
+            ),
+            "tests": list_of_defect_tests(one["results"]),
+        }
+        lines.append(line.format(**data))
+    return "\n".join(lines)
+
+
+def get_html_table(table):
+    LOG.info("Generation HTML table")
+    html = "<table>{lines}</table>"
+    lines = []
+    line = (
+        "<tr><th>{fails:^5}</th><th>{blocks:^5}</th><th>{project:^10}</th>"
+        "<th>{priority:^15}</th>"
+        "<th>{status:^15}</th><th>{bug:^100}</th><th>{tests}</th></tr>"
+    )
+    lines.append(
+        line.format(
+            fails="FAILS",
+            blocks="BLOCKS",
+            project="PROJECT",
+            priority="PRIORITY",
+            status="STATUS",
+            bug="BUG",
+            tests="TESTS",
+        )
+    )
+
+    def title_uid_link(r):
+        run_id, result = r
+        test = fetch_test(result.api, result.raw_data()["test_id"], run_id)
+        return {
+            "title": test.title,
+            "uid": test.id,
+            "link": "{url}/index.php?/tests/view/{uid}".format(
+                url=test.api._conf()["url"], uid=test.id
+            ),
+        }
+
+    def list_of_defect_tests(results):
+        ret = [
+            "<a href='{link}'>{title} #{uid}</a>".format(**title_uid_link(r))
+            for r in results
+        ]
+        return " ".join(ret)
+
+    for k in table:
+        one = table[k]
+        data = {
+            "fails": one["fails"],
+            "blocks": one["blocks"],
+            "project": one["project"],
+            "priority": one["priority"],
+            "status": one["status"],
+            "bug": "<a href='{url}'>{uid} {title}</a>".format(
+                uid=one["id"], title=one["title"], url=one["url"]
+            ),
+            "tests": list_of_defect_tests(one["results"]),
+        }
+        lines.append(line.format(**data))
+    return html.format(lines="".join(lines))
+
+
+def out_table(out_type, table):
+    if out_type == "none":
+        return
+    elif out_type == "html":
+        print(get_html_table(table))
+    elif out_type == "md":
+        print(get_md_table(table))
+    else:
+        print(get_text_table(table))
+
+
+def push_report(t_client, plan_name, table):
+    LOG.info("Push report table into plan - {}".format(plan_name))
+    text = (
+        "Bugs Statistics (generated on {date})\n"
+        "=======================================================\n"
+        "{table}".format(
+            date=datetime.datetime.now().strftime("%a %b %d %H:%M:%S %Y"),
+            table=get_md_table(table),
+        )
+    )
+    plan = t_client.plan(plan_name)
+    if plan:
+        plan.description = text
+        plan.api._post(
+            "update_plan/{}".format(plan.id),
+            {
+                "name": plan.name,
+                "description": plan.description,
+                "milestone_id": plan.milestone.id,
+            },
+        )
+
+
+def update_report(t_client, plan_name, tests_table):
+    LOG.info(
+        "Update report table into plan - {}".format(plan_name)
+        + "\n===\nList tests to udate:"
+    )
+    plan = t_client.plan(plan_name)
+    if plan:
+        for r_test, run in tests_table:
+            t_client.add(r_test)
+            LOG.info(r_test.test.title)
+    LOG.info("\n===\nUpdate plan finished - {}".format(plan_name))
+
+
+def create_report(**kwargs):
+    j_host = kwargs.get("jira_host")
+    j_user = kwargs.get("jira_user_id")
+    j_user_pwd = kwargs.get("jira_user_password")
+    t_host = kwargs.get("testrail_host")
+    t_user = kwargs.get("testrail_user")
+    t_user_key = kwargs.get("testrail_user_key")
+    t_plan = kwargs.get("testrail_plan")
+    t_project = kwargs.get("testrail_project")
+    t_a_run = kwargs.get("testrail_only_run")
+    o_type = kwargs.get("out_type")
+    push_report_flag = kwargs.get("push_report_flag")
+    sort_by = kwargs.get("sort_by")
+    t_client = TestRail(email=t_user, key=t_user_key, url=t_host)
+    t_client.set_project_id(t_client.project(t_project).id)
+    j_client = jira.JIRA(j_host, basic_auth=(j_user, j_user_pwd))
+    runs = get_runs(t_client, t_plan, t_a_run)
+    results = get_all_results(t_client, runs)
+    table = get_defects_table(j_client, results, sort_by)
+    out_table(o_type, table)
+    if push_report_flag:
+        push_report(t_client, t_plan, table)
+
+
+def mark_fails(**kwargs):
+    testrail_host = kwargs.get("testrail_host")
+    testrail_user = kwargs.get("testrail_user")
+    testrail_user_key = kwargs.get("testrail_user_key")
+    testrail_plan = kwargs.get("testrail_plan")
+    testrail_m_plan = kwargs.get("testrail_marked_plan")
+    testrail_project = kwargs.get("testrail_project")
+    testrail_active_run = kwargs.get("testrail_only_run")
+    if testrail_active_run == "":
+        testrail_active_run = None
+    update_report_flag = kwargs.get("update_report_flag")
+    testrail_client = TestRail(
+        email=testrail_user, key=testrail_user_key, url=testrail_host
+    )
+    testrail_client.set_project_id(
+        testrail_client.project(testrail_project).id
+    )
+    # Get list runs with marked results
+    marked_runs = get_runs(
+        testrail_client, testrail_m_plan, testrail_active_run
+    )
+    # Get list runs to update
+    runs = get_runs(testrail_client, testrail_plan, testrail_active_run)
+    # Get list (failed, prod_failed, test_failed,skipped..) tests with defects
+    marked_results = get_all_failed_results(
+        testrail_client, marked_runs, "2,3,4,5,6,7,8,9"
+    )
+    # Get list (failed) tests without defects to mark
+    # 5-failed
+    failed_results = get_all_failed_results(testrail_client, runs, "5")
+    # Generate list tests to update based on compare (defected
+    # results for tests with failed and not defected)
+    tests_to_update = mark_failed_results(
+        testrail_client, marked_results, failed_results, testrail_host
+    )
+    if update_report_flag:
+        update_report(testrail_client, testrail_plan, tests_to_update)
+
+
+COMMAND_MAP = {"create-report": create_report, "mark-fails": mark_fails}
+
+
+def main():
+    args = run_cli()
+    COMMAND_MAP[args.command](**vars(args))
+
+
+if __name__ == "__main__":
+    with ipdb.launch_ipdb_on_exception():
+        main()
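
The script is also usable standalone; the bot drives it through the `create-report` subcommand with the flags assembled in `schedules_pipeline.py`. An illustrative invocation (credentials and plan name are placeholders):

    python report.py create-report \
        --testrail-host https://mirantis.testrail.com \
        --testrail-user user@example.com \
        --testrail-user-key <api-key> \
        --testrail-plan "[MCP2.0]OSCORE-2024-07-19" \
        --testrail-project "Mirantis Cloud Platform" \
        --out-type html --sort-by fails --push-to-testrail \
        --jira-host https://mirantis.jira.com/ \
        --jira-user <user> --jira-password <password>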
diff --git a/testrail_bot/testplan_summary/setup.py b/testrail_bot/testplan_summary/setup.py
new file mode 100644
index 0000000..33fb003
--- /dev/null
+++ b/testrail_bot/testplan_summary/setup.py
@@ -0,0 +1,22 @@
+from setuptools import setup
+
+setup(
+    name="testplan-summary",
+    version="0.0.1a",
+    packages=[],
+    url="",
+    license="",
+    author="oscore-qa",
+    author_email="",
+    description="",
+    requires_python=">=3.8",
+    install_requires=[
+        "jira==3.5.2",
+        "testrail-api==1.8.0",
+        "ipdb",
+        "testrail",
+    ],
+    scripts=[
+        "report.py",
+    ],
+)