[rp-reporter] Add tox and apply black formatting
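
Add a tox.ini that runs black over rp_reporter (flake8 is kept
commented out for now), declare the "test" extras in setup.cfg,
update the pbr metadata key names, relax python_requires to >=3.8,
and reformat the package with black.

A typical local invocation, assuming tox >= 3.1 and the listed
interpreters are available, would be:

    tox -e py310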

Related-Prod: PRODX-53429
Change-Id: I6dde28fe699211a6a193fdeff1e956be2b9a21db
diff --git a/rp_reporter/rp_reporter/batch_reporter.py b/rp_reporter/rp_reporter/batch_reporter.py
index 7dd60d7..577d63d 100755
--- a/rp_reporter/rp_reporter/batch_reporter.py
+++ b/rp_reporter/rp_reporter/batch_reporter.py
@@ -20,8 +20,8 @@
 
 LOG = logging.getLogger("rp_reporter")
 
-def grep(text: List, patterns: List, context_size: int,
-         blacklist: List[str] = None ):
+
+def grep(text: List, patterns: List, context_size: int, blacklist: List[str] = None):
     """Works like `grep -rn`
     returns found text with line before and after
     :param text:
@@ -36,8 +36,7 @@
 
     def found(_line):
         if any(pattern.lower() in _line.lower() for pattern in patterns):
-            if any(blacklisted.lower() in _line.lower()
-                   for blacklisted in blacklist):
+            if any(blacklisted.lower() in _line.lower() for blacklisted in blacklist):
                 return False
             return True
         return False
@@ -65,6 +64,7 @@
     if result:
         yield result
 
+
 class Description:
     def __init__(self):
         self.rockoon_version = None
@@ -77,24 +77,27 @@
     def job_link(self):
         return f"[#{self.job_number} {self.job_status}]({self.job_url}) "
 
-    def add_test_result(self, test_name=None,
-                        testrail_url=None, rp_url=None, statistics=None):
+    def add_test_result(
+        self, test_name=None, testrail_url=None, rp_url=None, statistics=None
+    ):
         testrail_msg = f"[TestRailURL]({testrail_url})" if testrail_url else ""
         self.rp_url = rp_url
-        test_result = (f"{test_name} {testrail_msg} "
-                       f"{statistics}")
+        test_result = f"{test_name} {testrail_msg} {statistics}"
 
         self.test_results.append(test_result)
 
     def __repr__(self):
         # Return each test result on new line with "* " prefix
         test_results = "\n".join(f"* {r}" for r in self.test_results)
-        return (f"\n___\n"
-                f"{self.job_link}\n"
-                f"`{self.rockoon_version}`\n"
-                f"{test_results}\n"
-                f"[RP_URL]({self.rp_url}) \n"
-                f"___\n")
+        return (
+            f"\n___\n"
+            f"{self.job_link}\n"
+            f"`{self.rockoon_version}`\n"
+            f"{test_results}\n"
+            f"[RP_URL]({self.rp_url}) \n"
+            f"___\n"
+        )
+
 
 def get_tags(job: jj.Build):
     tags = dict()
@@ -115,6 +118,7 @@
 
     return tags
 
+
 def get_tags_from_osdpl(osdpl_file):
     tags = dict()
     osdpl_content = yaml.safe_load(open(osdpl_file[0], "rb"))
@@ -125,40 +129,47 @@
         return
     tags["rockoon_version"] = found_osdpl[0]
 
-    if ovn:=jmespath.search("items[*].spec.features.neutron.backend", osdpl_dict):
+    if ovn := jmespath.search("items[*].spec.features.neutron.backend", osdpl_dict):
         tags["neutron.backend"] = ovn[0]
     else:
         tags["neutron.backend"] = "ovs"
 
-    if dvr:=jmespath.search("items[*].spec.features.neutron.dvr.enabled", osdpl_dict):
+    if dvr := jmespath.search("items[*].spec.features.neutron.dvr.enabled", osdpl_dict):
         tags["dvr"] = dvr[0]
     else:
         tags["dvr"] = False
 
-    if vpnaas:=jmespath.search("items[*].spec.features.neutron.extensions.vpnaas.enabled", osdpl_dict):
+    if vpnaas := jmespath.search(
+        "items[*].spec.features.neutron.extensions.vpnaas.enabled", osdpl_dict
+    ):
         tags["vpnaas"] = vpnaas[0]
     else:
         tags["vpnaas"] = False
 
-    if nova_images:=jmespath.search("items[*].spec.features.nova.images.backend", osdpl_dict):
+    if nova_images := jmespath.search(
+        "items[*].spec.features.nova.images.backend", osdpl_dict
+    ):
         tags["nova.images.backend"] = nova_images[0]
 
     return tags
 
-def get_image_from_describe(job_obj, describe_file_pattern, image_pattern,
-                            from_artifactory=False):
+
+def get_image_from_describe(
+    job_obj, describe_file_pattern, image_pattern, from_artifactory=False
+):
     if from_artifactory:
         local_describe_file = job_obj.get_artifacts(describe_file_pattern)
         if not local_describe_file:
             LOG.info(f"Can't find {describe_file_pattern} in {job_obj} artifactory")
         local_describe_file = local_describe_file[0]
     else:
-        describe_file_url = [file_url
-                             for file_url in job_obj.get_link_from_description()
-                             if describe_file_pattern in file_url]
+        describe_file_url = [
+            file_url
+            for file_url in job_obj.get_link_from_description()
+            if describe_file_pattern in file_url
+        ]
         if not describe_file_url:
-            LOG.info(f"Can't find {describe_file_pattern} in {job_obj} "
-                     f"description")
+            LOG.info(f"Can't find {describe_file_pattern} in {job_obj} description")
             return
         temp_dir = Path("/tmp") / uuid.uuid4().hex.upper()
         temp_dir.mkdir(parents=True, exist_ok=True)
@@ -181,7 +192,7 @@
         start_time=timestamp(),
         item_type="STEP",
         description=f"child {job.url} {job.duration} {job.status}",
-        parent_item_id=job_suite_id
+        parent_item_id=job_suite_id,
     )
     match job.status:
         case "SUCCESS":
@@ -203,7 +214,7 @@
         "error",
         "exception",
         "assert",
-        "discovered openstack controller version is"
+        "discovered openstack controller version is",
     ]
 
     blacklist = [
@@ -214,22 +225,22 @@
     ]
 
     all_logs = job.get_logs()
-    for log in grep(text=all_logs, patterns=catch_logs, context_size=8,
-                    blacklist=blacklist):
+    for log in grep(
+        text=all_logs, patterns=catch_logs, context_size=8, blacklist=blacklist
+    ):
         # LOG.error("Attach logs {}".format("\n".join(log)))
-        rp_client.log(time=timestamp(),
-                      message="\n".join(log),
-                      item_id=subjob_item_id
-                      )
+        rp_client.log(time=timestamp(), message="\n".join(log), item_id=subjob_item_id)
         sleep(0.001)
-    rp_client.finish_test_item(item_id=subjob_item_id,
-                               status=status,
-                               end_time=timestamp(),
-                               )
+    rp_client.finish_test_item(
+        item_id=subjob_item_id,
+        status=status,
+        end_time=timestamp(),
+    )
 
-def upload_job(job: str|jj.Build,
-               suite_per_job: bool=False,
-               tags: Optional[dict] =None):
+
+def upload_job(
+    job: str | jj.Build, suite_per_job: bool = False, tags: Optional[dict] = None
+):
     if isinstance(job, str):
         job = jj.Build(job)
     if not tags:
@@ -256,7 +267,7 @@
             name=job.name,
             start_time=timestamp(),
             attributes=tags,
-            description=f"Deployed job {job.url} by {job.triggered_by}"
+            description=f"Deployed job {job.url} by {job.triggered_by}",
         )
         if not rp_client.get_launch_info():
             LOG.error(f"[FIXME] Launch {launch_id} is not created ")
@@ -270,19 +281,20 @@
             item_type="suite",
         )
         reporter.schedule_finishing(job_suite_id)
-    rp_client.log(time=timestamp(),
-                  message=f"Job status: {job.status}",
-                  # item_id=launch_id
-                  )
+    rp_client.log(
+        time=timestamp(),
+        message=f"Job status: {job.status}",
+        # item_id=launch_id
+    )
 
     for child in itertools.chain([job], job.heirs):
         child: jj.Build
-        rp_client.log(time=timestamp(),
-                      message=f"{child} {child.status} {child.url}",
-                      # item_id=launch_id
-                      )
-        report_job_status(job=child, job_suite_id=job_suite_id,
-                          reporter=reporter)
+        rp_client.log(
+            time=timestamp(),
+            message=f"{child} {child.status} {child.url}",
+            # item_id=launch_id
+        )
+        report_job_status(job=child, job_suite_id=job_suite_id, reporter=reporter)
 
         # test_tags = deepcopy(tags)
         test_tags = dict()
@@ -308,54 +320,62 @@
                     continue
 
                 tags.update(get_tags_from_osdpl(osdpl_file))
-                rp_client.update_test_item(
-                    attributes=tags,
-                    item_uuid=launch_id
-                )
+                rp_client.update_test_item(attributes=tags, item_uuid=launch_id)
                 description.rockoon_version = tags.get("rockoon_version")
             case "tempest-runner-k8s":
                 title = "Tempest"
-                test_results_files = [file_url
-                               for file_url in child.get_link_from_description()
-                               if "tempest_report.xml" in file_url]
+                test_results_files = [
+                    file_url
+                    for file_url in child.get_link_from_description()
+                    if "tempest_report.xml" in file_url
+                ]
 
                 image = get_image_from_describe(
-                     job_obj=child,
-                     describe_file_pattern="tempest_pod.describe",
-                     image_pattern="/openstack/tempest:")
+                    job_obj=child,
+                    describe_file_pattern="tempest_pod.describe",
+                    image_pattern="/openstack/tempest:",
+                )
                 suite_description += f"{image}<br>"
 
                 if test_scheme := yaml.safe_load(child.param.TEST_SCHEME):
-                     suite_description += f"regex: {test_scheme.get('regex')}<br>"
+                    suite_description += f"regex: {test_scheme.get('regex')}<br>"
 
             case "stepler-runner-k8s":
                 title = "Stepler"
                 test_results_files = child.get_artifacts("stepler_test_results.xml")
                 if not test_results_files:
-                    LOG.error(f"Can't found 'stepler_test_results.xml' in "
-                              f"{child.url} artifacts")
-                import ipdb; ipdb.set_trace()
+                    LOG.error(
+                        f"Can't found 'stepler_test_results.xml' in "
+                        f"{child.url} artifacts"
+                    )
                 image = get_image_from_describe(
                     job_obj=child,
                     describe_file_pattern="stepler_pod.describe",
                     image_pattern="/openstack/stepler:",
-                    from_artifactory=True
-                    )
+                    from_artifactory=True,
+                )
                 suite_description += f"{image}<br>"
 
             case "oscore-si-tests-runner-k8s":
                 title = "SI tests"
-                test_results_files = [file_url
-                               for file_url in child.get_link_from_description()
-                               if "si_test_report.xml" in file_url ]
+                test_results_files = [
+                    file_url
+                    for file_url in child.get_link_from_description()
+                    if "si_test_report.xml" in file_url
+                ]
                 if not test_results_files:
                     LOG.error(f"Can't found 'si_test_report.xml' in {child.url}")
 
             case "oscore-functional-tests-runner":
                 title = "Rockoon Functional"
-                test_results_files = [file_url
-                               for file_url in child.get_link_from_description() 
-                               if "si_test_report.xml" in file_url]
+                test_results_files = [
+                    file_url
+                    for file_url in child.get_link_from_description()
+                    if "si_test_report.xml" in file_url
+                ]
                 if not test_results_files:
                     LOG.error(f"Can't found 'si_test_report.xml' in {child.url}")
 
@@ -366,12 +386,16 @@
                     for file_url in child.get_link_from_description()
                 ]
                 if not test_results_files:
-                    LOG.error(f"Can't found 'test_check_downtime_statistic_result.xml' in {child.url}")
+                    LOG.error(
+                        f"Can't found 'test_check_downtime_statistic_result.xml' in {child.url}"
+                    )
 
             case "collect-openstack-kaas-artifacts":
                 artifactory_url = child.description.split("url: ")[-1]
-                rp_client.log(time=timestamp(),
-                              message=f"Pod Logs {artifactory_url}/pod-logs.tar.gz")
+                rp_client.log(
+                    time=timestamp(),
+                    message=f"Pod Logs {artifactory_url}/pod-logs.tar.gz",
+                )
 
         if child.param.RUN_TESTS:
             suite_description += f"RUN_TESTS = {child.param.RUN_TESTS}<br>"
@@ -380,27 +404,31 @@
         if not test_results_files:
             # We can iterate by child jobs which don't contain any reports
             continue
-        testrail_url = [url
-                        for url in child.get_link_from_description()
-                        if "testrail.com" in url
-                        ]
-        rp_client.log(time=timestamp(),
-                      message=f"Found file to upload: {test_results_files}",
-                      )
+        testrail_url = [
+            url for url in child.get_link_from_description() if "testrail.com" in url
+        ]
+        rp_client.log(
+            time=timestamp(),
+            message=f"Found file to upload: {test_results_files}",
+        )
         report_path = test_results_files[0]
-        LOG.info("=== report_xml {kwargs}".format(
-            kwargs = dict(report_path=report_path,
-                          title = title,
-                          attributes=test_tags,
-                          link=job.url,
-                          description=suite_description,
-                          to_specific_launch=launch_id))
+        LOG.info(
+            "=== report_xml {kwargs}".format(
+                kwargs=dict(
+                    report_path=report_path,
+                    title=title,
+                    attributes=test_tags,
+                    link=job.url,
+                    description=suite_description,
+                    to_specific_launch=launch_id,
+                )
+            )
         )
 
         print(f"(っ・-・)っ Uploading {report_path}")
         reported_suite_id, stats = reporter.report_xml(
             report_path=report_path,
-            title = title,
+            title=title,
             attributes=test_tags,
             link=job.url,
             description=suite_description,
@@ -410,28 +438,32 @@
             test_name=f"{title} {test_tags.get('test', '')} ",
             testrail_url=testrail_url[0] if testrail_url else None,
             rp_url=rp_client.get_launch_ui_url(),
-            statistics=stats
+            statistics=stats,
         )
-        rp_client.log(time=timestamp(),
-                      message=f"Reported with stats: {stats}",
-                      )
+        rp_client.log(
+            time=timestamp(),
+            message=f"Reported with stats: {stats}",
+        )
 
-    rp_client.log(time=timestamp(),
-                  message="Reporting completed",
-                  )
+    rp_client.log(
+        time=timestamp(),
+        message="Reporting completed",
+    )
     if suite_per_job:
         report_url = rp_client.get_launch_ui_url()
-        rp_client.finish_launch(end_time=timestamp(),
-                                attributes=tags,
-                                description=str(description) +
-                                            f"\nPod Logs {artifactory_url}/pod-logs.tar.gz")
+        rp_client.finish_launch(
+            end_time=timestamp(),
+            attributes=tags,
+            description=str(description)
+            + f"\nPod Logs {artifactory_url}/pod-logs.tar.gz",
+        )
         print(f"report is here {report_url}")
         print("Pushing new description to job...")
         try:
-            job.description = (job.description +
-                               f"<br><br> "
-                               f"<a href='{report_url}'>Link to ReportPortal</a> <br>"
-                               )
+            job.description = (
+                job.description + f"<br><br> "
+                f"<a href='{report_url}'>Link to ReportPortal</a> <br>"
+            )
         except Exception as e:
             print(f"Can't push description to {job=}: {e}")
     print(f" ʕノ•ᴥ•ʔノ Completed")
@@ -445,26 +477,30 @@
             continue
         upload_job(job, suite_per_job=True)
 
+
 @click.group()
 def cli():
     pass
 
+
 @cli.command()
 @click.argument("job_url")
 def report_job(job_url):
     upload_job(job_url, suite_per_job=True)
 
+
 @cli.command()
 @click.option("--pattern", default=None, help="Upload only job with pattern")
-@click.argument('view_url')
+@click.argument("view_url")
 def report_view(view_url, pattern):
     """
     :param view_url: Url to the view
     """
     upload_view(view_url, pattern)
 
+
 @cli.command()
-@click.argument('report_path')
+@click.argument("report_path")
 def report_xmlfile(report_path):
     """
     :param report_path: Url or file location of xunit report
@@ -475,12 +511,13 @@
         title=title,
     )
 
+
 @cli.command()
 def version():
-    package_name = 'rp-reporter'
+    package_name = "rp-reporter"
     info = VersionInfo(package_name)
     print("rp-reporter " + info.version_string_with_vcs())
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     cli()
diff --git a/rp_reporter/rp_reporter/report_from_xml.py b/rp_reporter/rp_reporter/report_from_xml.py
index 2b044b6..5493938 100644
--- a/rp_reporter/rp_reporter/report_from_xml.py
+++ b/rp_reporter/rp_reporter/report_from_xml.py
@@ -22,7 +22,7 @@
 LOG = logging.getLogger("rp_reporter")
 
 
-def obtain_file(report_path:str) -> Optional[str]:
+def obtain_file(report_path: str) -> Optional[str]:
     """
     Returns file's location in file system
     If report_path is an URL script will download it to /tmp folder
@@ -31,8 +31,7 @@
     """
     if report_path.startswith("http"):
         try:
-            downloaded_file = wget.download(report_path,
-                                        f"/tmp/{uuid.uuid4()}.xml")
+            downloaded_file = wget.download(report_path, f"/tmp/{uuid.uuid4()}.xml")
             return downloaded_file
         except urllib.error.HTTPError:
             print("File is absent")
@@ -43,7 +42,7 @@
         raise FileNotFoundError(report_path)
 
 
-def is_xml(file_path:str) -> bool:
+def is_xml(file_path: str) -> bool:
     try:
         ElementTree.fromstring(open(file_path).read())
         return True
@@ -51,6 +50,7 @@
         print("File is not in XML format")
         return False
 
+
 class TestRunStatistic:
     def __init__(self) -> None:
         self.passed = 0
@@ -83,17 +83,20 @@
             passrate = f"{self.passrate:.2f}%"
         return f"P{self.passed}/F{self.failed}/E{self.errors} {passrate}"
 
+
 class Reporter:
     client: RPClient
+
     def __init__(self, client=None):
         if not client:
-            self.client = RPClient(endpoint=RP_ENDPOINT,
-                  project=RP_PROJECT,
-                  api_key=RP_APIKEY,
-                  is_skipped_an_issue=False,
-                  truncate_attributes=False,
-                  retries=15
-                  )
+            self.client = RPClient(
+                endpoint=RP_ENDPOINT,
+                project=RP_PROJECT,
+                api_key=RP_APIKEY,
+                is_skipped_an_issue=False,
+                truncate_attributes=False,
+                retries=15,
+            )
         self.scheduled = list()
         self.uuid_by_subfolder = dict()
         self.attributes = dict()
@@ -104,17 +107,14 @@
     def finish_all(self) -> None:
         while self.scheduled:
             finishing_item = self.scheduled.pop()
-            self.client.finish_test_item(item_id=finishing_item,
-                                    end_time=timestamp()
-                                    )
+            self.client.finish_test_item(item_id=finishing_item, end_time=timestamp())
         self.reset_cache()
 
     def reset_cache(self) -> None:
         self.scheduled = list()
         self.uuid_by_subfolder = dict()
 
-    def create_subfolders(self,
-                          list_of_subfolders: list, root_folder=None) -> str:
+    def create_subfolders(self, list_of_subfolders: list, root_folder=None) -> str:
         parent_folder = root_folder
         for number, subfolder in enumerate(list_of_subfolders):
             subfolder_fullpath = list_of_subfolders[:number]
@@ -125,24 +125,34 @@
                     name=subfolder,
                     start_time=timestamp(),
                     item_type="suite",
-                    parent_item_id=parent_folder
+                    parent_item_id=parent_folder,
                 )
-                LOG.debug(f"start_test_item {subfolder=} "
-                          f"item_type=suite "
-                          f"name={subfolder} "
-                          f"parent_item_id={parent_folder}")
+                LOG.debug(
+                    f"start_test_item {subfolder=} "
+                    f"item_type=suite "
+                    f"name={subfolder} "
+                    f"parent_item_id={parent_folder}"
+                )
 
                 LOG.info(
                     f"Started suite: uuid={created_folder} for suite "
-                    f"{subfolder=} in {fullpath=} with {parent_folder=}")
+                    f"{subfolder=} in {fullpath=} with {parent_folder=}"
+                )
                 self.schedule_finishing(created_folder)
                 self.uuid_by_subfolder[fullpath] = created_folder
             folder_uuid = self.uuid_by_subfolder.get(fullpath)
             parent_folder = folder_uuid
         return parent_folder
 
-    def report_xml(self, report_path, title, attributes=None,
-                   link=None, description="", to_specific_launch=None):
+    def report_xml(
+        self,
+        report_path,
+        title,
+        attributes=None,
+        link=None,
+        description="",
+        to_specific_launch=None,
+    ):
         ts: xunitparser.TestSuite
         tr: xunitparser.TestResult
         tc: xunitparser.TestCase
@@ -175,20 +185,19 @@
                 attributes=attributes or None,
                 item_type="suite",
                 description=f"{description} <br>"
-                            f"<a href='{link}'>Jenkins Job</a><br> "
-                            f"Uploaded from <a href='{report_path}'>report</a>"
-
+                f"<a href='{link}'>Jenkins Job</a><br> "
+                f"Uploaded from <a href='{report_path}'>report</a>",
             )
-            LOG.debug(
-                f"start_test_item {test_suite=} item_type=suite name={title}")
+            LOG.debug(f"start_test_item {test_suite=} item_type=suite name={title}")
             self.schedule_finishing(item=test_suite)
             root_folder = test_suite
         else:
-            launch_id = self.client.start_launch(name=title,
-                                            start_time=timestamp(),
-                                            attributes=attributes or None,
-                                            description=f"{link} \n uploaded from report {report_path} "
-                                            )
+            launch_id = self.client.start_launch(
+                name=title,
+                start_time=timestamp(),
+                attributes=attributes or None,
+                description=f"{link} \n uploaded from report {report_path} ",
+            )
             LOG.debug(f"start_launch {launch_id=} name={title} ")
             print(f"(^-^)_日 report will be here {self.client.get_launch_ui_url()}")
             root_folder = None
@@ -198,13 +207,14 @@
 
         LOG.info(f"Sending to RP")
 
-        ts_with_progress_bar = click.progressbar(ts,
-                                                 label='Sending to RP',
-                                                 #  item_show_func=lambda a: a.methodname if a is not None,
-                                                 length=tr.testsRun,
-                                                 show_percent=True,
-                                                 bar_template='[%(bar)s] %(info)s %(label)s'
-                                                 )
+        ts_with_progress_bar = click.progressbar(
+            ts,
+            label="Sending to RP",
+            #  item_show_func=lambda a: a.methodname if a is not None,
+            length=tr.testsRun,
+            show_percent=True,
+            bar_template="[%(bar)s] %(info)s %(label)s",
+        )
         with ts_with_progress_bar:
             started_at = time.time()
 
@@ -212,9 +222,12 @@
                 if tc.classname:
                     last_subfolder = self.create_subfolders(
                         list_of_subfolders=tc.classname.split("."),
-                        root_folder=root_folder
+                        root_folder=root_folder,
                     )
-                elif "setup" in tc.methodname.lower() or "teardown" in tc.methodname.lower():
+                elif (
+                    "setup" in tc.methodname.lower()
+                    or "teardown" in tc.methodname.lower()
+                ):
                     # setup and teardown don't have classname but have path in their name like
                     # in tempest:
                     # setUpClass (tempest.api.compute.admin.test_create_server.WindowsServers11Test)
@@ -225,13 +238,13 @@
                         found_text: str = found_text.strip("()")
                     last_subfolder = self.create_subfolders(
                         list_of_subfolders=found_text.split("."),
-                        root_folder=root_folder)
+                        root_folder=root_folder,
+                    )
                     # name = f"{tc.classname}.{tc.methodname}"
                 # else:
                 name = tc.methodname
                 elapsed = time.time() - started_at
-                ts_with_progress_bar.label = f"{elapsed:.2f}s {name}".replace(
-                    "\n", " ")
+                ts_with_progress_bar.label = f"{elapsed:.2f}s {name}".replace("\n", " ")
 
                 # It's a bad way to detect setup\teardown because every testing
                 # framework has his own way to log setup\teardown
@@ -250,12 +263,14 @@
                     start_time=test_started_at,
                     item_type=item_type,
                     description=f"{tc.classname}.{tc.methodname}",
-                    parent_item_id=last_subfolder
+                    parent_item_id=last_subfolder,
                 )
-                LOG.debug(f"start_test_item {item_id=} "
-                          f"{name=} "
-                          f"{item_type=} "
-                          f"parent_item_id={last_subfolder}")
+                LOG.debug(
+                    f"start_test_item {item_id=} "
+                    f"{name=} "
+                    f"{item_type=} "
+                    f"parent_item_id={last_subfolder}"
+                )
                 if not item_id:
                     raise Exception(f"Failed to start test {name}")
                 match tc.result:
@@ -274,36 +289,40 @@
                         raise BaseException(f"Unknown {tc.result=} in xml")
 
                 if tc.message:
-                    self.client.log(time=timestamp(),
-                               message=tc.message,
-                               item_id=item_id
-                               )
+                    self.client.log(
+                        time=timestamp(), message=tc.message, item_id=item_id
+                    )
                 if tc.trace:
                     delimiter = "Traceback (most recent call last):"
                     for n, log in enumerate(tc.trace.split(delimiter)):
                         # All Traceback will be reporter as ERROR
-                        message = delimiter+log if n else log
+                        message = delimiter + log if n else log
                         level = "ERROR" if n else "INFO"
-                        self.client.log(time=timestamp(),
-                                        message=message,
-                                        level=level,
-                                        item_id=item_id
-                                        )
+                        self.client.log(
+                            time=timestamp(),
+                            message=message,
+                            level=level,
+                            item_id=item_id,
+                        )
                         # Sleep to save chronological sorting for logs as is
                         # reportportal has timestamps in milliseconds
                         time.sleep(0.001)
 
                 # timestamp() 1739905243451 in milliseconds
                 # tc.time datetime.timedelta(microseconds=259000)
-                end_time_with_duration = datetime.datetime.fromtimestamp(
-                    int(test_started_at) / 1000) + tc.time
+                end_time_with_duration = (
+                    datetime.datetime.fromtimestamp(int(test_started_at) / 1000)
+                    + tc.time
+                )
                 end_time_in_milliseconds = int(
-                    end_time_with_duration.timestamp() * 1000)
+                    end_time_with_duration.timestamp() * 1000
+                )
 
-                self.client.finish_test_item(item_id=item_id,
-                                        end_time=str(end_time_in_milliseconds),
-                                        status=status,
-                                        )
+                self.client.finish_test_item(
+                    item_id=item_id,
+                    end_time=str(end_time_in_milliseconds),
+                    status=status,
+                )
         self.finish_all()
         if not to_specific_launch:
             self.client.finish_launch(end_time=timestamp())
@@ -313,22 +332,20 @@
 
 if __name__ == "__main__":
 
-    report_path = 'https://artifactory.mcp.mirantis.net/artifactory/oscore-local/jenkins-job-artifacts/oscore-si-tests-runner-k8s/40162/si_test_report.xml'
+    report_path = "https://artifactory.mcp.mirantis.net/artifactory/oscore-local/jenkins-job-artifacts/oscore-si-tests-runner-k8s/40162/si_test_report.xml"
     title = "Rockoon functional"
     attributes = {
         # "openstack_version": "caracal",
         "mosk_version": "mosk-24.3.3"
     }
     link = ""
-    
+
     reporter = Reporter()
     reporter.report_xml(
         report_path=report_path,
-        title = title,
+        title=title,
         link=link,
         # to_specific_launch="432ce97b-2727-4e4c-8303-9f1d966a184e"
-        )
-
-
+    )
 
     # client.terminate()
diff --git a/rp_reporter/rp_reporter/settings.py b/rp_reporter/rp_reporter/settings.py
index c07fe2a..74420bc 100644
--- a/rp_reporter/rp_reporter/settings.py
+++ b/rp_reporter/rp_reporter/settings.py
@@ -8,10 +8,12 @@
 
 RP_CONFIG_FILE = environ.get("RP_CONFIG_FILE") or (Path.home() / ".reportportal_config")
 
+
 def from_conf(key_name):
     if not Path(RP_CONFIG_FILE).exists():
-        LOG.info(f"Can't get {key_name} because config file "
-                    f"not found: {RP_CONFIG_FILE}")
+        LOG.info(
+            f"Can't get {key_name} because config file " f"not found: {RP_CONFIG_FILE}"
+        )
         return None
     with open(RP_CONFIG_FILE) as f:
         yaml_config = yaml.safe_load(f)
@@ -20,26 +22,37 @@
             LOG.info(f"Can't get {key_name} because it's absent in {RP_CONFIG_FILE}")
         return value
 
+
 def call_error(key_name):
-    raise Exception(f"{key_name} should be defined in {RP_CONFIG_FILE} or "
-                    f"by environment variable")
+    raise Exception(
+        f"{key_name} should be defined in {RP_CONFIG_FILE} or "
+        f"by environment variable"
+    )
 
-RP_LOGGING = environ.get('RP_LOGGING') or from_conf('RP_LOGGING') or logging.WARNING
-RP_LOG_FILE = environ.get('RP_LOG_FILE') or from_conf('RP_LOG_FILE')
 
-logging.basicConfig(level=RP_LOGGING,
-                    format='%(asctime)s %(levelname)s - %(filename)s:%(lineno)d (%(funcName)s) - %(message)s',
-                    filename=RP_LOG_FILE,
-                    filemode='w'
-                    )
+RP_LOGGING = environ.get("RP_LOGGING") or from_conf("RP_LOGGING") or logging.WARNING
+RP_LOG_FILE = environ.get("RP_LOG_FILE") or from_conf("RP_LOG_FILE")
 
-RP_APIKEY = environ.get('RP_APIKEY') or from_conf('RP_APIKEY') or call_error("RP_APIKEY")
-RP_ENDPOINT = environ.get('RP_ENDPOINT') or from_conf('RP_ENDPOINT') or call_error("RP_ENDPOINT")
-RP_PROJECT = environ.get('RP_PROJECT') or from_conf('RP_PROJECT') or call_error("RP_PROJECT")
+logging.basicConfig(
+    level=RP_LOGGING,
+    format="%(asctime)s %(levelname)s - %(filename)s:%(lineno)d (%(funcName)s) - %(message)s",
+    filename=RP_LOG_FILE,
+    filemode="w",
+)
+
+RP_APIKEY = (
+    environ.get("RP_APIKEY") or from_conf("RP_APIKEY") or call_error("RP_APIKEY")
+)
+RP_ENDPOINT = (
+    environ.get("RP_ENDPOINT") or from_conf("RP_ENDPOINT") or call_error("RP_ENDPOINT")
+)
+RP_PROJECT = (
+    environ.get("RP_PROJECT") or from_conf("RP_PROJECT") or call_error("RP_PROJECT")
+)
 
 TIME_FORMAT = "%Y-%m-%d"
 
 if __name__ == "__main__":
     LOG.info(f"RP_APIKEY: {RP_APIKEY}")
     LOG.info(f"RP_ENDPOINT: {RP_ENDPOINT}")
-    LOG.info(f"RP_PROJECT: {RP_PROJECT}")
\ No newline at end of file
+    LOG.info(f"RP_PROJECT: {RP_PROJECT}")
diff --git a/rp_reporter/setup.cfg b/rp_reporter/setup.cfg
index 4ad511c..1d0da97 100644
--- a/rp_reporter/setup.cfg
+++ b/rp_reporter/setup.cfg
@@ -1,19 +1,10 @@
 [metadata]
 name = rp-reporter
 summary = Reporting test results to the Report Portal
-description-file = README.md
+description_file = README.md
 license = Apache Software License
-classifiers =
-    Programming Language :: Python
-    Programming Language :: Python :: 3
-    Programming Language :: Python :: 3.9
-    Environment :: OpenStack
-    Intended Audience :: Information Technology
-    Intended Audience :: System Administrators
-    License :: OSI Approved :: Apache Software License
-    Operating System :: POSIX :: Linux
 author = mirantis
-author-email = harhipova@mirantis.com
+author_email = harhipova@mirantis.com
 [global]
 setup-hooks = pbr.hooks.setup_hook
 [files]
@@ -25,3 +16,8 @@
     PyYAML
     wget
     jenkins-jinny @ git+https://github.com/annkapul/jenkins-jinny.git
+
+[extras]
+test =
+    flake8<3.8
+    black
diff --git a/rp_reporter/setup.py b/rp_reporter/setup.py
index bf43123..89441e5 100644
--- a/rp_reporter/setup.py
+++ b/rp_reporter/setup.py
@@ -5,7 +5,7 @@
     setup_requires=['pbr==5.6.0'],
     pbr=True,
     packages=find_packages(),
-    python_requires=">=3.9",
+    python_requires=">=3.8",
     entry_points={
         "console_scripts": [
             "rp-reporter=rp_reporter.batch_reporter:cli",
diff --git a/rp_reporter/tox.ini b/rp_reporter/tox.ini
new file mode 100644
index 0000000..eb36c27
--- /dev/null
+++ b/rp_reporter/tox.ini
@@ -0,0 +1,14 @@
+[tox]
+minversion = 3.1
+skipsdist = True
+envlist = py38,py310,py312
+ignore_basepython_conflict = True
+
+[testenv]
+basepython = python3
+usedevelop = True
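+# ".[test]" installs the package plus the test extras (black, flake8) from setup.cfg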
+deps =
+    .[test]
+commands =
+    black rp_reporter/
+;    flake8 rp_reporter/