Store ci-jobs as test items
It will allow to save/investigate/attach bug-tracking issues
for each job in build
Added pbr
Added new tags
Save link to pod-logs
Add short report for Confluence into Launch description
Related-Prod: PRODX-48948
Change-Id: Ic921a03b84f7fc9e8098d9069d039b321724ac2f
diff --git a/rp_reporter/README.md b/rp_reporter/README.md
index e6381a3..ddebdf4 100644
--- a/rp_reporter/README.md
+++ b/rp_reporter/README.md
@@ -7,7 +7,8 @@
```
or
```shell
-pip install git+https//gerrit/path_in_gerrit TODO
+pip install "git+https://gerrit.mcp.mirantis.com/mcp/osccore-qa-testing-tools@master#egg=rp-reporter&subdirectory=rp_reporter"
+
```
## Use as a cli
diff --git a/rp_reporter/rp_reporter/batch_reporter.py b/rp_reporter/rp_reporter/batch_reporter.py
index 9f3be01..c152b89 100755
--- a/rp_reporter/rp_reporter/batch_reporter.py
+++ b/rp_reporter/rp_reporter/batch_reporter.py
@@ -1,11 +1,13 @@
import click
import logging
+from collections import deque
import ipdb
import wget
import yaml
import itertools
import jmespath
+from typing import List, Optional
import jenkins_jinny.main as jj
from copy import deepcopy
@@ -13,6 +15,64 @@
LOG = logging.getLogger("rp_reporter")
+def grep(text: List, patterns: List, context_size: int):
+ """Works like `grep -rn`
+ returns found text with line before and after
+ :param text:
+ :param patterns: list of strings, strings to find
+ :param context_size: count of lines before and after to return
+ :return: Yields list of lines
+ """
+ context = deque(maxlen=context_size)
+ result = list()
+ after_count = 0
+ for num, line in enumerate(text, start=1):
+ context.append(f"{num} {line}")
+        if any(pattern.lower() in line.lower() for pattern in patterns):
+            # extend an open window with the matched line instead of dropping it
+            result = result + [f"{num} {line}"] if result else list(context)
+            after_count = context_size
+            continue
+ if after_count > 0:
+ result.append(f"{num} {line}")
+ after_count -= 1
+ if result and after_count == 0:
+ yield result
+ result = list()
+ if result:
+ yield result
+
+class Description:
+ def __init__(self):
+ self.rockoon_version = None
+ self.test_results = list()
+ self.job_status = None
+ self.job_number = None
+ self.job_url = None
+
+ @property
+ def job_link(self):
+ return f"[#{self.job_number} {self.job_status}]({self.job_url}) "
+
+ def add_test_result(self, test_name=None,
+ testrail_url=None, rp_url=None, statistics=None):
+ testrail_msg = f"[TestRailURL]({testrail_url})" if testrail_url else ""
+
+ test_result = (f"{test_name} {testrail_msg} "
+ f"[RP_URL]({rp_url}) "
+ f"{statistics}")
+
+ self.test_results.append(test_result)
+
+ def __repr__(self):
+ # Return each test result on new line with "* " prefix
+ test_results = "\n".join(f"* {r}" for r in self.test_results)
+ return (f"\n___\n"
+ f"{self.job_link}\n"
+ f"`{self.rockoon_version}`\n"
+ f"{test_results}\n"
+ f"___\n")
+
def get_tags(job: jj.Build):
tags = dict()
tags["start_time"] = job.start_time.strftime("%Y-%m-%d-%H-%M-%S")
@@ -29,7 +89,75 @@
tags["mosk_version"] = "master"
return tags
-def upload_job(job:str, suite_per_job=False, tags=None):
+def get_tags_from_osdpl(osdpl_file):
+ tags = dict()
+    with open(osdpl_file[0], "rb") as osdpl_stream:
+        osdpl_dict = yaml.safe_load(yaml.safe_load(osdpl_stream))  # NOTE(review): double load kept from old inline code — confirm deployed.yaml nests YAML in a string
+ found_osdpl = jmespath.search("items[*].status.version", osdpl_dict)
+ if not found_osdpl:
+ LOG.error(f"Can't find osdpl info in {osdpl_file}")
+        return tags  # empty dict; returning None would break tags.update() at the call site
+ tags["rockoon_version"] = found_osdpl[0]
+
+ if ovn:=jmespath.search("items[*].spec.features.neutron.backend", osdpl_dict):
+ tags["neutron.backend"] = ovn[0]
+
+ if nova_images:=jmespath.search("items[*].spec.features.nova.images.backend", osdpl_dict):
+ tags["nova.images.backend"] = nova_images[0]
+
+ return tags
+
+
+def report_job_status(job: jj.Build, job_suite_id: str, reporter: Reporter):
+ rp_client = reporter.client
+ if not job_suite_id:
+ return
+ subjob_item_id = rp_client.start_test_item(
+ name=job.name,
+ start_time=timestamp(),
+ item_type="STEP",
+ description=f"child {job.url} {job.duration} {job.status}",
+ parent_item_id=job_suite_id
+ )
+ match job.status:
+ case "SUCCESS":
+ status = "PASSED"
+ case "FAILURE":
+ status = "FAILED"
+ case "ABORTED":
+ status = "FAILED"
+ case "UNSTABLE":
+ status = "PASSED"
+ case _:
+ LOG.error(f"Unknown translation {job.status=} to status")
+ status = "FAILED"
+ # add logs to test
+
+ catch_logs = [
+ "failure",
+ "failed",
+ "error",
+ "exception",
+ "assert",
+ "discovered openstack controller version is"
+ ]
+
+ all_logs = job.get_logs()
+ for log in grep(text=all_logs, patterns=catch_logs, context_size=8):
+ # LOG.error("Attach logs {}".format("\n".join(log)))
+ rp_client.log(time=timestamp(),
+ message="\n".join(log),
+ item_id=subjob_item_id
+ )
+ # ipdb.set_trace()
+ rp_client.finish_test_item(item_id=subjob_item_id,
+ status=status,
+ end_time=timestamp(),
+ )
+
+def upload_job(job: str|jj.Build,
+ suite_per_job: bool=False,
+ tags: Optional[dict] =None):
if isinstance(job, str):
job = jj.Build(job)
if not tags:
@@ -39,14 +167,17 @@
tags.update(get_tags(job))
- # deploy_job = job.get_child_jobs("deploy-openstack-k8s-env")[0]
- # for file_location in deploy_job.get_artifacts("deployed.yaml"):
- # osdpl_content = yaml.safe_load_all(open(file_location))
- # yaml.safe_load_all
- # tags["rockoon"] =
reporter = Reporter()
+ description = Description()
rp_client = reporter.client
launch_id = None
+ job_suite_id = None
+ artifactory_url = None
+
+ description.job_status = job.status
+ description.job_number = job.number
+ description.job_url = job.url
+
if suite_per_job:
tags["jenkins_job"] = job.number
launch_id = rp_client.start_launch(
@@ -56,39 +187,42 @@
description=f"Deploy job {job.url}"
)
print(f"(^-^)_日 report will be here {rp_client.get_launch_ui_url()}")
+ job_suite_id = rp_client.start_test_item(
+ name="CI jobs",
+ start_time=timestamp(),
+ item_type="suite",
+ )
+ reporter.schedule_finishing(job_suite_id)
rp_client.log(time=timestamp(),
message=f"Job status: {job.status}",
# item_id=launch_id
)
+
for child in itertools.chain([job], job.heirs):
child: jj.Build
rp_client.log(time=timestamp(),
message=f"{child} {child.status} {child.url}",
# item_id=launch_id
)
+ report_job_status(job=child, job_suite_id=job_suite_id,
+ reporter=reporter)
+
+ # test_tags = deepcopy(tags)
+ test_tags = dict()
print(f"⫍⌕⫎ tests in {child}")
- test_tags = deepcopy(tags)
test_results_files = None
match child.name:
case "deploy-openstack-k8s-env":
- # ipdb.set_trace()
osdpl_file = child.get_artifacts("deployed.yaml")
if not osdpl_file:
LOG.error(f"Can't find osdpl file in {job}")
continue
- osdpl_content = yaml.safe_load(open(osdpl_file[0], "rb"))
- osdpl_dictionary = yaml.safe_load(osdpl_content)
- found_osdpl = jmespath.search("items[*].status.version", osdpl_dictionary)
- if not found_osdpl:
- LOG.error(f"Can't find osdpl info in {job}")
- continue
-
- tags["rockoon_version"] = found_osdpl[0]
+ tags.update(get_tags_from_osdpl(osdpl_file))
rp_client.update_test_item(
attributes=tags,
item_uuid=launch_id
)
-
+ description.rockoon_version = tags.get("rockoon_version")
case "tempest-runner-k8s":
title = "Tempest"
test_results_files = [file_url
@@ -126,13 +260,20 @@
]
if not test_results_files:
LOG.error(f"Can't found 'test_check_downtime_statistic_result.xml' in {child.url}")
-
+ case "collect-openstack-kaas-artifacts":
+ artifactory_url = child.description.split("url: ")[-1]
+ rp_client.log(time=timestamp(),
+ message=f"Pod Logs {artifactory_url}/pod-logs.tar.gz")
if not test_results_files:
# We can iterate by child jobs which don't contain any reports
continue
+ testrail_url = [url
+ for url in child.get_link_from_description()
+ if "testrail.com" in url
+ ]
rp_client.log(time=timestamp(),
message=f"Found file to upload: {test_results_files}",
- item_id=launch_id)
+ )
report_path = test_results_files[0]
LOG.info("=== report_xml {kwargs}".format(
kwargs = dict(report_path=report_path,
@@ -143,25 +284,33 @@
)
print(f"(っ・-・)っ Uploading {report_path}")
- reporter.report_xml(
+ reported_suite_id, stats = reporter.report_xml(
report_path=report_path,
title = title,
attributes=test_tags,
link=job.url,
to_specific_launch=launch_id,
)
+ description.add_test_result(
+ test_name=f"{title} {test_tags.get('test', '')}",
+ testrail_url=testrail_url[0] if testrail_url else None,
+ rp_url=rp_client.get_launch_ui_url(),
+ statistics=stats
+ )
+ rp_client.log(time=timestamp(),
+ message=f"Reported with stats: {stats}",
+ )
- # if suite_per_job:
- #
- # LOG.info(rp_client.get_launch_info())
rp_client.log(time=timestamp(),
message="Reporting completed",
- # item_id=launch_id
)
if suite_per_job:
report_url = rp_client.get_launch_ui_url()
- rp_client.finish_launch(end_time=timestamp())
- print(f"maybe report is here {report_url}")
+ rp_client.finish_launch(end_time=timestamp(),
+ attributes=tags,
+                               description=str(description) + (f"\nPod Logs "
+                               f"{artifactory_url}/pod-logs.tar.gz" if artifactory_url else ""))
+ print(f"report is here {report_url}")
print(f" ʕノ•ᴥ•ʔノ Completed")
diff --git a/rp_reporter/rp_reporter/report_from_xml.py b/rp_reporter/rp_reporter/report_from_xml.py
index 7fe079b..5b38e6a 100644
--- a/rp_reporter/rp_reporter/report_from_xml.py
+++ b/rp_reporter/rp_reporter/report_from_xml.py
@@ -51,6 +51,35 @@
print("File is not in XML format")
return False
+class TestRunStatistic:
+ def __init__(self) -> None:
+ self.passed = 0
+ self.failed = 0
+ self.errors = 0
+
+ @property
+ def total(self):
+ return self.passed + self.failed + self.errors
+
+ @property
+ def passrate(self):
+        return (self.passed / self.total) * 100 if self.total else 0.0  # guard: no tests counted yet
+
+ def add_passed(self):
+ self.passed += 1
+
+ def add_failed(self):
+ self.failed += 1
+
+ def add_error(self):
+ self.errors += 1
+
+ def __repr__(self):
+ if self.passrate < 100:
+            passrate = f"`{self.passrate:.2f}%`"
+ else:
+            passrate = f"{self.passrate:.2f}%"
+ return f"P{self.passed}/F{self.failed}/E{self.errors} {passrate}"
class Reporter:
client: RPClient
@@ -114,6 +143,7 @@
ts: xunitparser.TestSuite
tr: xunitparser.TestResult
tc: xunitparser.TestCase
+ stats = TestRunStatistic()
report_file = obtain_file(report_path)
if not report_file:
print("Error occurred with file. Interrupting reporting")
@@ -133,6 +163,8 @@
attributes.update(ts.properties)
last_subfolder = None
+ test_suite = None
+ launch_id = None
if to_specific_launch:
test_suite = self.client.start_test_item(
name=title,
@@ -223,12 +255,15 @@
match tc.result:
case "success":
status = "PASSED"
+ stats.add_passed()
case "skipped":
status = "SKIPPED"
case "failure":
status = "FAILED"
+ stats.add_failed()
case "error":
status = "FAILED"
+ stats.add_error()
case _:
raise BaseException(f"Unknown {tc.result=} in xml")
@@ -264,6 +299,7 @@
if not to_specific_launch:
self.client.finish_launch(end_time=timestamp())
LOG.info(self.client.get_launch_info())
+ return (test_suite or launch_id), stats
if __name__ == "__main__":
diff --git a/rp_reporter/setup.cfg b/rp_reporter/setup.cfg
new file mode 100644
index 0000000..628ac4c
--- /dev/null
+++ b/rp_reporter/setup.cfg
@@ -0,0 +1,20 @@
+[metadata]
+name = rp-reporter
+summary = Reporting test results to the Report Portal
+description-file = README.md
+license = Apache Software License
+classifiers =
+ Programming Language :: Python
+ Programming Language :: Python :: 3
+ Programming Language :: Python :: 3.9
+ Environment :: OpenStack
+ Intended Audience :: Information Technology
+ Intended Audience :: System Administrators
+ License :: OSI Approved :: Apache Software License
+ Operating System :: POSIX :: Linux
+author = mirantis
+author-email = harhipova@mirantis.com
+[global]
+setup-hooks = pbr.hooks.setup_hook
+[files]
+packages = rp_reporter
diff --git a/rp_reporter/setup.py b/rp_reporter/setup.py
index 3bf060b..300e758 100644
--- a/rp_reporter/setup.py
+++ b/rp_reporter/setup.py
@@ -2,12 +2,8 @@
setup(
name="rp-reporter",
- version="0.1.0",
- author="Anna Arhipova",
- author_email="harhipova@mirantis.com",
- description="Reporting test results to the Report Portal",
- long_description=open("README.md").read(),
- long_description_content_type="text/markdown",
+ setup_requires=['pbr==5.6.0'],
+ pbr=True,
packages=find_packages(),
install_requires=[
"xunitparser",