Refactor the code of osccore-qa-testing-tools to comply with PEP8.
Related-prod: PRODX-42195
Change-Id: Id05e7584e0d024127ce1bd5042cfe681a1b52e2d
diff --git a/daily_jenkins_job_report/daily_report/config.py b/daily_jenkins_job_report/daily_report/config.py
index ef2778e..d52b691 100755
--- a/daily_jenkins_job_report/daily_report/config.py
+++ b/daily_jenkins_job_report/daily_report/config.py
@@ -1,47 +1,102 @@
# Jenkins API credantials
-USERNAME = 'mcp-oscore-jenkins'
-PASSWORD = 'ahvoNg4mae'
-JENKINS_URL = 'https://ci.mcp.mirantis.net'
+USERNAME = "mcp-oscore-jenkins"
+PASSWORD = "ahvoNg4mae"
+JENKINS_URL = "https://ci.mcp.mirantis.net"
# For updating Google sheets
-GOOGLE_AUTH = '/home/ubuntu/osccore-qa-testing-tools/daily_jenkins_job_report/daily_report/oscore-e717344565a0.json'
-GOOGLE_SHEET_NAME = 'July 2019'
+GOOGLE_AUTH = (
+ "/home/ubuntu/osccore-qa-testing-tools/daily_jenkins_job_report/"
+ "daily_report/oscore-e717344565a0.json"
+)
+GOOGLE_SHEET_NAME = "July 2019"
# For get_jobs_results.py save_results_to_html method
-GENERATED_REPORT = '/var/www/oscore_jobs.com/html/reports/'
-REPORT_TEMPLATE = '/home/ubuntu/osccore-qa-testing-tools/daily_jenkins_job_report/daily_report/templates/report_template.html'
+GENERATED_REPORT = "/var/www/oscore_jobs.com/html/reports/"
+REPORT_TEMPLATE = (
+ "/home/ubuntu/osccore-qa-testing-tools/daily_jenkins_job_report/"
+ "daily_report/templates/report_template.html"
+)
-LIST_OF_COLUMNS = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L',
- 'M', 'N',
- 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
- 'AA', 'AB', 'AC',
- 'AD', 'AE', 'AF', 'AG', 'AH', 'AI', 'AJ', 'AK', 'AL', 'AM',
- 'AN', 'AO',
- 'AP', 'AQ', 'AR', 'AS', 'AT', 'AU', 'AV', 'AW', 'AX', 'AY',
- 'AZ', 'BA']
+LIST_OF_COLUMNS = [
+ "A",
+ "B",
+ "C",
+ "D",
+ "E",
+ "F",
+ "G",
+ "H",
+ "I",
+ "J",
+ "K",
+ "L",
+ "M",
+ "N",
+ "O",
+ "P",
+ "Q",
+ "R",
+ "S",
+ "T",
+ "U",
+ "V",
+ "W",
+ "X",
+ "Y",
+ "Z",
+ "AA",
+ "AB",
+ "AC",
+ "AD",
+ "AE",
+ "AF",
+ "AG",
+ "AH",
+ "AI",
+ "AJ",
+ "AK",
+ "AL",
+ "AM",
+ "AN",
+ "AO",
+ "AP",
+ "AQ",
+ "AR",
+ "AS",
+ "AT",
+ "AU",
+ "AV",
+ "AW",
+ "AX",
+ "AY",
+ "AZ",
+ "BA",
+]
# For generating report
-MULTIJOBS = ['oscore-oscc-ci',
- 'oscore-promote-openstack-pike-xenial',
- 'oscore-promote-openstack-queens-xenial',
- 'oscore-test-release-2019.2.0',
- 'oscore-test-release-2018.4.0',
- 'oscore-test-release-2018.8.0',
- 'oscore-test-release-2018.11.0',
- ]
+MULTIJOBS = [
+ "oscore-oscc-ci",
+ "oscore-promote-openstack-pike-xenial",
+ "oscore-promote-openstack-queens-xenial",
+ "oscore-test-release-2019.2.0",
+ "oscore-test-release-2018.4.0",
+ "oscore-test-release-2018.8.0",
+ "oscore-test-release-2018.11.0",
+]
-SINGLEJOBS = ['oscore-test-openstack-upgrade-pike-queens-core-barbican',
- 'oscore-test-openstack-upgrade-pike-queens-core-ssl',
- 'oscore-test-openstack-upgrade-pike-queens-core-extra-ssl',
- 'oscore-test-openstack-upgrade-pike-queens-core-ironic',
- 'oscore-test-mcp-update-core-barbican-queens-2019.2.0-TO-testing',
- 'oscore-test-openstack-upgrade-pike-queens-core-barbican',
- ]
+SINGLEJOBS = [
+ "oscore-test-openstack-upgrade-pike-queens-core-barbican",
+ "oscore-test-openstack-upgrade-pike-queens-core-ssl",
+ "oscore-test-openstack-upgrade-pike-queens-core-extra-ssl",
+ "oscore-test-openstack-upgrade-pike-queens-core-ironic",
+ "oscore-test-mcp-update-core-barbican-queens-2019.2.0-TO-testing",
+ "oscore-test-openstack-upgrade-pike-queens-core-barbican",
+]
# For getting artifacts link for the single jobs
-LOGS_DIRECTORY = '/var/www/oscore_jobs.com/html/oscore_logs/'
+LOGS_DIRECTORY = "/var/www/oscore_jobs.com/html/oscore_logs/"
# Logging
-LOGGER = 'generate_report'
-LOG_FOLDER = '/tmp/'
-LOG_FILENAME = 'daily_jenkins_jobs_report.log'
+LOGGER = "generate_report"
+LOG_FOLDER = "/tmp/"
+LOG_FILENAME = "daily_jenkins_jobs_report.log"
diff --git a/daily_jenkins_job_report/daily_report/generate_report.py b/daily_jenkins_job_report/daily_report/generate_report.py
index d8f187b..3bd1a3a 100755
--- a/daily_jenkins_job_report/daily_report/generate_report.py
+++ b/daily_jenkins_job_report/daily_report/generate_report.py
@@ -17,43 +17,50 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import config
import datetime
-import jinja2
import logging
import re
+import config
+import jinja2
import update_google_sheets
-
-from get_artifacts_links_single_jobs import update_all_jobs_results_with_artifacts
-
-from jinja2 import Template
+from get_artifacts_links_single_jobs import (
+ update_all_jobs_results_with_artifacts,
+)
from jenkinsapi import custom_exceptions
from jenkinsapi.jenkins import Jenkins
from jenkinsapi.utils.crumb_requester import CrumbRequester
-
+from jinja2 import Template
logging.basicConfig(
- format='[%(asctime)s][%(name)s][%(levelname)s] %(message)s',
- datefmt='%d-%m-%Y %H:%M:%S',
- handlers=[logging.FileHandler('{}{}'.format(
- config.LOG_FOLDER, config.LOG_FILENAME)), logging.StreamHandler()],
- level=logging.INFO)
+ format="[%(asctime)s][%(name)s][%(levelname)s] %(message)s",
+ datefmt="%d-%m-%Y %H:%M:%S",
+ handlers=[
+ logging.FileHandler(
+ "{}{}".format(config.LOG_FOLDER, config.LOG_FILENAME)
+ ),
+ logging.StreamHandler(),
+ ],
+ level=logging.INFO,
+)
logger = logging.getLogger(config.LOGGER)
class GetJobsResults:
def __init__(self):
- self.server = Jenkins(config.JENKINS_URL,
- username=config.USERNAME,
- password=config.PASSWORD,
- requester=CrumbRequester(
- username=config.USERNAME,
- password=config.PASSWORD,
- baseurl=config.JENKINS_URL))
+ self.server = Jenkins(
+ config.JENKINS_URL,
+ username=config.USERNAME,
+ password=config.PASSWORD,
+ requester=CrumbRequester(
+ username=config.USERNAME,
+ password=config.PASSWORD,
+ baseurl=config.JENKINS_URL,
+ ),
+ )
def get_console_output(self, job_name):
- logger.info('Getting console output from: {}'.format(job_name))
+ logger.info(f"Getting console output from: {job_name}")
job = self.server.get_job(job_name)
@@ -62,24 +69,23 @@
return console.lower()
def get_run_jobs_from_console_output(self, job_name):
- logger.info(
- 'Getting run jobs from console output: {}'.format(job_name))
+ logger.info(f"Getting run jobs from console output: {job_name}")
job_console = self.get_console_output(job_name)
- console_list = job_console.split('\n')
+ console_list = job_console.split("\n")
output = []
for i in console_list:
- if 'starting building: oscore-' in i:
+ if "starting building: oscore-" in i:
output.append(i)
- jobs_ids = ''.join(output)
- jobs_ids = jobs_ids.replace('starting building:', ' ')
+ jobs_ids = "".join(output)
+ jobs_ids = jobs_ids.replace("starting building:", " ")
jobs_ids = re.findall(r"oscore-[\w-]+ #\d+", jobs_ids)
res = {}
for i in jobs_ids:
- name_id = i.split(' #')
+ name_id = i.split(" #")
res[name_id[1]] = name_id[0]
return res
@@ -88,8 +94,7 @@
results_multijobs = {}
try:
- logger.info(
- 'Getting IDs multijobs: {} {}'.format(job_name, job_id))
+ logger.info(f"Getting IDs multijobs: {job_name} {job_id}")
job = self.server.get_job(job_name)
@@ -104,51 +109,57 @@
timestamp = build.get_timestamp().timestamp()
try:
- job_name = build_params['COOKIECUTTER_TEMPLATE_CONTEXT_FILE']
+ job_name = build_params["COOKIECUTTER_TEMPLATE_CONTEXT_FILE"]
except KeyError:
try:
- job_name = build_params['STACK_CLUSTER_NAME']
+ job_name = build_params["STACK_CLUSTER_NAME"]
except KeyError:
logger.warning(
- 'KeyError, there are no '
- 'COOKIECUTTER_TEMPLATE_CONTEXT_FILE '
- 'or STACK_CLUSTER_NAME')
+ "KeyError, there are no "
+ "COOKIECUTTER_TEMPLATE_CONTEXT_FILE "
+ "or STACK_CLUSTER_NAME"
+ )
pass
- results_multijobs['build_status'] = build_status
- results_multijobs['job_name'] = job_name
- results_multijobs['baseurl'] = baseurl
- results_multijobs['timestamp'] = timestamp
- logger.info('build status: {} job name: {} baseurl: {}'.format(
- build_status, job_name, baseurl))
+ results_multijobs["build_status"] = build_status
+ results_multijobs["job_name"] = job_name
+ results_multijobs["baseurl"] = baseurl
+ results_multijobs["timestamp"] = timestamp
+ logger.info(
+ f"build status: {build_status} job name: "
+ f"{job_name} baseurl: {baseurl}"
+ )
return results_multijobs
except custom_exceptions.NotFound:
- logger.warning('Exception, NotFound: {}'.format(
- type(custom_exceptions.NotFound)))
- logger.warning('Job was erased. Exception, NotFound: {}'.format(
- job_name))
- results_multijobs['build_status'] = 'No Results'
- results_multijobs['job_name'] = job_name
- results_multijobs['baseurl'] = 'No Results'
- results_multijobs['timestamp'] = '0.0'
+ logger.warning(
+ f"Exception, NotFound: {type(custom_exceptions.NotFound)}"
+ )
+ logger.warning(f"Job was erased. Exception, NotFound: {job_name}")
+ results_multijobs["build_status"] = "No Results"
+ results_multijobs["job_name"] = job_name
+ results_multijobs["baseurl"] = "No Results"
+ results_multijobs["timestamp"] = "0.0"
return results_multijobs
def get_results_multijobs(self, job_names_to_ids):
- logger.info('Getting results multijobs: {}'.format(job_names_to_ids))
+ logger.info(f"Getting results multijobs: {job_names_to_ids}")
list_results = []
for job_id, job_name in job_names_to_ids.items():
- results_multijobs = self.get_job_results(job_id=job_id,
- job_name=job_name)
+ results_multijobs = self.get_job_results(
+ job_id=job_id, job_name=job_name
+ )
list_results.append(results_multijobs)
return list_results
def get_results_singlejobs(self, job_name):
- logger.info('Getting results single jobs: {}'.format(job_name))
+ logger.info(f"Getting results single jobs: {job_name}")
- results_singlejobs = self.get_job_results(job_name=job_name,
- get_last_build=True)
+ results_singlejobs = self.get_job_results(
+ job_name=job_name,
+ get_last_build=True,
+ )
return results_singlejobs
def job(self, job_name):
@@ -156,15 +167,15 @@
def get_all_jobs_results():
- logger.info('Getting all jobs results')
+ logger.info("Getting all jobs results")
jr = GetJobsResults()
m_jobs = {}
for job_name in config.MULTIJOBS:
- logger.info('Getting results multi jobs: {}'.format(job_name))
+ logger.info(f"Getting results multi jobs: {job_name}")
job_names_run_ids = jr.get_run_jobs_from_console_output(job_name)
- logger.info('Jobs names run IDs: {}'.format(job_names_run_ids))
+ logger.info(f"Jobs names run IDs: {job_names_run_ids}")
m_res = jr.get_results_multijobs(job_names_run_ids)
m_jobs[job_name] = m_res
@@ -173,18 +184,18 @@
s_res = jr.get_results_singlejobs(job_name)
s_jobs[job_name] = s_res
- return {'multi_results': m_jobs, 'single_results': s_jobs}
+ return {"multi_results": m_jobs, "single_results": s_jobs}
def save_results_to_html(all_jobs_results):
- filename = datetime.datetime.now().strftime("%d-%m-%Y_%H_%M") + '.html'
- logger.info('Saving results to html file: {}'.format(filename))
+ filename = datetime.datetime.now().strftime("%d-%m-%Y_%H_%M") + ".html"
+    logger.info(f"Saving results to html file: {filename}")
filename = config.GENERATED_REPORT + filename
html = open(config.REPORT_TEMPLATE).read()
template = Template(html)
- with open(filename, 'w') as fh:
+ with open(filename, "w") as fh:
fh.write(template.render(results=all_jobs_results))
return filename
@@ -197,19 +208,19 @@
:return: 2019-03-01 03:49:35
"""
value = float(format)
- return datetime.datetime.utcfromtimestamp(
- value).strftime('%d-%m-%Y %H:%M:%S')
+ return datetime.datetime.utcfromtimestamp(value).strftime(
+ "%d-%m-%Y %H:%M:%S"
+ )
-jinja2.filters.FILTERS['datetimeformat'] = datetimeformat
+jinja2.filters.FILTERS["datetimeformat"] = datetimeformat
-if __name__ == '__main__':
+if __name__ == "__main__":
all_jobs_results = get_all_jobs_results()
- logger.info(f'all_jobs_results: {all_jobs_results}')
+ logger.info(f"all_jobs_results: {all_jobs_results}")
all_jobs_results = update_all_jobs_results_with_artifacts(all_jobs_results)
save_results_to_html(all_jobs_results)
update_google_sheets.update_google_sheet(all_jobs_results)
-
diff --git a/daily_jenkins_job_report/daily_report/get_artifacts_links_single_jobs.py b/daily_jenkins_job_report/daily_report/get_artifacts_links_single_jobs.py
index 70e79da..fc09107 100644
--- a/daily_jenkins_job_report/daily_report/get_artifacts_links_single_jobs.py
+++ b/daily_jenkins_job_report/daily_report/get_artifacts_links_single_jobs.py
@@ -1,19 +1,18 @@
+import re
from pathlib import Path
import config
-import os
-import re
def get_full_filename(job_name, job_id):
if not job_name or not job_id:
return
- full_patch = config.LOGS_DIRECTORY + job_name + '/'
+ full_patch = config.LOGS_DIRECTORY + job_name + "/"
patch_obj = Path(full_patch)
if not patch_obj.exists():
return
- file_obj = list(patch_obj.glob(f'{job_id}*.txt'))
+ file_obj = list(patch_obj.glob(f"{job_id}*.txt"))
if len(file_obj) == 0:
return
return file_obj[0].as_posix()
@@ -26,7 +25,7 @@
with open(file_patch) as file:
for file_line in file:
if "Starting building: oscore-artifatcts-collector" in file_line:
- artifact_id = file_line.split('#')[-1]
+ artifact_id = file_line.split("#")[-1]
return artifact_id.rstrip()
@@ -37,23 +36,30 @@
with open(artifact_file_patch) as file:
for file_line in file:
if """https://artifactory.mcp.mirantis.net/""" in file_line:
- artifact_link = re.findall('https:\/\/artifactory\.mcp\.mirantis\.net\/artifactory\/oscore-local\/[a-zA-Z0-9-.]*\/[0-9-_]*\/', file_line)
+ artifact_link = re.findall(
+                r"https://artifactory\.mcp\.mirantis\.net/"
+                r"artifactory/oscore-local/[a-zA-Z0-9-.]*/[0-9-_]*/",
+ file_line,
+ )
return artifact_link[0]
def update_all_jobs_results_with_artifacts(all_jobs_results):
- single_jobs_results = all_jobs_results['single_results']
+ single_jobs_results = all_jobs_results["single_results"]
for job in single_jobs_results:
- job_id = all_jobs_results['single_results'][job]['baseurl'].split('/')[-1]
- job_name = all_jobs_results['single_results'][job]['job_name']
+ baseurl = all_jobs_results["single_results"][job]["baseurl"]
+ job_id = baseurl.split("/")[-1]
+ job_name = all_jobs_results["single_results"][job]["job_name"]
file_patch = get_full_filename(job_name, job_id)
artifact_filename_id = get_artifact_filename_id(file_patch)
- artifact_file_patch = get_full_filename(job_name='oscore-artifatcts-collector', job_id=artifact_filename_id)
+ artifact_file_patch = get_full_filename(
+ job_name="oscore-artifatcts-collector",
+ job_id=artifact_filename_id,
+ )
artifact_link = get_artifact_link(artifact_file_patch)
- all_jobs_results['single_results'][job]['artifacts'] = artifact_link
+ all_jobs_results["single_results"][job]["artifacts"] = artifact_link
return all_jobs_results
-
diff --git a/daily_jenkins_job_report/daily_report/update_google_sheets.py b/daily_jenkins_job_report/daily_report/update_google_sheets.py
index 8cb00c5..9791b0e 100755
--- a/daily_jenkins_job_report/daily_report/update_google_sheets.py
+++ b/daily_jenkins_job_report/daily_report/update_google_sheets.py
@@ -1,105 +1,217 @@
-import config
-import generate_report
-from datetime import datetime, timedelta
-
-import gspread
import logging
+from datetime import datetime
+
+import config
+import gspread
from oauth2client.service_account import ServiceAccountCredentials
-
logging.basicConfig(
- format='[%(asctime)s][%(name)s][%(levelname)s] %(message)s',
- datefmt='%d-%m-%Y %H:%M:%S',
- handlers=[logging.FileHandler('{}{}'.format(
- config.LOG_FOLDER, config.LOG_FILENAME)), logging.StreamHandler()],
- level=logging.INFO)
-logger = logging.getLogger('upd_gs')
+ format="[%(asctime)s][%(name)s][%(levelname)s] %(message)s",
+ datefmt="%d-%m-%Y %H:%M:%S",
+ handlers=[
+ logging.FileHandler(
+ "{}{}".format(config.LOG_FOLDER, config.LOG_FILENAME)
+ ),
+ logging.StreamHandler(),
+ ],
+ level=logging.INFO,
+)
+logger = logging.getLogger("upd_gs")
class UpdateGoogleSheets:
"""
Working with Google sheets
"""
+
def __init__(self):
- scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
- credentials = ServiceAccountCredentials.from_json_keyfile_name(config.GOOGLE_AUTH, scope)
+ scope = [
+ "https://spreadsheets.google.com/feeds",
+ "https://www.googleapis.com/auth/drive",
+ ]
+ credentials = ServiceAccountCredentials.from_json_keyfile_name(
+ config.GOOGLE_AUTH, scope
+ )
gc = gspread.authorize(credentials)
- self.wks = gc.open('OSCORE QA daily reports').worksheet(config.GOOGLE_SHEET_NAME)
- logger.info('Opening OSCORE QA daily reports for May 2019')
+ self.wks = gc.open("OSCORE QA daily reports").worksheet(
+ config.GOOGLE_SHEET_NAME
+ )
+ logger.info("Opening OSCORE QA daily reports for May 2019")
def update_gs_jobs(self, gs_jobs, jobs_for_update, column_to_update):
"""
Updates google sheet column with jobs
"""
- logger.info('Updating google sheet jobs')
+ logger.info("Updating google sheet jobs")
for gs in gs_jobs:
- for all_jobs in jobs_for_update['multi_results']:
- for upd in jobs_for_update['multi_results'][all_jobs]:
- if "oscore-oscc-ci" in upd['full_job_name'] and gs['gs_job_name'] == upd['full_job_name'] and upd['build_status'] != 'None':
+ for all_jobs in jobs_for_update["multi_results"]:
+ for upd in jobs_for_update["multi_results"][all_jobs]:
+ if (
+ "oscore-oscc-ci" in upd["full_job_name"]
+ and gs["gs_job_name"] == upd["full_job_name"]
+ and upd["build_status"] != "None"
+ ):
cell = f"{column_to_update}{gs['row_number']}"
- if upd['build_status'] == 'FAILURE':
- upd['build_status'] = 'FAILED'
+ if upd["build_status"] == "FAILURE":
+ upd["build_status"] = "FAILED"
- logger.info(f"{datetime.fromtimestamp(upd['timestamp'])}, {upd['full_job_name']}, {upd['build_status']}")
+ logger.info(
+ f"{datetime.fromtimestamp(upd['timestamp'])}, "
+ f"{upd['full_job_name']}, {upd['build_status']}"
+ )
if not self.wks.acell(cell).value:
- self.wks.update_acell(cell, upd['build_status'])
- if upd['full_job_name'] == 'oscore-oscc-ci_openstack-ovs-core-octavia-pike' or upd['full_job_name'] == 'oscore-oscc-ci_openstack-ovs-core-octavia-queens':
- network_team_cell = get_network_team_cell(column_to_update, gs['row_number'])
- self.wks.update_acell(network_team_cell, 'Area of networking team')
+ self.wks.update_acell(cell, upd["build_status"])
+ # FIXME: need to move this long
+ # strings into variable
+ if (
+ upd["full_job_name"]
+ == "oscore-oscc-ci_openstack-ovs-core-"
+ "octavia-pike"
+ or upd["full_job_name"]
+ == "oscore-oscc-ci_openstack-ovs-"
+ "core-octavia-queens"
+ ):
+ network_team_cell = get_network_team_cell(
+ column_to_update, gs["row_number"]
+ )
+ self.wks.update_acell(
+ network_team_cell,
+ "Area of networking team",
+ )
- if gs['gs_job_name'] == upd['full_job_name'] and upd['build_status'] != 'None' and check_datetime_today(upd['timestamp']):
+ if (
+ gs["gs_job_name"] == upd["full_job_name"]
+ and upd["build_status"] != "None"
+ and check_datetime_today(upd["timestamp"])
+ ):
cell = f"{column_to_update}{gs['row_number']}"
- if upd['build_status'] == 'FAILURE':
- upd['build_status'] = 'FAILED'
+ if upd["build_status"] == "FAILURE":
+ upd["build_status"] = "FAILED"
- logger.info(f"{datetime.fromtimestamp(upd['timestamp'])}, {upd['full_job_name']}, {upd['build_status']}")
+ logger.info(
+ f"{datetime.fromtimestamp(upd['timestamp'])}, "
+ f"{upd['full_job_name']}, {upd['build_status']}"
+ )
if not self.wks.acell(cell).value:
- self.wks.update_acell(cell, upd['build_status'])
- if upd['full_job_name'] == 'oscore-oscc-ci_openstack-ovs-core-octavia-pike' or upd['full_job_name'] == 'oscore-oscc-ci_openstack-ovs-core-octavia-queens':
- network_team_cell = get_network_team_cell(column_to_update, gs['row_number'])
- self.wks.update_acell(network_team_cell, 'Area of networking team')
+ self.wks.update_acell(cell, upd["build_status"])
+ # FIXME: need to move this long
+ # strings into variable
+ if (
+ upd["full_job_name"]
+ == "oscore-oscc-ci_openstack-ovs-core"
+ "-octavia-pike"
+ or upd["full_job_name"]
+ == "oscore-oscc-ci_openstack-ovs-core"
+ "-octavia-queens"
+ ):
+ network_team_cell = get_network_team_cell(
+ column_to_update, gs["row_number"]
+ )
+ self.wks.update_acell(
+ network_team_cell,
+ "Area of networking team",
+ )
- elif ("oscore-oscc-ci" not in upd['full_job_name']) and gs['gs_job_name'] == upd['full_job_name'] and upd['build_status'] != 'None' and not check_datetime_today(upd['timestamp']) and gs['row_number'] and column_to_update:
+ elif (
+ ("oscore-oscc-ci" not in upd["full_job_name"])
+ and gs["gs_job_name"] == upd["full_job_name"]
+ and upd["build_status"] != "None"
+ and not check_datetime_today(upd["timestamp"])
+ and gs["row_number"]
+ and column_to_update
+ ):
cell = f"{column_to_update}{gs['row_number']}"
- logger.info(f"NOT TOADAY: {datetime.fromtimestamp(upd['timestamp'])}, {upd['full_job_name']}, {upd['build_status']}")
+ logger.info(
+ f"NOT TODAY: "
+                            f"{datetime.fromtimestamp(upd['timestamp'])}, "
+ f"{upd['full_job_name']}, {upd['build_status']}"
+ )
if not self.wks.acell(cell).value:
- self.wks.update_acell(cell, 'NOT EXEQUTED')
- if upd['full_job_name'] == 'oscore-oscc-ci_openstack-ovs-core-octavia-pike' or upd['full_job_name'] == 'oscore-oscc-ci_openstack-ovs-core-octavia-queens':
- network_team_cell = get_network_team_cell(column_to_update, gs['row_number'])
- self.wks.update_acell(network_team_cell, 'Area of networking team')
+                        self.wks.update_acell(cell, "NOT EXECUTED")
+ # FIXME: need to move this long
+ # strings into variable
+ if (
+ upd["full_job_name"]
+ == "oscore-oscc-ci_openstack-ovs-core"
+ "-octavia-pike"
+ or upd["full_job_name"]
+ == "oscore-oscc-ci_openstack-ovs-core"
+ "-octavia-queens"
+ ):
+ network_team_cell = get_network_team_cell(
+ column_to_update, gs["row_number"]
+ )
+ self.wks.update_acell(
+ network_team_cell,
+ "Area of networking team",
+ )
for gs in gs_jobs:
- for single_job in jobs_for_update['single_results']:
- if jobs_for_update['single_results'][single_job]['job_name'] == gs['gs_job_name'] and jobs_for_update['single_results'][single_job]['build_status'] != 'No Results' and jobs_for_update['single_results'][single_job]['build_status'] != 'None':
- logger.info(f'Single jobs, column to update {column_to_update}')
+ for single_job in jobs_for_update["single_results"]:
+ if (
+ jobs_for_update["single_results"][single_job]["job_name"]
+ == gs["gs_job_name"]
+ and jobs_for_update["single_results"][single_job][
+ "build_status"
+ ]
+ != "No Results"
+ and jobs_for_update["single_results"][single_job][
+ "build_status"
+ ]
+ != "None"
+ ):
+ logger.info(
+ f"Single jobs, column to update {column_to_update}"
+ )
cell = f"{column_to_update}{gs['row_number']}"
- if jobs_for_update['single_results'][single_job]['build_status'] == 'FAILURE':
- jobs_for_update['single_results'][single_job]['build_status'] = 'FAILED'
+ if (
+ jobs_for_update["single_results"][single_job][
+ "build_status"
+ ]
+ == "FAILURE"
+ ):
+ jobs_for_update["single_results"][single_job][
+ "build_status"
+ ] = "FAILED"
if not self.wks.acell(cell).value:
- self.wks.update_acell(cell, jobs_for_update['single_results'][single_job]['build_status'])
+ self.wks.update_acell(
+ cell,
+ jobs_for_update["single_results"][single_job][
+ "build_status"
+ ],
+ )
- def get_all_gs_jobs(self, column_number='A'):
+ def get_all_gs_jobs(self, column_number="A"):
"""
Gets all the google sheet jobs for updating from the first column
"""
- logger.info('Getting all the google sheet jobs for updating from the first column')
+ logger.info(
+ "Getting all the google sheet jobs for updating "
+ "from the first column"
+ )
all_jobs = []
# If delete or remove a job from google sheet, update this:
for i in range(3, 40):
- all_jobs.append({'gs_job_name': self.wks.acell(column_number + str(i)).value,
- 'row_number': i,
- 'column_number': column_number})
- logger.info(f'All google jobs: {all_jobs}')
+ all_jobs.append(
+ {
+ "gs_job_name": self.wks.acell(
+ column_number + str(i)
+ ).value,
+ "row_number": i,
+ "column_number": column_number,
+ }
+ )
+ logger.info(f"All google jobs: {all_jobs}")
return all_jobs
def get_today_date_column(self):
@@ -109,10 +221,10 @@
today date.
If they are the same, returns column value from the list_of_columns
"""
- logger.info('Getting date from gs')
+ logger.info("Getting date from gs")
for i in config.LIST_OF_COLUMNS:
- cell = i + '2'
+ cell = i + "2"
now = datetime.now()
if self.wks.acell(cell).value == now.strftime("%Y-%m-%d"):
logger.info(f"{cell}, {i}, {self.wks.acell(cell).value}")
@@ -131,18 +243,18 @@
Combinating this we get the job names.
Returns updated Jenkins results
"""
- logger.info('Updating multy jobs names')
- for key, value in results['multi_results'].items():
+ logger.info("Updating multy jobs names")
+ for key, value in results["multi_results"].items():
for i in value:
- if 'virtual-mcp11-aio' in i['job_name']:
- version = i['baseurl'][72:]
- version = version[:version.find("/")]
- new_name = key + '_' + i['job_name'] + version
- i['full_job_name'] = new_name
+ if "virtual-mcp11-aio" in i["job_name"]:
+ version = i["baseurl"][72:]
+ version = version[: version.find("/")]
+ new_name = key + "_" + i["job_name"] + version
+ i["full_job_name"] = new_name
else:
- new_name = key + '_' + i['job_name']
- i['full_job_name'] = new_name
+ new_name = key + "_" + i["job_name"]
+ i["full_job_name"] = new_name
return results
@@ -154,7 +266,6 @@
to_date_obj = datetime.fromtimestamp(timestamp)
today = datetime.today().date()
- yesterday = datetime.today().date() - timedelta(days=1)
# Returns True or False
return to_date_obj.date() == today
diff --git a/daily_jenkins_job_report/setup.py b/daily_jenkins_job_report/setup.py
index 605b80b..199569e 100755
--- a/daily_jenkins_job_report/setup.py
+++ b/daily_jenkins_job_report/setup.py
@@ -1,11 +1,15 @@
from setuptools import setup
setup(
- name='daily_report',
- version='1.0',
- description='Generates daily report from nightly Jenkins jobs',
- author='Serhii Turivnyi',
- author_email='sturivnyi@mirantis.com',
- packages=['daily_report'], #same as name
- install_requires=['jenkinsapi', 'jinja2', 'setuptools'], #external packages as dependencies
-)
\ No newline at end of file
+ name="daily_report",
+ version="1.0",
+ description="Generates daily report from nightly Jenkins jobs",
+ author="Serhii Turivnyi",
+ author_email="sturivnyi@mirantis.com",
+ packages=["daily_report"], # same as name
+ install_requires=[
+ "jenkinsapi",
+ "jinja2",
+ "setuptools",
+ ], # external packages as dependencies
+)
diff --git a/log_helper/config.py b/log_helper/config.py
index ed2dd8a..d6aa79d 100644
--- a/log_helper/config.py
+++ b/log_helper/config.py
@@ -1,8 +1,8 @@
# Path to directory with logs (e.x. pod-logs)
-LOG_DIR = '/home/roman/Downloads/Test_logs/pod-logs'
+LOG_DIR = "/home/roman/Downloads/Test_logs/pod-logs"
# Path to machine-readable YAML file, generated by report_parcer tool
-TEMPEST_REPORT_YAML = '/home/roman/Downloads/Test_logs/tempest_new.yaml'
+TEMPEST_REPORT_YAML = "/home/roman/Downloads/Test_logs/tempest_new.yaml"
# Path to directory with results of log_helper execution
-RESULTS_DIR = '/home/roman/Downloads/Test_logs/log_helper_result'
+RESULTS_DIR = "/home/roman/Downloads/Test_logs/log_helper_result"
diff --git a/log_helper/log_helper.py b/log_helper/log_helper.py
index 48190c2..8e47a9a 100755
--- a/log_helper/log_helper.py
+++ b/log_helper/log_helper.py
@@ -1,88 +1,123 @@
#!/usr/bin/env python3
+import os
import subprocess
import sys
-import os
-import yaml
from os import path
-import config
+import config
+import yaml
param_is_yaml = False
if len(sys.argv) == 1:
param_is_yaml = path.isfile(config.TEMPEST_REPORT_YAML)
if param_is_yaml is False:
- print('TEMPEST_REPORT_YAML config parameter is not a file')
- raise Exception('TEMPEST_REPORT_YAML config parameter is not a file')
+ print("TEMPEST_REPORT_YAML config parameter is not a file")
+ raise Exception("TEMPEST_REPORT_YAML config parameter is not a file")
def log_gather(resource_id, sub_resource, log_level=None):
- """ Get all log lines related to resource-id
- :param resource_id: ID resource, e.g. request-id, server-id
- :param sub_resource: name of sub_resource log file, e.g subnet.log for neutron resource
- :param log_level: substring for resource_id log: e.g. get only ERROR log level messages, optional
- """
+ """Get all log lines related to resource-id
+ :param resource_id: ID resource, e.g. request-id, server-id
+ :param sub_resource: name of sub_resource log file,
+ e.g subnet.log for neutron resource
+ :param log_level: substring for resource_id log: e.g. get only
+ ERROR log level messages, optional
+ """
try:
directory = os.walk(config.LOG_DIR)
except IndexError:
- print('Parameter <LOG_DIR> is not provided')
- raise ValueError('Parameter <LOG_DIR> is not provided')
+ print("Parameter <LOG_DIR> is not provided")
+ raise ValueError("Parameter <LOG_DIR> is not provided")
if param_is_yaml:
for dirs in directory:
- run_cmd = f"grep -a {resource_id} {dirs[0]}/* >> {config.RESULTS_DIR}/{sub_resource}"
+ run_cmd = (
+ f"grep -a {resource_id} {dirs[0]}/* >> "
+ f"{config.RESULTS_DIR}/{sub_resource}"
+ )
subprocess.run(run_cmd, shell=True)
else:
for dirs in directory:
if log_level:
- run_cmd = f"grep -lE '{resource_id}.*{log_level}|{log_level}.*{resource_id}' {dirs[0]}/* >> '{config.RESULTS_DIR}/tmp.log'"
+ run_cmd = (
+ f"grep -lE '{resource_id}.*{log_level}|{log_level}"
+ f".*{resource_id}' {dirs[0]}/* >> "
+ f"'{config.RESULTS_DIR}/tmp.log'"
+ )
else:
- run_cmd = f"grep -l {resource_id} {dirs[0]}/* >> '{config.RESULTS_DIR}/tmp.log'"
+ run_cmd = (
+ f"grep -l {resource_id} {dirs[0]}/* >> "
+ f"'{config.RESULTS_DIR}/tmp.log'"
+ )
subprocess.run(run_cmd, shell=True)
- with open(config.RESULTS_DIR + '/tmp.log') as f:
+ with open(f"{config.RESULTS_DIR}/tmp.log") as f:
files = f.readlines()
for file in files:
subd = file.split("/")
- log_dir = subd[-4] + "." + subd[-3] + "." + subd[-2]
- log_name = subd[-1].replace('\n', '')
- os.makedirs(os.path.join(config.RESULTS_DIR, sys.argv[1], log_dir), exist_ok=True)
- path = os.path.join(config.RESULTS_DIR, sys.argv[1], log_dir, log_name)
+ log_dir = f"{subd[-4]}.{subd[-3]}.{subd[-2]}"
+ log_name = subd[-1].replace("\n", "")
+ os.makedirs(
+ os.path.join(config.RESULTS_DIR, sys.argv[1], log_dir),
+ exist_ok=True,
+ )
+ path = os.path.join(
+ config.RESULTS_DIR, sys.argv[1], log_dir, log_name
+ )
if log_level:
- run_cmd = f"grep -aE '{resource_id}.*{log_level}|{log_level}.*{resource_id}' {file} >> {path}"
+ run_cmd = (
+ f"grep -aE '{resource_id}.*{log_level}|{log_level}"
+ f".*{resource_id}' {file} >> {path}"
+ )
else:
run_cmd = f"grep -a {resource_id} {file} >> {path}"
- subprocess.run(run_cmd.replace('\n', ''), shell=True)
+ subprocess.run(run_cmd.replace("\n", ""), shell=True)
- os.remove(config.RESULTS_DIR + '/tmp.log')
+ os.remove(f"{config.RESULTS_DIR}/tmp.log")
if param_is_yaml:
- print('Find all the failed tempest tests from YAML file')
+ print("Find all the failed tempest tests from YAML file")
with open(config.TEMPEST_REPORT_YAML) as f:
test_resources = yaml.safe_load(f)
for test in test_resources.items():
# Find all the failed tempest tests from YAML file and gather logs for
# related resources in corresponded folders
- if test[1]['status'] == 'failure':
- print('Collecting logs for ' + test[0])
- os.makedirs(os.path.join(config.RESULTS_DIR, test[0]), exist_ok=True)
- for resource in test[1]['resources']:
- os.makedirs(os.path.join(config.RESULTS_DIR, test[0], resource), exist_ok=True)
- for sub_resource in test[1]['resources'][resource]:
- log_gather(list(test[1]['resources'][resource][sub_resource])[0],
- os.path.join(test[0], resource, sub_resource + '.' + 'log'))
+ if test[1]["status"] == "failure":
+ print(f"Collecting logs for {test[0]}")
+ os.makedirs(
+ os.path.join(config.RESULTS_DIR, test[0]), exist_ok=True
+ )
+ for resource in test[1]["resources"]:
+ os.makedirs(
+ os.path.join(config.RESULTS_DIR, test[0], resource),
+ exist_ok=True,
+ )
+ for sub_resource in test[1]["resources"][resource]:
+ log_gather(
+ list(test[1]["resources"][resource][sub_resource])[0],
+ os.path.join(
+ test[0], resource, sub_resource + "." + "log"
+ ),
+ )
else:
- print('Find all the related log for one specific test or id with error')
+ print("Find all the related log for one specific test or id with error")
os.makedirs(os.path.join(config.RESULTS_DIR, sys.argv[1]), exist_ok=True)
if len(sys.argv) == 3:
- log_gather(sys.argv[1], os.path.join(sys.argv[1], 'test' + '.' + 'log'), log_level=sys.argv[2])
+ log_gather(
+ sys.argv[1],
+ os.path.join(sys.argv[1], "test" + "." + "log"),
+ log_level=sys.argv[2],
+ )
else:
- log_gather(sys.argv[1], os.path.join(sys.argv[1], 'test' + '.' + 'log'))
+ log_gather(
+ sys.argv[1], os.path.join(sys.argv[1], "test" + "." + "log")
+ )
-print('The logger is finished')
+print("The logger is finished")
diff --git a/log_helper/setup.py b/log_helper/setup.py
index 5e73800..e7788a4 100644
--- a/log_helper/setup.py
+++ b/log_helper/setup.py
@@ -1,13 +1,13 @@
from distutils.core import setup
setup(
- name='log_helper',
- version='0.2',
+ name="log_helper",
+ version="0.2",
py_modules=["log_helper", "config"],
- install_requires=['pyyaml'],
- python_requires='>=3.6',
- author='Roman Bubyr',
- author_email='rbubyr@gmail.com',
- description='Openstack log helper tool',
- scripts=['log_helper.py'],
+ install_requires=["pyyaml"],
+ python_requires=">=3.6",
+ author="Roman Bubyr",
+ author_email="rbubyr@gmail.com",
+ description="Openstack log helper tool",
+ scripts=["log_helper.py"],
)
diff --git a/parcing_testrail_results/config.py b/parcing_testrail_results/config.py
index ede2f55..06ae694 100644
--- a/parcing_testrail_results/config.py
+++ b/parcing_testrail_results/config.py
@@ -1,11 +1,11 @@
import os
-TESTRAIL_USER = os.environ.get('TESTRAIL_USER')
-TESTRAIL_PASSWORD = os.environ.get('TESTRAIL_PASSWORD')
+TESTRAIL_USER = os.environ.get("TESTRAIL_USER")
+TESTRAIL_PASSWORD = os.environ.get("TESTRAIL_PASSWORD")
-TESTRAIL_URL = 'https://mirantis.testrail.com'
-TESTRAIL_TOKEN = '0YGnO1TC5NCCQFwgxmsW'
+TESTRAIL_URL = "https://mirantis.testrail.com"
+TESTRAIL_TOKEN = "0YGnO1TC5NCCQFwgxmsW"
TESTRAIL_COOKIES = "9adbe251-4ef1-474c-8ca6-9aaa1fbc5e76"
-LOGGIGNG_FOLDER = '/tmp/'
-LOGGIGNG_UTILS = 'testrail.log'
+LOGGIGNG_FOLDER = "/tmp/"
+LOGGIGNG_UTILS = "testrail.log"
diff --git a/parcing_testrail_results/html_testrail.py b/parcing_testrail_results/html_testrail.py
index 182299d..39eac00 100644
--- a/parcing_testrail_results/html_testrail.py
+++ b/parcing_testrail_results/html_testrail.py
@@ -1,25 +1,30 @@
-import click
-import config
import logging
import re
-import requests
-from bs4 import BeautifulSoup
from difflib import SequenceMatcher
-from testrail import *
+import click
+import config
+import requests
+from bs4 import BeautifulSoup
+from testrail import APIClient
client = APIClient(config.TESTRAIL_URL)
client.user = config.TESTRAIL_USER
client.password = config.TESTRAIL_PASSWORD
-logging.basicConfig(format='[%(asctime)s][%(name)s][%(levelname)s] %(message)s',
- datefmt='%d-%m-%Y %H:%M:%S',
- handlers=[
- logging.FileHandler('{}{}'.format(config.LOGGIGNG_FOLDER, config.LOGGIGNG_UTILS)),
- logging.StreamHandler()],
- level=logging.INFO)
-logger = logging.getLogger('testrail')
+logging.basicConfig(
+ format="[%(asctime)s][%(name)s][%(levelname)s] %(message)s",
+ datefmt="%d-%m-%Y %H:%M:%S",
+ handlers=[
+ logging.FileHandler(
+ "{}{}".format(config.LOGGIGNG_FOLDER, config.LOGGIGNG_UTILS)
+ ),
+ logging.StreamHandler(),
+ ],
+ level=logging.INFO,
+)
+logger = logging.getLogger("testrail")
class GetTestHistory:
@@ -29,48 +34,65 @@
def get_html(self):
token = config.TESTRAIL_TOKEN
- post_url = "https://mirantis.testrail.com/index.php?/tests/ajax_render_history"
+ post_url = (
+ "https://mirantis.testrail.com/index.php?/tests"
+ "/ajax_render_history"
+ )
headers = {
"Accept": "text/plain, */*; q=0.01",
"Accept-Encoding": "gzip, deflate, br",
- "Accept-Language": "en-US,en;q=0.9,es-AR;q=0.8,es;q=0.7,fr-DZ;q=0.6,fr;q=0.5,de-BE;q=0.4,de;q=0.3,ru-UA;q=0.2,ru;q=0.1,uk;q=0.1",
+ "Accept-Language": "en-US,en;q=0.9,es-AR;q=0.8,es;q=0.7,fr-DZ;"
+ "q=0.6,fr;q=0.5,de-BE;q=0.4,de;q=0.3,ru-UA;"
+ "q=0.2,ru;q=0.1,uk;q=0.1",
"Connection": "keep-alive",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Host": "mirantis.testrail.com",
"Origin": "https://mirantis.testrail.com",
- "Proxy-Authorization": "Basic VVZQTnYxLXAybjJlbXhldzB6Z2RkcndwM25vZ2JiaHJ0Zm9ib3pjJmpvaG5kb2VAdXZwbi5tZTpvN3I3cDA4Mml6cHNoZHp6eDBjeHNsZGVudmUzYmNyZg ==",
- "Referer": "https://mirantis.testrail.com/index.php?/tests/view/{}".format(self.test_id),
- "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/73.0.3683.86 Chrome/73.0.3683.86 Safari/537.36",
- "X-Requested-With": "XMLHttpRequest"
+ "Proxy-Authorization": "Basic VVZQTnYxLXAybjJlbXhldzB6Z2RkcndwM25v"
+ "Z2JiaHJ0Zm9ib3pjJmpvaG5kb2VAdXZwbi5tZTpvN3I3cDA4Mml6cHNoZHp6eDBj"
+ "eHNsZGVudmUzYmNyZg ==",
+ "Referer": "https://mirantis.testrail.com/index.php?/"
+ "tests/view/{}".format(self.test_id),
+ "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 "
+ "(KHTML, like Gecko) Ubuntu Chromium/73.0.3683.86 "
+ "Chrome/73.0.3683.86 Safari/537.36",
+ "X-Requested-With": "XMLHttpRequest",
}
cookies = {"tr_session": config.TESTRAIL_COOKIES}
- r = requests.post(post_url, data='test_id={}&limit=50&_token={}'.format(self.test_id, token),
- headers=headers,
- cookies=cookies)
+ r = requests.post(
+ post_url,
+ data=f"test_id={self.test_id}&limit=50&_token={token}",
+ headers=headers,
+ cookies=cookies,
+ )
html_page = r.text
return html_page
def get_old_test_results(self):
- logger.info('Getting old test results from html page')
+ logger.info("Getting old test results from html page")
html_page = self.get_html()
- soup = BeautifulSoup(html_page, 'html.parser')
+ soup = BeautifulSoup(html_page, "html.parser")
page_div = soup.div
tests_history = []
- for tag in page_div.find_all('td', {'class': 'id'}):
+ for tag in page_div.find_all("td", {"class": "id"}):
tag_parent = tag.parent
- test_status = tag_parent.find_all('span', {'class', 'status'})[0].string
- test_id = tag_parent.find_all('a', {'class', 'link-noline'})[0].string[1:]
+ test_status = tag_parent.find_all("span", {"class", "status"})[
+ 0
+ ].string
+ test_id = tag_parent.find_all("a", {"class", "link-noline"})[
+ 0
+ ].string[1:]
- test_data = {'test_status': test_status, 'test_id': test_id}
+ test_data = {"test_status": test_status, "test_id": test_id}
- if test_status == 'TestFailed':
+ if test_status == "TestFailed":
tests_history.append(test_data)
return tests_history
@@ -81,30 +103,30 @@
self.plan_id = plan_id
def get_plan(self):
- logger.info('Getting plan: {}'.format(self.plan_id))
- return client.send_get('get_plan/{}'.format(self.plan_id))
+ logger.info(f"Getting plan: {self.plan_id}")
+ return client.send_get(f"get_plan/{self.plan_id}")
def get_suites(self):
- logger.info('Getting suites')
+ logger.info("Getting suites")
plan = self.get_plan()
all_suites_ids = []
- for suite in plan['entries']:
- siute_id = suite['runs'][0]['id']
+ for suite in plan["entries"]:
+ siute_id = suite["runs"][0]["id"]
all_suites_ids.append(siute_id)
- logger.info('Suite: {}'.format(siute_id))
+ logger.info(f"Suite: {siute_id}")
return all_suites_ids
def get_test(self, test_id):
- logger.info('Getting test: {}'.format(test_id))
- return client.send_get('get_test/{}'.format(test_id))
+ logger.info(f"Getting test: {test_id}")
+ return client.send_get(f"get_test/{test_id}")
def get_tests_results_by_suite(self, suite_id):
- logger.info('Getting tests results by suite (suite_id): {}'.format(suite_id))
- return client.send_get('get_tests/{}'.format(suite_id))
+ logger.info(f"Getting tests results by suite (suite_id): {suite_id}")
+ return client.send_get(f"get_tests/{suite_id}")
def get_all_tests_results(self):
- logger.info('Getting all tests results')
+ logger.info("Getting all tests results")
all_suites = self.get_suites()
all_tests = []
@@ -115,7 +137,7 @@
return all_tests
def get_all_failed_tests(self, test_status=5):
- logger.info('Getting failed tests')
+ logger.info("Getting failed tests")
# test['status_id'] == 5 failed
# test['status_id'] == 9 test failed
# test['status_id'] == 10 infra failed
@@ -124,15 +146,19 @@
failed_tests = []
for tests_in_suite in all_tests_in_all_suites:
for test in tests_in_suite:
- if test['status_id'] == test_status:
- failed_tests.append(test)
+ if test["status_id"] == test_status:
+ failed_tests.append(test)
return failed_tests
def get_test_result(self, test_id):
- logger.info('Getting test result: {}'.format(test_id))
+ logger.info(f"Getting test result: {test_id}")
test = self.get_test(test_id)
- return client.send_get('get_results_for_case/{}/{}'.format(test['run_id'], test['case_id']))
+ return client.send_get(
+ "get_results_for_case/{}/{}".format(
+ test["run_id"], test["case_id"]
+ )
+ )
def update_test_results(self, test_id, defects):
"""
@@ -144,29 +170,35 @@
:param defects: defect to update
:return:
"""
- logger.info('Updating test results test_id: {} with defect: {}'.format(test_id, defects))
+ logger.info(
+ f"Updating test results test_id: {test_id} with defect: {defects}"
+ )
test = self.get_test(test_id)
- return client.send_post('add_result_for_case/{}/{}'.format(test['run_id'], test['case_id']),
- {'status_id': 9, 'comment': 'Updated by R2D2', 'defects': defects})
+ return client.send_post(
+ "add_result_for_case/{}/{}".format(
+ test["run_id"], test["case_id"]
+ ),
+ {"status_id": 9, "comment": "Updated by R2D2", "defects": defects},
+ )
def get_current_test_comment(current_test_results):
- logger.info('Getting current test comment')
+ logger.info("Getting current test comment")
for test_results in current_test_results:
- if 'comment' in test_results:
- if test_results['comment']:
- if len(test_results['comment']) > 50:
- return test_results['comment']
+ if "comment" in test_results:
+ if test_results["comment"]:
+ if len(test_results["comment"]) > 50:
+ return test_results["comment"]
def get_old_tests_comments_ids(old_test_results, failed_tests):
- logger.info('Getting old tests comments ids')
+ logger.info("Getting old tests comments ids")
old_tests_comments_ids = []
for test in old_test_results:
- test_result = failed_tests.get_test_result(test['test_id'])
+ test_result = failed_tests.get_test_result(test["test_id"])
old_tests_comments_ids.append(test_result)
return old_tests_comments_ids
@@ -177,78 +209,114 @@
:param test_comment: string
:return: string
"""
- logger.info('Updating current test comment')
+ logger.info("Updating current test comment")
format_date = r"\d{4}-\d\d-\d\dT\d\d:\d\d:\d\dZ"
- format_uuid_a = r"[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}"
+ format_uuid_a = (
+ r"[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}"
+ r"\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}"
+ )
format_uuid_b = r"[0-9a-fA-F]{32}"
format_space = r" "
format_new_line = r"\n"
- for item in format_date, format_uuid_a, format_uuid_b, format_space, format_new_line:
- test_comment = re.sub(item, '', test_comment)
+ for item in (
+ format_date,
+ format_uuid_a,
+ format_uuid_b,
+ format_space,
+ format_new_line,
+ ):
+ test_comment = re.sub(item, "", test_comment)
return test_comment
def update_old_comments(defects_and_comments_from_old_tests):
- logger.info('Updating old test comment')
+ logger.info("Updating old test comment")
for item in defects_and_comments_from_old_tests:
- item['old_test_comment'] = update_test_comment(item['old_test_comment'])
+ item["old_test_comment"] = update_test_comment(
+ item["old_test_comment"]
+ )
return defects_and_comments_from_old_tests
def get_defects_and_comments_from_old_tests(old_tests_comments_ids):
- logger.info('Getting defects and comments from old tests')
+ logger.info("Getting defects and comments from old tests")
data_from_old_tests = []
- old_test_comment = ''
- old_test_defect = ''
+ old_test_comment = ""
+ old_test_defect = ""
for old_test_list in old_tests_comments_ids:
for old_test in old_test_list:
- if old_test['comment']:
- if len(old_test['comment']) > 50:
- old_test_comment = old_test['comment']
+ if old_test["comment"]:
+ if len(old_test["comment"]) > 50:
+ old_test_comment = old_test["comment"]
- if old_test['defects']:
- old_test_defect = old_test['defects']
+ if old_test["defects"]:
+ old_test_defect = old_test["defects"]
if old_test_comment and old_test_defect:
- data_from_old_tests.append({'old_test_comment': old_test_comment, 'old_test_defect': old_test_defect})
+ data_from_old_tests.append(
+ {
+ "old_test_comment": old_test_comment,
+ "old_test_defect": old_test_defect,
+ }
+ )
return data_from_old_tests
-def compare_comments(current_test_comment, defects_and_comments_from_old_tests, desired_ratio=0.7, test_id=''):
- logger.info('Comparing comments')
+def compare_comments(
+ current_test_comment,
+ defects_and_comments_from_old_tests,
+ desired_ratio=0.7,
+ test_id="",
+):
+ logger.info("Comparing comments")
if not desired_ratio:
desired_ratio = 0.75
- defect_for_update = ''
+ defect_for_update = ""
for item in defects_and_comments_from_old_tests:
- m = SequenceMatcher(None, current_test_comment, item['old_test_comment'])
+ m = SequenceMatcher(
+ None, current_test_comment, item["old_test_comment"]
+ )
my_ratio = m.ratio()
- logger.info('Low ratio: {}, Desired ratio {} Test https://mirantis.testrail.com/index.php?/tests/view/{} '
- 'will NOT be updated with issue {}'.format(my_ratio,
- desired_ratio,
- test_id,
- item['old_test_defect']))
+ logger.info(
+ "Low ratio: {}, Desired ratio {} Test "
+ "https://mirantis.testrail.com/index.php?/tests/view/{} "
+ "will NOT be updated with issue {}".format(
+ my_ratio, desired_ratio, test_id, item["old_test_defect"]
+ )
+ )
if my_ratio > desired_ratio:
- logger.info('!!!!! Desired ratio {}, Test Ratio: {} Jira issue: {}'.format(desired_ratio,
- my_ratio,
- item['old_test_defect']))
+ logger.info(
+ "!!!!! Desired ratio {}, Test Ratio: {} Jira issue: {}".format(
+ desired_ratio, my_ratio, item["old_test_defect"]
+ )
+ )
- defect_for_update = item['old_test_defect']
+ defect_for_update = item["old_test_defect"]
return defect_for_update
@click.command()
-@click.option('--run_id', default=1, type=click.STRING, help='Testrail run_id. For example, '
- 'https://mirantis.testrail.com/index.php?/runs/view/63288 '
- 'So run_id will be 63288')
-@click.option('--ratio', type=click.FLOAT, help='The ratio to comapare current console output and old one.')
+@click.option(
+ "--run_id",
+ default=1,
+ type=click.STRING,
+ help="Testrail run_id. For example, "
+ "https://mirantis.testrail.com/index.php?/runs/view/63288 "
+ "So run_id will be 63288",
+)
+@click.option(
+ "--ratio",
+ type=click.FLOAT,
+ help="The ratio to compare current console output and old one.",
+)
def get_failed_tests_history(run_id, ratio):
failed_tests = GetFailedTests(run_id)
@@ -256,30 +324,43 @@
for test in all_failed_tests:
- test_history = GetTestHistory(test['id'])
+ test_history = GetTestHistory(test["id"])
old_test_results = test_history.get_old_test_results()
- curr_tst_res = failed_tests.get_test_result(test['id'])
+ curr_tst_res = failed_tests.get_test_result(test["id"])
current_test_comment = get_current_test_comment(curr_tst_res)
current_test_comment = update_test_comment(current_test_comment)
- old_tests_comments_ids = get_old_tests_comments_ids(old_test_results, failed_tests)
+ old_tests_comments_ids = get_old_tests_comments_ids(
+ old_test_results, failed_tests
+ )
- defects_and_comments_from_old_tests = get_defects_and_comments_from_old_tests(old_tests_comments_ids)
- defects_and_comments_from_old_tests = update_old_comments(defects_and_comments_from_old_tests)
+ defects_and_comments_from_old_tests = (
+ get_defects_and_comments_from_old_tests(old_tests_comments_ids)
+ )
+ defects_and_comments_from_old_tests = update_old_comments(
+ defects_and_comments_from_old_tests
+ )
if defects_and_comments_from_old_tests:
- defect_for_update = compare_comments(current_test_comment,
- defects_and_comments_from_old_tests,
- desired_ratio=ratio,
- test_id=test['id'])
+ defect_for_update = compare_comments(
+ current_test_comment,
+ defects_and_comments_from_old_tests,
+ desired_ratio=ratio,
+ test_id=test["id"],
+ )
if defect_for_update:
- logger.info('!!!!! Updating test-case: https://mirantis.testrail.com/index.php?/tests/view/{} '
- 'with Jira issue {}'.format(test['id'], defect_for_update))
- failed_tests.update_test_results(test_id=test['id'], defects=defect_for_update)
+ logger.info(
+ "!!!!! Updating test-case: https://mirantis.testrail.com"
+ "/index.php?/tests/view/{} "
+ "with Jira issue {}".format(test["id"], defect_for_update)
+ )
+ failed_tests.update_test_results(
+ test_id=test["id"], defects=defect_for_update
+ )
-if __name__ == '__main__':
+if __name__ == "__main__":
get_failed_tests_history()
diff --git a/parcing_testrail_results/testrail.py b/parcing_testrail_results/testrail.py
index 7ed5900..42dfbce 100644
--- a/parcing_testrail_results/testrail.py
+++ b/parcing_testrail_results/testrail.py
@@ -10,88 +10,93 @@
# Copyright Gurock Software GmbH. See license.md for details.
#
-import urllib.request, urllib.error
-import json, base64
+import base64
+import json
import time
+import urllib.error
+import urllib.request
+
class APIClient:
- def __init__(self, base_url):
- self.user = ''
- self.password = ''
- if not base_url.endswith('/'):
- base_url += '/'
- self.__url = base_url + 'index.php?/api/v2/'
+ def __init__(self, base_url):
+ self.user = ""
+ self.password = ""
+ if not base_url.endswith("/"):
+ base_url += "/"
+ self.__url = base_url + "index.php?/api/v2/"
- #
- # Send Get
- #
- # Issues a GET request (read) against the API and returns the result
- # (as Python dict).
- #
- # Arguments:
- #
- # uri The API method to call including parameters
- # (e.g. get_case/1)
- #
- def send_get(self, uri):
- try:
- return self.__send_request('GET', uri, None)
- except APIError:
- print("Got an API Exception. Waiting 30 sec.")
- time.sleep(30)
- return self.__send_request('GET', uri, None)
+ #
+ # Send Get
+ #
+ # Issues a GET request (read) against the API and returns the result
+ # (as Python dict).
+ #
+ # Arguments:
+ #
+ # uri The API method to call including parameters
+ # (e.g. get_case/1)
+ #
+ def send_get(self, uri):
+ try:
+ return self.__send_request("GET", uri, None)
+ except APIError:
+ print("Got an API Exception. Waiting 30 sec.")
+ time.sleep(30)
+ return self.__send_request("GET", uri, None)
- #
- # Send POST
- #
- # Issues a POST request (write) against the API and returns the result
- # (as Python dict).
- #
- # Arguments:
- #
- # uri The API method to call including parameters
- # (e.g. add_case/1)
- # data The data to submit as part of the request (as
- # Python dict, strings must be UTF-8 encoded)
- #
- def send_post(self, uri, data):
- return self.__send_request('POST', uri, data)
+ #
+ # Send POST
+ #
+ # Issues a POST request (write) against the API and returns the result
+ # (as Python dict).
+ #
+ # Arguments:
+ #
+ # uri The API method to call including parameters
+ # (e.g. add_case/1)
+ # data The data to submit as part of the request (as
+ # Python dict, strings must be UTF-8 encoded)
+ #
+ def send_post(self, uri, data):
+ return self.__send_request("POST", uri, data)
- def __send_request(self, method, uri, data):
- url = self.__url + uri
- request = urllib.request.Request(url)
- if (method == 'POST'):
- request.data = bytes(json.dumps(data), 'utf-8')
- auth = str(
- base64.b64encode(
- bytes('%s:%s' % (self.user, self.password), 'utf-8')
- ),
- 'ascii'
- ).strip()
- request.add_header('Authorization', 'Basic %s' % auth)
- request.add_header('Content-Type', 'application/json')
+ def __send_request(self, method, uri, data):
+ url = self.__url + uri
+ request = urllib.request.Request(url)
+ if method == "POST":
+ request.data = bytes(json.dumps(data), "utf-8")
+ auth = str(
+ base64.b64encode(
+ bytes("%s:%s" % (self.user, self.password), "utf-8")
+ ),
+ "ascii",
+ ).strip()
+ request.add_header("Authorization", "Basic %s" % auth)
+ request.add_header("Content-Type", "application/json")
- e = None
- try:
- response = urllib.request.urlopen(request).read()
- except urllib.error.HTTPError as ex:
- response = ex.read()
- e = ex
+ e = None
+ try:
+ response = urllib.request.urlopen(request).read()
+ except urllib.error.HTTPError as ex:
+ response = ex.read()
+ e = ex
- if response:
- result = json.loads(response.decode())
- else:
- result = {}
+ if response:
+ result = json.loads(response.decode())
+ else:
+ result = {}
- if e != None:
- if result and 'error' in result:
- error = '"' + result['error'] + '"'
- else:
- error = 'No additional error message received'
- raise APIError('TestRail API returned HTTP %s (%s)' %
- (e.code, error))
+ if e is not None:
+ if result and "error" in result:
+ error = '"' + result["error"] + '"'
+ else:
+ error = "No additional error message received"
+ raise APIError(
+ "TestRail API returned HTTP %s (%s)" % (e.code, error)
+ )
- return result
+ return result
+
class APIError(Exception):
- pass
\ No newline at end of file
+ pass
diff --git a/save_jenkins_console/base.py b/save_jenkins_console/base.py
index c42c7f9..de6b5bb 100755
--- a/save_jenkins_console/base.py
+++ b/save_jenkins_console/base.py
@@ -1,12 +1,12 @@
-import time
-
from config import JOBS_FOR_GETTING_LOGS_FROM_OUTPUT
from jenkins_api import gathering_data_from_jenkins_all_jenkins_job
def main():
- gathering_data_from_jenkins_all_jenkins_job(all_jenkins_job=JOBS_FOR_GETTING_LOGS_FROM_OUTPUT)
+ gathering_data_from_jenkins_all_jenkins_job(
+ all_jenkins_job=JOBS_FOR_GETTING_LOGS_FROM_OUTPUT
+ )
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/save_jenkins_console/save_jenkins_console/config.py b/save_jenkins_console/save_jenkins_console/config.py
index b0f6cb9..93bee76 100755
--- a/save_jenkins_console/save_jenkins_console/config.py
+++ b/save_jenkins_console/save_jenkins_console/config.py
@@ -1,44 +1,44 @@
# Jenkins API credantials
-USERNAME = 'mcp-oscore-jenkins'
-PASSWORD = 'ahvoNg4mae'
-JENKINS_URL = 'https://ci.mcp.mirantis.net'
+USERNAME = "mcp-oscore-jenkins"
+PASSWORD = "ahvoNg4mae"
+JENKINS_URL = "https://ci.mcp.mirantis.net"
# Jenkins jobs names for https://ci.mcp.mirantis.net
# For example https://ci.mcp.mirantis.net/oscore-tempest-runner
JOBS_FOR_GETTING_LOGS_FROM_OUTPUT = [
- 'oscore-tempest-runner',
- 'oscore-oscc-ci',
- 'oscore-test-cookiecutter-model',
- 'rollout-mcp-env',
- 'oscore-promote-openstack-pike-xenial',
- 'oscore-promote-openstack-queens-xenial',
- 'oscore-tempest-results-checker',
- 'run-deploy-job-on-cfg01',
- 'run-job-on-cfg01-jenkins',
- 'deploy-heat-virtual_mcp11_aio',
- 'oscore-artifatcts-collector',
- 'oscore-formula-systest-virtual_mcp11_aio-queens',
- 'oscore-formula-systest-virtual_mcp11_aio-newton',
- 'oscore-formula-systest-virtual_mcp11_aio-pike',
- 'oscore-formula-systest-virtual_mcp11_aio-mitaka',
- 'oscore-test-openstack-upgrade-pike-queens-core-ssl',
- 'oscore-test-openstack-upgrade-ocata-pike-core',
- 'oscore-test-openstack-upgrade-pike-queens-core-extra-ssl',
- 'oscore-test-openstack-upgrade-pike-queens-core-barbican',
- 'create-heat-stack-for-mcp-env',
- ]
+ "oscore-tempest-runner",
+ "oscore-oscc-ci",
+ "oscore-test-cookiecutter-model",
+ "rollout-mcp-env",
+ "oscore-promote-openstack-pike-xenial",
+ "oscore-promote-openstack-queens-xenial",
+ "oscore-tempest-results-checker",
+ "run-deploy-job-on-cfg01",
+ "run-job-on-cfg01-jenkins",
+ "deploy-heat-virtual_mcp11_aio",
+ "oscore-artifatcts-collector",
+ "oscore-formula-systest-virtual_mcp11_aio-queens",
+ "oscore-formula-systest-virtual_mcp11_aio-newton",
+ "oscore-formula-systest-virtual_mcp11_aio-pike",
+ "oscore-formula-systest-virtual_mcp11_aio-mitaka",
+ "oscore-test-openstack-upgrade-pike-queens-core-ssl",
+ "oscore-test-openstack-upgrade-ocata-pike-core",
+ "oscore-test-openstack-upgrade-pike-queens-core-extra-ssl",
+ "oscore-test-openstack-upgrade-pike-queens-core-barbican",
+ "create-heat-stack-for-mcp-env",
+]
# For database_to_files.py script
# Gets data from databases ans stores them to each directory test file
# where to unpack logs from db
-LOGS_DIRECTORY = '/var/www/oscore_jobs.com/html/oscore_logs/'
+LOGS_DIRECTORY = "/var/www/oscore_jobs.com/html/oscore_logs/"
# files older than in days
FILES_OLDER_THAN = 33
# For logging all the data what is happening in this script
-LOGGIGNG_FOLDER = '/tmp/oscore_qa_logs/'
-LOGGIGNG_JENKINS_API = 'jenkins_api.log'
-LOGGIGNG_UTILS = 'utils.log'
-LOG_FILENAME = 'manage_files.log'
+LOGGIGNG_FOLDER = "/tmp/oscore_qa_logs/"
+LOGGIGNG_JENKINS_API = "jenkins_api.log"
+LOGGIGNG_UTILS = "utils.log"
+LOG_FILENAME = "manage_files.log"
diff --git a/save_jenkins_console/save_jenkins_console/jenkins_api.py b/save_jenkins_console/save_jenkins_console/jenkins_api.py
index dced5e8..7b7a35a 100755
--- a/save_jenkins_console/save_jenkins_console/jenkins_api.py
+++ b/save_jenkins_console/save_jenkins_console/jenkins_api.py
@@ -1,42 +1,51 @@
-import config
+import logging
import os
-import requests
import time
-import utils
+import config
+import requests
+import utils
+from config import (
+ JOBS_FOR_GETTING_LOGS_FROM_OUTPUT,
+ LOGGIGNG_FOLDER,
+ LOGGIGNG_JENKINS_API,
+ LOGS_DIRECTORY,
+)
from jenkinsapi import custom_exceptions
from jenkinsapi.jenkins import Jenkins
from jenkinsapi.utils.crumb_requester import CrumbRequester
-
-import logging
-
-from config import LOGGIGNG_FOLDER
-from config import LOGGIGNG_JENKINS_API
-from config import LOGS_DIRECTORY
-from config import JOBS_FOR_GETTING_LOGS_FROM_OUTPUT
from manage_files import delete_old_files
-logging.basicConfig(format='[%(asctime)s][%(name)s][%(levelname)s] %(message)s',
- datefmt='%d-%m-%Y %H:%M:%S',
- handlers=[
- logging.FileHandler('{}{}'.format(LOGGIGNG_FOLDER, LOGGIGNG_JENKINS_API)),
- logging.StreamHandler()],
- level=logging.INFO)
-logger = logging.getLogger('jenkins_api')
+logging.basicConfig(
+ format="[%(asctime)s][%(name)s][%(levelname)s] %(message)s",
+ datefmt="%d-%m-%Y %H:%M:%S",
+ handlers=[
+ logging.FileHandler(
+ "{}{}".format(LOGGIGNG_FOLDER, LOGGIGNG_JENKINS_API)
+ ),
+ logging.StreamHandler(),
+ ],
+ level=logging.INFO,
+)
+logger = logging.getLogger("jenkins_api")
class GetJobsResults:
"""
Working with Jenkins API and tiny DB
"""
+
def __init__(self, pipeline_job_name):
- self.server = Jenkins(config.JENKINS_URL,
- username=config.USERNAME,
- password=config.PASSWORD,
- requester=CrumbRequester(
- username=config.USERNAME,
- password=config.PASSWORD,
- baseurl=config.JENKINS_URL))
+ self.server = Jenkins(
+ config.JENKINS_URL,
+ username=config.USERNAME,
+ password=config.PASSWORD,
+ requester=CrumbRequester(
+ username=config.USERNAME,
+ password=config.PASSWORD,
+ baseurl=config.JENKINS_URL,
+ ),
+ )
self.job = self.server.get_job(pipeline_job_name)
self.pipeline_job_name = pipeline_job_name
@@ -51,7 +60,9 @@
:return: list. Builds ID's
"""
builds_ids = self.job.get_build_ids()
- logger.info('Getting all builds ids: {}'.format(self.pipeline_job_name))
+ logger.info(
+ "Getting all builds ids: {}".format(self.pipeline_job_name)
+ )
return list(builds_ids)
def get_job_build(self, build_id):
@@ -59,7 +70,7 @@
job_build = self.job.get_build(build_id)
return job_build
except requests.exceptions.HTTPError:
- logger.warning('404 Client Error: Not Found for url')
+ logger.warning("404 Client Error: Not Found for url")
def manage_build_artifacts(self, job_build, build, build_date):
build_artifacts = job_build.get_artifacts()
@@ -68,9 +79,11 @@
if build_artfact:
patch_to_artifact_file = utils.generate_patch_to_artifact_file(
logs_directory=LOGS_DIRECTORY,
- job_name=self.pipeline_job_name)
+ job_name=self.pipeline_job_name,
+ )
saved_artifact_file_patch = build_artfact.save_to_dir(
- patch_to_artifact_file)
+ patch_to_artifact_file
+ )
# rename saved file
new_artifact_file_patch = utils.generate_artifact_file_patch(
@@ -78,14 +91,22 @@
patch_to_artifact_file=patch_to_artifact_file,
pipeline_job_name=self.pipeline_job_name,
build_id=build,
- build_date=build_date)
- if not utils.check_if_file_exists(patch_to_file=new_artifact_file_patch):
+ build_date=build_date,
+ )
+ if not utils.check_if_file_exists(
+ patch_to_file=new_artifact_file_patch
+ ):
new_artifact_filename = utils.rename_artifact_file(
old_artifact_file_patch=saved_artifact_file_patch,
- new_artifact_file_patch=new_artifact_file_patch)
- logger.info(f'new_artifact_filename: {new_artifact_filename}')
+ new_artifact_file_patch=new_artifact_file_patch,
+ )
+ logger.info(
+ f"new_artifact_filename: {new_artifact_filename}"
+ )
- if utils.check_if_file_exists(patch_to_file=saved_artifact_file_patch):
+ if utils.check_if_file_exists(
+ patch_to_file=saved_artifact_file_patch
+ ):
os.remove(saved_artifact_file_patch)
def get_build_artifacts(self, jobs_ids_list):
@@ -96,11 +117,16 @@
:param jobs_ids_list: List of the IDs for the particular job
:return: None if Ok
"""
- logger.info('Pipeline job name, jobs IDs list: {} {}'.format(self.pipeline_job_name, jobs_ids_list))
+ logger.info(
+ "Pipeline job name, jobs IDs list: {} {}".format(
+ self.pipeline_job_name, jobs_ids_list
+ )
+ )
build_counter = 1
for build in jobs_ids_list:
patch_to_file = utils.get_patch_to_file(
- job_name=self.pipeline_job_name, build_id=build)
+ job_name=self.pipeline_job_name, build_id=build
+ )
if not utils.check_if_file_exists(patch_to_file=patch_to_file):
# If a build ID is not in the DB then add it
try:
@@ -112,22 +138,31 @@
job_console = job_build.get_console()
if job_build and job_build_status:
- # Check if Build status is not None than job is not in progress, finished
- logger.info("Saving Buid to file: {}: {} build: {} from {}".format(
- self.pipeline_job_name,
- build,
- build_counter,
- len(jobs_ids_list)))
+                            # If build status is not None then the job is
+                            # not in progress, i.e. it has finished
+ logger.info(
+ "Saving Buid to file: {}: {} "
+ "build: {} from {}".format(
+ self.pipeline_job_name,
+ build,
+ build_counter,
+ len(jobs_ids_list),
+ )
+ )
build_counter += 1
# When job is finished
job_timestamp = job_build.get_timestamp().timestamp()
- build_date = utils.get_date_from_timestamp(job_timestamp)
+ build_date = utils.get_date_from_timestamp(
+ job_timestamp
+ )
# save build artifacts
- self.manage_build_artifacts(job_build=job_build,
- build=build,
- build_date=build_date)
+ self.manage_build_artifacts(
+ job_build=job_build,
+ build=build,
+ build_date=build_date,
+ )
# Save data th the file
utils.save_job_console_to_file(
@@ -136,18 +171,27 @@
build_id=build,
build_date=build_date,
build_status=job_build_status,
- data_to_write=job_console)
+ data_to_write=job_console,
+ )
except custom_exceptions.NotFound:
- logger.warning("Couldn't find a build: {}: {}".format(self.pipeline_job_name, build))
+ logger.warning(
+ "Couldn't find a build: {}: {}".format(
+ self.pipeline_job_name, build
+ )
+ )
continue
continue
- logger.warning("Jenkins log output already saved: {}: {} build: {} from {}".format(
- self.pipeline_job_name,
- build,
- build_counter,
- len(jobs_ids_list)))
- build_counter +=1
+ logger.warning(
+ "Jenkins log output already saved: {}: "
+ "{} build: {} from {}".format(
+ self.pipeline_job_name,
+ build,
+ build_counter,
+ len(jobs_ids_list),
+ )
+ )
+ build_counter += 1
continue
return
@@ -159,14 +203,17 @@
:param all_jenkins_job: list
:return: None if Ok
"""
- logger.info('Gathering data from Jenkins')
+ logger.info("Gathering data from Jenkins")
for pj in all_jenkins_job:
try:
jr = GetJobsResults(pj)
all_jobs_ids = jr.get_all_jobs_ids()
jr.get_build_artifacts(all_jobs_ids)
except requests.exceptions.ConnectionError:
- logger.warning("Got an exception. ConnectionError. Too many API requests waiting for 700 sec")
+ logger.warning(
+ "Got an exception. ConnectionError. "
+ "Too many API requests waiting for 700 sec"
+ )
time.sleep(700)
continue
@@ -177,26 +224,37 @@
:param all_jenkins_job: list
:return: None if Ok
"""
- logger.info('Gathering data from Jenkins parallel')
+ logger.info("Gathering data from Jenkins parallel")
try:
jr = GetJobsResults(one_jenkins_job)
all_jobs_ids = jr.get_all_jobs_ids()
jr.get_build_artifacts(all_jobs_ids)
except requests.exceptions.ConnectionError:
- logger.warning("Got an exception. ConnectionError. Too many API requests waiting for 700 sec")
+ logger.warning(
+ "Got an exception. ConnectionError. "
+ "Too many API requests waiting for 700 sec"
+ )
time.sleep(700)
def get_one_jenkins_job_one_id(jenkins_job_name, job_id):
- logger.info('Getting one Jenkins job: {}: {}'.format(jenkins_job_name, job_id))
+ logger.info(
+ "Getting one Jenkins job: {}: {}".format(jenkins_job_name, job_id)
+ )
try:
jr = GetJobsResults(jenkins_job_name)
jr.get_build_artifacts([int(job_id)])
except requests.exceptions.ConnectionError:
- logger.warning("Got an exception. ConnectionError. Too many API requests waiting for 700 sec")
+ logger.warning(
+ "Got an exception. ConnectionError. "
+ "Too many API requests waiting for 700 sec"
+ )
time.sleep(700)
-if __name__ == '__main__':
- gathering_data_from_jenkins_all_jenkins_job(JOBS_FOR_GETTING_LOGS_FROM_OUTPUT)
+
+if __name__ == "__main__":
+ gathering_data_from_jenkins_all_jenkins_job(
+ JOBS_FOR_GETTING_LOGS_FROM_OUTPUT
+ )
delete_old_files(config.FILES_OLDER_THAN, config.LOGS_DIRECTORY)
diff --git a/save_jenkins_console/save_jenkins_console/manage_files.py b/save_jenkins_console/save_jenkins_console/manage_files.py
index 0836624..070f490 100755
--- a/save_jenkins_console/save_jenkins_console/manage_files.py
+++ b/save_jenkins_console/save_jenkins_console/manage_files.py
@@ -17,18 +17,24 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import config
import logging
import os
import time
+import config
+
logging.basicConfig(
- format='[%(asctime)s][%(name)s][%(levelname)s] %(message)s',
- datefmt='%d-%m-%Y %H:%M:%S',
- handlers=[logging.FileHandler('{}{}'.format(
- config.LOGGIGNG_FOLDER, config.LOG_FILENAME)), logging.StreamHandler()],
- level=logging.INFO)
-logger = logging.getLogger('delete_old_files')
+ format="[%(asctime)s][%(name)s][%(levelname)s] %(message)s",
+ datefmt="%d-%m-%Y %H:%M:%S",
+ handlers=[
+ logging.FileHandler(
+ "{}{}".format(config.LOGGIGNG_FOLDER, config.LOG_FILENAME)
+ ),
+ logging.StreamHandler(),
+ ],
+ level=logging.INFO,
+)
+logger = logging.getLogger("delete_old_files")
def delete_old_files(days, path):
@@ -36,7 +42,7 @@
time_secs = time.time() - (days * 86400)
for root, dirs, files in os.walk(path, topdown=False):
for file in files:
- full_path = os.path.join(root,file)
+ full_path = os.path.join(root, file)
stat = os.stat(full_path)
if stat.st_mtime <= time_secs:
logger.info("removing: {}".format(full_path))
diff --git a/save_jenkins_console/save_jenkins_console/utils.py b/save_jenkins_console/save_jenkins_console/utils.py
index 4a97455..4de84f3 100755
--- a/save_jenkins_console/save_jenkins_console/utils.py
+++ b/save_jenkins_console/save_jenkins_console/utils.py
@@ -1,27 +1,29 @@
import glob
-import os
import logging
+import os
import time
-
from datetime import datetime
-from config import LOGGIGNG_FOLDER
-from config import LOGGIGNG_UTILS
-from config import LOGS_DIRECTORY
+from config import LOGGIGNG_FOLDER, LOGGIGNG_UTILS, LOGS_DIRECTORY
-logging.basicConfig(format='[%(asctime)s][%(name)s][%(levelname)s] %(message)s',
- datefmt='%d-%m-%Y %H:%M:%S',
- handlers=[
- logging.FileHandler('{}{}'.format(LOGGIGNG_FOLDER, LOGGIGNG_UTILS)),
- logging.StreamHandler()],
- level=logging.INFO)
-logger = logging.getLogger('jenkins_api')
+logging.basicConfig(
+ format="[%(asctime)s][%(name)s][%(levelname)s] %(message)s",
+ datefmt="%d-%m-%Y %H:%M:%S",
+ handlers=[
+ logging.FileHandler("{}{}".format(LOGGIGNG_FOLDER, LOGGIGNG_UTILS)),
+ logging.StreamHandler(),
+ ],
+ level=logging.INFO,
+)
+logger = logging.getLogger("jenkins_api")
def get_date_from_timestamp(timestamp):
if timestamp:
- return datetime.utcfromtimestamp(timestamp).strftime('%d-%m-%Y_%H_%M_%S')
- return ''
+ return datetime.utcfromtimestamp(timestamp).strftime(
+ "%d-%m-%Y_%H_%M_%S"
+ )
+ return ""
def get_last_build_id_from_db(current_database):
@@ -33,23 +35,23 @@
:return: int. Last build
"""
- logger.info('Getting last build id from DB')
+ logger.info("Getting last build id from DB")
build_ids = []
for field in current_database:
- build_ids.append(field['build']['id'])
+ build_ids.append(field["build"]["id"])
# print(build_ids)
- # logger.info('Last build id is: {} {}'.format(current_database, max(build_ids)))
+ # logger.info('Last build id is: {} {}'
+ # .format(current_database, max(build_ids)))
return max(build_ids)
def is_directory_exists(patch_to_directory):
- if patch_to_directory and \
- os.path.exists(patch_to_directory):
+ if patch_to_directory and os.path.exists(patch_to_directory):
return True
return False
-def create_directory(patch_to_directory, directory_to_create=''):
+def create_directory(patch_to_directory, directory_to_create=""):
"""
:param full_patch_to_directory: string
@@ -60,7 +62,7 @@
full_patch_to_directory = patch_to_directory + directory_to_create
if not is_directory_exists(full_patch_to_directory):
- logger.info('Creating directory: {}'.format(full_patch_to_directory))
+ logger.info("Creating directory: {}".format(full_patch_to_directory))
os.makedirs(full_patch_to_directory)
return full_patch_to_directory
@@ -74,12 +76,20 @@
:return: list of matches files
"""
- logger.info('Find all files in {} older than {} days'.format(patch_to_files, older_than))
+ logger.info(
+ "Find all files in {} older than {} days".format(
+ patch_to_files, older_than
+ )
+ )
now = time.time()
cutoff = now - (int(older_than) * 86400)
- files = [os.path.join(dp, f) for dp, dn, filenames in os.walk(patch_to_files) for f in filenames]
+ files = [
+ os.path.join(dp, f)
+ for dp, dn, filenames in os.walk(patch_to_files)
+ for f in filenames
+ ]
found_files = []
for xfile in files:
@@ -88,7 +98,7 @@
c = t.st_ctime
if c < cutoff:
- logger.info('Deleting file: {}'.format(xfile))
+ logger.info("Deleting file: {}".format(xfile))
os.remove(xfile)
found_files.append(xfile)
return files
@@ -103,9 +113,9 @@
:param data_to_write:
:return:
"""
- logger.info('Saving to a log file: {}'.format(filename))
- data_to_write = data_to_write.encode('ascii', 'ignore').decode('ascii')
- with open(filename, 'w') as f:
+ logger.info("Saving to a log file: {}".format(filename))
+ data_to_write = data_to_write.encode("ascii", "ignore").decode("ascii")
+ with open(filename, "w") as f:
f.write(data_to_write)
return filename
@@ -113,26 +123,23 @@
def get_patch_to_file(job_name, build_id):
if job_name:
- patch_to_file = LOGS_DIRECTORY + job_name + '/' + str(build_id) + '*'
+ patch_to_file = LOGS_DIRECTORY + job_name + "/" + str(build_id) + "*"
# logger.info(f'Getting patch to the file {patch_to_file}')
return patch_to_file
- return ''
+ return ""
-def generate_filename(logs_directory,
- build_id,
- build_date,
- job_name,
- build_status):
+def generate_filename(
+ logs_directory, build_id, build_date, job_name, build_status
+):
logger.info(
- 'Saving results to file: {}. Build result: {}'.format(
- job_name, build_status))
- filename = '{0}{1}/{2}_{3}_{4}_{5}.txt'.format(logs_directory,
- job_name,
- build_id,
- build_date,
- job_name,
- build_status)
+ "Saving results to file: {}. Build result: {}".format(
+ job_name, build_status
+ )
+ )
+ filename = "{0}{1}/{2}_{3}_{4}_{5}.txt".format(
+ logs_directory, job_name, build_id, build_date, job_name, build_status
+ )
return filename
@@ -160,43 +167,61 @@
:return: None
"""
for job_name in jobs_names:
- logger.info('Creating directories for logs: {} {}'.format(logs_directory, job_name))
+ logger.info(
+ "Creating directories for logs: {} {}".format(
+ logs_directory, job_name
+ )
+ )
create_directory(logs_directory, job_name)
-def save_job_console_to_file(logs_directory, job_name, build_id, build_date,
- build_status, data_to_write):
- filename = generate_filename(logs_directory=logs_directory, job_name=job_name,
- build_id=build_id, build_date=build_date,
- build_status=build_status)
+def save_job_console_to_file(
+ logs_directory, job_name, build_id, build_date, build_status, data_to_write
+):
+ filename = generate_filename(
+ logs_directory=logs_directory,
+ job_name=job_name,
+ build_id=build_id,
+ build_date=build_date,
+ build_status=build_status,
+ )
- create_directory(patch_to_directory=logs_directory, directory_to_create=job_name)
+ create_directory(
+ patch_to_directory=logs_directory, directory_to_create=job_name
+ )
save_to_file(filename=filename, data_to_write=data_to_write)
def generate_patch_to_artifact_file(logs_directory, job_name):
- return '{0}{1}/'.format(logs_directory, job_name)
+ return "{0}{1}/".format(logs_directory, job_name)
-def generate_artifact_file_patch(saved_artifact_file_patch,
- patch_to_artifact_file,
- pipeline_job_name,
- build_id,
- build_date):
- if 'kubeconfig' in saved_artifact_file_patch:
- artifact_extention = 'txt'
- artifact_filename = saved_artifact_file_patch.split('/')[-1]
- filename = f'{str(build_id)}_{build_date}_{pipeline_job_name}_{artifact_filename}.{artifact_extention}'
+def generate_artifact_file_patch(
+ saved_artifact_file_patch,
+ patch_to_artifact_file,
+ pipeline_job_name,
+ build_id,
+ build_date,
+):
+ if "kubeconfig" in saved_artifact_file_patch:
+ artifact_extention = "txt"
+ artifact_filename = saved_artifact_file_patch.split("/")[-1]
+ filename = (
+ f"{str(build_id)}_{build_date}_{pipeline_job_name}"
+ f"_{artifact_filename}.{artifact_extention}"
+ )
full_patch = patch_to_artifact_file + filename
else:
- artifact_extention = saved_artifact_file_patch.split('.')[-1]
- filename = f'{str(build_id)}_{build_date}_{pipeline_job_name}.{artifact_extention}'
+ artifact_extention = saved_artifact_file_patch.split(".")[-1]
+ filename = (
+ f"{str(build_id)}_{build_date}_{pipeline_job_name}"
+ f".{artifact_extention}"
+ )
full_patch = patch_to_artifact_file + filename
- logger.info(f'Full file patch: {full_patch}')
+ logger.info(f"Full file patch: {full_patch}")
return full_patch
def rename_artifact_file(old_artifact_file_patch, new_artifact_file_patch):
os.rename(old_artifact_file_patch, new_artifact_file_patch)
return new_artifact_file_patch
-
diff --git a/tempest_tests_resources/config.py b/tempest_tests_resources/config.py
index 9a3a450..eda02ac 100644
--- a/tempest_tests_resources/config.py
+++ b/tempest_tests_resources/config.py
@@ -1,10 +1,16 @@
import os
-REPORT_NAME = os.environ.get('REPORT_NAME', 'artifacts/tempest.log')
-TEMPEST_REPORT_XML = os.environ.get('TEMPEST_REPORT_XML', 'artifacts/tempest_report.xml')
+REPORT_NAME = os.environ.get("REPORT_NAME", "artifacts/tempest.log")
+TEMPEST_REPORT_XML = os.environ.get(
+ "TEMPEST_REPORT_XML", "artifacts/tempest_report.xml"
+)
# Results machine readable file
-RESOURCES_FILE_NAME = os.environ.get('RESULT_FILE_NAME', 'artifacts/tempest_resources.yaml')
+RESOURCES_FILE_NAME = os.environ.get(
+ "RESULT_FILE_NAME", "artifacts/tempest_resources.yaml"
+)
# Temporary file
-TEMPORARY_FILE_NAME = os.environ.get('TEMPORARY_FILE_NAME', 'artifacts/tempest_temporary')
+TEMPORARY_FILE_NAME = os.environ.get(
+ "TEMPORARY_FILE_NAME", "artifacts/tempest_temporary"
+)
diff --git a/tempest_tests_resources/report_parser.py b/tempest_tests_resources/report_parser.py
index 0e09f39..b4f842d 100644
--- a/tempest_tests_resources/report_parser.py
+++ b/tempest_tests_resources/report_parser.py
@@ -1,14 +1,12 @@
+import json
+import os
import re
import subprocess
import sys
-import json
-import yaml
-import os
-
import xml.etree.ElementTree as ET
import config
-
+import yaml
# config.py
REPORT_NAME = config.REPORT_NAME
@@ -17,7 +15,7 @@
def simplify_logfile(report_name, temporary_filename):
- """ Simplify full tempest log file and write it to temp file
+ """Simplify full tempest log file and write it to temp file
After simplifying temp file looks like:
...
Request...
@@ -30,56 +28,64 @@
:param report_name: full tempest logs file
:param temporary_filename: simplified file
"""
- run_cmd = f"grep -vEa '(auth/token|keystoneauth|connectionpool)' " \
- f"{report_name} | grep -a -A4 -E '( POST| DELETE| PUT)' " \
- f"> {temporary_filename}"
+ run_cmd = (
+ f"grep -vEa '(auth/token|keystoneauth|connectionpool)' "
+ f"{report_name} | grep -a -A4 -E '( POST| DELETE| PUT)' "
+ f"> {temporary_filename}"
+ )
subprocess.check_output(run_cmd, shell=True)
- run_cmd = f"grep -vEa '(auth/token|keystoneauth|connectionpool)' " \
- f"{report_name} | grep -a -A4 -E '(GET)' " \
- f">> {temporary_filename}"
+ run_cmd = (
+ f"grep -vEa '(auth/token|keystoneauth|connectionpool)' "
+ f"{report_name} | grep -a -A4 -E '(GET)' "
+ f">> {temporary_filename}"
+ )
subprocess.check_output(run_cmd, shell=True)
def get_request_response(temporary_filename):
- """ Get request+testname+response
+ """Get request+testname+response
:param temporary_filename: simplified report filename
:return: list with lines that contains request and response
"""
- with open(temporary_filename, 'r') as temp_file:
+ with open(temporary_filename, "r") as temp_file:
request = []
lines = temp_file.readlines()
for line in lines:
- if line.startswith('--'):
+ if line.startswith("--"):
yield request
request = []
else:
request.append(line)
-def _get_test_name(request,
- methods_skip_list=[
- 'tearDownClass',
- 'tearDown',
- '_run_cleanups',
- 'setUp',
- 'setUpClass',
- 'tearDownClass',]):
+def _get_test_name(
+ request,
+ methods_skip_list=[
+ "tearDownClass",
+ "tearDown",
+ "_run_cleanups",
+ "setUp",
+ "setUpClass",
+ "tearDownClass",
+ ],
+):
"""
:param request: request body
:param methods_skip_list: what methods to skip
:return:
"""
# Skip list to process requests from tests only
- # Process requests from Setup/Tear Down Classes if the argument 'class' exist
+ # Process requests from Setup/Tear Down Classes
+ # if the argument 'class' exists
try:
# regex for: (ClassName:test_name)
- test_name = re.search(r'\((\w+:.+\))', request)[0][1:-1]
+ test_name = re.search(r"\((\w+:.+\))", request)[0][1:-1]
# Skip if method name in skip list
- if test_name.split(':')[1] in methods_skip_list:
+ if test_name.split(":")[1] in methods_skip_list:
return
- return test_name.replace(':', '.')
+ return test_name.replace(":", ".")
except TypeError:
pass
# TODO(imenkov): add logging
@@ -87,38 +93,35 @@
def _get_response_body(response):
- """ Method to get response body as dict
+ """Method to get response body as dict
:param response: line with response
:return: dict with body or empty dict if
body is not readable
"""
try:
# regex to search dict in response
- body = re.search(
- r'(\{.[a-zA-Z]+).+(}|])',
- response)[0]
+ body = re.search(r"(\{.[a-zA-Z]+).+(}|])", response)[0]
if body:
- if 'badRequest' not in body:
+ if "badRequest" not in body:
res = json.loads(body)
return res
except Exception:
- return response.split(
- '_log_request_full')[0].strip(' ').replace(
- "Body: b", "").strip("\'")
+ return (
+ response.split("_log_request_full")[0]
+ .strip(" ")
+ .replace("Body: b", "")
+ .strip("'")
+ )
def _get_openstack_service_name(request):
- #TODO IMENKOV FIX ME
- service_name = re.search(
- r'(?<=\:\/\/).+?(?=(\.))',
- request)[0]
+ # TODO IMENKOV FIX ME
+ service_name = re.search(r"(?<=\:\/\/).+?(?=(\.))", request)[0]
return service_name
def _get_status_code_and_method(request):
- status_code = re.search(
- r'(?<=\): ).+?(?=( http))',
- request)[0]
+ status_code = re.search(r"(?<=\): ).+?(?=( http))", request)[0]
return status_code.split()
@@ -128,15 +131,13 @@
:return: request-id like: req-93636f78-031b-41bc-abb5-9533ab7a3df4
"""
try:
- req_id = re.search(
- r"(?<=\[)req-.+?(?= \])",
- request)
+ req_id = re.search(r"(?<=\[)req-.+?(?= \])", request)
if req_id:
return req_id[0]
except TypeError:
# TODO(imenkov) add logging to track not covered requests
# print(f"Request ID not found for request: {request}")
- return 'req_id_not_found'
+ return "req_id_not_found"
def _get_resource_name_from_request_body(request, os_resource_name=None):
@@ -147,11 +148,12 @@
"""
body = _get_response_body(request)
try:
- name = body.get(os_resource_name, {}).get('name',
- 'resource_name_not_defined')
+ name = body.get(os_resource_name, {}).get(
+ "name", "resource_name_not_defined"
+ )
return name
except AttributeError:
- return 'resource_name_not_defined'
+ return "resource_name_not_defined"
def generate_tests_resources():
@@ -174,9 +176,12 @@
for request in get_request_response(TEMPORARY_FILENAME):
# Get test name from request
- # Process requests from Setup/Tear Down Classes if the argument 'class' exist
- if len(sys.argv) == 2 and sys.argv[1] == 'class':
- methods_skip_list = ['_run_cleanups', ]
+ # Process requests from Setup/Tear Down Classes
+ # if the argument 'class' exists
+ if len(sys.argv) == 2 and sys.argv[1] == "class":
+ methods_skip_list = [
+ "_run_cleanups",
+ ]
test_name = _get_test_name(request[0], methods_skip_list)
else:
test_name = _get_test_name(request[0])
@@ -184,66 +189,83 @@
if not test_name:
continue
- # Generate test recourses only for specific test case if the argument 'test case name' exist
- if len(sys.argv) == 2 and sys.argv[1] != 'class' and sys.argv[1] != 'failed' and sys.argv[1] not in test_name:
+ # Generate test resources only for a specific test case
+ # if the argument 'test case name' exist
+ if (
+ len(sys.argv) == 2
+ and sys.argv[1] != "class"
+ and sys.argv[1] != "failed"
+ and sys.argv[1] not in test_name
+ ):
continue
if not result.get(test_name):
- result[test_name] = {"status": None,
- "resources": {}}
+ result[test_name] = {"status": None, "resources": {}}
status_and_method = _get_status_code_and_method(request[0])
status_code = status_and_method[0]
http_method = status_and_method[1]
openstack_service = _get_openstack_service_name(request[0])
- if not result[test_name]['resources'].get(openstack_service):
- result[test_name]['resources'][openstack_service] = {}
-
+ if not result[test_name]["resources"].get(openstack_service):
+ result[test_name]["resources"][openstack_service] = {}
response_body = _get_response_body(request[-1])
if not isinstance(response_body, dict):
request_id = _get_request_id(request[0])
# Check request body
- os_resource_name = _get_resource_name_from_request_body(
- request[2])
+ os_resource_name = _get_resource_name_from_request_body(request[2])
- if not result[test_name]['resources'][
- openstack_service].get(os_resource_name):
- result[test_name]['resources'][openstack_service][os_resource_name] = {}
+ if not result[test_name]["resources"][openstack_service].get(
+ os_resource_name
+ ):
+ result[test_name]["resources"][openstack_service][
+ os_resource_name
+ ] = {}
- result[test_name]['resources'][
- openstack_service][os_resource_name][request_id] = {
- 'http': {'response_body': response_body,
- 'status_code': status_code,
- 'http_method': http_method}}
+ result[test_name]["resources"][openstack_service][
+ os_resource_name
+ ][request_id] = {
+ "http": {
+ "response_body": response_body,
+ "status_code": status_code,
+ "http_method": http_method,
+ }
+ }
continue
for os_resource_name in response_body.keys():
- if not result[test_name]['resources'][openstack_service].get(
- os_resource_name):
- result[test_name]['resources'][openstack_service][
- os_resource_name] = {}
+ if not result[test_name]["resources"][openstack_service].get(
+ os_resource_name
+ ):
+ result[test_name]["resources"][openstack_service][
+ os_resource_name
+ ] = {}
request_id = _get_request_id(request[0])
- if not result[test_name]['resources'][openstack_service][
- os_resource_name].get(request_id):
+ if not result[test_name]["resources"][openstack_service][
+ os_resource_name
+ ].get(request_id):
- result[test_name]['resources'][openstack_service][
- os_resource_name][request_id] = {
- 'http': {'status_code': status_code,
- 'http_method': http_method}}
+ result[test_name]["resources"][openstack_service][
+ os_resource_name
+ ][request_id] = {
+ "http": {
+ "status_code": status_code,
+ "http_method": http_method,
+ }
+ }
- #TODO (IMENKOV) ADD 400/500
+ # TODO (IMENKOV) ADD 400/500
# Check that response is dict
# In some cases response can contain strings as
# instance logs, hash or lists
if isinstance(response_body[os_resource_name], dict):
- resource_id = response_body[os_resource_name].get('id')
- resource_name = response_body[os_resource_name].get('name')
+ resource_id = response_body[os_resource_name].get("id")
+ resource_name = response_body[os_resource_name].get("name")
else:
resource_id = None
resource_name = None
@@ -257,22 +279,28 @@
# ...
if resource_id:
- result[test_name]['resources'][openstack_service][
- os_resource_name][request_id]['id'] = resource_id
+ result[test_name]["resources"][openstack_service][
+ os_resource_name
+ ][request_id]["id"] = resource_id
if not resource_name:
resource_name = _get_resource_name_from_request_body(
- request[2], os_resource_name)
- result[test_name]['resources'][openstack_service][
- os_resource_name][request_id]['name'] = resource_name
+ request[2], os_resource_name
+ )
+ result[test_name]["resources"][openstack_service][
+ os_resource_name
+ ][request_id]["name"] = resource_name
else:
- result[test_name]['resources'][openstack_service][
- os_resource_name][request_id]['name'] = resource_name
+ result[test_name]["resources"][openstack_service][
+ os_resource_name
+ ][request_id]["name"] = resource_name
# Check if resource doesn't contain IDs - cleanup it
- if not result[test_name]['resources'][openstack_service][
- os_resource_name][request_id]:
- del result[test_name]['resources'][openstack_service][
- os_resource_name][request_id]
+ if not result[test_name]["resources"][openstack_service][
+ os_resource_name
+ ][request_id]:
+ del result[test_name]["resources"][openstack_service][
+ os_resource_name
+ ][request_id]
return result
@@ -281,15 +309,15 @@
tree = ET.parse(report_name)
root = tree.getroot()
- if root[0].tag == 'testsuite':
+ if root[0].tag == "testsuite":
root = root[0]
for child in root:
- classname = child.attrib['classname']
- name = child.attrib['name']
+ classname = child.attrib["classname"]
+ name = child.attrib["name"]
if classname or name:
- short_classname = classname.split('.')[-1]
- short_name = name.split('[')[0]
+ short_classname = classname.split(".")[-1]
+ short_name = name.split("[")[0]
short_test_name = f"{short_classname}.{short_name}"
# (imenkov) mb use it as key
@@ -298,12 +326,14 @@
try:
test_status = child[0].tag
except IndexError:
- test_status = 'passed'
+ test_status = "passed"
- # Generate test recourses only for failed test cases if the argument 'failed' exist
- if len(sys.argv) == 2 and sys.argv[1] == 'failed':
- if test_status != 'failure':
- # we try to remove not 'failed' cases from report with 'partial' name of short_test_name
+ # Generate test resources only for failed test cases
+ # if the argument 'failed' exist
+ if len(sys.argv) == 2 and sys.argv[1] == "failed":
+ if test_status != "failure":
+ # try to remove non-'failed' cases from the report using the
+ # 'partial' name of short_test_name
rz = result.pop(short_test_name, None)
if not rz:
for item in list(result.keys()):
@@ -322,20 +352,26 @@
# rbubyr: it happens because in tempest.log these both tests are
# logged as RecordsetValidationTest:test_cannot_create_MX_with
# should be fixed in tempest tests
- if not result[short_test_name].get('full_test_name'):
- result[short_test_name]['full_test_name'] = []
- result[short_test_name]['full_test_name'].append(full_test_name)
- result[short_test_name]['status'] = test_status
+ if not result[short_test_name].get("full_test_name"):
+ result[short_test_name]["full_test_name"] = []
+ result[short_test_name]["full_test_name"].append(full_test_name)
+ result[short_test_name]["status"] = test_status
- # TODO(rbubyr): some test cases are absent in result dic, these tests won't have resources mapping
+ # TODO(rbubyr): some test cases are absent in result dict,
+ # these tests won't have resources mapping
# because they are skipped in TEMPORARY_FILE_NAME
- # for now we just add to final report only 'failure' test cases which are absent in result dic
+ # for now we just add to final report only 'failure'
+ # test cases which are absent in result dic
# it might be possible to add resources from tempest xml
- elif test_status == 'failure' and len(sys.argv) == 2 and sys.argv[1] == 'failed':
+ elif (
+ test_status == "failure"
+ and len(sys.argv) == 2
+ and sys.argv[1] == "failed"
+ ):
result[short_test_name] = {}
- result[short_test_name]['full_test_name'] = []
- result[short_test_name]['full_test_name'].append(full_test_name)
- result[short_test_name]['status'] = test_status
+ result[short_test_name]["full_test_name"] = []
+ result[short_test_name]["full_test_name"].append(full_test_name)
+ result[short_test_name]["status"] = test_status
def delete_temporary_file(path_to_temp_file):
@@ -350,5 +386,5 @@
# delete_temporary_file(TEMPORARY_FILENAME)
# Write results to yaml file
-with open(TEMPEST_TESTS_RESOURCES, 'w') as res_file:
+with open(TEMPEST_TESTS_RESOURCES, "w") as res_file:
yaml.dump(result, res_file)
diff --git a/testrail_bot/control/admin.py b/testrail_bot/control/admin.py
index 89a2511..2270674 100644
--- a/testrail_bot/control/admin.py
+++ b/testrail_bot/control/admin.py
@@ -1,6 +1,6 @@
from django.contrib import admin
-from .models import TestRailTestRun, TestRailReport
+from .models import TestRailReport, TestRailTestRun
admin.site.register(TestRailTestRun)
admin.site.register(TestRailReport)
diff --git a/testrail_bot/control/apps.py b/testrail_bot/control/apps.py
index a5f7085..7d84cd0 100644
--- a/testrail_bot/control/apps.py
+++ b/testrail_bot/control/apps.py
@@ -2,4 +2,4 @@
class ControlConfig(AppConfig):
- name = 'control'
+ name = "control"
diff --git a/testrail_bot/control/celery_tasks/filters.py b/testrail_bot/control/celery_tasks/filters.py
index 4407eed..b0f0852 100644
--- a/testrail_bot/control/celery_tasks/filters.py
+++ b/testrail_bot/control/celery_tasks/filters.py
@@ -2,17 +2,18 @@
def filter_ip(data: str) -> str:
- ip_addr_regex = re.compile(r'\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b')
+ ip_addr_regex = re.compile(r"\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b")
return re.sub(ip_addr_regex, "x.x.x.x", data)
def filter_uuid(data: str) -> str:
uuid4hex = re.compile(
- r'[0-9a-f]{12}4[0-9a-f]{3}[89ab][0-9a-f]{15}\Z', re.I)
+ r"[0-9a-f]{12}4[0-9a-f]{3}[89ab][0-9a-f]{15}\Z", re.I
+ )
return re.sub(uuid4hex, "xxxx", data)
def last_traceback_filter(data: str) -> str:
if data.rfind("Traceback") < 0:
return data
- return data[data.rfind("Traceback"):]
+ return data[data.rfind("Traceback") :]
diff --git a/testrail_bot/control/celery_tasks/jenkins_pipeline.py b/testrail_bot/control/celery_tasks/jenkins_pipeline.py
index e3c9904..1d93535 100644
--- a/testrail_bot/control/celery_tasks/jenkins_pipeline.py
+++ b/testrail_bot/control/celery_tasks/jenkins_pipeline.py
@@ -1,21 +1,18 @@
-from datetime import datetime, timedelta, timezone
import json
import os
+from datetime import datetime, timedelta, timezone
from django.conf import settings
-from matplotlib import pyplot as plt
-from matplotlib import dates as mdates
-
from jenkins import Jenkins
+from matplotlib import dates as mdates
+from matplotlib import pyplot as plt
from .. import models
-
-__all__ = ('update_plot',)
+__all__ = ("update_plot",)
-jenkins_client = Jenkins(
- "https://ci.mcp.mirantis.net/")
+jenkins_client = Jenkins("https://ci.mcp.mirantis.net/")
VIEW_NAME = "MCP2.0 Openstack Periodic CI"
@@ -24,13 +21,22 @@
def get_attr(attr):
return getattr(original_obj, attr, 0)
- return datetime(year=2000, month=1, day=1, hour=get_attr("hour"),
- minute=get_attr("minute"), second=get_attr("second"))
+ return datetime(
+ year=2000,
+ month=1,
+ day=1,
+ hour=get_attr("hour"),
+ minute=get_attr("minute"),
+ second=get_attr("second"),
+ )
def build_start_time(timestamp):
- return build_time_obj(datetime.utcfromtimestamp(
- timestamp / 1000)).replace(tzinfo=timezone.utc).timestamp()
+ return (
+ build_time_obj(datetime.utcfromtimestamp(timestamp / 1000))
+ .replace(tzinfo=timezone.utc)
+ .timestamp()
+ )
def process_build(job_name, build):
@@ -38,8 +44,10 @@
if build_info["result"] != "SUCCESS":
return None
- return build_start_time(build_info["timestamp"]), \
- build_info["duration"] / 1000
+ return (
+ build_start_time(build_info["timestamp"]),
+ build_info["duration"] / 1000,
+ )
def calculate_average(values):
@@ -52,15 +60,18 @@
start_times, durations = zip(*filter(None, builds_info))
- avg_start_time = datetime.utcfromtimestamp(
- calculate_average(start_times))
- return {"duration": calculate_average(durations),
- "start_time": avg_start_time}
+ avg_start_time = datetime.utcfromtimestamp(calculate_average(start_times))
+ return {
+ "duration": calculate_average(durations),
+ "start_time": avg_start_time,
+ }
def get_aggregated_build_stats():
- return {job_name["name"]: process_job(job_name["name"])
- for job_name in jenkins_client.get_jobs(view_name=VIEW_NAME)}
+ return {
+ job_name["name"]: process_job(job_name["name"])
+ for job_name in jenkins_client.get_jobs(view_name=VIEW_NAME)
+ }
def get_lines(current, standard_datetime, next_day):
@@ -70,7 +81,8 @@
if end >= next_day:
return [
(standard_datetime, standard_datetime + (end - next_day)),
- (start_time, next_day - timedelta(seconds=1))]
+ (start_time, next_day - timedelta(seconds=1)),
+ ]
return [(start_time, end)]
@@ -78,8 +90,10 @@
def build_data_for_jobs_time_plot(jobs):
standard_datetime = build_time_obj()
next_day = standard_datetime + timedelta(days=1)
- return {job_name: get_lines(jobs[job_name], standard_datetime, next_day)
- for job_name in jobs}
+ return {
+ job_name: get_lines(jobs[job_name], standard_datetime, next_day)
+ for job_name in jobs
+ }
def draw_plot(plot_data):
@@ -100,7 +114,8 @@
# Set date limits
start_time = build_time_obj()
ax.set_xlim(
- start_time, start_time + timedelta(days=1) + timedelta(seconds=1))
+ start_time, start_time + timedelta(days=1) + timedelta(seconds=1)
+ )
# Set y axes limits
jobs_num = len(plot_data) + 1
@@ -136,7 +151,8 @@
try:
log_record = models.ActionLog.objects.get(
- name="update_jenkins_plot")
+ name="update_jenkins_plot"
+ )
except models.ActionLog.DoesNotExist:
log_record = models.ActionLog(name="update_jenkins_plot")
log_record.date = datetime.now()
diff --git a/testrail_bot/control/celery_tasks/schedules_pipeline.py b/testrail_bot/control/celery_tasks/schedules_pipeline.py
index b121a14..1ad090a 100644
--- a/testrail_bot/control/celery_tasks/schedules_pipeline.py
+++ b/testrail_bot/control/celery_tasks/schedules_pipeline.py
@@ -1,9 +1,9 @@
-from datetime import datetime, timedelta, timezone
import os
+from datetime import datetime
from .. import models
-from .test_rail_api import get_planid_by_name
from . import tasks
+from .test_rail_api import get_planid_by_name
def task_to_check_today_testplan():
@@ -17,8 +17,8 @@
today = datetime.today().strftime("%Y-%m-%d")
plan_name = f"[MCP2.0]OSCORE-{today}"
plan_id = get_planid_by_name(
- name=plan_name,
- project_name="Mirantis Cloud Platform")
+ name=plan_name, project_name="Mirantis Cloud Platform"
+ )
if not plan_id:
print(f"Can't found {plan_name} TestPlan")
return
@@ -36,15 +36,17 @@
pass
report_obj, _ = models.TestRailReport.objects.get_or_create(
- report_name=report_name,
- path=path)
+ report_name=report_name, path=path
+ )
report_obj.finished = False
report_obj.save()
- return tasks.process_run(bot_run_id=testrun_obj.id,
- report_id=report_obj.id,
- path=path,
- is_testplan=True)
+ return tasks.process_run(
+ bot_run_id=testrun_obj.id,
+ report_id=report_obj.id,
+ path=path,
+ is_testplan=True,
+ )
def task_to_check_testplan(testplan_id: int):
@@ -68,12 +70,14 @@
pass
report_obj, _ = models.TestRailReport.objects.get_or_create(
- report_name=report_name,
- path=path)
+ report_name=report_name, path=path
+ )
report_obj.finished = False
report_obj.save()
- return tasks.process_run(bot_run_id=testrun_obj.id,
- report_id=report_obj.id,
- path=path,
- is_testplan=True)
+ return tasks.process_run(
+ bot_run_id=testrun_obj.id,
+ report_id=report_obj.id,
+ path=path,
+ is_testplan=True,
+ )
diff --git a/testrail_bot/control/celery_tasks/tasks.py b/testrail_bot/control/celery_tasks/tasks.py
index dcc9cc1..cd18f6d 100644
--- a/testrail_bot/control/celery_tasks/tasks.py
+++ b/testrail_bot/control/celery_tasks/tasks.py
@@ -1,22 +1,26 @@
from __future__ import absolute_import, unicode_literals
import traceback
+
from celery import shared_task
-from . import jenkins_pipeline, testrail_pipeline, schedules_pipeline
+from . import jenkins_pipeline, schedules_pipeline, testrail_pipeline
@shared_task
def process_run(bot_run_id, report_id, path, is_testplan):
try:
- testrail_pipeline.process_test_run(bot_run_id, report_id, path,
- is_testplan)
+ testrail_pipeline.process_test_run(
+ bot_run_id, report_id, path, is_testplan
+ )
except BaseException as e:
- with open(path, 'a') as f:
+ with open(path, "a") as f:
print(f"Caught next exception: {e}")
traceback.print_exc()
- f.write("<b style='color:red;background-color:pink'>Task "
- "completed unsuccessfully</b>\n")
+ f.write(
+ "<b style='color:red;background-color:pink'>Task "
+ "completed unsuccessfully</b>\n"
+ )
f.flush()
@@ -33,6 +37,7 @@
print(f"Caught next exception: {e}")
traceback.print_exc()
from .. import models
+
r = models.SuitePassRate.objects.get(pk=report_id)
r.status = "Unexpected fail"
r.finished = True
@@ -61,4 +66,3 @@
:return:
"""
schedules_pipeline.task_to_check_testplan(testplan_id)
-
diff --git a/testrail_bot/control/celery_tasks/test_rail_api.py b/testrail_bot/control/celery_tasks/test_rail_api.py
index 142f403..47c4e6a 100644
--- a/testrail_bot/control/celery_tasks/test_rail_api.py
+++ b/testrail_bot/control/celery_tasks/test_rail_api.py
@@ -1,61 +1,71 @@
-from testrail_api import TestRailAPI, StatusCodeError
-from requests.exceptions import ReadTimeout
+from typing import Iterator, List, Optional
+
from django.conf import settings
from django.utils.html import escape
-from typing import Optional, List, Iterator
-
+from requests.exceptions import ReadTimeout
from retry import retry
-from .enums import StatusEnum, TimeEnum
+from testrail_api import StatusCodeError, TestRailAPI
+
from ..utils import cached
+from .enums import StatusEnum, TimeEnum
api = TestRailAPI(
"https://mirantis.testrail.com/",
settings.TESTRAIL_EMAIL,
- settings.TESTRAIL_PASSWORD)
+ settings.TESTRAIL_PASSWORD,
+)
@cached()
def get_project_id(project_name: str) -> Optional[int]:
- project = list(filter(
- lambda x: x["name"] == project_name,
- api.projects.get_projects()['projects']))
+ project = list(
+ filter(
+ lambda x: x["name"] == project_name,
+ api.projects.get_projects()["projects"],
+ )
+ )
if project:
return project[0]["id"]
else:
return None
-@cached(timeout=1*TimeEnum.DAYS)
+@cached(timeout=1 * TimeEnum.DAYS)
def get_suite_by_id(suite_id: int) -> dict:
return api.suites.get_suite(suite_id)
@cached()
def get_suite_name_by_id(suite_id: int) -> str:
- return api.suites.get_suite(suite_id)['name']
+ return api.suites.get_suite(suite_id)["name"]
@cached()
def get_suite_test_type(suite_id: int) -> str:
suite_name = get_suite_name_by_id(suite_id)
- return suite_name.split(']')[1]
+ return suite_name.split("]")[1]
-@cached(timeout=1*TimeEnum.HOURS)
+@cached(timeout=1 * TimeEnum.HOURS)
def get_plans(project_id: int, **kwargs) -> List[dict]:
- plans = api.plans.get_plans(project_id=project_id, **kwargs)['plans']
+ plans = api.plans.get_plans(project_id=project_id, **kwargs)["plans"]
return plans
-@cached(timeout=2*TimeEnum.HOURS,
- condition_for_endless_cache=lambda x: x is not None)
-def get_planid_by_name(name: str, project_name: str, **kwargs) \
- -> Optional[int]:
+@cached(
+ timeout=2 * TimeEnum.HOURS,
+ condition_for_endless_cache=lambda x: x is not None,
+)
+def get_planid_by_name(
+ name: str, project_name: str, **kwargs
+) -> Optional[int]:
limit_step = 100
for offset in range(0, 500, limit_step):
- plans = get_plans(project_id=get_project_id(project_name),
- limit=limit_step,
- offset=offset)
+ plans = get_plans(
+ project_id=get_project_id(project_name),
+ limit=limit_step,
+ offset=offset,
+ )
if not plans:
return
for plan in plans:
@@ -65,7 +75,7 @@
return
-@cached(timeout=1*TimeEnum.HOURS)
+@cached(timeout=1 * TimeEnum.HOURS)
def get_entries(plan_id: int) -> List[dict]:
return api.plans.get_plan(plan_id)["entries"]
@@ -83,25 +93,28 @@
return api.plans.get_plan(plan_id)
-def get_result_history_for_case(case_id: int,
- status_id: int = None,
- project_name: str = "Mirantis Cloud Platform",
- plan_name: str = None,
- created_after: str = None,
- created_before: str = None,
- created_by: int = None,
- testrun_pattern: str = None,
- **kwargs) -> \
- Iterator[List[dict]]:
+def get_result_history_for_case(
+ case_id: int,
+ status_id: int = None,
+ project_name: str = "Mirantis Cloud Platform",
+ plan_name: str = None,
+ created_after: str = None,
+ created_before: str = None,
+ created_by: int = None,
+ testrun_pattern: str = None,
+ **kwargs,
+) -> Iterator[List[dict]]:
limit_step = 100
suite_id = api.cases.get_case(case_id=case_id)["suite_id"]
for offset in range(0, 2000, limit_step):
- plans = get_plans(project_id=get_project_id(project_name),
- limit=limit_step,
- offset=offset,
- created_after=created_after,
- created_before=created_before,
- created_by=created_by)
+ plans = get_plans(
+ project_id=get_project_id(project_name),
+ limit=limit_step,
+ offset=offset,
+ created_after=created_after,
+ created_before=created_before,
+ created_by=created_by,
+ )
if not plans:
return
for plan in plans:
@@ -117,50 +130,48 @@
if type(status_id) is list:
status_id = ",".join(map(lambda x: str(x), status_id))
- results = get_result_for_case(run_id=run["id"],
- case_id=case_id,
- status_id=status_id)
+ results = get_result_for_case(
+ run_id=run["id"], case_id=case_id, status_id=status_id
+ )
if results:
yield results
def get_run_id(entries: List[dict], run_name: str) -> Optional[int]:
- entries = list(filter(
- lambda x: x["name"] == run_name,
- entries))
+ entries = list(filter(lambda x: x["name"] == run_name, entries))
if not entries:
return None
return entries[0]["runs"][0]["id"]
-@cached(timeout=2*TimeEnum.HOURS,
- condition_for_endless_cache=lambda x: x is None)
+@cached(
+ timeout=2 * TimeEnum.HOURS, condition_for_endless_cache=lambda x: x is None
+)
@retry(ReadTimeout, delay=1, jitter=2, tries=3)
-def get_result_for_case(run_id: int,
- case_id: int,
- **kwargs) -> Optional[List[dict]]:
+def get_result_for_case(
+ run_id: int, case_id: int, **kwargs
+) -> Optional[List[dict]]:
try:
- results = api.results.get_results_for_case(run_id, case_id, **kwargs
- )['results']
+ results = api.results.get_results_for_case(run_id, case_id, **kwargs)[
+ "results"
+ ]
except StatusCodeError:
return None
return results
def get_failed_tests(last_run_id: int, by_plans=False) -> List[dict]:
- failed_statuses = [StatusEnum.failed,
- StatusEnum.blocked]
+ failed_statuses = [StatusEnum.failed, StatusEnum.blocked]
status_id = ",".join(map(str, failed_statuses))
if by_plans:
failed_tests = []
for entry in get_entries(last_run_id):
for run in entry["runs"]:
failed_tests += api.tests.get_tests(
- run_id=run["id"],
- status_id=status_id)['tests']
+ run_id=run["id"], status_id=status_id
+ )["tests"]
return failed_tests
- return api.tests.get_tests(
- last_run_id, status_id=status_id)['tests']
+ return api.tests.get_tests(last_run_id, status_id=status_id)["tests"]
def add_result(test_id: int, update_dict: dict) -> None:
@@ -178,5 +189,7 @@
def html_link(type: str, id: int, title: str) -> str:
- return f"<a href='https://mirantis.testrail.com/index.php?/{type}s/view/" \
- f"{id}'>{escape(title)}</a>"
+ return (
+ f"<a href='https://mirantis.testrail.com/index.php?/{type}s/view/"
+ f"{id}'>{escape(title)}</a>"
+ )
diff --git a/testrail_bot/control/celery_tasks/testrail_pipeline.py b/testrail_bot/control/celery_tasks/testrail_pipeline.py
index d0eae11..472046d 100644
--- a/testrail_bot/control/celery_tasks/testrail_pipeline.py
+++ b/testrail_bot/control/celery_tasks/testrail_pipeline.py
@@ -1,16 +1,15 @@
import datetime
import difflib
import json
-from typing import TextIO, List, Tuple, Optional, Iterator, Dict
-
from datetime import datetime as dt
from datetime import timedelta
from itertools import islice
-from . import filters
-from .enums import StatusEnum
-from . import test_rail_api
+from typing import Dict, Iterator, List, Optional, TextIO, Tuple
+
from .. import models
from ..jira_manager import JiraIssue
+from . import filters, test_rail_api
+from .enums import StatusEnum
__all__ = ("process_test_run",)
@@ -25,12 +24,13 @@
report.save()
-def apply_filters(data: str,
- filter_last_traceback: bool,
- ip_filter: bool,
- uuid_filter: bool,
- filter_func: str
- ) -> str:
+def apply_filters(
+ data: str,
+ filter_last_traceback: bool,
+ ip_filter: bool,
+ uuid_filter: bool,
+ filter_func: str,
+) -> str:
"""
Applies various text modifiers (filtering, masking, etc.) to the input
text.
@@ -65,9 +65,9 @@
return data
-def get_runs_by_pattern(runs_in_plan: List[dict],
- test_pattern: str,
- suite_id: int) -> List[int]:
+def get_runs_by_pattern(
+ runs_in_plan: List[dict], test_pattern: str, suite_id: int
+) -> List[int]:
"""
Returns a list of run IDs that are related to a specific Test Suite
and have names containing a pattern (test_pattern)
@@ -82,64 +82,68 @@
"""
run = []
for t_run in runs_in_plan:
- if test_pattern in t_run['name'] and t_run['suite_id'] == suite_id:
- run.append(t_run['runs'][0]['id'])
+ if test_pattern in t_run["name"] and t_run["suite_id"] == suite_id:
+ run.append(t_run["runs"][0]["id"])
return run
def find_fail_with_same_comment(
- case_id: int,
- last_comment: str,
- plan_name: str,
- testrun_pattern: str,
- created_by_id: int,
- created_after: int,
- created_before: int,
- text_filters: dict,
+ case_id: int,
+ last_comment: str,
+ plan_name: str,
+ testrun_pattern: str,
+ created_by_id: int,
+ created_after: int,
+ created_before: int,
+ text_filters: dict,
) -> Iterator[Tuple[Optional[dict], float, int]]:
"""
- Searches for similar failures within a test plan based on specific
- criteria.
+ Searches for similar failures within a test plan based on specific
+ criteria.
- :param case_id: The ID of the test case for which the failure is
- being searched
- :param last_comment: The last comment associated with the failed test
- :param plan_name: The name of the test plan to search within
- :param testrun_pattern: A pattern for filtering test runs
- :param created_by_id: The ID of the user who created the test plan
- :param created_after: The date (created_after) after which the test
- plan was created
- :param created_before: The date (created_before) before which the test
- plan was created
- :param run_name: The name of the test run
- :param text_filters: A dictionary of text filters to apply when
- comparing comments
+ :param case_id: The ID of the test case for which the failure is
+ being searched
+ :param last_comment: The last comment associated with the failed test
+ :param plan_name: The name of the test plan to search within
+ :param testrun_pattern: A pattern for filtering test runs
+ :param created_by_id: The ID of the user who created the test plan
+ :param created_after: The date (created_after) after which the test
+ plan was created
+ :param created_before: The date (created_before) before which the test
+ plan was created
+ :param run_name: The name of the test run
+ :param text_filters: A dictionary of text filters to apply when
+ comparing comments
- :return: An iterator that yields tuples containing information
- about matching test results, including test result data, similarity
- ratio, and the associated run ID.
- """
+ :return: An iterator that yields tuples containing information
+ about matching test results, including test result data, similarity
+ ratio, and the associated run ID.
+ """
end_lookup_date = dt.strptime(
- f"{created_before} 23:59:59", "%Y-%m-%d %H:%M:%S")
+ f"{created_before} 23:59:59", "%Y-%m-%d %H:%M:%S"
+ )
start_lookup_date = dt.strptime(
- f"{created_after} 00:00:00", "%Y-%m-%d %H:%M:%S")
+ f"{created_after} 00:00:00", "%Y-%m-%d %H:%M:%S"
+ )
filters = {
"created_by": created_by_id,
"created_before": int(dt.timestamp(end_lookup_date)),
"created_after": int(dt.timestamp(start_lookup_date)),
"plan_name": plan_name,
- "status_id": [StatusEnum.test_failed,
- StatusEnum.failed,
- StatusEnum.blocked,
- StatusEnum.product_failed,
- StatusEnum.wont_fix,
- StatusEnum.retest],
- "testrun_pattern": testrun_pattern
+ "status_id": [
+ StatusEnum.test_failed,
+ StatusEnum.failed,
+ StatusEnum.blocked,
+ StatusEnum.product_failed,
+ StatusEnum.wont_fix,
+ StatusEnum.retest,
+ ],
+ "testrun_pattern": testrun_pattern,
}
- for n, results in enumerate(test_rail_api.get_result_history_for_case(
- case_id,
- **filters)):
+ for n, results in enumerate(
+ test_rail_api.get_result_history_for_case(case_id, **filters)
+ ):
if n >= 500 or not results:
yield None, None, None
return
@@ -147,17 +151,21 @@
comment = apply_filters(results[-1]["comment"], **text_filters)
ratio = difflib.SequenceMatcher(
lambda symbol: symbol in [" ", ",", "\n"],
- last_comment, comment, autojunk=False).ratio()
+ last_comment,
+ comment,
+ autojunk=False,
+ ).ratio()
if ratio > 0.7:
run_id = test_rail_api.api.tests.get_test(results[0]["test_id"])[
- "run_id"]
+ "run_id"
+ ]
yield results[0], ratio, run_id
-def get_project_id(f: TextIO,
- test_run: models.TestRailTestRun,
- report: models.TestRailReport) -> Optional[int]:
+def get_project_id(
+ f: TextIO, test_run: models.TestRailTestRun, report: models.TestRailReport
+) -> Optional[int]:
"""
Returns the TestRail Project ID associated with a specific test run
@@ -171,17 +179,20 @@
"""
project_id = test_rail_api.get_project_id(test_run.project_name)
if not project_id:
- f.write("Incorrect Project {}. Stopping processing\n".format(
- test_run.project_name))
+ f.write(
+ "Incorrect Project {}. Stopping processing\n".format(
+ test_run.project_name
+ )
+ )
f.flush()
finish_report(report)
return None
return project_id
-def get_plans(test_run: models.TestRailTestRun,
- run_date: dt,
- project_id: int) -> List[dict]:
+def get_plans(
+ test_run: models.TestRailTestRun, run_date: dt, project_id: int
+) -> List[dict]:
"""
Get plans which will be processed
@@ -209,20 +220,20 @@
:return: A string containing the filtered last comment for the specified
test case in the given test run
"""
- last_result = test_rail_api.get_result_for_case(
- run_id, case_id)
+ last_result = test_rail_api.get_result_for_case(run_id, case_id)
- return apply_filters(
- last_result[0]["comment"], **text_filters)
+ return apply_filters(last_result[0]["comment"], **text_filters)
-def process_old_test(f: TextIO,
- case_id: int,
- last_comment: str,
- run_id: int,
- test: dict,
- testrail_filters: dict,
- text_filters: dict) -> bool:
+def process_old_test(
+ f: TextIO,
+ case_id: int,
+ last_comment: str,
+ run_id: int,
+ test: dict,
+ testrail_filters: dict,
+ text_filters: dict,
+) -> bool:
"""
Writes to report file similarity info about the TestCase under the test
@@ -230,109 +241,130 @@
"""
found_unknown_fail = 0
for sim_result, ratio, old_run_id in find_fail_with_same_comment(
- case_id,
- last_comment,
- text_filters=text_filters,
- **testrail_filters):
+ case_id, last_comment, text_filters=text_filters, **testrail_filters
+ ):
if str(run_id) == str(old_run_id):
continue
per = round(100.0 * ratio, 2)
- run_link = test_rail_api.html_link('run', old_run_id, old_run_id)
+ run_link = test_rail_api.html_link("run", old_run_id, old_run_id)
if type(sim_result) is not dict:
- f.write(f"Similarity not found due to similarity: {per}, "
- f"in run {run_link}\n")
+ f.write(
+ f"Similarity not found due to similarity: {per}, "
+ f"in run {run_link}\n"
+ )
f.flush()
return False
- prod_link = "None" \
- if str(sim_result["defects"]) == "None" \
+ prod_link = (
+ "None"
+ if str(sim_result["defects"]) == "None"
else JiraIssue(sim_result["defects"]).html()
- test_link = test_rail_api.html_link('test',
- sim_result["test_id"],
- str(sim_result["test_id"]))
- status_id = int(sim_result['status_id'])
- if status_id in [StatusEnum.retest, StatusEnum.failed,
- StatusEnum.blocked]:
- f.write(f"Found a similar result on the test "
- f"{test_link} with similarity {per}% and "
- f"{StatusEnum(status_id).name} status and {prod_link} "
- f"defect. <i>Continuing...</i>\n")
+ )
+ test_link = test_rail_api.html_link(
+ "test", sim_result["test_id"], str(sim_result["test_id"])
+ )
+ status_id = int(sim_result["status_id"])
+ if status_id in [
+ StatusEnum.retest,
+ StatusEnum.failed,
+ StatusEnum.blocked,
+ ]:
+ f.write(
+ f"Found a similar result on the test "
+ f"{test_link} with similarity {per}% and "
+ f"{StatusEnum(status_id).name} status and {prod_link} "
+ f"defect. <i>Continuing...</i>\n"
+ )
f.flush()
found_unknown_fail += 1
if found_unknown_fail >= 10:
- f.write(f"<b style='color:red;'>"
- f"Detected 10+ consecutive unknown failures\n </b>")
+ f.write(
+ "<b style='color:red;'>"
+ "Detected 10+ consecutive unknown failures\n </b>"
+ )
f.flush()
return False
continue
elif ratio > 0.9:
- comment = f"Marked by TestRailBot because of similarity " \
- f"with test {sim_result['test_id']} {per}%"
+ comment = (
+ f"Marked by TestRailBot because of similarity "
+ f"with test {sim_result['test_id']} {per}%"
+ )
# Copy the original comment if it was not created by this bot
- if str(sim_result["status_id"]) == StatusEnum.wont_fix \
- and sim_result["comment"] \
- and "Marked by TestRailBot" not in sim_result["comment"]:
+ if (
+ str(sim_result["status_id"]) == StatusEnum.wont_fix
+ and sim_result["comment"]
+ and "Marked by TestRailBot" not in sim_result["comment"]
+ ):
comment = sim_result["comment"]
update_dict = {
"status_id": sim_result["status_id"],
"comment": comment,
- "defects": sim_result["defects"]
+ "defects": sim_result["defects"],
}
- f.write(f"Found a similar result on the test "
- f"{test_link} with similarity {per}% and "
- f"{StatusEnum(status_id).name} status and {prod_link} "
- f"defect\n"
- f"<i style='color:ForestGreen;'>Pushing to TestRail "
- f"{update_dict}"
- f"</i>\n\n")
+ f.write(
+ f"Found a similar result on the test "
+ f"{test_link} with similarity {per}% and "
+ f"{StatusEnum(status_id).name} status and {prod_link} "
+ f"defect\n"
+ f"<i style='color:ForestGreen;'>Pushing to TestRail "
+ f"{update_dict}"
+ f"</i>\n\n"
+ )
f.flush()
test_rail_api.add_result(test["id"], update_dict)
return True
elif ratio > 0.7:
- f.write(f"<b style='color:red;'> "
- f"Found a similar result on the test "
- f"{test_link} with similarity {per}% and "
- f"{StatusEnum(status_id).name} status and {prod_link} "
- f"defect,\n but NOT marked by "
- f"TestRailBot because of similarity only, "
- f"you can update manually \n </b>")
+ f.write(
+ f"<b style='color:red;'> "
+ f"Found a similar result on the test "
+ f"{test_link} with similarity {per}% and "
+ f"{StatusEnum(status_id).name} status and {prod_link} "
+ f"defect,\n but NOT marked by "
+ f"TestRailBot because of similarity only, "
+ f"you can update manually \n </b>"
+ )
f.flush()
return True
-def process_test(f: TextIO,
- test: dict,
- testrail_filters: dict,
- text_filters: dict) -> None:
+def process_test(
+ f: TextIO, test: dict, testrail_filters: dict, text_filters: dict
+) -> None:
"""
Starts processing for the TestCase for each TestPlan
"""
case_id = test["case_id"]
run_id = test["run_id"]
run_name = test_rail_api.get_run_name(run_id)
- test_link = test_rail_api.html_link('test', test['id'], test["title"])
- run_link = test_rail_api.html_link('run', run_id, run_name)
+ test_link = test_rail_api.html_link("test", test["id"], test["title"])
+ run_link = test_rail_api.html_link("run", run_id, run_name)
- f.write(f"<br><b>Proceeding test {test_link} <br>"
- f"in {run_link} run</b>\n")
+ f.write(
+ f"<br><b>Proceeding test {test_link} <br>" f"in {run_link} run</b>\n"
+ )
f.flush()
last_comment = get_last_comment(case_id, run_id, text_filters)
found = process_old_test(
- f, case_id, last_comment, run_id, test, testrail_filters, text_filters)
+ f, case_id, last_comment, run_id, test, testrail_filters, text_filters
+ )
if found:
return
else:
- f.write(f"<b style='color:red;'>Automatic test processing failed. "
- "Please process test manually "
- f"{test_link}</b>\n\n")
+ f.write(
+ f"<b style='color:red;'>Automatic test processing failed. "
+ "Please process test manually "
+ f"{test_link}</b>\n\n"
+ )
f.flush()
-def process_test_run(bot_run_id: int, report_id: int, path: str,
- is_testplan: bool) -> None:
+def process_test_run(
+ bot_run_id: int, report_id: int, path: str, is_testplan: bool
+) -> None:
"""
This function processes a created bot test run. It retrieves a list
of test plans to process, gathers the failed tests from the test run,
@@ -354,7 +386,9 @@
else:
test_run = test_rail_api.get_run_by_id(bot_test_run.run_id)
run_type = "run"
- link = test_rail_api.html_link(run_type, test_run['id'], test_run['name'])
+ link = test_rail_api.html_link(
+ run_type, test_run["id"], test_run["name"]
+ )
f.write(f"Start processing {run_type} {link}\n")
f.flush()
@@ -363,15 +397,21 @@
return
# failed_tests: all failed tests in test run/plan
- failed_tests = test_rail_api.get_failed_tests(bot_test_run.run_id,
- by_plans=is_testplan)
+ failed_tests = test_rail_api.get_failed_tests(
+ bot_test_run.run_id, by_plans=is_testplan
+ )
for test in failed_tests:
- if bot_test_run.caching_tests_enabled and \
- test["id"] in bot_test_run.checked_tests:
+ if (
+ bot_test_run.caching_tests_enabled
+ and test["id"] in bot_test_run.checked_tests
+ ):
continue
- process_test(f, test,
- bot_test_run.testrail_filters,
- bot_test_run.text_filters)
+ process_test(
+ f,
+ test,
+ bot_test_run.testrail_filters,
+ bot_test_run.text_filters,
+ )
bot_test_run.checked_tests.append(test["id"])
bot_test_run.save()
f.write("Test processing finished")
@@ -379,13 +419,15 @@
finish_report(report)
-def get_cases(project_id: int, suite_id: int,
- limit: int = 250,
- max_limit: int = 1000,
- filter: str = None,
- created_after: int = None,
- created_before: int = None,
- ) -> Iterator[Dict]:
+def get_cases(
+ project_id: int,
+ suite_id: int,
+ limit: int = 250,
+ max_limit: int = 1000,
+ filter: str = None,
+ created_after: int = None,
+ created_before: int = None,
+) -> Iterator[Dict]:
for offset in range(0, max_limit, limit):
cases = test_rail_api.api.cases.get_cases(
project_id=project_id,
@@ -394,7 +436,8 @@
offset=offset,
created_after=created_after,
created_before=created_before,
- filter=filter).get("cases")
+ filter=filter,
+ ).get("cases")
if not cases:
return
@@ -423,10 +466,12 @@
}
"""
- diff_obj: models.DiffOfSuitesPassRates = \
+ diff_obj: models.DiffOfSuitesPassRates = (
models.DiffOfSuitesPassRates.objects.get(pk=diff_id)
+ )
report: models.SuitePassRate = models.SuitePassRate.objects.get(
- pk=report_id)
+ pk=report_id
+ )
suite_id = report.suite_id
project_id = test_rail_api.get_project_id("Mirantis Cloud Platform")
@@ -442,32 +487,40 @@
}
passrate_by_cases = dict()
- params = dict(project_id=project_id,
- suite_id=suite_id,
- filter=diff_obj.test_keyword,
- limit=200)
+ params = dict(
+ project_id=project_id,
+ suite_id=suite_id,
+ filter=diff_obj.test_keyword,
+ limit=200,
+ )
for _n, case in enumerate(get_cases(**params), start=1):
- case_title = case['title']
+ case_title = case["title"]
case_id = case["id"]
report.status = f"Current case: {_n}"
report.save()
# Limit generator to the list with the length defined in the
# DiffOfSuitesPassRates
- last_case_results = list(islice(
- test_rail_api.get_result_history_for_case(case_id, **_filters),
- diff_obj.limit))
+ last_case_results = list(
+ islice(
+ test_rail_api.get_result_history_for_case(case_id, **_filters),
+ diff_obj.limit,
+ )
+ )
passrate_by_cases[case_title] = dict()
- passrate_by_cases[case_title]['case_id'] = case_id
+ passrate_by_cases[case_title]["case_id"] = case_id
if last_case_results:
- passed_tests = [x for x in last_case_results
- if x[-1]["status_id"] == StatusEnum.passed]
- passrate_by_cases[case_title]['rate'] = \
+ passed_tests = [
+ x
+ for x in last_case_results
+ if x[-1]["status_id"] == StatusEnum.passed
+ ]
+ passrate_by_cases[case_title]["rate"] = (
len(passed_tests) * 100 / diff_obj.limit
+ )
else:
- passrate_by_cases[case_title]['rate'] = "No result found"
- report.passrate_by_tests = json.dumps(passrate_by_cases,
- indent=4)
+ passrate_by_cases[case_title]["rate"] = "No result found"
+ report.passrate_by_tests = json.dumps(passrate_by_cases, indent=4)
report.status = f"Current case: {_n}"
report.save()
report.finished = True
diff --git a/testrail_bot/control/forms.py b/testrail_bot/control/forms.py
index 363035b..8d1ef8c 100644
--- a/testrail_bot/control/forms.py
+++ b/testrail_bot/control/forms.py
@@ -1,18 +1,24 @@
from datetime import date
+
from django import forms
-from .models import TestRailTestRun, \
- DiffOfSuitesPassRates, \
- SuitePassRate, \
- CronPeriodicTask
+
+from .models import (
+ CronPeriodicTask,
+ DiffOfSuitesPassRates,
+ SuitePassRate,
+ TestRailTestRun,
+)
class TestRunForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
- self.fields["created_after"].widget = forms.SelectDateWidget(years=[
- date.today().year + i for i in range(-3, 5)])
- self.fields["created_before"].widget = forms.SelectDateWidget(years=[
- date.today().year + i for i in range(-3, 5)])
+ self.fields["created_after"].widget = forms.SelectDateWidget(
+ years=[date.today().year + i for i in range(-3, 5)]
+ )
+ self.fields["created_before"].widget = forms.SelectDateWidget(
+ years=[date.today().year + i for i in range(-3, 5)]
+ )
class Meta:
model = TestRailTestRun
@@ -28,11 +34,10 @@
"ip_filter": "Mask all IP with x.x.x.x",
"uuid_filter": "Mask all UUID with xxxx",
"filter_last_traceback": "Use only traceback to "
- "compare comments",
+ "compare comments",
"created_after": "Search in period from",
"created_before": "till",
- "caching_tests_enabled": "Don't check already checked results"
-
+ "caching_tests_enabled": "Don't check already checked results",
}
help_texts = {
"filter_func": "Leave blank if not used",
@@ -48,7 +53,7 @@
labels = {
"test_keyword": "Pattern to search by tests",
"limit": "Count of tests to define the passrate. Don't recommend "
- "to use a number greater that 10"
+ "to use a number greater that 10",
}
@@ -56,12 +61,17 @@
class Meta:
model = SuitePassRate
fields = ["suite_id"]
- labels = {
- "suite_id": "Suite ID"
- }
+ labels = {"suite_id": "Suite ID"}
class PeriodicTaskForm(forms.ModelForm):
class Meta:
model = CronPeriodicTask
- fields = ["id", "enabled", "name", "cron", "task_name", "testplan_id_arg"]
+ fields = [
+ "id",
+ "enabled",
+ "name",
+ "cron",
+ "task_name",
+ "testplan_id_arg",
+ ]
diff --git a/testrail_bot/control/jira_manager.py b/testrail_bot/control/jira_manager.py
index ee81efa..6068a84 100644
--- a/testrail_bot/control/jira_manager.py
+++ b/testrail_bot/control/jira_manager.py
@@ -1,15 +1,19 @@
from django.conf import settings
from jira import JIRA, Issue
-jira = JIRA(server=settings.JIRA_SERVER,
- basic_auth=(settings.JIRA_USER, settings.JIRA_PASSWORD))
+jira = JIRA(
+ server=settings.JIRA_SERVER,
+ basic_auth=(settings.JIRA_USER, settings.JIRA_PASSWORD),
+)
class JiraIssue:
def __init__(self, key: str):
self.key = key
self._issue = None
- self._style_prod_area = "background-color:aliceblue;border-color:CornflowerBlue;"
+ self._style_prod_area = (
+ "background-color:aliceblue;border-color:CornflowerBlue;"
+ )
self._style_prod_text = "color:blue"
@property
@@ -23,12 +27,16 @@
return f"{settings.JIRA_SERVER}/browse/{self.issue.key}"
def html(self) -> str:
- if self.issue.fields.status.name in ['Completed', 'Released']:
+ if self.issue.fields.status.name in ["Completed", "Released"]:
self._style_prod_area = "background-color:red;border-color:black;"
self._style_prod_text = "color:white"
- return f"<div style='display:inline-block;border:1px solid;{self._style_prod_area}" \
- f"border-radius:5px;padding:2px;'>" \
- f"<a style={self._style_prod_text} href='{self.get_link()}'>" \
- f"{self.key}: {self.issue.fields.summary[:50]}</a>" \
- f"<b style={self._style_prod_text}> {self.issue.fields.status.name}</b>" \
- f"</div>"
+ return (
+ f"<div style='display:inline-block;border:1px solid;"
+ f"{self._style_prod_area}"
+ f"border-radius:5px;padding:2px;'>"
+ f"<a style={self._style_prod_text} href='{self.get_link()}'>"
+ f"{self.key}: {self.issue.fields.summary[:50]}</a>"
+ f"<b style={self._style_prod_text}> "
+ f"{self.issue.fields.status.name}</b>"
+ f"</div>"
+ )
diff --git a/testrail_bot/control/migrations/0001_initial.py b/testrail_bot/control/migrations/0001_initial.py
index b8d4697..a450f5d 100644
--- a/testrail_bot/control/migrations/0001_initial.py
+++ b/testrail_bot/control/migrations/0001_initial.py
@@ -1,6 +1,7 @@
# Generated by Django 4.2.6 on 2023-11-04 10:46
import datetime
+
import django.core.files.storage
from django.db import migrations, models
@@ -9,69 +10,83 @@
initial = True
- dependencies = [
- ]
+ dependencies = []
operations = [
migrations.CreateModel(
- name='ActionLog',
+ name="ActionLog",
fields=[
- ('id', models.AutoField(
- auto_created=True,
- primary_key=True,
- serialize=False,
- verbose_name='ID')),
- ('name', models.CharField(max_length=500)),
- ('date', models.DateTimeField(null=True)),
+ (
+ "id",
+ models.AutoField(
+ auto_created=True,
+ primary_key=True,
+ serialize=False,
+ verbose_name="ID",
+ ),
+ ),
+ ("name", models.CharField(max_length=500)),
+ ("date", models.DateTimeField(null=True)),
],
),
migrations.CreateModel(
- name='TestRailReport',
+ name="TestRailReport",
fields=[
- ('id', models.AutoField(
- auto_created=True,
- primary_key=True,
- serialize=False,
- verbose_name='ID')),
- ('path', models.FileField(
- blank=True,
- max_length=500,
- null=True,
- storage=django.core.files.storage.FileSystemStorage(),
- upload_to='')),
- ('report_name', models.CharField(max_length=300)),
- ('finished', models.BooleanField(default=False)),
- ('created_at', models.DateTimeField(auto_now_add=True)),
+ (
+ "id",
+ models.AutoField(
+ auto_created=True,
+ primary_key=True,
+ serialize=False,
+ verbose_name="ID",
+ ),
+ ),
+ (
+ "path",
+ models.FileField(
+ blank=True,
+ max_length=500,
+ null=True,
+ storage=django.core.files.storage.FileSystemStorage(),
+ upload_to="",
+ ),
+ ),
+ ("report_name", models.CharField(max_length=300)),
+ ("finished", models.BooleanField(default=False)),
+ ("created_at", models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
- name='TestRailTestRun',
+ name="TestRailTestRun",
fields=[
- ('id', models.AutoField(
- auto_created=True,
- primary_key=True,
- serialize=False,
- verbose_name='ID')),
- ('project_name', models.CharField(
- default='Mirantis Cloud Platform',
- max_length=300)),
- ('plan_name', models.CharField(
- default='[MCP2.0]OSCORE',
- max_length=300)),
- ('run_name', models.CharField(
- blank=True,
- max_length=300)),
- ('test_pattern', models.CharField(
- blank=True,
- max_length=300)),
- ('run_id', models.CharField(max_length=300)),
- ('created_by_id', models.IntegerField(default='109')),
- ('filter_func', models.TextField(blank=True, null=True)),
- ('ip_filter', models.BooleanField(default=True)),
- ('uuid_filter', models.BooleanField(default=True)),
- ('filter_last_traceback', models.BooleanField(default=False)),
- ('timestamp', models.DateField(
- default=datetime.date.today())),
+ (
+ "id",
+ models.AutoField(
+ auto_created=True,
+ primary_key=True,
+ serialize=False,
+ verbose_name="ID",
+ ),
+ ),
+ (
+ "project_name",
+ models.CharField(
+ default="Mirantis Cloud Platform", max_length=300
+ ),
+ ),
+ (
+ "plan_name",
+ models.CharField(default="[MCP2.0]OSCORE", max_length=300),
+ ),
+ ("run_name", models.CharField(blank=True, max_length=300)),
+ ("test_pattern", models.CharField(blank=True, max_length=300)),
+ ("run_id", models.CharField(max_length=300)),
+ ("created_by_id", models.IntegerField(default="109")),
+ ("filter_func", models.TextField(blank=True, null=True)),
+ ("ip_filter", models.BooleanField(default=True)),
+ ("uuid_filter", models.BooleanField(default=True)),
+ ("filter_last_traceback", models.BooleanField(default=False)),
+ ("timestamp", models.DateField(default=datetime.date.today())),
],
),
]
diff --git a/testrail_bot/control/migrations/0002_testrailtestrun_checked_tests.py b/testrail_bot/control/migrations/0002_testrailtestrun_checked_tests.py
index 4e62602..3ad73d2 100644
--- a/testrail_bot/control/migrations/0002_testrailtestrun_checked_tests.py
+++ b/testrail_bot/control/migrations/0002_testrailtestrun_checked_tests.py
@@ -7,13 +7,13 @@
class Migration(migrations.Migration):
dependencies = [
- ('control', '0001_initial'),
+ ("control", "0001_initial"),
]
operations = [
migrations.AddField(
- model_name='testrailtestrun',
- name='checked_tests',
+ model_name="testrailtestrun",
+ name="checked_tests",
field=control.models.IntegerListField(default=[], editable=False),
),
]
diff --git a/testrail_bot/control/migrations/0003_suitepassrate_diffofsuitespassrates.py b/testrail_bot/control/migrations/0003_suitepassrate_diffofsuitespassrates.py
index 6215a62..13be3e1 100644
--- a/testrail_bot/control/migrations/0003_suitepassrate_diffofsuitespassrates.py
+++ b/testrail_bot/control/migrations/0003_suitepassrate_diffofsuitespassrates.py
@@ -1,36 +1,97 @@
# Generated by Django 4.2.7 on 2023-11-20 17:46
-from django.db import migrations, models
import django.db.models.deletion
+from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
- ('control', '0002_testrailtestrun_checked_tests'),
+ ("control", "0002_testrailtestrun_checked_tests"),
]
operations = [
migrations.CreateModel(
- name='SuitePassRate',
+ name="SuitePassRate",
fields=[
- ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
- ('suite_id', models.CharField(choices=[('10651', '[MCP2.0_ROCKY]Tempest'), ('10635', '[MCP2.0_STEIN]Tempest'), ('10653', '[MCP2.0_TRAIN]Tempest'), ('10710', '[MCP2.0_USSURI]Tempest'), ('10888', '[MCP2.0_VICTORIA]Tempest'), ('11167', '[MCP2.0_WALLABY]Tempest'), ('11188', '[MCP2.0_XENA]Tempest'), ('11170', '[MCP2.0_YOGA]Tempest'), ('11192', '[MCP2.0_ANTELOPE]Tempest'), ('11193', '[MCP2.0_ANTELOPE]Stepler'), ('10886', '[MCP2.0_USSURI]Stepler'), ('10887', '[MCP2.0_VICTORIA]Stepler'), ('11171', '[MCP2.0_YOGA]Stepler')], max_length=20)),
- ('suite_name', models.CharField(blank=True, max_length=100)),
- ('passrate_by_tests', models.JSONField(blank=True, default='{}')),
- ('status', models.TextField(blank=True, max_length=300)),
- ('finished', models.BooleanField(blank=True, default=False)),
+ (
+ "id",
+ models.AutoField(
+ auto_created=True,
+ primary_key=True,
+ serialize=False,
+ verbose_name="ID",
+ ),
+ ),
+ (
+ "suite_id",
+ models.CharField(
+ choices=[
+ ("10651", "[MCP2.0_ROCKY]Tempest"),
+ ("10635", "[MCP2.0_STEIN]Tempest"),
+ ("10653", "[MCP2.0_TRAIN]Tempest"),
+ ("10710", "[MCP2.0_USSURI]Tempest"),
+ ("10888", "[MCP2.0_VICTORIA]Tempest"),
+ ("11167", "[MCP2.0_WALLABY]Tempest"),
+ ("11188", "[MCP2.0_XENA]Tempest"),
+ ("11170", "[MCP2.0_YOGA]Tempest"),
+ ("11192", "[MCP2.0_ANTELOPE]Tempest"),
+ ("11193", "[MCP2.0_ANTELOPE]Stepler"),
+ ("10886", "[MCP2.0_USSURI]Stepler"),
+ ("10887", "[MCP2.0_VICTORIA]Stepler"),
+ ("11171", "[MCP2.0_YOGA]Stepler"),
+ ],
+ max_length=20,
+ ),
+ ),
+ ("suite_name", models.CharField(blank=True, max_length=100)),
+ (
+ "passrate_by_tests",
+ models.JSONField(blank=True, default="{}"),
+ ),
+ ("status", models.TextField(blank=True, max_length=300)),
+ ("finished", models.BooleanField(blank=True, default=False)),
],
),
migrations.CreateModel(
- name='DiffOfSuitesPassRates',
+ name="DiffOfSuitesPassRates",
fields=[
- ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
- ('started_at', models.DateTimeField(auto_created=True, auto_now=True)),
- ('limit', models.IntegerField(blank=True, default=10)),
- ('test_keyword', models.CharField(blank=True, default='', max_length=300)),
- ('report1', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='report1', to='control.suitepassrate')),
- ('report2', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='report2', to='control.suitepassrate')),
+ (
+ "id",
+ models.AutoField(
+ auto_created=True,
+ primary_key=True,
+ serialize=False,
+ verbose_name="ID",
+ ),
+ ),
+ (
+ "started_at",
+ models.DateTimeField(auto_created=True, auto_now=True),
+ ),
+ ("limit", models.IntegerField(blank=True, default=10)),
+ (
+ "test_keyword",
+ models.CharField(blank=True, default="", max_length=300),
+ ),
+ (
+ "report1",
+ models.ForeignKey(
+ blank=True,
+ on_delete=django.db.models.deletion.CASCADE,
+ related_name="report1",
+ to="control.suitepassrate",
+ ),
+ ),
+ (
+ "report2",
+ models.ForeignKey(
+ blank=True,
+ on_delete=django.db.models.deletion.CASCADE,
+ related_name="report2",
+ to="control.suitepassrate",
+ ),
+ ),
],
),
]
diff --git a/testrail_bot/control/migrations/0004_rename_timestamp_testrailtestrun_created_before.py b/testrail_bot/control/migrations/0004_rename_timestamp_testrailtestrun_created_before.py
index aee1b1f..f4a345a 100644
--- a/testrail_bot/control/migrations/0004_rename_timestamp_testrailtestrun_created_before.py
+++ b/testrail_bot/control/migrations/0004_rename_timestamp_testrailtestrun_created_before.py
@@ -1,24 +1,25 @@
# Generated by Django 4.2.7 on 2023-11-28 14:07
import datetime
+
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
- ('control', '0003_suitepassrate_diffofsuitespassrates'),
+ ("control", "0003_suitepassrate_diffofsuitespassrates"),
]
operations = [
migrations.RenameField(
- model_name='testrailtestrun',
- old_name='timestamp',
- new_name='created_before',
+ model_name="testrailtestrun",
+ old_name="timestamp",
+ new_name="created_before",
),
migrations.AddField(
- model_name='testrailtestrun',
- name='created_after',
+ model_name="testrailtestrun",
+ name="created_after",
field=models.DateField(default=datetime.date(2023, 8, 30)),
),
]
diff --git a/testrail_bot/control/migrations/0005_alter_suitepassrate_suite_id_and_more.py b/testrail_bot/control/migrations/0005_alter_suitepassrate_suite_id_and_more.py
index 27d098c..6478349 100644
--- a/testrail_bot/control/migrations/0005_alter_suitepassrate_suite_id_and_more.py
+++ b/testrail_bot/control/migrations/0005_alter_suitepassrate_suite_id_and_more.py
@@ -1,19 +1,45 @@
# Generated by Django 4.2.7 on 2023-11-30 13:03
-import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
- ('control', '0004_rename_timestamp_testrailtestrun_created_before'),
+ ("control", "0004_rename_timestamp_testrailtestrun_created_before"),
]
operations = [
migrations.AlterField(
- model_name='suitepassrate',
- name='suite_id',
- field=models.CharField(choices=[('Tempest', (('10651', '[MCP2.0_ROCKY]Tempest'), ('10635', '[MCP2.0_STEIN]Tempest'), ('10653', '[MCP2.0_TRAIN]Tempest'), ('10710', '[MCP2.0_USSURI]Tempest'), ('10888', '[MCP2.0_VICTORIA]Tempest'), ('11167', '[MCP2.0_WALLABY]Tempest'), ('11188', '[MCP2.0_XENA]Tempest'), ('11170', '[MCP2.0_YOGA]Tempest'), ('11192', '[MCP2.0_ANTELOPE]Tempest'))), ('Stepler', (('10886', '[MCP2.0_USSURI]Stepler'), ('10887', '[MCP2.0_VICTORIA]Stepler'), ('11171', '[MCP2.0_YOGA]Stepler'), ('11193', '[MCP2.0_ANTELOPE]Stepler')))], max_length=20),
+ model_name="suitepassrate",
+ name="suite_id",
+ field=models.CharField(
+ choices=[
+ (
+ "Tempest",
+ (
+ ("10651", "[MCP2.0_ROCKY]Tempest"),
+ ("10635", "[MCP2.0_STEIN]Tempest"),
+ ("10653", "[MCP2.0_TRAIN]Tempest"),
+ ("10710", "[MCP2.0_USSURI]Tempest"),
+ ("10888", "[MCP2.0_VICTORIA]Tempest"),
+ ("11167", "[MCP2.0_WALLABY]Tempest"),
+ ("11188", "[MCP2.0_XENA]Tempest"),
+ ("11170", "[MCP2.0_YOGA]Tempest"),
+ ("11192", "[MCP2.0_ANTELOPE]Tempest"),
+ ),
+ ),
+ (
+ "Stepler",
+ (
+ ("10886", "[MCP2.0_USSURI]Stepler"),
+ ("10887", "[MCP2.0_VICTORIA]Stepler"),
+ ("11171", "[MCP2.0_YOGA]Stepler"),
+ ("11193", "[MCP2.0_ANTELOPE]Stepler"),
+ ),
+ ),
+ ],
+ max_length=20,
+ ),
)
]
diff --git a/testrail_bot/control/migrations/0006_alter_testrailtestrun_created_after_and_more.py b/testrail_bot/control/migrations/0006_alter_testrailtestrun_created_after_and_more.py
index 4fcbc75..7e5ec61 100644
--- a/testrail_bot/control/migrations/0006_alter_testrailtestrun_created_after_and_more.py
+++ b/testrail_bot/control/migrations/0006_alter_testrailtestrun_created_after_and_more.py
@@ -1,29 +1,29 @@
# Generated by Django 4.2.7 on 2024-01-10 10:16
-from django.db import migrations, models
import django.utils.timezone
+from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
- ('control', '0005_alter_suitepassrate_suite_id_and_more'),
+ ("control", "0005_alter_suitepassrate_suite_id_and_more"),
]
operations = [
migrations.AlterField(
- model_name='testrailtestrun',
- name='created_after',
+ model_name="testrailtestrun",
+ name="created_after",
field=models.DateField(default=django.utils.timezone.now),
),
migrations.AlterField(
- model_name='testrailtestrun',
- name='created_before',
+ model_name="testrailtestrun",
+ name="created_before",
field=models.DateField(default=django.utils.timezone.now),
),
migrations.AlterField(
- model_name='testrailtestrun',
- name='filter_last_traceback',
+ model_name="testrailtestrun",
+ name="filter_last_traceback",
field=models.BooleanField(default=True),
),
]
diff --git a/testrail_bot/control/migrations/0007_testrailtestrun_caching_tests_enabled.py b/testrail_bot/control/migrations/0007_testrailtestrun_caching_tests_enabled.py
index a33fc75..a902a2b 100644
--- a/testrail_bot/control/migrations/0007_testrailtestrun_caching_tests_enabled.py
+++ b/testrail_bot/control/migrations/0007_testrailtestrun_caching_tests_enabled.py
@@ -6,13 +6,13 @@
class Migration(migrations.Migration):
dependencies = [
- ('control', '0006_alter_testrailtestrun_created_after_and_more'),
+ ("control", "0006_alter_testrailtestrun_created_after_and_more"),
]
operations = [
migrations.AddField(
- model_name='testrailtestrun',
- name='caching_tests_enabled',
+ model_name="testrailtestrun",
+ name="caching_tests_enabled",
field=models.BooleanField(default=False),
),
]
diff --git a/testrail_bot/control/migrations/0008_rename_test_pattern_testrailtestrun_testrun_pattern.py b/testrail_bot/control/migrations/0008_rename_test_pattern_testrailtestrun_testrun_pattern.py
index ec350ca..73a2873 100644
--- a/testrail_bot/control/migrations/0008_rename_test_pattern_testrailtestrun_testrun_pattern.py
+++ b/testrail_bot/control/migrations/0008_rename_test_pattern_testrailtestrun_testrun_pattern.py
@@ -6,13 +6,13 @@
class Migration(migrations.Migration):
dependencies = [
- ('control', '0007_testrailtestrun_caching_tests_enabled'),
+ ("control", "0007_testrailtestrun_caching_tests_enabled"),
]
operations = [
migrations.RenameField(
- model_name='testrailtestrun',
- old_name='test_pattern',
- new_name='testrun_pattern',
+ model_name="testrailtestrun",
+ old_name="test_pattern",
+ new_name="testrun_pattern",
),
]
diff --git a/testrail_bot/control/migrations/0009_cronperiodictask_alter_testrailtestrun_created_after.py b/testrail_bot/control/migrations/0009_cronperiodictask_alter_testrailtestrun_created_after.py
index 443115b..fabe448 100644
--- a/testrail_bot/control/migrations/0009_cronperiodictask_alter_testrailtestrun_created_after.py
+++ b/testrail_bot/control/migrations/0009_cronperiodictask_alter_testrailtestrun_created_after.py
@@ -1,32 +1,47 @@
# Generated by Django 4.2.7 on 2024-02-20 14:18
import control.models
-from django.db import migrations, models
import django.db.models.deletion
+from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
- ('django_celery_beat', '0018_improve_crontab_helptext'),
- ('control', '0008_rename_test_pattern_testrailtestrun_testrun_pattern'),
+ ("django_celery_beat", "0018_improve_crontab_helptext"),
+ (
+ "control",
+ "0008_rename_test_pattern_testrailtestrun_testrun_pattern",
+ ),
]
operations = [
migrations.CreateModel(
- name='CronPeriodicTask',
+ name="CronPeriodicTask",
fields=[
- ('periodictask_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='django_celery_beat.periodictask')),
- ('cron', models.CharField(default='', max_length=300)),
+ (
+ "periodictask_ptr",
+ models.OneToOneField(
+ auto_created=True,
+ on_delete=django.db.models.deletion.CASCADE,
+ parent_link=True,
+ primary_key=True,
+ serialize=False,
+ to="django_celery_beat.periodictask",
+ ),
+ ),
+ ("cron", models.CharField(default="", max_length=300)),
],
options={
- 'ordering': ['id'],
+ "ordering": ["id"],
},
- bases=('django_celery_beat.periodictask',),
+ bases=("django_celery_beat.periodictask",),
),
migrations.AlterField(
- model_name='testrailtestrun',
- name='created_after',
- field=models.DateField(default=control.models.default_created_after),
+ model_name="testrailtestrun",
+ name="created_after",
+ field=models.DateField(
+ default=control.models.default_created_after
+ ),
),
]
diff --git a/testrail_bot/control/migrations/0010_alter_testrailtestrun_options.py b/testrail_bot/control/migrations/0010_alter_testrailtestrun_options.py
index ca00143..1d9afd9 100644
--- a/testrail_bot/control/migrations/0010_alter_testrailtestrun_options.py
+++ b/testrail_bot/control/migrations/0010_alter_testrailtestrun_options.py
@@ -6,12 +6,15 @@
class Migration(migrations.Migration):
dependencies = [
- ('control', '0009_cronperiodictask_alter_testrailtestrun_created_after'),
+ (
+ "control",
+ "0009_cronperiodictask_alter_testrailtestrun_created_after",
+ ),
]
operations = [
migrations.AlterModelOptions(
- name='testrailtestrun',
- options={'ordering': ['-run_id']},
+ name="testrailtestrun",
+ options={"ordering": ["-run_id"]},
),
]
diff --git a/testrail_bot/control/migrations/0011_cronperiodictask_task_name_and_more.py b/testrail_bot/control/migrations/0011_cronperiodictask_task_name_and_more.py
index 2901309..ce5926d 100644
--- a/testrail_bot/control/migrations/0011_cronperiodictask_task_name_and_more.py
+++ b/testrail_bot/control/migrations/0011_cronperiodictask_task_name_and_more.py
@@ -6,18 +6,31 @@
class Migration(migrations.Migration):
dependencies = [
- ('control', '0010_alter_testrailtestrun_options'),
+ ("control", "0010_alter_testrailtestrun_options"),
]
operations = [
migrations.AddField(
- model_name='cronperiodictask',
- name='task_name',
- field=models.CharField(choices=[('control.celery_tasks.tasks.check_today_testplan', 'Check today testplan'), ('control.celery_tasks.tasks.check_specific_testplan', 'Check specific testplan')], default='control.celery_tasks.tasks.check_today_testplan', max_length=300),
+ model_name="cronperiodictask",
+ name="task_name",
+ field=models.CharField(
+ choices=[
+ (
+ "control.celery_tasks.tasks.check_today_testplan",
+ "Check today testplan",
+ ),
+ (
+ "control.celery_tasks.tasks.check_specific_testplan",
+ "Check specific testplan",
+ ),
+ ],
+ default="control.celery_tasks.tasks.check_today_testplan",
+ max_length=300,
+ ),
),
migrations.AddField(
- model_name='cronperiodictask',
- name='testplan_id_arg',
+ model_name="cronperiodictask",
+ name="testplan_id_arg",
field=models.CharField(blank=True, max_length=30, null=True),
),
]
diff --git a/testrail_bot/control/migrations/0012_alter_suitepassrate_suite_id.py b/testrail_bot/control/migrations/0012_alter_suitepassrate_suite_id.py
index 62e0193..4650a53 100644
--- a/testrail_bot/control/migrations/0012_alter_suitepassrate_suite_id.py
+++ b/testrail_bot/control/migrations/0012_alter_suitepassrate_suite_id.py
@@ -6,13 +6,43 @@
class Migration(migrations.Migration):
dependencies = [
- ('control', '0011_cronperiodictask_task_name_and_more'),
+ ("control", "0011_cronperiodictask_task_name_and_more"),
]
operations = [
migrations.AlterField(
- model_name='suitepassrate',
- name='suite_id',
- field=models.CharField(choices=[('Tempest', (('10650', '[MCP2.0_QUEENS]Tempest'), ('10651', '[MCP2.0_ROCKY]Tempest'), ('10635', '[MCP2.0_STEIN]Tempest'), ('10653', '[MCP2.0_TRAIN]Tempest'), ('10710', '[MCP2.0_USSURI]Tempest'), ('10888', '[MCP2.0_VICTORIA]Tempest'), ('11167', '[MCP2.0_WALLABY]Tempest'), ('11188', '[MCP2.0_XENA]Tempest'), ('11170', '[MCP2.0_YOGA]Tempest'), ('11192', '[MCP2.0_ANTELOPE]Tempest'), ('11195', '[MCP2.0_CARACAL]Tempest'))), ('Stepler', (('10886', '[MCP2.0_USSURI]Stepler'), ('10887', '[MCP2.0_VICTORIA]Stepler'), ('11171', '[MCP2.0_YOGA]Stepler'), ('11193', '[MCP2.0_ANTELOPE]Stepler'), ('11196', '[MCP2.0_CARACAL]Stepler')))], max_length=20),
+ model_name="suitepassrate",
+ name="suite_id",
+ field=models.CharField(
+ choices=[
+ (
+ "Tempest",
+ (
+ ("10650", "[MCP2.0_QUEENS]Tempest"),
+ ("10651", "[MCP2.0_ROCKY]Tempest"),
+ ("10635", "[MCP2.0_STEIN]Tempest"),
+ ("10653", "[MCP2.0_TRAIN]Tempest"),
+ ("10710", "[MCP2.0_USSURI]Tempest"),
+ ("10888", "[MCP2.0_VICTORIA]Tempest"),
+ ("11167", "[MCP2.0_WALLABY]Tempest"),
+ ("11188", "[MCP2.0_XENA]Tempest"),
+ ("11170", "[MCP2.0_YOGA]Tempest"),
+ ("11192", "[MCP2.0_ANTELOPE]Tempest"),
+ ("11195", "[MCP2.0_CARACAL]Tempest"),
+ ),
+ ),
+ (
+ "Stepler",
+ (
+ ("10886", "[MCP2.0_USSURI]Stepler"),
+ ("10887", "[MCP2.0_VICTORIA]Stepler"),
+ ("11171", "[MCP2.0_YOGA]Stepler"),
+ ("11193", "[MCP2.0_ANTELOPE]Stepler"),
+ ("11196", "[MCP2.0_CARACAL]Stepler"),
+ ),
+ ),
+ ],
+ max_length=20,
+ ),
),
]
diff --git a/testrail_bot/control/models.py b/testrail_bot/control/models.py
index a8a0910..b438dec 100644
--- a/testrail_bot/control/models.py
+++ b/testrail_bot/control/models.py
@@ -6,28 +6,28 @@
class IntegerListField(models.Field):
def __init__(self, *args, **kwargs):
- kwargs['editable'] = False
+ kwargs["editable"] = False
super(IntegerListField, self).__init__(*args, **kwargs)
def db_type(self, connection):
- return 'text'
+ return "text"
def from_db_value(self, value, expression, connection):
if not value:
return []
- return [int(x) for x in value.split(',')]
+ return [int(x) for x in value.split(",")]
def to_python(self, value):
if isinstance(value, list):
return value
if not value:
return []
- return [int(x) for x in value.split(',')]
+ return [int(x) for x in value.split(",")]
def get_prep_value(self, value):
if not value:
- return ''
- return ','.join(str(int(x)) for x in value)
+ return ""
+ return ",".join(str(int(x)) for x in value)
def default_created_after():
@@ -35,15 +35,16 @@
class TestRailTestRun(models.Model):
- project_name = models.CharField(max_length=300,
- default="Mirantis Cloud Platform")
+ project_name = models.CharField(
+ max_length=300, default="Mirantis Cloud Platform"
+ )
plan_name = models.CharField(max_length=300, default="[MCP2.0]OSCORE")
run_name = models.CharField(max_length=300, blank=True)
testrun_pattern = models.CharField(max_length=300, blank=True)
run_id = models.CharField(max_length=300)
checked_tests = IntegerListField(default=list())
caching_tests_enabled = models.BooleanField(default=False)
- created_by_id = models.IntegerField(default='109')
+ created_by_id = models.IntegerField(default="109")
filter_func = models.TextField(null=True, blank=True)
ip_filter = models.BooleanField(default=True)
uuid_filter = models.BooleanField(default=True)
@@ -91,26 +92,32 @@
class SuitePassRate(models.Model):
SUITE_CHOICES = [
- ("Tempest", (
- ("10650", "[MCP2.0_QUEENS]Tempest"),
- ("10651", "[MCP2.0_ROCKY]Tempest"),
- ("10635", "[MCP2.0_STEIN]Tempest"),
- ("10653", "[MCP2.0_TRAIN]Tempest"),
- ("10710", "[MCP2.0_USSURI]Tempest"),
- ("10888", "[MCP2.0_VICTORIA]Tempest"),
- ("11167", "[MCP2.0_WALLABY]Tempest"),
- ("11188", "[MCP2.0_XENA]Tempest"),
- ("11170", "[MCP2.0_YOGA]Tempest"),
- ("11192", "[MCP2.0_ANTELOPE]Tempest"),
- ("11195", "[MCP2.0_CARACAL]Tempest"))
- ),
- ("Stepler", (
- ("10886", "[MCP2.0_USSURI]Stepler"),
- ("10887", "[MCP2.0_VICTORIA]Stepler"),
- ("11171", "[MCP2.0_YOGA]Stepler"),
- ("11193", "[MCP2.0_ANTELOPE]Stepler"),
- ("11196", "[MCP2.0_CARACAL]Stepler"))
- ),
+ (
+ "Tempest",
+ (
+ ("10650", "[MCP2.0_QUEENS]Tempest"),
+ ("10651", "[MCP2.0_ROCKY]Tempest"),
+ ("10635", "[MCP2.0_STEIN]Tempest"),
+ ("10653", "[MCP2.0_TRAIN]Tempest"),
+ ("10710", "[MCP2.0_USSURI]Tempest"),
+ ("10888", "[MCP2.0_VICTORIA]Tempest"),
+ ("11167", "[MCP2.0_WALLABY]Tempest"),
+ ("11188", "[MCP2.0_XENA]Tempest"),
+ ("11170", "[MCP2.0_YOGA]Tempest"),
+ ("11192", "[MCP2.0_ANTELOPE]Tempest"),
+ ("11195", "[MCP2.0_CARACAL]Tempest"),
+ ),
+ ),
+ (
+ "Stepler",
+ (
+ ("10886", "[MCP2.0_USSURI]Stepler"),
+ ("10887", "[MCP2.0_VICTORIA]Stepler"),
+ ("11171", "[MCP2.0_YOGA]Stepler"),
+ ("11193", "[MCP2.0_ANTELOPE]Stepler"),
+ ("11196", "[MCP2.0_CARACAL]Stepler"),
+ ),
+ ),
]
suite_id = models.CharField(max_length=20, choices=SUITE_CHOICES)
suite_name = models.CharField(max_length=100, blank=True)
@@ -122,38 +129,42 @@
class DiffOfSuitesPassRates(models.Model):
limit = models.IntegerField(default=10, blank=True)
test_keyword = models.CharField(default="", max_length=300, blank=True)
- report1 = models.ForeignKey(to=SuitePassRate,
- related_name="report1",
- on_delete=models.CASCADE,
- blank=True)
- report2 = models.ForeignKey(to=SuitePassRate,
- related_name="report2",
- on_delete=models.CASCADE,
- blank=True)
- started_at = models.DateTimeField(auto_created=True,
- auto_now=True)
+ report1 = models.ForeignKey(
+ to=SuitePassRate,
+ related_name="report1",
+ on_delete=models.CASCADE,
+ blank=True,
+ )
+ report2 = models.ForeignKey(
+ to=SuitePassRate,
+ related_name="report2",
+ on_delete=models.CASCADE,
+ blank=True,
+ )
+ started_at = models.DateTimeField(auto_created=True, auto_now=True)
TASKS = [
- ("control.celery_tasks.tasks.check_today_testplan",
- "Check today testplan",
- []
- ),
- ("control.celery_tasks.tasks.check_specific_testplan",
- "Check specific testplan",
- ["testplan_id_arg"]
- ),
- ]
+ (
+ "control.celery_tasks.tasks.check_today_testplan",
+ "Check today testplan",
+ [],
+ ),
+ (
+ "control.celery_tasks.tasks.check_specific_testplan",
+ "Check specific testplan",
+ ["testplan_id_arg"],
+ ),
+]
-TASK_CHOICES = list(map(lambda x: x[:-1], TASKS))
+TASK_CHOICES = list(map(lambda x: x[:-1], TASKS))
class CronPeriodicTask(PeriodicTask):
- cron = models.CharField(default="",
- max_length=300,
- blank=False)
- task_name = models.CharField(max_length=300, choices=TASK_CHOICES,
- default=TASK_CHOICES[0][0])
+ cron = models.CharField(default="", max_length=300, blank=False)
+ task_name = models.CharField(
+ max_length=300, choices=TASK_CHOICES, default=TASK_CHOICES[0][0]
+ )
testplan_id_arg = models.CharField(max_length=30, blank=True, null=True)
class Meta:
diff --git a/testrail_bot/control/tests.py b/testrail_bot/control/tests.py
index 7ce503c..a39b155 100644
--- a/testrail_bot/control/tests.py
+++ b/testrail_bot/control/tests.py
@@ -1,3 +1 @@
-from django.test import TestCase
-
# Create your tests here.
diff --git a/testrail_bot/control/urls.py b/testrail_bot/control/urls.py
index d385891..d0249f7 100644
--- a/testrail_bot/control/urls.py
+++ b/testrail_bot/control/urls.py
@@ -2,7 +2,6 @@
from . import views
-
urlpatterns = [
path("", views.redirect_to_index, name="redirect"),
path("runs/", views.create_run, name="create_run"),
@@ -10,28 +9,41 @@
path("runs/<int:run_id>/submit/", views.submit_run, name="submit_run"),
path("runs/<int:run_id>/delete/", views.delete_run, name="delete_run"),
path("reports/", views.list_reports, name="list_reports"),
- path("reports/<int:report_id>/", views.single_report, name="single_report"),
- path("reports/<int:report_id>/delete", views.delete_report,
- name="delete_report"),
- path('index/', views.index, name='index'),
+ path(
+ "reports/<int:report_id>/", views.single_report, name="single_report"
+ ),
+ path(
+ "reports/<int:report_id>/delete",
+ views.delete_report,
+ name="delete_report",
+ ),
+ path("index/", views.index, name="index"),
path("help/", views.show_help, name="help"),
- path("update_jenkins_plot",
- views.update_jenkins_plot,
- name="update_jenkins"),
+ path(
+ "update_jenkins_plot", views.update_jenkins_plot, name="update_jenkins"
+ ),
path("jenkins_plot", views.jenkins_plot, name="jenkins_plot"),
path("schedulers/", views.schedulers, name="schedulers"),
-
path("scheduler/new/", views.scheduler, name="create_scheduler"),
path("scheduler/<int:pk>/", views.scheduler, name="scheduler"),
- path("scheduler/<int:pk>/save", views.save_scheduler, name="save_scheduler"),
- path("scheduler/<int:pk>/delete/", views.delete_scheduler,
- name="delete_scheduler"),
-
+ path(
+ "scheduler/<int:pk>/save", views.save_scheduler, name="save_scheduler"
+ ),
+ path(
+ "scheduler/<int:pk>/delete/",
+ views.delete_scheduler,
+ name="delete_scheduler",
+ ),
path("compare_suites/new/", views.compare_suites, name="compare_suites"),
- path("compare_suites/",
- views.list_of_comparing_reports,
- name="list_of_comparing_reports"),
+ path(
+ "compare_suites/",
+ views.list_of_comparing_reports,
+ name="list_of_comparing_reports",
+ ),
path("compare_suites/submit/", views.submit_suites, name="submit_suites"),
- path("compare_suites/<int:report_id>/", views.report_comparing_suites,
- name="report_comparing_suites"),
+ path(
+ "compare_suites/<int:report_id>/",
+ views.report_comparing_suites,
+ name="report_comparing_suites",
+ ),
]
diff --git a/testrail_bot/control/utils.py b/testrail_bot/control/utils.py
index 2038aba..6c41f58 100644
--- a/testrail_bot/control/utils.py
+++ b/testrail_bot/control/utils.py
@@ -1,6 +1,7 @@
-from parse import parse
-from typing import Dict, List, Callable, Any
+from typing import Any, Callable, Dict, List
+
from django.core.cache import cache
+from parse import parse
def parse_title(test_name):
@@ -22,7 +23,7 @@
return f"{r['test_title']}[{r['id']}]"
except TypeError:
# return file_name.test_class.test_name in other complicated cases
- return '.'.join(test_name.split(".")[:3])
+ return ".".join(test_name.split(".")[:3])
def short_names_for_dict(_dict):
@@ -36,16 +37,17 @@
return __dict
-def get_dict_diff(dict1: dict,
- dict2: dict,
- compare_by_key=None) -> Dict[str, List]:
+def get_dict_diff(
+ dict1: dict, dict2: dict, compare_by_key=None
+) -> Dict[str, List]:
all_keys = sorted(set(list(dict1.keys()) + list(dict2.keys())))
result = dict()
for k in all_keys:
if compare_by_key:
if dict1.get(k, {}).get(compare_by_key) == dict2.get(k, {}).get(
- compare_by_key):
+ compare_by_key
+ ):
continue
else:
if dict1.get(k) == dict2.get(k):
@@ -61,9 +63,10 @@
return r
-def cached(timeout: int = None,
- condition_for_endless_cache: Callable = lambda x: False
- ) -> Callable:
+def cached(
+ timeout: int = None,
+ condition_for_endless_cache: Callable = lambda x: False,
+) -> Callable:
"""
:param timeout: (in seconds) usage accordingly
https://docs.djangoproject.com/en/4.2/topics/cache/#basic-usage
@@ -74,17 +77,18 @@
:return: decorator
"""
+
def decorator(func: Callable) -> Callable:
def wrapper(*args, **kwargs) -> Any:
- cache_key = f'{func.__name__}_{args}_{kwargs}'
- cache_key = replace_all(cache_key, "{}()\'\" .,:", "_")
+ cache_key = f"{func.__name__}_{args}_{kwargs}"
+ cache_key = replace_all(cache_key, "{}()'\" .,:", "_")
cached_value = cache.get(cache_key)
if cached_value is None:
print(f"{func.__name__} MISS")
result = func(*args, **kwargs)
- _timeout = None \
- if condition_for_endless_cache(result) \
- else timeout
+ _timeout = (
+ None if condition_for_endless_cache(result) else timeout
+ )
cache.set(cache_key, result, timeout=_timeout)
return result
@@ -99,7 +103,9 @@
# # ENDFIXME
return cached_value
+
return wrapper
+
return decorator
diff --git a/testrail_bot/control/views.py b/testrail_bot/control/views.py
index e6655ad..8a362ef 100644
--- a/testrail_bot/control/views.py
+++ b/testrail_bot/control/views.py
@@ -1,16 +1,18 @@
import datetime
import json
import os
-from .celery_tasks import test_rail_api
-from django.shortcuts import render, redirect, HttpResponse
+from django.shortcuts import HttpResponse, redirect, render
from django_celery_beat.models import CrontabSchedule, PeriodicTasks
-from . import models
-from . import forms
-from .celery_tasks.tasks import process_run, update_plot_data, \
- get_test_passability_in_suite
-from .utils import short_names_for_dict, get_dict_diff
+from . import forms, models
+from .celery_tasks import test_rail_api
+from .celery_tasks.tasks import (
+ get_test_passability_in_suite,
+ process_run,
+ update_plot_data,
+)
+from .utils import get_dict_diff, short_names_for_dict
def index(request):
@@ -32,9 +34,11 @@
else:
form = forms.TestRunForm(instance=run)
- return render(request, "control/update_run.html",
- {"form": form, "run_id": run_id, "checked_tests":
- run.checked_tests})
+ return render(
+ request,
+ "control/update_run.html",
+ {"form": form, "run_id": run_id, "checked_tests": run.checked_tests},
+ )
def create_run(request):
@@ -46,8 +50,9 @@
else:
form = forms.TestRunForm()
- form.fields["created_after"].initial = datetime.date.today() + \
- datetime.timedelta(days=-3 * 30)
+ form.fields["created_after"].initial = (
+ datetime.date.today() + datetime.timedelta(days=-3 * 30)
+ )
form.fields["created_before"].initial = datetime.date.today()
return render(request, "control/update_run.html", {"form": form})
@@ -60,20 +65,26 @@
def single_report(request, report_id):
report = models.TestRailReport.objects.get(pk=report_id)
data = report.path.read().decode("utf-8")
- if request.method == "POST" \
- and request.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest':
+ if (
+ request.method == "POST"
+ and request.META.get("HTTP_X_REQUESTED_WITH") == "XMLHttpRequest"
+ ):
return HttpResponse(
json.dumps({"data": data, "finished": report.finished}),
- content_type="application/json")
+ content_type="application/json",
+ )
- return render(request, "control/report.html",
- {"report_id": report.id,
- "report": data,
- "finished": report.finished})
+ return render(
+ request,
+ "control/report.html",
+ {"report_id": report.id, "report": data, "finished": report.finished},
+ )
def delete_report(request, report_id):
- report:models.TestRailReport = models.TestRailReport.objects.get(pk=report_id)
+ report: models.TestRailReport = models.TestRailReport.objects.get(
+ pk=report_id
+ )
try:
os.remove(report.path.path)
except FileNotFoundError:
@@ -94,8 +105,9 @@
_name = f"Plan {testrail_run['name']}"
else:
parent_plan_id = testrail_run["plan_id"]
- parent_plan_name = \
- test_rail_api.get_plan_by_id(parent_plan_id)["name"]
+ parent_plan_name = test_rail_api.get_plan_by_id(parent_plan_id)[
+ "name"
+ ]
_name = f"Run {testrail_run['name']} from {parent_plan_name}"
run.run_name = _name
run.save()
@@ -103,14 +115,13 @@
report_name = "{}-run_id-{}-date-{}".format(
run.run_name,
run.run_id,
- datetime.datetime.isoformat(datetime.datetime.now()))
+ datetime.datetime.isoformat(datetime.datetime.now()),
+ )
path = os.path.join(models.fs.location, report_name)
with open(path, "w"):
pass
- report = models.TestRailReport(
- report_name=report_name,
- path=path)
+ report = models.TestRailReport(report_name=report_name, path=path)
report.save()
process_run.delay(run_id, report.id, path, is_testplan)
return redirect("single_report", report.id)
@@ -133,7 +144,8 @@
except models.ActionLog.DoesNotExist:
pass
update = models.ActionLog(
- name="update_plot_started", date=datetime.datetime.now())
+ name="update_plot_started", date=datetime.datetime.now()
+ )
update.save()
update_plot_data.delay()
return HttpResponse("Started Update", status=200)
@@ -142,7 +154,8 @@
def jenkins_plot(request):
try:
update_date = models.ActionLog.objects.get(
- name="update_jenkins_plot").date
+ name="update_jenkins_plot"
+ ).date
except models.ActionLog.DoesNotExist:
update_date = None
try:
@@ -157,13 +170,18 @@
try:
with open(job_names_path, "r") as f:
job_names = json.load(f)
- except:
+ except Exception:
pass
return render(
- request, "control/jenkins_plot.html",
- {"update_date": update_date, "update_started": update_started,
- "job_names": enumerate(job_names, 1)})
+ request,
+ "control/jenkins_plot.html",
+ {
+ "update_date": update_date,
+ "update_started": update_started,
+ "job_names": enumerate(job_names, 1),
+ },
+ )
def submit_suites(request):
@@ -180,7 +198,7 @@
report1=report1,
report2=report2,
limit=form.cleaned_data["limit"],
- test_keyword=form.cleaned_data["test_keyword"]
+ test_keyword=form.cleaned_data["test_keyword"],
)
diff_model.save()
get_test_passability_in_suite.delay(diff_model.id, report1.id)
@@ -194,60 +212,72 @@
return submit_suites(request)
diff_form = forms.DiffPassRatesForm()
- report1_form = forms.SuitePassRateForm(prefix='first')
- report2_form = forms.SuitePassRateForm(prefix='second')
+ report1_form = forms.SuitePassRateForm(prefix="first")
+ report2_form = forms.SuitePassRateForm(prefix="second")
return render(
- request, "control/compare_suites.html",
+ request,
+ "control/compare_suites.html",
{
- "diff_form": diff_form,
- "report1_form": report1_form,
- "report2_form": report2_form,
- "finished": None
- })
+ "diff_form": diff_form,
+ "report1_form": report1_form,
+ "report2_form": report2_form,
+ "finished": None,
+ },
+ )
def list_of_comparing_reports(request):
list_of_reports = models.DiffOfSuitesPassRates.objects.all()
return render(
- request, "control/list_comparing_suites.html",
- {
- "reports": list_of_reports
- })
+ request,
+ "control/list_comparing_suites.html",
+ {"reports": list_of_reports},
+ )
def report_comparing_suites(request, report_id):
report = models.DiffOfSuitesPassRates.objects.get(pk=report_id)
- passrate1 = short_names_for_dict(json.loads(
- report.report1.passrate_by_tests))
- passrate2 = short_names_for_dict(json.loads(
- report.report2.passrate_by_tests))
+ passrate1 = short_names_for_dict(
+ json.loads(report.report1.passrate_by_tests)
+ )
+ passrate2 = short_names_for_dict(
+ json.loads(report.report2.passrate_by_tests)
+ )
- diff_table = get_dict_diff(dict1=passrate1,
- dict2=passrate2,
- compare_by_key="rate")
+ diff_table = get_dict_diff(
+ dict1=passrate1, dict2=passrate2, compare_by_key="rate"
+ )
diff_form = forms.DiffPassRatesForm(instance=report)
- report1_form = forms.SuitePassRateForm(instance=report.report1,
- prefix="first")
- report2_form = forms.SuitePassRateForm(instance=report.report2,
- prefix="second")
+ report1_form = forms.SuitePassRateForm(
+ instance=report.report1, prefix="first"
+ )
+ report2_form = forms.SuitePassRateForm(
+ instance=report.report2, prefix="second"
+ )
return render(
- request, "control/compare_suites.html",
- {"diff_form": diff_form,
- "report1_form": report1_form,
- "report2_form": report2_form,
- "report1": report.report1,
- "report2": report.report2,
- "is_finished": report.report1.finished and report.report2.finished,
- "diff_table": diff_table})
+ request,
+ "control/compare_suites.html",
+ {
+ "diff_form": diff_form,
+ "report1_form": report1_form,
+ "report2_form": report2_form,
+ "report1": report.report1,
+ "report2": report.report2,
+ "is_finished": report.report1.finished and report.report2.finished,
+ "diff_table": diff_table,
+ },
+ )
def schedulers(request):
return render(
- request, "control/schedulers.html",
- {"schedulers": models.CronPeriodicTask.objects.all()})
+ request,
+ "control/schedulers.html",
+ {"schedulers": models.CronPeriodicTask.objects.all()},
+ )
def scheduler(request, pk=None):
@@ -261,33 +291,31 @@
form = forms.PeriodicTaskForm()
return render(
- request, "control/scheduler.html",
- {
- "form": form,
- "pk": pk,
- "TASKS": models.TASKS
- }
+ request,
+ "control/scheduler.html",
+ {"form": form, "pk": pk, "TASKS": models.TASKS},
)
def save_scheduler(request, pk=None):
print(f"{request.POST=}")
- minute, hour, day_of_month, month_of_year, day_of_week = \
- request.POST.get("cron", "* * * * *").split(" ")
+ minute, hour, day_of_month, month_of_year, day_of_week = request.POST.get(
+ "cron", "* * * * *"
+ ).split(" ")
if pk is None:
sch = CrontabSchedule.objects.create(
minute=minute,
hour=hour,
day_of_month=day_of_month,
month_of_year=month_of_year,
- day_of_week=day_of_week
+ day_of_week=day_of_week,
)
task = models.CronPeriodicTask.objects.create(
crontab=sch,
cron=request.POST.get("cron"),
name=request.POST.get("name"),
task_name=request.POST.get("task_name"),
- enabled=request.POST.get("enabled") == 'on',
+ enabled=request.POST.get("enabled") == "on",
testplan_id_arg=request.POST.get("testplan_id_arg"),
)
else:
@@ -303,7 +331,7 @@
hour=hour,
day_of_month=day_of_month,
month_of_year=month_of_year,
- day_of_week=day_of_week
+ day_of_week=day_of_week,
)
if not form.is_valid():
print(f"{form.errors=}")
@@ -311,12 +339,9 @@
form.save()
PeriodicTasks.update_changed()
return render(
- request, "control/scheduler.html",
- {
- "form": form,
- "pk": task.id,
- "TASKS": models.TASKS
- }
+ request,
+ "control/scheduler.html",
+ {"form": form, "pk": task.id, "TASKS": models.TASKS},
)
diff --git a/testrail_bot/manage.py b/testrail_bot/manage.py
index ff64df6..14ab9a8 100755
--- a/testrail_bot/manage.py
+++ b/testrail_bot/manage.py
@@ -5,7 +5,7 @@
def main():
- os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'testrail_bot.settings')
+ os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testrail_bot.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
@@ -17,5 +17,5 @@
execute_from_command_line(sys.argv)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/testrail_bot/testrail_bot/__init__.py b/testrail_bot/testrail_bot/__init__.py
index 070e835..0165ba0 100644
--- a/testrail_bot/testrail_bot/__init__.py
+++ b/testrail_bot/testrail_bot/__init__.py
@@ -4,4 +4,4 @@
# Django starts so that shared_task will use this app.
from .celery import app as celery_app
-__all__ = ('celery_app',)
+__all__ = ("celery_app",)
diff --git a/testrail_bot/testrail_bot/asgi.py b/testrail_bot/testrail_bot/asgi.py
index be76c4c..9d066a5 100644
--- a/testrail_bot/testrail_bot/asgi.py
+++ b/testrail_bot/testrail_bot/asgi.py
@@ -11,6 +11,6 @@
from django.core.asgi import get_asgi_application
-os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'testrail_bot.settings')
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testrail_bot.settings")
application = get_asgi_application()
diff --git a/testrail_bot/testrail_bot/celery.py b/testrail_bot/testrail_bot/celery.py
index 9e4cd7b..310932d 100644
--- a/testrail_bot/testrail_bot/celery.py
+++ b/testrail_bot/testrail_bot/celery.py
@@ -4,15 +4,15 @@
from celery import Celery
-os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'testrail_bot.settings')
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testrail_bot.settings")
-app = Celery('testrail_bot')
+app = Celery("testrail_bot")
-app.config_from_object('django.conf:settings', namespace='CELERY')
+app.config_from_object("django.conf:settings", namespace="CELERY")
app.autodiscover_tasks()
@app.task(bind=True)
def debug_task(self):
- print('Request: {0!r}'.format(self.request))
+ print("Request: {0!r}".format(self.request))
diff --git a/testrail_bot/testrail_bot/settings.py b/testrail_bot/testrail_bot/settings.py
index 1b7f1ef..4436916 100644
--- a/testrail_bot/testrail_bot/settings.py
+++ b/testrail_bot/testrail_bot/settings.py
@@ -31,49 +31,47 @@
# Application definition
INSTALLED_APPS = [
- 'control.apps.ControlConfig',
- 'bootstrap5',
- 'django.contrib.admin',
- 'django.contrib.auth',
- 'django.contrib.contenttypes',
- 'django.contrib.sessions',
- 'django.contrib.messages',
- 'django.contrib.staticfiles',
- 'django_celery_beat',
+ "control.apps.ControlConfig",
+ "bootstrap5",
+ "django.contrib.admin",
+ "django.contrib.auth",
+ "django.contrib.contenttypes",
+ "django.contrib.sessions",
+ "django.contrib.messages",
+ "django.contrib.staticfiles",
+ "django_celery_beat",
]
MIDDLEWARE = [
- 'django.middleware.security.SecurityMiddleware',
- 'django.contrib.sessions.middleware.SessionMiddleware',
- 'django.middleware.common.CommonMiddleware',
- 'django.middleware.csrf.CsrfViewMiddleware',
- 'django.contrib.auth.middleware.AuthenticationMiddleware',
- 'django.contrib.messages.middleware.MessageMiddleware',
- 'django.middleware.clickjacking.XFrameOptionsMiddleware',
+ "django.middleware.security.SecurityMiddleware",
+ "django.contrib.sessions.middleware.SessionMiddleware",
+ "django.middleware.common.CommonMiddleware",
+ "django.middleware.csrf.CsrfViewMiddleware",
+ "django.contrib.auth.middleware.AuthenticationMiddleware",
+ "django.contrib.messages.middleware.MessageMiddleware",
+ "django.middleware.clickjacking.XFrameOptionsMiddleware",
]
-ROOT_URLCONF = 'testrail_bot.urls'
+ROOT_URLCONF = "testrail_bot.urls"
TEMPLATES = [
{
- 'BACKEND': 'django.template.backends.django.DjangoTemplates',
- 'DIRS': [],
- 'APP_DIRS': True,
- 'OPTIONS': {
- 'context_processors': [
- 'django.template.context_processors.debug',
- 'django.template.context_processors.request',
- 'django.contrib.auth.context_processors.auth',
- 'django.contrib.messages.context_processors.messages',
+ "BACKEND": "django.template.backends.django.DjangoTemplates",
+ "DIRS": [],
+ "APP_DIRS": True,
+ "OPTIONS": {
+ "context_processors": [
+ "django.template.context_processors.debug",
+ "django.template.context_processors.request",
+ "django.contrib.auth.context_processors.auth",
+ "django.contrib.messages.context_processors.messages",
],
- 'libraries': {
- 'custom_tags': 'control.template_tags.custom_tags'
- }
+ "libraries": {"custom_tags": "control.template_tags.custom_tags"},
},
},
]
-WSGI_APPLICATION = 'testrail_bot.wsgi.application'
+WSGI_APPLICATION = "testrail_bot.wsgi.application"
# Database
@@ -82,7 +80,9 @@
DATABASES = {
"default": {
"ENGINE": os.environ.get("SQL_ENGINE", "django.db.backends.sqlite3"),
- "NAME": os.environ.get("SQL_DATABASE", os.path.join(BASE_DIR, "db.sqlite3")),
+ "NAME": os.environ.get(
+ "SQL_DATABASE", os.path.join(BASE_DIR, "db.sqlite3")
+ ),
"USER": os.environ.get("SQL_USER", "user"),
"PASSWORD": os.environ.get("SQL_PASSWORD", "password"),
"HOST": os.environ.get("SQL_HOST", "localhost"),
@@ -90,7 +90,7 @@
}
}
-DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
+DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
CACHES = {
"default": {
@@ -104,16 +104,20 @@
AUTH_PASSWORD_VALIDATORS = [
{
- 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
+ "NAME": "django.contrib.auth.password_validation"
+ ".UserAttributeSimilarityValidator",
},
{
- 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
+ "NAME": "django.contrib.auth.password_validation"
+ ".MinimumLengthValidator",
},
{
- 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
+ "NAME": "django.contrib.auth.password_validation"
+ ".CommonPasswordValidator",
},
{
- 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
+ "NAME": "django.contrib.auth.password_validation"
+ ".NumericPasswordValidator",
},
]
@@ -121,9 +125,9 @@
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
-LANGUAGE_CODE = 'en-us'
+LANGUAGE_CODE = "en-us"
-TIME_ZONE = 'UTC'
+TIME_ZONE = "UTC"
USE_I18N = True
@@ -135,8 +139,8 @@
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
-STATIC_URL = '/staticfiles/'
-STATIC_ROOT = '/staticfiles'
+STATIC_URL = "/staticfiles/"
+STATIC_ROOT = "/staticfiles"
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "media/images"),
@@ -148,11 +152,11 @@
# Celery configs
-CELERY_BROKER_URL = 'redis://redis:6379'
-CELERY_RESULT_BACKEND = 'redis://redis:6379'
-CELERY_ACCEPT_CONTENT = ['application/json']
-CELERY_TASK_SERIALIZER = 'json'
-CELERY_RESULT_SERIALIZER = 'json'
+CELERY_BROKER_URL = "redis://redis:6379"
+CELERY_RESULT_BACKEND = "redis://redis:6379"
+CELERY_ACCEPT_CONTENT = ["application/json"]
+CELERY_TASK_SERIALIZER = "json"
+CELERY_RESULT_SERIALIZER = "json"
# TestRail configs
TESTRAIL_EMAIL = os.environ.get("TESTRAIL_EMAIL", default="123")
@@ -161,5 +165,6 @@
# Jira settings
JIRA_USER = os.environ.get("JIRA_USER")
JIRA_PASSWORD = os.environ.get("JIRA_PASSWORD")
-JIRA_SERVER = os.environ.get("JIRA_SERVER",
- default="https://mirantis.jira.com")
+JIRA_SERVER = os.environ.get(
+ "JIRA_SERVER", default="https://mirantis.jira.com"
+)
diff --git a/testrail_bot/testrail_bot/urls.py b/testrail_bot/testrail_bot/urls.py
index d8b6e77..ecb7151 100644
--- a/testrail_bot/testrail_bot/urls.py
+++ b/testrail_bot/testrail_bot/urls.py
@@ -13,11 +13,11 @@
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
+
from django.contrib import admin
from django.urls import include, path
-
urlpatterns = [
path("", include("control.urls")),
- path('admin/', admin.site.urls),
+ path("admin/", admin.site.urls),
]
diff --git a/testrail_bot/testrail_bot/wsgi.py b/testrail_bot/testrail_bot/wsgi.py
index 5e7bafa..a8dde5e 100644
--- a/testrail_bot/testrail_bot/wsgi.py
+++ b/testrail_bot/testrail_bot/wsgi.py
@@ -11,6 +11,6 @@
from django.core.wsgi import get_wsgi_application
-os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'testrail_bot.settings')
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testrail_bot.settings")
application = get_wsgi_application()
diff --git a/testrail_upload_suites/base.py b/testrail_upload_suites/base.py
index 76b6936..ae71166 100644
--- a/testrail_upload_suites/base.py
+++ b/testrail_upload_suites/base.py
@@ -1,5 +1,5 @@
-from testrail import *
import config
+from testrail import APIClient
class Base:
@@ -10,40 +10,40 @@
self.project = self._get_project(config.PROJECT)
def _get_project(self, project_name):
- projects_uri = 'get_projects'
- projects = self.client.send_get(uri=projects_uri)['projects']
+ projects_uri = "get_projects"
+ projects = self.client.send_get(uri=projects_uri)["projects"]
for project in projects:
- if project['name'] == project_name:
+ if project["name"] == project_name:
return project
return None
def send_post_add_result(self, some_id, bug, status_id, add_result):
- add_result['status_id'] = status_id
- add_result['custom_launchpad_bug'] = bug
- send_add_result = 'add_result/' + str(some_id)
+ add_result["status_id"] = status_id
+ add_result["custom_launchpad_bug"] = bug
+ send_add_result = "add_result/" + str(some_id)
return self.client.send_post(send_add_result, add_result)
def get_plans(self, project_id): # !
- return self.client.send_get('get_plans/{0}'.format(project_id))
+ return self.client.send_get("get_plans/{0}".format(project_id))
def get_plan(self, plan_id): # !
- return self.client.send_get('get_plan/{0}'.format(plan_id))
+ return self.client.send_get("get_plan/{0}".format(plan_id))
def is_test_plan_exist(self, test_plan_name):
- runs = self.get_plans(self.project['id'])
- if True in map(lambda item: item['name'] == test_plan_name, runs):
+ runs = self.get_plans(self.project["id"])
+ if True in map(lambda item: item["name"] == test_plan_name, runs):
return True
return False
def get_tests(self, plan_id): # !
- return self.client.send_get('get_tests/{0}'.format(plan_id))
+ return self.client.send_get("get_tests/{0}".format(plan_id))
def get_test_runs(self, plan_id, pattern=None):
plans_runs = self.get_plan(plan_id) # !get_plans
runs = []
- for run in plans_runs['entries']:
+ for run in plans_runs["entries"]:
if pattern:
- if pattern in run['name']:
+ if pattern in run["name"]:
runs.append(run)
else:
runs.append(run)
@@ -52,8 +52,8 @@
def get_tempest_runs(self, plan_id):
runs = self.get_plan(plan_id) # !get_plans
tempest_runs = []
- for run in runs['entries']:
- if 'Tempest' in run['name']:
+ for run in runs["entries"]:
+ if "Tempest" in run["name"]:
tempest_runs.append(run)
return tempest_runs
@@ -61,121 +61,132 @@
all_tests = self.get_tests(run_id)
test_ids = []
for test in all_tests:
- if test['status_id'] == 5:
- test_ids.append(test['id'])
+ if test["status_id"] == 5:
+ test_ids.append(test["id"])
return test_ids
def get_test_result(self, test_id):
- return self.client.send_get('get_results/{0}'.format(test_id))
+ return self.client.send_get("get_results/{0}".format(test_id))
def get_test_results_for_run(self, run_id):
- return self.client.send_get('get_results_for_run/{0}'.format(run_id))
+ return self.client.send_get("get_results_for_run/{0}".format(run_id))
def get_results_for_case(self, run_id, case_id):
- return self.client.send_get('get_results_for_case/{0}/{1}'.
- format(run_id, case_id))
+ return self.client.send_get(
+ "get_results_for_case/{0}/{1}".format(run_id, case_id)
+ )
def get_test(self, test_id):
- return self.client.send_get('get_test/{0}'.format(test_id))
+ return self.client.send_get("get_test/{0}".format(test_id))
def get_runs(self, run_id):
- return self.client.send_get('get_runs/{0}'.format(run_id))
+ return self.client.send_get("get_runs/{0}".format(run_id))
def get_run(self, run_id):
- return self.client.send_get('get_run/{0}'.format(run_id))
+ return self.client.send_get("get_run/{0}".format(run_id))
def get_milestones(self):
- milestones_uri = 'get_milestones/{project_id}'.format(
- project_id=self.project['id'])
- return self.client.send_get(uri=milestones_uri)['milestones']
+ milestones_uri = "get_milestones/{project_id}".format(
+ project_id=self.project["id"]
+ )
+ return self.client.send_get(uri=milestones_uri)["milestones"]
def get_milestone(self, milestone_id):
- milestone_uri = 'get_milestone/{milestone_id}'.format(
- milestone_id=milestone_id)
+ milestone_uri = "get_milestone/{milestone_id}".format(
+ milestone_id=milestone_id
+ )
return self.client.send_get(uri=milestone_uri)
def get_milestone_by_name(self, name):
for milestone in self.get_milestones():
- if milestone['name'] == name:
- return self.get_milestone(milestone_id=milestone['id'])
+ if milestone["name"] == name:
+ return self.get_milestone(milestone_id=milestone["id"])
def add_plan(self, name, description, milestone_id, entries):
- add_plan_uri = 'add_plan/{project_id}'.format(
- project_id=self.project['id'])
+ add_plan_uri = "add_plan/{project_id}".format(
+ project_id=self.project["id"]
+ )
new_plan = {
- 'name': name,
- 'description': description,
- 'milestone_id': milestone_id,
- 'entries': entries # entries=[]
+ "name": name,
+ "description": description,
+ "milestone_id": milestone_id,
+ "entries": entries, # entries=[]
}
return self.client.send_post(add_plan_uri, new_plan)
def add_plan_entry(self, project_id, new_run):
- add_plan_uri = 'add_plan_entry/{project_id}'.format(
- project_id=project_id)
+ add_plan_uri = "add_plan_entry/{project_id}".format(
+ project_id=project_id
+ )
return self.client.send_post(add_plan_uri, new_run)
def get_suites(self):
- suites_uri = 'get_suites/{project_id}'.format(
- project_id=self.project['id'])
+ suites_uri = "get_suites/{project_id}".format(
+ project_id=self.project["id"]
+ )
return self.client.send_get(uri=suites_uri)
def get_suite(self, suite_id):
- suite_uri = 'get_suite/{suite_id}'.format(suite_id=suite_id)
+ suite_uri = "get_suite/{suite_id}".format(suite_id=suite_id)
return self.client.send_get(uri=suite_uri)
def get_suite_by_name(self, name):
for suite in self.get_suites():
- if suite['name'] == name:
- return self.get_suite(suite_id=suite['id'])
+ if suite["name"] == name:
+ return self.get_suite(suite_id=suite["id"])
def get_plan_by_name(self, name):
for plan in self.get_plans(13):
- if plan['name'] == name:
- return self.get_plan(plan['id'])
+ if plan["name"] == name:
+ return self.get_plan(plan["id"])
def add_result(self, test_id, result_to_add):
- return self.client.send_post('add_result/{0}'.format(test_id['id']),
- result_to_add)
+ return self.client.send_post(
+ "add_result/{0}".format(test_id["id"]), result_to_add
+ )
def add_suite(self, name, description=None):
- return self.client.send_post('add_suite/' + str(self.project['id']),
- dict(name=name, description=description))
+ return self.client.send_post(
+ "add_suite/" + str(self.project["id"]),
+ dict(name=name, description=description),
+ )
def get_sections(self, suite_id):
- sections_uri = 'get_sections/{project_id}&suite_id={suite_id}'.format(
- project_id=self.project['id'],
- suite_id=suite_id
+ sections_uri = "get_sections/{project_id}&suite_id={suite_id}".format(
+ project_id=self.project["id"], suite_id=suite_id
)
return self.client.send_get(sections_uri)
def get_section(self, section_id):
- section_uri = 'get_section/{section_id}'.format(section_id=section_id)
+ section_uri = "get_section/{section_id}".format(section_id=section_id)
return self.client.send_get(section_uri)
def get_section_by_name(self, suite_id, section_name):
for section in self.get_sections(suite_id=suite_id):
- if section['name'] == section_name:
- return self.get_section(section_id=section['id'])
+ if section["name"] == section_name:
+ return self.get_section(section_id=section["id"])
def add_section(self, suite_id, name, parent_id=None):
- return self.client.send_post('add_section/' + str(self.project['id']),
- dict(suite_id=suite_id, name=name,
- parent_id=parent_id))
+ return self.client.send_post(
+ "add_section/" + str(self.project["id"]),
+ dict(suite_id=suite_id, name=name, parent_id=parent_id),
+ )
def delete_section(self, section_id):
# Not working bug in testrail
section = self.get_section(section_id)
- print('SECTION', section)
+ print("SECTION", section)
try:
- deleted = self.client.send_post('delete_section/{}'.format(section_id), section)
- print('DELETED', deleted)
+ deleted = self.client.send_post(
+ "delete_section/{}".format(section_id), section
+ )
+ print("DELETED", deleted)
except Exception:
pass
return
def add_case(self, section_id, case):
- add_case_uri = 'add_case/{section_id}'.format(section_id=section_id)
+ add_case_uri = "add_case/{section_id}".format(section_id=section_id)
return self.client.send_post(add_case_uri, case)
@staticmethod
@@ -183,39 +194,43 @@
results = {"results": []}
for test in tests:
- results["results"].append({
- "test_id": test['id'],
- "status_id": status_id,
- "comment": 'Deploy failed',
- })
+ results["results"].append(
+ {
+ "test_id": test["id"],
+ "status_id": status_id,
+ "comment": "Deploy failed",
+ }
+ )
return results
@staticmethod
def get_result_by_name():
result = config.RESULT
- if result == 'Blocked':
+ if result == "Blocked":
return 2
- elif result == 'Passed':
+ elif result == "Passed":
return 1
- elif result == 'Failed':
+ elif result == "Failed":
return 5
- elif result == 'ProdFailed':
+ elif result == "ProdFailed":
return 8
- elif result == 'Skipped':
+ elif result == "Skipped":
return 6
@staticmethod
def get_id_of_tempest_runs(tempest_runs):
tempest_runs_ids = {} # []
for i in tempest_runs:
- for item in i['runs']:
- tempest_runs_ids.update({item['id']: item['name']})
+ for item in i["runs"]:
+ tempest_runs_ids.update({item["id"]: item["name"]})
return tempest_runs_ids
@staticmethod
def get_last_tempest_run(get_plans):
for plans in get_plans:
# print dict
- if (plans.get(u'passed_count') > 1000 or plans.get(
- u'blocked_count') > 1000)and '9.1' in plans.get(u'name'):
- return plans.get(u'id')
+ if (
+ plans.get("passed_count") > 1000
+ or plans.get("blocked_count") > 1000
+ ) and "9.1" in plans.get("name"):
+ return plans.get("id")
diff --git a/testrail_upload_suites/config.py b/testrail_upload_suites/config.py
index be5fdb4..9a4aa10 100644
--- a/testrail_upload_suites/config.py
+++ b/testrail_upload_suites/config.py
@@ -1,14 +1,14 @@
import os
-URL = os.environ.get('TESTRAIL_URL')
-USER = os.environ.get('TESTRAIL_USER')
-PROJECT = os.environ.get('TESTRAIL_PROJECT')
-PASSWORD = os.environ.get('TESTRAIL_PASSWORD')
+URL = os.environ.get("TESTRAIL_URL")
+USER = os.environ.get("TESTRAIL_USER")
+PROJECT = os.environ.get("TESTRAIL_PROJECT")
+PASSWORD = os.environ.get("TESTRAIL_PASSWORD")
-MILESTONE = os.environ.get('TESTRAIL_MILESTONE')
-SUITE = os.environ.get('TESTRAIL_SUITE')
-PLAN_NAME = os.environ.get('TESTRAIL_PLAN_NAME')
-RESULT = os.environ.get('TESTRAIL_RESULT')
+MILESTONE = os.environ.get("TESTRAIL_MILESTONE")
+SUITE = os.environ.get("TESTRAIL_SUITE")
+PLAN_NAME = os.environ.get("TESTRAIL_PLAN_NAME")
+RESULT = os.environ.get("TESTRAIL_RESULT")
# Use test IDs for titles of TestRail test cases like
@@ -23,22 +23,22 @@
UPLOAD_THREADS_COUNT = 4
SECTIONS_MAP = {
- "Telemetry": ["telemetry_tempest_plugin."],
- "Glance": ["image."],
- "Keystone": ["identity."],
- "Neutron": ["network."],
- "Nova": ["compute."],
- "Swift": ["object_storage."],
- "Scenario": ["tempest.scenario."],
- "Manila": ["manila_tempest_tests."],
- "Ironic": ["ironic_tempest_plugin."],
- "Heat": ["heat_tempest_plugin."],
- "Designate": ["designate_tempest_plugin."],
- "Barbican": ["barbican_tempest_plugin."],
- "Horizon": ["tempest_horizon."]
+ "Telemetry": ["telemetry_tempest_plugin."],
+ "Glance": ["image."],
+ "Keystone": ["identity."],
+ "Neutron": ["network."],
+ "Nova": ["compute."],
+ "Swift": ["object_storage."],
+ "Scenario": ["tempest.scenario."],
+ "Manila": ["manila_tempest_tests."],
+ "Ironic": ["ironic_tempest_plugin."],
+ "Heat": ["heat_tempest_plugin."],
+ "Designate": ["designate_tempest_plugin."],
+ "Barbican": ["barbican_tempest_plugin."],
+ "Horizon": ["tempest_horizon."],
}
# Logging
-LOGGER = 'upload_suite'
-LOG_FOLDER = '/tmp/'
-LOG_FILENAME = 'upload_suite.log'
+LOGGER = "upload_suite"
+LOG_FOLDER = "/tmp/"
+LOG_FILENAME = "upload_suite.log"
diff --git a/testrail_upload_suites/testrail.py b/testrail_upload_suites/testrail.py
index a2c6523..e56c633 100644
--- a/testrail_upload_suites/testrail.py
+++ b/testrail_upload_suites/testrail.py
@@ -11,18 +11,19 @@
# Copyright Gurock Software GmbH. See license.md for details.
#
-import requests
-import json
import base64
+import json
+
+import requests
class APIClient:
def __init__(self, base_url):
- self.user = ''
- self.password = ''
- if not base_url.endswith('/'):
- base_url += '/'
- self.__url = base_url + 'index.php?/api/v2/'
+ self.user = ""
+ self.password = ""
+ if not base_url.endswith("/"):
+ base_url += "/"
+ self.__url = base_url + "index.php?/api/v2/"
#
# Send Get
@@ -39,7 +40,7 @@
# Used only for 'get_attachment/:attachment_id'
#
def send_get(self, uri, filepath=None):
- return self.__send_request('GET', uri, filepath)
+ return self.__send_request("GET", uri, filepath)
#
# Send POST
@@ -57,48 +58,51 @@
# to the file
#
def send_post(self, uri, data):
- return self.__send_request('POST', uri, data)
+ return self.__send_request("POST", uri, data)
def __send_request(self, method, uri, data):
url = self.__url + uri
auth = str(
base64.b64encode(
- bytes('%s:%s' % (self.user, self.password), 'utf-8')
+ bytes("%s:%s" % (self.user, self.password), "utf-8")
),
- 'ascii'
+ "ascii",
).strip()
- headers = {'Authorization': 'Basic ' + auth}
+ headers = {"Authorization": "Basic " + auth}
- if method == 'POST':
- if uri[:14] == 'add_attachment': # add_attachment API method
- files = {'attachment': (open(data, 'rb'))}
+ if method == "POST":
+ if uri[:14] == "add_attachment": # add_attachment API method
+ files = {"attachment": (open(data, "rb"))}
response = requests.post(url, headers=headers, files=files)
- files['attachment'].close()
+ files["attachment"].close()
else:
- headers['Content-Type'] = 'application/json'
- payload = bytes(json.dumps(data), 'utf-8')
+ headers["Content-Type"] = "application/json"
+ payload = bytes(json.dumps(data), "utf-8")
response = requests.post(url, headers=headers, data=payload)
else:
- headers['Content-Type'] = 'application/json'
+ headers["Content-Type"] = "application/json"
response = requests.get(url, headers=headers)
if response.status_code > 201:
try:
error = response.json()
- except: # response.content not formatted as JSON
+ except Exception: # response.content not formatted as JSON
error = str(response.content)
- raise APIError('TestRail API returned HTTP %s (%s)' % (response.status_code, error))
+ raise APIError(
+ "TestRail API returned HTTP %s (%s)"
+ % (response.status_code, error)
+ )
else:
- if uri[:15] == 'get_attachment/': # Expecting file, not JSON
+ if uri[:15] == "get_attachment/": # Expecting file, not JSON
try:
- open(data, 'wb').write(response.content)
- return (data)
- except:
- return ("Error saving attachment.")
+ open(data, "wb").write(response.content)
+ return data
+ except Exception:
+ return "Error saving attachment."
else:
return response.json()
class APIError(Exception):
- pass
\ No newline at end of file
+ pass
diff --git a/testrail_upload_suites/upload_suite.py b/testrail_upload_suites/upload_suite.py
index 3cabf05..ade9cd5 100644
--- a/testrail_upload_suites/upload_suite.py
+++ b/testrail_upload_suites/upload_suite.py
@@ -14,22 +14,25 @@
# License for the specific language governing permissions and limitations
# under the License.
-import os
import logging
+import os
import sys
+import config
from base import Base
from testrail import APIError
-import config
-
-
logging.basicConfig(
- format='[%(asctime)s][%(name)s][%(levelname)s] %(message)s',
- datefmt='%d-%m-%Y %H:%M:%S',
- handlers=[logging.FileHandler('{}{}'.format(
- config.LOG_FOLDER, config.LOG_FILENAME)), logging.StreamHandler()],
- level=logging.INFO)
+ format="[%(asctime)s][%(name)s][%(levelname)s] %(message)s",
+ datefmt="%d-%m-%Y %H:%M:%S",
+ handlers=[
+ logging.FileHandler(
+ "{}{}".format(config.LOG_FOLDER, config.LOG_FILENAME)
+ ),
+ logging.StreamHandler(),
+ ],
+ level=logging.INFO,
+)
logger = logging.getLogger(config.LOGGER)
@@ -49,8 +52,9 @@
return tags
-def create_tr_test_cases(test_cases, milestone_id, type_id=1, priority_id=4,
- qa_team=4):
+def create_tr_test_cases(
+ test_cases, milestone_id, type_id=1, priority_id=4, qa_team=4
+):
tr_test_cases = []
for test_case_name in test_cases:
@@ -68,8 +72,11 @@
test_case = {
"milestone_id": milestone_id,
"section": section,
- "title": (("%s.%s" % (test_class, test_name)) if config.USE_TEST_IDs
- else test_name),
+ "title": (
+ ("%s.%s" % (test_class, test_name))
+ if config.USE_TEST_IDs
+ else test_name
+ ),
"type_id": type_id,
"priority_id": priority_id,
"custom_qa_team": qa_team,
@@ -78,7 +85,7 @@
"custom_test_group": test_class,
"custom_test_case_description": test_name,
"custom_test_case_steps": [{"Run test": "passed"}],
- "custom_report_label": report_label
+ "custom_report_label": report_label,
}
tr_test_cases.append(test_case)
@@ -109,8 +116,9 @@
logger.info("Reading tests file '%s'..." % tests_file_path)
with open(tests_file_path) as f:
test_cases = [test for test in f.read().split("\n") if test]
- logger.info("Tests file '%s' has been successfully read."
- % tests_file_path)
+ logger.info(
+ "Tests file '%s' has been successfully read." % tests_file_path
+ )
else:
raise Exception("Tests file '%s' doesn't exist!" % tests_file_path)
@@ -125,8 +133,10 @@
suite = call.get_suite_by_name(config.SUITE)
if not suite:
- logger.info("Tests suite '%s' not found. "
- "Creating tests suite..." % config.SUITE)
+ logger.info(
+ "Tests suite '%s' not found. "
+ "Creating tests suite..." % config.SUITE
+ )
suite = call.add_suite(config.SUITE)
logger.info("Tests suite has benn successfully created.")
@@ -134,10 +144,12 @@
logger.info("Creating test cases for TestRail...")
tr_test_cases = create_tr_test_cases(
- test_cases, milestone["id"],
+ test_cases,
+ milestone["id"],
type_id=config.TEST_CASE_TYPE_ID,
priority_id=config.TEST_CASE_PRIORITY_ID,
- qa_team=config.QA_TEAM)
+ qa_team=config.QA_TEAM,
+ )
logger.info("Test cases have been successfully created.")
sections_map = {}
diff --git a/tox.ini b/tox.ini
index 7ed5f88..969d416 100644
--- a/tox.ini
+++ b/tox.ini
@@ -7,5 +7,7 @@
envdir={toxworkdir}/pep8
deps =
black
+ flake8
commands =
- black daily_jenkins_job_report log_helper parcing_testrail_results save_jenkins_console tempest_tests_resources testrail_bot testrail_upload_suites update_testrail_xml upload2testrail_docker -l 79 --diff
+ flake8 --ignore=E203,W605,W503 --max-line-length 79 daily_jenkins_job_report log_helper parcing_testrail_results save_jenkins_console tempest_tests_resources testrail_bot testrail_upload_suites update_testrail_xml upload2testrail_docker
+ black daily_jenkins_job_report log_helper parcing_testrail_results save_jenkins_console tempest_tests_resources testrail_bot testrail_upload_suites update_testrail_xml upload2testrail_docker --line-length 79 --diff --check
diff --git a/update_testrail_xml/client.py b/update_testrail_xml/client.py
index 94bc5f6..a1b45a9 100644
--- a/update_testrail_xml/client.py
+++ b/update_testrail_xml/client.py
@@ -1,4 +1,5 @@
from __future__ import absolute_import
+
import logging
import time
@@ -6,7 +7,7 @@
logger = logging.getLogger(__name__)
-requests_logger = logging.getLogger('requests.packages.urllib3')
+requests_logger = logging.getLogger("requests.packages.urllib3")
requests_logger.setLevel(logging.WARNING)
@@ -17,8 +18,10 @@
def find_all(self, **kwargs):
filtered = ItemSet(
- x for x in self
- if all(getattr(x, k) == v for k, v in kwargs.items()))
+ x
+ for x in self
+ if all(getattr(x, k) == v for k, v in kwargs.items())
+ )
filtered._item_class = self._item_class
return filtered
@@ -32,8 +35,8 @@
class Collection(object):
- _list_url = 'get_{name}s'
- _add_url = 'add_{name}'
+ _list_url = "get_{name}s"
+ _add_url = "add_{name}"
def __init__(self, item_class=None, parent_id=None, **kwargs):
self._item_class = item_class
@@ -46,12 +49,12 @@
name = self._item_class._api_name()
if id is None:
items = self._list(name)
- if 'error' in items:
+ if "error" in items:
raise Exception(items)
- if name == 'project':
- items = items['projects']
- if name == 'case':
- items = items['cases']
+ if name == "project":
+ items = items["projects"]
+ if name == "case":
+ items = items["cases"]
items = ItemSet(self._to_object(x) for x in items)
items._item_class = self._item_class
return items
@@ -60,7 +63,7 @@
return self._item_class.get(id)
def __repr__(self):
- return '<Collection of {}>'.format(self._item_class.__name__)
+ return "<Collection of {}>".format(self._item_class.__name__)
def _to_object(self, data):
return self._item_class(**data)
@@ -69,8 +72,8 @@
params = params or {}
url = self._list_url.format(name=name)
if self.parent_id is not None:
- url += '/{}'.format(self.parent_id)
- return self._handler('GET', url, params=params)
+ url += "/{}".format(self.parent_id)
+ return self._handler("GET", url, params=params)
def find_all(self, **kwargs):
return self().find_all(**kwargs)
@@ -92,10 +95,10 @@
class Item(object):
- _get_url = 'get_{name}/{id}'
- _update_url = 'update_{name}/{id}'
+ _get_url = "get_{name}/{id}"
+ _update_url = "update_{name}/{id}"
_handler = None
- _repr_field = 'name'
+ _repr_field = "name"
def __init__(self, id=None, **kwargs):
self.id = id
@@ -112,29 +115,30 @@
raise AttributeError
def __setattr__(self, name, value):
- if '_data' in self.__dict__ and name not in self.__dict__:
- self.__dict__['_data'][name] = value
+ if "_data" in self.__dict__ and name not in self.__dict__:
+ self.__dict__["_data"][name] = value
else:
self.__dict__[name] = value
def __repr__(self):
- name = getattr(self, self._repr_field, '')
+ name = getattr(self, self._repr_field, "")
name = repr(name)
- return '<{c.__name__}({s.id}) {name} at 0x{id:x}>'.format(
- s=self, c=self.__class__, id=id(self), name=name)
+ return "<{c.__name__}({s.id}) {name} at 0x{id:x}>".format(
+ s=self, c=self.__class__, id=id(self), name=name
+ )
@classmethod
def get(cls, id):
name = cls._api_name()
url = cls._get_url.format(name=name, id=id)
- result = cls._handler('GET', url)
- if 'error' in result:
+ result = cls._handler("GET", url)
+ if "error" in result:
raise Exception(result)
return cls(**result)
def update(self):
url = self._update_url.format(name=self._api_name(), id=self.id)
- self._handler('POST', url, json=self.data)
+ self._handler("POST", url, json=self.data)
@property
def data(self):
@@ -152,8 +156,10 @@
def cases(self):
return CaseCollection(
Case,
- _list_url='get_cases/{}&suite_id={}'.format(self.project_id,
- self.id))
+ _list_url="get_cases/{}&suite_id={}".format(
+ self.project_id, self.id
+ ),
+ )
class CaseCollection(Collection):
@@ -165,18 +171,20 @@
class Plan(Item):
- def __init__(self,
- name,
- description=None,
- milestone_id=None,
- entries=None,
- id=None,
- **kwargs):
+ def __init__(
+ self,
+ name,
+ description=None,
+ milestone_id=None,
+ entries=None,
+ id=None,
+ **kwargs
+ ):
add_kwargs = {
- 'name': name,
- 'description': description,
- 'milestone_id': milestone_id,
- 'entries': entries or [],
+ "name": name,
+ "description": description,
+ "milestone_id": milestone_id,
+ "entries": entries or [],
}
kwargs.update(add_kwargs)
return super(self.__class__, self).__init__(id, **kwargs)
@@ -186,14 +194,14 @@
def __init__(self, base_url, username, password):
self.username = username
self.password = password
- self.base_url = base_url.rstrip('/') + '/index.php?/api/v2/'
+ self.base_url = base_url.rstrip("/") + "/index.php?/api/v2/"
Item._handler = self._query
def _query(self, method, url, **kwargs):
url = self.base_url + url
- headers = {'Content-type': 'application/json'}
- logger.debug('Make {} request to {}'.format(method, url))
+ headers = {"Content-type": "application/json"}
+ logger.debug("Make {} request to {}".format(method, url))
for _ in range(5):
response = requests.request(
method,
@@ -201,7 +209,8 @@
allow_redirects=False,
auth=(self.username, self.password),
headers=headers,
- **kwargs)
+ **kwargs
+ )
# To many requests
if response.status_code == 429:
time.sleep(60)
@@ -210,13 +219,15 @@
break
# Redirect or error
if response.status_code >= 300:
- raise requests.HTTPError("Wrong response:\n"
- "status_code: {0.status_code}\n"
- "headers: {0.headers}\n"
- "content: '{0.content}'".format(response),
- response=response)
+ raise requests.HTTPError(
+ "Wrong response:\n"
+ "status_code: {0.status_code}\n"
+ "headers: {0.headers}\n"
+ "content: '{0.content}'".format(response),
+ response=response,
+ )
result = response.json()
- if 'error' in result:
+ if "error" in result:
logger.warning(result)
return result
@@ -231,8 +242,9 @@
self.conditions = conditions
def __str__(self):
- conditions = ', '.join(['{}="{}"'.format(x, y)
- for (x, y) in self.conditions.items()])
- return u'{type} with {conditions}'.format(
- type=self.item_class._api_name().title(),
- conditions=conditions)
\ No newline at end of file
+ conditions = ", ".join(
+ ['{}="{}"'.format(x, y) for (x, y) in self.conditions.items()]
+ )
+ return "{type} with {conditions}".format(
+ type=self.item_class._api_name().title(), conditions=conditions
+ )
diff --git a/update_testrail_xml/cmd.py b/update_testrail_xml/cmd.py
index bec9097..ea7a5f3 100644
--- a/update_testrail_xml/cmd.py
+++ b/update_testrail_xml/cmd.py
@@ -9,13 +9,13 @@
from reporter import Reporter
-warnings.simplefilter('always', DeprecationWarning)
+warnings.simplefilter("always", DeprecationWarning)
logger = logging.getLogger(__name__)
if sys.version_info[0] == 3:
str_cls = str
else:
- str_cls = eval('unicode')
+ str_cls = eval("unicode")
def filename(string):
@@ -30,140 +30,164 @@
def parse_args(args):
defaults = {
- 'TESTRAIL_URL': 'https://mirantis.testrail.com',
- 'TESTRAIL_USER': 'user@example.com',
- 'TESTRAIL_PASSWORD': 'password',
- 'TESTRAIL_PROJECT': 'Mirantis OpenStack',
- 'TESTRAIL_MILESTONE': '9.0',
- 'TESTRAIL_TEST_SUITE': '[{0.testrail_milestone}] MOSQA',
- 'XUNIT_REPORT': 'report.xml',
- 'OUTPUT_XUNIT_REPORT': 'output_report.xml',
- 'XUNIT_NAME_TEMPLATE': '{id}',
- 'TESTRAIL_NAME_TEMPLATE': '{custom_report_label}',
- 'TRACE_LEN': 100,
- 'ISO_ID': None,
- 'TESTRAIL_PLAN_NAME': None,
- 'ENV_DESCRIPTION': '',
- 'TEST_RESULTS_LINK': '',
- 'PASTE_BASE_URL': None
+ "TESTRAIL_URL": "https://mirantis.testrail.com",
+ "TESTRAIL_USER": "user@example.com",
+ "TESTRAIL_PASSWORD": "password",
+ "TESTRAIL_PROJECT": "Mirantis OpenStack",
+ "TESTRAIL_MILESTONE": "9.0",
+ "TESTRAIL_TEST_SUITE": "[{0.testrail_milestone}] MOSQA",
+ "XUNIT_REPORT": "report.xml",
+ "OUTPUT_XUNIT_REPORT": "output_report.xml",
+ "XUNIT_NAME_TEMPLATE": "{id}",
+ "TESTRAIL_NAME_TEMPLATE": "{custom_report_label}",
+ "TRACE_LEN": 100,
+ "ISO_ID": None,
+ "TESTRAIL_PLAN_NAME": None,
+ "ENV_DESCRIPTION": "",
+ "TEST_RESULTS_LINK": "",
+ "PASTE_BASE_URL": None,
}
defaults = {k: os.environ.get(k, v) for k, v in defaults.items()}
- parser = argparse.ArgumentParser(description='xUnit to testrail reporter')
+ parser = argparse.ArgumentParser(description="xUnit to testrail reporter")
parser.add_argument(
- 'xunit_report',
+ "xunit_report",
type=filename,
- default=defaults['XUNIT_REPORT'],
- help='xUnit report XML file')
+ default=defaults["XUNIT_REPORT"],
+ help="xUnit report XML file",
+ )
parser.add_argument(
- '--output-xunit-report',
+ "--output-xunit-report",
type=str_cls,
- default=defaults['OUTPUT_XUNIT_REPORT'],
- help='Output xUnit report XML file after update')
+ default=defaults["OUTPUT_XUNIT_REPORT"],
+ help="Output xUnit report XML file after update",
+ )
parser.add_argument(
- '--xunit-name-template',
+ "--xunit-name-template",
type=str_cls,
- default=defaults['XUNIT_NAME_TEMPLATE'],
- help='template for xUnit cases to make id string')
+ default=defaults["XUNIT_NAME_TEMPLATE"],
+ help="template for xUnit cases to make id string",
+ )
parser.add_argument(
- '--testrail-name-template',
+ "--testrail-name-template",
type=str_cls,
- default=defaults['TESTRAIL_NAME_TEMPLATE'],
- help='template for TestRail cases to make id string')
+ default=defaults["TESTRAIL_NAME_TEMPLATE"],
+ help="template for TestRail cases to make id string",
+ )
parser.add_argument(
- '--env-description',
+ "--env-description",
type=str_cls,
- default=defaults['ENV_DESCRIPTION'],
- help='env deploy type description (for TestRun name)')
+ default=defaults["ENV_DESCRIPTION"],
+ help="env deploy type description (for TestRun name)",
+ )
group = parser.add_mutually_exclusive_group()
group.add_argument(
- '--iso-id',
+ "--iso-id",
type=str_cls,
- default=defaults['ISO_ID'],
- help='id of build Fuel iso (DEPRECATED)')
+ default=defaults["ISO_ID"],
+ help="id of build Fuel iso (DEPRECATED)",
+ )
group.add_argument(
- '--testrail-plan-name',
+ "--testrail-plan-name",
type=str_cls,
- default=defaults['TESTRAIL_PLAN_NAME'],
- help='name of test plan to be displayed in testrail')
+ default=defaults["TESTRAIL_PLAN_NAME"],
+ help="name of test plan to be displayed in testrail",
+ )
parser.add_argument(
- '--test-results-link',
+ "--test-results-link",
type=str_cls,
- default=defaults['TEST_RESULTS_LINK'],
- help='link to test job results')
+ default=defaults["TEST_RESULTS_LINK"],
+ help="link to test job results",
+ )
parser.add_argument(
- '--testrail-url',
+ "--testrail-url",
type=str_cls,
- default=defaults['TESTRAIL_URL'],
- help='base url of testrail')
+ default=defaults["TESTRAIL_URL"],
+ help="base url of testrail",
+ )
parser.add_argument(
- '--testrail-user',
+ "--testrail-user",
type=str_cls,
- default=defaults['TESTRAIL_USER'],
- help='testrail user')
+ default=defaults["TESTRAIL_USER"],
+ help="testrail user",
+ )
parser.add_argument(
- '--testrail-password',
+ "--testrail-password",
type=str_cls,
- default=defaults['TESTRAIL_PASSWORD'],
- help='testrail password')
+ default=defaults["TESTRAIL_PASSWORD"],
+ help="testrail password",
+ )
parser.add_argument(
- '--testrail-project',
+ "--testrail-project",
type=str_cls,
- default=defaults['TESTRAIL_PROJECT'],
- help='testrail project name')
+ default=defaults["TESTRAIL_PROJECT"],
+ help="testrail project name",
+ )
parser.add_argument(
- '--testrail-milestone',
+ "--testrail-milestone",
type=str_cls,
- default=defaults['TESTRAIL_MILESTONE'],
- help='testrail project milestone')
+ default=defaults["TESTRAIL_MILESTONE"],
+ help="testrail project milestone",
+ )
parser.add_argument(
- '--testrail-suite',
+ "--testrail-suite",
type=str_cls,
- default=defaults['TESTRAIL_TEST_SUITE'],
- help='testrail project suite name')
+ default=defaults["TESTRAIL_TEST_SUITE"],
+ help="testrail project suite name",
+ )
parser.add_argument(
- '--trace-len',
+ "--trace-len",
type=int,
- default=defaults['TRACE_LEN'],
- help='testrail project name')
+ default=defaults["TRACE_LEN"],
+ help="testrail project name",
+ )
parser.add_argument(
- '--send-skipped',
- action='store_true',
+ "--send-skipped",
+ action="store_true",
default=False,
- help='send skipped cases to testrail')
+ help="send skipped cases to testrail",
+ )
parser.add_argument(
- '--send-duplicates',
- action='store_true',
+ "--send-duplicates",
+ action="store_true",
default=False,
- help='send duplicated cases to testrail')
+ help="send duplicated cases to testrail",
+ )
parser.add_argument(
- '--paste-url',
+ "--paste-url",
type=str_cls,
- default=defaults['PASTE_BASE_URL'],
- help=('pastebin service JSON API URL to send test case logs and trace,'
- ' example: http://localhost:5000/'))
+ default=defaults["PASTE_BASE_URL"],
+ help=(
+ "pastebin service JSON API URL to send test case logs and trace,"
+ " example: http://localhost:5000/"
+ ),
+ )
parser.add_argument(
- '--testrail-run-update',
- dest='use_test_run_if_exists',
- action='store_true',
+ "--testrail-run-update",
+ dest="use_test_run_if_exists",
+ action="store_true",
default=False,
- help='don\'t create new test run if such already exists')
+ help="don't create new test run if such already exists",
+ )
parser.add_argument(
- '--dry-run', '-n',
- action='store_true',
+ "--dry-run",
+ "-n",
+ action="store_true",
default=False,
- help='Just print mapping table')
+ help="Just print mapping table",
+ )
parser.add_argument(
- '--verbose',
- '-v',
- action='store_true',
+ "--verbose",
+ "-v",
+ action="store_true",
default=False,
- help='Verbose mode')
+ help="Verbose mode",
+ )
return parser.parse_args(args)
@@ -175,16 +199,19 @@
args = parse_args(args)
if not args.testrail_plan_name:
- args.testrail_plan_name = ('{0.testrail_milestone} iso '
- '#{0.iso_id}').format(args)
+ args.testrail_plan_name = (
+ "{0.testrail_milestone} iso " "#{0.iso_id}"
+ ).format(args)
- msg = ("--iso-id parameter is DEPRECATED. "
- "It is recommended to use --testrail-plan-name parameter.")
+ msg = (
+ "--iso-id parameter is DEPRECATED. "
+ "It is recommended to use --testrail-plan-name parameter."
+ )
warnings.warn(msg, DeprecationWarning)
logger_dict = dict(stream=sys.stderr)
if args.verbose:
- logger_dict['level'] = logging.DEBUG
+ logger_dict["level"] = logging.DEBUG
logging.basicConfig(**logger_dict)
trace_len = args.trace_len
@@ -194,7 +221,8 @@
output_xunit_report=args.output_xunit_report,
env_description=args.env_description,
test_results_link=args.test_results_link,
- paste_url=args.paste_url)
+ paste_url=args.paste_url,
+ )
suite = args.testrail_suite.format(args)
reporter.config_testrail(
base_url=args.testrail_url,
@@ -204,7 +232,8 @@
tests_suite=suite,
send_skipped=args.send_skipped,
send_duplicates=args.send_duplicates,
- use_test_run_if_exists=args.use_test_run_if_exists)
+ use_test_run_if_exists=args.use_test_run_if_exists,
+ )
empty_classnames = reporter.get_empty_classnames()
all_empty_cases = reporter.get_testcases(empty_classnames)
@@ -215,7 +244,7 @@
reporter.delete_temporary_file()
-if __name__ == '__main__':
+if __name__ == "__main__":
try:
main()
except Exception:
diff --git a/update_testrail_xml/reporter.py b/update_testrail_xml/reporter.py
index 4c04e09..438759c 100644
--- a/update_testrail_xml/reporter.py
+++ b/update_testrail_xml/reporter.py
@@ -1,15 +1,14 @@
from __future__ import absolute_import, print_function
-from functools import wraps
+import logging
import os
import re
-import six
-
import xml.etree.ElementTree as ET
+from functools import wraps
+import six
from client import Client as TrClient
-import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
@@ -27,12 +26,16 @@
class Reporter(object):
- def __init__(self,
- xunit_report,
- output_xunit_report,
- env_description,
- test_results_link,
- paste_url, *args, **kwargs):
+ def __init__(
+ self,
+ xunit_report,
+ output_xunit_report,
+ env_description,
+ test_results_link,
+ paste_url,
+ *args,
+ **kwargs
+ ):
self._config = {}
self._cache = {}
self.xunit_report = xunit_report
@@ -43,17 +46,22 @@
super(Reporter, self).__init__(*args, **kwargs)
- def config_testrail(self,
- base_url,
- username,
- password,
- project,
- tests_suite,
- send_skipped=False,
- use_test_run_if_exists=False, send_duplicates=False):
- self._config['testrail'] = dict(base_url=base_url,
- username=username,
- password=password, )
+ def config_testrail(
+ self,
+ base_url,
+ username,
+ password,
+ project,
+ tests_suite,
+ send_skipped=False,
+ use_test_run_if_exists=False,
+ send_duplicates=False,
+ ):
+ self._config["testrail"] = dict(
+ base_url=base_url,
+ username=username,
+ password=password,
+ )
self.project_name = project
self.tests_suite_name = tests_suite
self.send_skipped = send_skipped
@@ -62,7 +70,7 @@
@property
def testrail_client(self):
- return TrClient(**self._config['testrail'])
+ return TrClient(**self._config["testrail"])
@property
@memoize
@@ -79,55 +87,62 @@
def cases(self):
return self.suite.cases()
-# ================================================================
+ # ================================================================
- temporary_filename = 'temporary_xunit_report.xml'
- logger.info(' Temporrary filename is: {}'.format(temporary_filename))
+ temporary_filename = "temporary_xunit_report.xml"
+    logger.info(" Temporary filename is: {}".format(temporary_filename))
def describe_testrail_case(self, case):
return {
k: v
- for k, v in case.data.items() if isinstance(v, six.string_types)
+ for k, v in case.data.items()
+ if isinstance(v, six.string_types)
}
def get_cases(self):
"""Get all the testcases from the server"""
- logger.info(' Start gerring cases from the Testrail')
+        logger.info(" Start getting cases from the Testrail")
cases_data = []
cases = self.suite.cases()
for case in cases:
case_data = self.describe_testrail_case(case)
cases_data.append(case_data)
- logger.info(' Cases were got from the Testrail')
+ logger.info(" Cases were got from the Testrail")
return cases_data
def get_empty_classnames(self):
tree = ET.parse(self.xunit_report)
root = tree.getroot()
- if root[0].tag == 'testsuite':
+ if root[0].tag == "testsuite":
root = root[0]
classnames = []
for child in root:
- if child.attrib['classname'] == '' and child[0].tag == 'failure':
- m = re.search(r'\(.*\)', child.attrib['name'])
+ if child.attrib["classname"] == "" and child[0].tag == "failure":
+ m = re.search(r"\(.*\)", child.attrib["name"])
classname = m.group()[1:-1]
- classnames.append({'classname': classname,
- 'data': child[0].text,
- 'fullname': child.attrib.get('name')})
+ classnames.append(
+ {
+ "classname": classname,
+ "data": child[0].text,
+ "fullname": child.attrib.get("name"),
+ }
+ )
- logger.info(' Got empty classnames from xml file')
+ logger.info(" Got empty classnames from xml file")
return classnames
def get_testcases(self, empty_classnames):
needed_cases = []
for empty_classname in empty_classnames:
- updated_case = {'classname': empty_classname['classname'],
- 'name': '.' + empty_classname['fullname'],
- 'data': empty_classname['data']}
+ updated_case = {
+ "classname": empty_classname["classname"],
+ "name": "." + empty_classname["fullname"],
+ "data": empty_classname["data"],
+ }
needed_cases.append(updated_case)
- logger.info(' Got test cases for updating xml file')
+ logger.info(" Got test cases for updating xml file")
return needed_cases
def update_testcases(self, cases):
@@ -136,73 +151,107 @@
for case in cases:
testcase = ET.Element("testcase")
- testcase.attrib['classname'] = "{}".format(case['classname'])
- testcase.attrib['name'] = "{}".format(case['name'])
- testcase.attrib['time'] = "0.000"
+ testcase.attrib["classname"] = "{}".format(case["classname"])
+ testcase.attrib["name"] = "{}".format(case["name"])
+ testcase.attrib["time"] = "0.000"
- skip = ET.SubElement(testcase, 'failure')
- skip.text = case['data']
+ skip = ET.SubElement(testcase, "failure")
+ skip.text = case["data"]
root.append(testcase)
for _ in cases:
for child in root:
try:
- if child.attrib['classname'] == "":
+ if child.attrib["classname"] == "":
root.remove(child)
except KeyError:
pass
- logger.info(' Create temporrary file: {}'.format(str(self.temporary_filename)))
+ logger.info(
+            " Create temporary file: {}".format(str(self.temporary_filename))
+ )
tree = ET.ElementTree(root)
tree.write(self.temporary_filename)
- logger.info(' Temporrary file was created: {}'.format(self.check_file_exists(self.temporary_filename)))
+ logger.info(
+            " Temporary file was created: {}".format(
+ self.check_file_exists(self.temporary_filename)
+ )
+ )
+
+ @staticmethod
+ def get_duplicates(all_cases):
+ return sorted(
+ list(set([x for x in all_cases if all_cases.count(x) > 1]))
+ )
def delete_duplicates(self):
- logger.info(' Start deleting duplicates from xml file: {}'.format(self.temporary_filename))
+ logger.info(
+ " Start deleting duplicates from xml file: {}".format(
+ self.temporary_filename
+ )
+ )
tree = ET.parse(self.temporary_filename)
root = tree.getroot()
- if root[0].tag == 'testsuite':
+ if root[0].tag == "testsuite":
root = root[0]
all_cases = []
for child in root:
try:
- all_cases.append((child.attrib['classname'], child.attrib['name']))
+ all_cases.append(
+ (child.attrib["classname"], child.attrib["name"])
+ )
except KeyError:
pass
# Get duplicates
- for_stack = lambda all_cases: sorted(list(set([x for x in all_cases if all_cases.count(x) > 1])))
- duplicate_cases = for_stack(all_cases)
+ duplicate_cases = self.get_duplicates(all_cases)
# Remove duplicates from xml
for case in duplicate_cases:
for child in root:
try:
- if child.attrib['classname'] == case[0] and child.attrib['name'] == case[1]:
- if (child.attrib['time'] == '0.000' or
- hasattr(next(iter(child), None), 'tag') and child[0].tag == 'error'):
+ if (
+ child.attrib["classname"] == case[0]
+ and child.attrib["name"] == case[1]
+ ):
+ if (
+ child.attrib["time"] == "0.000"
+ or hasattr(next(iter(child), None), "tag")
+ and child[0].tag == "error"
+ ):
child.clear()
except KeyError:
pass
- logger.info(' Start saving results to the file: {}'.format(self.output_xunit_report))
+ logger.info(
+ " Start saving results to the file: {}".format(
+ self.output_xunit_report
+ )
+ )
tree = ET.ElementTree(root)
tree.write(self.temporary_filename)
- logger.info(' {} file was created: {}'.format(self.output_xunit_report, self.check_file_exists(self.output_xunit_report)))
+ logger.info(
+ " {} file was created: {}".format(
+ self.output_xunit_report,
+ self.check_file_exists(self.output_xunit_report),
+ )
+ )
def cut_long_traces(self, trace_len):
tree = ET.parse(self.temporary_filename)
root = tree.getroot()
- if root[0].tag == 'testsuite':
+ if root[0].tag == "testsuite":
root = root[0]
for child in root:
try:
- if child[0].text.count('\n') > trace_len:
- ind = [m.start() for m in re.finditer('\n', child[0].text)][-trace_len]
+ if child[0].text.count("\n") > trace_len:
+ ind = [
+ m.start() for m in re.finditer("\n", child[0].text)
+ ][-trace_len]
old = child[0].text[ind:]
child[0].text = old
except IndexError:
@@ -215,4 +264,8 @@
def delete_temporary_file(self):
os.remove(self.temporary_filename)
- logger.info(' Temporrary file exists: {}'.format(self.check_file_exists(self.temporary_filename)))
+ logger.info(
+            " Temporary file exists: {}".format(
+ self.check_file_exists(self.temporary_filename)
+ )
+ )