Add an option to generate tempest YAML file for specific test case only
Related-prod: PRODX-34274
Change-Id: I5c0a3c92779b1ce8eaf6350b54e0c2307aaceadd
diff --git a/tempest_tests_resources/README.md b/tempest_tests_resources/README.md
index 638a678..40e7574 100644
--- a/tempest_tests_resources/README.md
+++ b/tempest_tests_resources/README.md
@@ -2,11 +2,27 @@
======================
This tool creating machine readable YAML file with all resources used in Tempest tests
+The machine readable YAML file has the following format:
+
+ """
+ <test identifier>:
+ status: PASSED|FAILED
+ resources:
+ <openstack-service name (nova|neutron|glance|keystone)>:
+ <resource-name (port|server|security-group|router)>:
+ <request-id (req-xxxx) >:
+ name: <resource name (test-port-mytest|test-vm-)>
+ id/uuid: <resource id>
+ requst: <exact request>
+ http:
+ error: <if exists>
+ status_code: <>
+ """
How to use
----------
-Update your env variabled or add report and result file to artifacts dir
+Update your env variables or add the report and result files to the artifacts dir
```
export REPORT_NAME='' \
export TEMPORARY_FILE_NAME='' \
@@ -14,7 +30,11 @@
export TEMPEST_REPORT_XML=''
```
-Run report parser script:
+where REPORT_NAME is the tempest log file from your tempest run, TEMPORARY_FILE_NAME is the name of a temporary file,
+RESULT_FILE_NAME is the name of the machine readable YAML file, and TEMPEST_REPORT_XML is the tempest report XML from
+your tempest run.
+
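+For example (the file names below are placeholders, substitute your own artifact paths):
+
+```
+export REPORT_NAME='artifacts/tempest.log'
+export TEMPORARY_FILE_NAME='artifacts/tempest.tmp'
+export RESULT_FILE_NAME='artifacts/tempest_resources.yaml'
+export TEMPEST_REPORT_XML='artifacts/tempest_report.xml'
+```
+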
+Run the report parser script to create a YAML report for all test cases:
``python3 report_parser.py``
@@ -22,3 +42,17 @@
``python3 report_parser.py class``
+or to create a YAML report for one specific test case:
+
+``python3 report_parser.py <test case name>``
+
+for example:
+
+``python3 report_parser.py test_show_hypervisor_with_non_admin_user``
+
+
+or to create a YAML report for failed test cases only:
+
+``python3 report_parser.py failed``
+
+
diff --git a/tempest_tests_resources/report_parser.py b/tempest_tests_resources/report_parser.py
index 60d78c9..0e09f39 100644
--- a/tempest_tests_resources/report_parser.py
+++ b/tempest_tests_resources/report_parser.py
@@ -172,22 +172,22 @@
result = {}
for request in get_request_response(TEMPORARY_FILENAME):
-
# Get test name from request
- test_name = _get_test_name(request[0])
+
# Process requests from Setup/Tear Down Classes if the argument 'class' exist
- try:
- if sys.argv[1] == 'class':
- methods_skip_list = ['_run_cleanups', ]
- test_name = _get_test_name(request[0], methods_skip_list)
- else:
- test_name = _get_test_name(request[0])
- except Exception:
- pass
+ if len(sys.argv) == 2 and sys.argv[1] == 'class':
+ methods_skip_list = ['_run_cleanups', ]
+ test_name = _get_test_name(request[0], methods_skip_list)
+ else:
+ test_name = _get_test_name(request[0])
if not test_name:
continue
+        # Generate test resources only for a specific test case if a test case name argument is provided
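+        # (the argument is matched as a substring of the full test name, so a short method name is enough)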
+        if len(sys.argv) == 2 and sys.argv[1] not in ('class', 'failed') and sys.argv[1] not in test_name:
+ continue
+
if not result.get(test_name):
result[test_name] = {"status": None,
"resources": {}}
@@ -300,6 +300,17 @@
except IndexError:
test_status = 'passed'
+    # Generate test resources only for failed test cases if the argument 'failed' is provided
+ if len(sys.argv) == 2 and sys.argv[1] == 'failed':
+ if test_status != 'failure':
+            # Drop non-failed cases from the report; the key in result may only partially match short_test_name
+            removed = result.pop(short_test_name, None)
+            if not removed:
+ for item in list(result.keys()):
+ if item in short_test_name:
+ result.pop(item)
+ continue
+
if short_test_name and short_test_name in result:
# TODO(imenkov): how to avoid issue
# we can see in report 2 tests:
@@ -308,11 +319,24 @@
#
# but they logged as one:
# RecordsetValidationTest:test_cannot_create_MX_with
+            # rbubyr: it happens because both of these tests are logged in
+            # tempest.log as RecordsetValidationTest:test_cannot_create_MX_with;
+            # this should be fixed in the tempest tests
if not result[short_test_name].get('full_test_name'):
result[short_test_name]['full_test_name'] = []
result[short_test_name]['full_test_name'].append(full_test_name)
result[short_test_name]['status'] = test_status
+    # TODO(rbubyr): some test cases are missing from the result dict, so these tests won't have a resources mapping
+    # because they are skipped in TEMPORARY_FILE_NAME
+    # for now we only add 'failure' test cases that are missing from the result dict to the final report
+    # it might be possible to add their resources from the tempest XML
+ elif test_status == 'failure' and len(sys.argv) == 2 and sys.argv[1] == 'failed':
+ result[short_test_name] = {}
+ result[short_test_name]['full_test_name'] = []
+ result[short_test_name]['full_test_name'].append(full_test_name)
+ result[short_test_name]['status'] = test_status
+
def delete_temporary_file(path_to_temp_file):
os.remove(path_to_temp_file)