Diffstat (limited to 'scripts/lib/resulttool')
-rw-r--r--  scripts/lib/resulttool/junit.py            | 77
-rwxr-xr-x  scripts/lib/resulttool/manualexecution.py  |  2
-rw-r--r--  scripts/lib/resulttool/regression.py       |  3
-rw-r--r--  scripts/lib/resulttool/report.py           |  2
-rw-r--r--  scripts/lib/resulttool/resultutils.py      | 76
-rw-r--r--  scripts/lib/resulttool/store.py            | 27
6 files changed, 167 insertions(+), 20 deletions(-)
diff --git a/scripts/lib/resulttool/junit.py b/scripts/lib/resulttool/junit.py
new file mode 100644
index 0000000000..c7a53dc550
--- /dev/null
+++ b/scripts/lib/resulttool/junit.py
@@ -0,0 +1,77 @@
+# resulttool - report test results in JUnit XML format
+#
+# Copyright (c) 2024, Siemens AG.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import os
+import re
+import xml.etree.ElementTree as ET
+import resulttool.resultutils as resultutils
+
+def junit(args, logger):
+    testresults = resultutils.load_resultsdata(args.json_file, configmap=resultutils.store_map)
+
+    total_time = 0
+    skipped = 0
+    failures = 0
+    errors = 0
+
+    for tests in testresults.values():
+        results = tests[next(reversed(tests))].get("result", {})
+
+    for result_id, result in results.items():
+        # filter out ptestresult.rawlogs and ptestresult.sections
+        if re.search(r'\.test_', result_id):
+            total_time += result.get("duration", 0)
+
+            if result['status'] == "FAILED":
+                failures += 1
+            elif result['status'] == "ERROR":
+                errors += 1
+            elif result['status'] == "SKIPPED":
+                skipped += 1
+
+    testsuites_node = ET.Element("testsuites")
+    testsuites_node.set("time", "%s" % total_time)
+    testsuite_node = ET.SubElement(testsuites_node, "testsuite")
+    testsuite_node.set("name", "Testimage")
+    testsuite_node.set("time", "%s" % total_time)
+    testsuite_node.set("tests", "%s" % len(results))
+    testsuite_node.set("failures", "%s" % failures)
+    testsuite_node.set("errors", "%s" % errors)
+    testsuite_node.set("skipped", "%s" % skipped)
+
+    for result_id, result in results.items():
+        if re.search(r'\.test_', result_id):
+            testcase_node = ET.SubElement(testsuite_node, "testcase", {
+                "name": result_id,
+                "classname": "Testimage",
+                "time": str(result['duration'])
+            })
+            if result['status'] == "SKIPPED":
+                ET.SubElement(testcase_node, "skipped", message=result['log'])
+            elif result['status'] == "FAILED":
+                ET.SubElement(testcase_node, "failure", message=result['log'])
+            elif result['status'] == "ERROR":
+                ET.SubElement(testcase_node, "error", message=result['log'])
+
+    tree = ET.ElementTree(testsuites_node)
+
+    if args.junit_xml_path is None:
+        args.junit_xml_path = os.environ['BUILDDIR'] + '/tmp/log/oeqa/junit.xml'
+    tree.write(args.junit_xml_path, encoding='UTF-8', xml_declaration=True)
+
+    logger.info('Saved JUnit XML report as %s' % args.junit_xml_path)
+
+def register_commands(subparsers):
+    """Register subcommands from this plugin"""
+    parser_build = subparsers.add_parser('junit', help='create test report in JUnit XML format',
+                                         description='generate unit test report in JUnit XML format based on the latest test results in the testresults.json.',
+                                         group='analysis')
+    parser_build.set_defaults(func=junit)
+    parser_build.add_argument('json_file',
+                              help='json file should point to the testresults.json')
+    parser_build.add_argument('-j', '--junit_xml_path',
+                              help='junit xml path allows setting the path of the generated test report. The default location is <build_dir>/tmp/log/oeqa/junit.xml')
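
The junit subcommand maps each result whose ID contains ".test_" to a <testcase> element, turning FAILED/ERROR/SKIPPED statuses into child nodes that CI dashboards understand. A minimal sketch of the same ElementTree pattern, using made-up test IDs and values rather than a real testresults.json:

    import xml.etree.ElementTree as ET

    # Hypothetical results, shaped like the per-test entries in testresults.json
    results = {
        "ping.PingTest.test_ping": {"status": "PASSED", "duration": 0.42, "log": ""},
        "ssh.SSHTest.test_ssh": {"status": "FAILED", "duration": 1.3, "log": "connection refused"},
    }
    suite = ET.Element("testsuite", name="Testimage", tests=str(len(results)))
    for result_id, result in results.items():
        case = ET.SubElement(suite, "testcase", name=result_id,
                             classname="Testimage", time=str(result["duration"]))
        if result["status"] == "FAILED":
            ET.SubElement(case, "failure", message=result["log"])
    print(ET.tostring(suite, encoding="unicode"))
    # <testsuite name="Testimage" tests="2">...<failure message="connection refused" />...
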
diff --git a/scripts/lib/resulttool/manualexecution.py b/scripts/lib/resulttool/manualexecution.py
index ecb27c5933..ae0861ac6b 100755
--- a/scripts/lib/resulttool/manualexecution.py
+++ b/scripts/lib/resulttool/manualexecution.py
@@ -22,7 +22,7 @@ def load_json_file(f):
 def write_json_file(f, json_data):
     os.makedirs(os.path.dirname(f), exist_ok=True)
     with open(f, 'w') as filedata:
-        filedata.write(json.dumps(json_data, sort_keys=True, indent=4))
+        filedata.write(json.dumps(json_data, sort_keys=True, indent=1))
 
 class ManualTestRunner(object):
 
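
The indent=4 to indent=1 change here (and in report.py and resultutils.py below) keeps the JSON readable while shrinking the leading whitespace, which adds up in large testresults.json files. A plain json-module illustration of the difference:

    import json

    data = {"a": {"b": 1}}
    print(json.dumps(data, sort_keys=True, indent=4))
    # {
    #     "a": {
    #         "b": 1
    #     }
    # }
    print(json.dumps(data, sort_keys=True, indent=1))
    # {
    #  "a": {
    #   "b": 1
    #  }
    # }
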
diff --git a/scripts/lib/resulttool/regression.py b/scripts/lib/resulttool/regression.py
index 10e7d13841..33b3119c54 100644
--- a/scripts/lib/resulttool/regression.py
+++ b/scripts/lib/resulttool/regression.py
@@ -212,6 +212,8 @@ def compare_result(logger, base_name, target_name, base_result, target_result, d
 
     if base_result and target_result:
         for k in base_result:
+            if k in ['ptestresult.rawlogs', 'ptestresult.sections']:
+                continue
             base_testcase = base_result[k]
             base_status = base_testcase.get('status')
             if base_status:
@@ -422,6 +424,7 @@ def register_commands(subparsers):
                               help='(optional) filter the base results to this result ID')
     parser_build.add_argument('-t', '--target-result-id', default='',
                               help='(optional) filter the target results to this result ID')
+    parser_build.add_argument('-l', '--limit', default=REGRESSIONS_DISPLAY_LIMIT, help="Maximum number of changes to display per test. Can be set to 0 to print all changes")
 
     parser_build = subparsers.add_parser('regression-git', help='regression git analysis',
                                          description='regression analysis comparing base result set to target '
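
The new --limit option caps how many changes the regression report prints per test, falling back to the existing REGRESSIONS_DISPLAY_LIMIT default (presumably defined earlier in regression.py); passing 0 prints everything. Hypothetical invocations, with placeholder paths:

    resulttool regression base/testresults.json target/testresults.json
    resulttool regression base/testresults.json target/testresults.json -l 0
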
diff --git a/scripts/lib/resulttool/report.py b/scripts/lib/resulttool/report.py
index a349510ab8..1c100b00ab 100644
--- a/scripts/lib/resulttool/report.py
+++ b/scripts/lib/resulttool/report.py
@@ -256,7 +256,7 @@ class ResultsTextReport(object):
                 if selected_test_case_only:
                     print_selected_testcase_result(raw_results, selected_test_case_only)
                 else:
-                    print(json.dumps(raw_results, sort_keys=True, indent=4))
+                    print(json.dumps(raw_results, sort_keys=True, indent=1))
             else:
                 print('Could not find raw test result for %s' % raw_test)
             return 0
diff --git a/scripts/lib/resulttool/resultutils.py b/scripts/lib/resulttool/resultutils.py
index c5521d81bd..b8fc79a6ac 100644
--- a/scripts/lib/resulttool/resultutils.py
+++ b/scripts/lib/resulttool/resultutils.py
@@ -14,8 +14,11 @@ import scriptpath
 import copy
 import urllib.request
 import posixpath
+import logging
 scriptpath.add_oe_lib_path()
 
+logger = logging.getLogger('resulttool')
+
 flatten_map = {
     "oeselftest": [],
     "runtime": [],
@@ -31,13 +34,19 @@ regression_map = {
31 "manual": ['TEST_TYPE', 'TEST_MODULE', 'IMAGE_BASENAME', 'MACHINE'] 34 "manual": ['TEST_TYPE', 'TEST_MODULE', 'IMAGE_BASENAME', 'MACHINE']
32} 35}
33store_map = { 36store_map = {
34 "oeselftest": ['TEST_TYPE'], 37 "oeselftest": ['TEST_TYPE', 'TESTSERIES', 'MACHINE'],
35 "runtime": ['TEST_TYPE', 'DISTRO', 'MACHINE', 'IMAGE_BASENAME'], 38 "runtime": ['TEST_TYPE', 'DISTRO', 'MACHINE', 'IMAGE_BASENAME'],
36 "sdk": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME'], 39 "sdk": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME'],
37 "sdkext": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME'], 40 "sdkext": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME'],
38 "manual": ['TEST_TYPE', 'TEST_MODULE', 'MACHINE', 'IMAGE_BASENAME'] 41 "manual": ['TEST_TYPE', 'TEST_MODULE', 'MACHINE', 'IMAGE_BASENAME']
39} 42}
40 43
44rawlog_sections = {
45 "ptestresult.rawlogs": "ptest",
46 "ltpresult.rawlogs": "ltp",
47 "ltpposixresult.rawlogs": "ltpposix"
48}
49
41def is_url(p): 50def is_url(p):
42 """ 51 """
43 Helper for determining if the given path is a URL 52 Helper for determining if the given path is a URL
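
rawlog_sections maps each raw-log key that may appear in a results file to the filename prefix used when the log is extracted alongside testresults.json (see the save_resultsdata() hunk below). A quick sketch of the mapping in use:

    rawlog_sections = {
        "ptestresult.rawlogs": "ptest",
        "ltpresult.rawlogs": "ltp",
        "ltpposixresult.rawlogs": "ltpposix"
    }
    for logtype, prefix in rawlog_sections.items():
        print(logtype, "->", prefix + "-raw.log")
    # ptestresult.rawlogs -> ptest-raw.log
    # ltpresult.rawlogs -> ltp-raw.log
    # ltpposixresult.rawlogs -> ltpposix-raw.log
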
@@ -108,21 +117,57 @@ def filter_resultsdata(results, resultid):
                     newresults[r][i] = results[r][i]
     return newresults
 
-def strip_ptestresults(results):
+def strip_logs(results):
     newresults = copy.deepcopy(results)
-    #for a in newresults2:
-    #  newresults = newresults2[a]
     for res in newresults:
         if 'result' not in newresults[res]:
             continue
-        if 'ptestresult.rawlogs' in newresults[res]['result']:
-            del newresults[res]['result']['ptestresult.rawlogs']
+        for logtype in rawlog_sections:
+            if logtype in newresults[res]['result']:
+                del newresults[res]['result'][logtype]
         if 'ptestresult.sections' in newresults[res]['result']:
             for i in newresults[res]['result']['ptestresult.sections']:
                 if 'log' in newresults[res]['result']['ptestresult.sections'][i]:
                     del newresults[res]['result']['ptestresult.sections'][i]['log']
     return newresults
 
+# For timing numbers, crazy amounts of precision don't make sense and just confuse
+# the logs. For numbers over 1, trim to 3 decimal places, for numbers less than 1,
+# trim to 4 significant digits
+def trim_durations(results):
+    for res in results:
+        if 'result' not in results[res]:
+            continue
+        for entry in results[res]['result']:
+            if 'duration' in results[res]['result'][entry]:
+                duration = results[res]['result'][entry]['duration']
+                if duration > 1:
+                    results[res]['result'][entry]['duration'] = float("%.3f" % duration)
+                elif duration < 1:
+                    results[res]['result'][entry]['duration'] = float("%.4g" % duration)
+    return results
+
+def handle_cleanups(results):
+    # Remove pointless path duplication from old format reproducibility results
+    for res2 in results:
+        try:
+            section = results[res2]['result']['reproducible']['files']
+            for pkgtype in section:
+                for filelist in section[pkgtype].copy():
+                    if section[pkgtype][filelist] and type(section[pkgtype][filelist][0]) == dict:
+                        newlist = []
+                        for entry in section[pkgtype][filelist]:
+                            newlist.append(entry["reference"].split("/./")[1])
+                        section[pkgtype][filelist] = newlist
+
+        except KeyError:
+            pass
+        # Remove pointless duplicate rawlogs data
+        try:
+            del results[res2]['result']['reproducible.rawlogs']
+        except KeyError:
+            pass
+
 def decode_log(logdata):
     if isinstance(logdata, str):
         return logdata
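
The rounding in trim_durations() is easy to verify interactively; note that a duration of exactly 1 matches neither branch and is left untouched:

    >>> float("%.3f" % 12.3456789)   # > 1: three decimal places
    12.346
    >>> float("%.4g" % 0.000123456)  # < 1: four significant digits
    0.0001235
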
@@ -155,9 +200,6 @@ def generic_get_rawlogs(sectname, results):
         return None
     return decode_log(results[sectname]['log'])
 
-def ptestresult_get_rawlogs(results):
-    return generic_get_rawlogs('ptestresult.rawlogs', results)
-
 def save_resultsdata(results, destdir, fn="testresults.json", ptestjson=False, ptestlogs=False):
     for res in results:
         if res:
@@ -167,16 +209,20 @@ def save_resultsdata(results, destdir, fn="testresults.json", ptestjson=False, ptestlogs=False):
             os.makedirs(os.path.dirname(dst), exist_ok=True)
             resultsout = results[res]
             if not ptestjson:
-                resultsout = strip_ptestresults(results[res])
+                resultsout = strip_logs(results[res])
+                trim_durations(resultsout)
+                handle_cleanups(resultsout)
             with open(dst, 'w') as f:
-                f.write(json.dumps(resultsout, sort_keys=True, indent=4))
+                f.write(json.dumps(resultsout, sort_keys=True, indent=1))
         for res2 in results[res]:
             if ptestlogs and 'result' in results[res][res2]:
                 seriesresults = results[res][res2]['result']
-                rawlogs = ptestresult_get_rawlogs(seriesresults)
-                if rawlogs is not None:
-                    with open(dst.replace(fn, "ptest-raw.log"), "w+") as f:
-                        f.write(rawlogs)
+                for logtype in rawlog_sections:
+                    logdata = generic_get_rawlogs(logtype, seriesresults)
+                    if logdata is not None:
+                        logger.info("Extracting " + rawlog_sections[logtype] + "-raw.log")
+                        with open(dst.replace(fn, rawlog_sections[logtype] + "-raw.log"), "w+") as f:
+                            f.write(logdata)
                 if 'ptestresult.sections' in seriesresults:
                     for i in seriesresults['ptestresult.sections']:
                         sectionlog = ptestresult_get_log(seriesresults, i)
diff --git a/scripts/lib/resulttool/store.py b/scripts/lib/resulttool/store.py
index e0951f0a8f..b143334e69 100644
--- a/scripts/lib/resulttool/store.py
+++ b/scripts/lib/resulttool/store.py
@@ -65,18 +65,35 @@ def store(args, logger):
 
         for r in revisions:
             results = revisions[r]
+            if args.revision and r[0] != args.revision:
+                logger.info('skipping %s as non-matching' % r[0])
+                continue
             keywords = {'commit': r[0], 'branch': r[1], "commit_count": r[2]}
-            subprocess.check_call(["find", tempdir, "!", "-path", "./.git/*", "-delete"])
+            subprocess.check_call(["find", tempdir, "-name", "testresults.json", "!", "-path", "./.git/*", "-delete"])
             resultutils.save_resultsdata(results, tempdir, ptestlogs=True)
 
         logger.info('Storing test result into git repository %s' % args.git_dir)
 
-        gitarchive.gitarchive(tempdir, args.git_dir, False, False,
+        excludes = []
+        if args.logfile_archive:
+            excludes = ['*.log', "*.log.zst"]
+
+        tagname = gitarchive.gitarchive(tempdir, args.git_dir, False, False,
                 "Results of {branch}:{commit}", "branch: {branch}\ncommit: {commit}", "{branch}",
                 False, "{branch}/{commit_count}-g{commit}/{tag_number}",
                 'Test run #{tag_number} of {branch}:{commit}', '',
-                [], [], False, keywords, logger)
+                excludes, [], False, keywords, logger)
 
+        if args.logfile_archive:
+            logdir = args.logfile_archive + "/" + tagname
+            shutil.copytree(tempdir, logdir)
+            os.chmod(logdir, 0o755)
+            for root, dirs, files in os.walk(logdir):
+                for name in files:
+                    if not name.endswith(".log"):
+                        continue
+                    f = os.path.join(root, name)
+                    subprocess.run(["zstd", f, "--rm"], check=True, capture_output=True)
     finally:
         subprocess.check_call(["rm", "-rf", tempdir])
 
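In the archive branch above, zstd's --rm flag deletes each input file once compression succeeds, so the copied tree ends up holding *.log.zst files in place of the raw logs. A standalone sketch of the same call, with a placeholder path:

    import subprocess

    # Writes /tmp/example.log.zst and removes /tmp/example.log on success;
    # check=True raises CalledProcessError if zstd fails.
    subprocess.run(["zstd", "/tmp/example.log", "--rm"], check=True, capture_output=True)
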
@@ -102,3 +119,7 @@ def register_commands(subparsers):
                               help='add executed-by configuration to each result file')
     parser_build.add_argument('-t', '--extra-test-env', default='',
                               help='add extra test environment data to each result file configuration')
+    parser_build.add_argument('-r', '--revision', default='',
+                              help='only store data for the specified revision')
+    parser_build.add_argument('-l', '--logfile-archive', default='',
+                              help='directory to separately archive log files along with a copy of the results')
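
Taken together, -r limits a store run to a single revision and -l keeps the bulky logs out of the git repository while archiving them next to it. A hypothetical invocation, with placeholder paths and revision:

    resulttool store results/ /srv/testresults-repo -r 0123abcd -l /srv/testresult-logs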