Diffstat (limited to 'scripts/lib/resulttool')
-rwxr-xr-x  scripts/lib/resulttool/manualexecution.py |  2 +-
-rw-r--r--  scripts/lib/resulttool/report.py          |  2 +-
-rw-r--r--  scripts/lib/resulttool/resultutils.py     | 76 ++++++++++++++++++++++-----
-rw-r--r--  scripts/lib/resulttool/store.py           | 26 +++++++++++---
4 files changed, 86 insertions(+), 20 deletions(-)
diff --git a/scripts/lib/resulttool/manualexecution.py b/scripts/lib/resulttool/manualexecution.py
index ecb27c5933..ae0861ac6b 100755
--- a/scripts/lib/resulttool/manualexecution.py
+++ b/scripts/lib/resulttool/manualexecution.py
@@ -22,7 +22,7 @@ def load_json_file(f):
 def write_json_file(f, json_data):
     os.makedirs(os.path.dirname(f), exist_ok=True)
     with open(f, 'w') as filedata:
-        filedata.write(json.dumps(json_data, sort_keys=True, indent=4))
+        filedata.write(json.dumps(json_data, sort_keys=True, indent=1))
 
 class ManualTestRunner(object):
 
diff --git a/scripts/lib/resulttool/report.py b/scripts/lib/resulttool/report.py
index a349510ab8..1c100b00ab 100644
--- a/scripts/lib/resulttool/report.py
+++ b/scripts/lib/resulttool/report.py
@@ -256,7 +256,7 @@ class ResultsTextReport(object):
                 if selected_test_case_only:
                     print_selected_testcase_result(raw_results, selected_test_case_only)
                 else:
-                    print(json.dumps(raw_results, sort_keys=True, indent=4))
+                    print(json.dumps(raw_results, sort_keys=True, indent=1))
             else:
                 print('Could not find raw test result for %s' % raw_test)
             return 0
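
Both of the changes above drop json.dumps() from indent=4 to indent=1, which keeps one key per line (so stored results stay diff-friendly) while trimming leading whitespace. A minimal illustration of the size effect, not part of the patch and using a made-up payload:

import json

# Hypothetical payload, just to compare output size at the two indent levels.
data = {"ptestresult.sections": {"glibc": {"duration": 123.456789,
                                           "log": "PASS: test\n" * 50}}}

for indent in (4, 1):
    text = json.dumps(data, sort_keys=True, indent=indent)
    print("indent=%d -> %d bytes" % (indent, len(text)))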
diff --git a/scripts/lib/resulttool/resultutils.py b/scripts/lib/resulttool/resultutils.py
index c5521d81bd..b8fc79a6ac 100644
--- a/scripts/lib/resulttool/resultutils.py
+++ b/scripts/lib/resulttool/resultutils.py
@@ -14,8 +14,11 @@ import scriptpath
 import copy
 import urllib.request
 import posixpath
+import logging
 scriptpath.add_oe_lib_path()
 
+logger = logging.getLogger('resulttool')
+
 flatten_map = {
     "oeselftest": [],
     "runtime": [],
@@ -31,13 +34,19 @@ regression_map = {
     "manual": ['TEST_TYPE', 'TEST_MODULE', 'IMAGE_BASENAME', 'MACHINE']
 }
 store_map = {
-    "oeselftest": ['TEST_TYPE'],
+    "oeselftest": ['TEST_TYPE', 'TESTSERIES', 'MACHINE'],
     "runtime": ['TEST_TYPE', 'DISTRO', 'MACHINE', 'IMAGE_BASENAME'],
     "sdk": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME'],
     "sdkext": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME'],
     "manual": ['TEST_TYPE', 'TEST_MODULE', 'MACHINE', 'IMAGE_BASENAME']
 }
 
+rawlog_sections = {
+    "ptestresult.rawlogs": "ptest",
+    "ltpresult.rawlogs": "ltp",
+    "ltpposixresult.rawlogs": "ltpposix"
+}
+
 def is_url(p):
     """
     Helper for determining if the given path is a URL
@@ -108,21 +117,57 @@ def filter_resultsdata(results, resultid):
                 newresults[r][i] = results[r][i]
     return newresults
 
-def strip_ptestresults(results):
+def strip_logs(results):
     newresults = copy.deepcopy(results)
-    #for a in newresults2:
-    #  newresults = newresults2[a]
     for res in newresults:
         if 'result' not in newresults[res]:
             continue
-        if 'ptestresult.rawlogs' in newresults[res]['result']:
-            del newresults[res]['result']['ptestresult.rawlogs']
+        for logtype in rawlog_sections:
+            if logtype in newresults[res]['result']:
+                del newresults[res]['result'][logtype]
         if 'ptestresult.sections' in newresults[res]['result']:
             for i in newresults[res]['result']['ptestresult.sections']:
                 if 'log' in newresults[res]['result']['ptestresult.sections'][i]:
                     del newresults[res]['result']['ptestresult.sections'][i]['log']
     return newresults
 
+# For timing numbers, crazy amounts of precision don't make sense and just confuse
+# the logs. For numbers over 1, trim to 3 decimal places, for numbers less than 1,
+# trim to 4 significant digits
+def trim_durations(results):
+    for res in results:
+        if 'result' not in results[res]:
+            continue
+        for entry in results[res]['result']:
+            if 'duration' in results[res]['result'][entry]:
+                duration = results[res]['result'][entry]['duration']
+                if duration > 1:
+                    results[res]['result'][entry]['duration'] = float("%.3f" % duration)
+                elif duration < 1:
+                    results[res]['result'][entry]['duration'] = float("%.4g" % duration)
+    return results
+
+def handle_cleanups(results):
+    # Remove pointless path duplication from old format reproducibility results
+    for res2 in results:
+        try:
+            section = results[res2]['result']['reproducible']['files']
+            for pkgtype in section:
+                for filelist in section[pkgtype].copy():
+                    if section[pkgtype][filelist] and type(section[pkgtype][filelist][0]) == dict:
+                        newlist = []
+                        for entry in section[pkgtype][filelist]:
+                            newlist.append(entry["reference"].split("/./")[1])
+                        section[pkgtype][filelist] = newlist
+
+        except KeyError:
+            pass
+        # Remove pointless duplicate rawlogs data
+        try:
+            del results[res2]['result']['reproducible.rawlogs']
+        except KeyError:
+            pass
+
 def decode_log(logdata):
     if isinstance(logdata, str):
         return logdata
@@ -155,9 +200,6 @@ def generic_get_rawlogs(sectname, results):
         return None
     return decode_log(results[sectname]['log'])
 
-def ptestresult_get_rawlogs(results):
-    return generic_get_rawlogs('ptestresult.rawlogs', results)
-
 def save_resultsdata(results, destdir, fn="testresults.json", ptestjson=False, ptestlogs=False):
     for res in results:
         if res:
@@ -167,16 +209,20 @@ def save_resultsdata(results, destdir, fn="testresults.json", ptestjson=False, ptestlogs=False):
         os.makedirs(os.path.dirname(dst), exist_ok=True)
         resultsout = results[res]
         if not ptestjson:
-            resultsout = strip_ptestresults(results[res])
+            resultsout = strip_logs(results[res])
+        trim_durations(resultsout)
+        handle_cleanups(resultsout)
         with open(dst, 'w') as f:
-            f.write(json.dumps(resultsout, sort_keys=True, indent=4))
+            f.write(json.dumps(resultsout, sort_keys=True, indent=1))
         for res2 in results[res]:
             if ptestlogs and 'result' in results[res][res2]:
                 seriesresults = results[res][res2]['result']
-                rawlogs = ptestresult_get_rawlogs(seriesresults)
-                if rawlogs is not None:
-                    with open(dst.replace(fn, "ptest-raw.log"), "w+") as f:
-                        f.write(rawlogs)
+                for logtype in rawlog_sections:
+                    logdata = generic_get_rawlogs(logtype, seriesresults)
+                    if logdata is not None:
+                        logger.info("Extracting " + rawlog_sections[logtype] + "-raw.log")
+                        with open(dst.replace(fn, rawlog_sections[logtype] + "-raw.log"), "w+") as f:
+                            f.write(logdata)
                 if 'ptestresult.sections' in seriesresults:
                     for i in seriesresults['ptestresult.sections']:
                         sectionlog = ptestresult_get_log(seriesresults, i)
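
The trim_durations() comment above spells out the precision policy: durations over one second keep 3 decimal places, durations under one second keep 4 significant digits. A standalone sketch of that rule on bare values (the real function walks the whole results dictionary in place):

# Illustration only; mirrors the formatting used in trim_durations() above.
def trim(duration):
    if duration > 1:
        return float("%.3f" % duration)   # e.g. 123.4567891 -> 123.457
    elif duration < 1:
        return float("%.4g" % duration)   # e.g. 0.000123456 -> 0.0001235
    return duration

print(trim(123.4567891), trim(0.000123456))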
diff --git a/scripts/lib/resulttool/store.py b/scripts/lib/resulttool/store.py
index e0951f0a8f..578910d234 100644
--- a/scripts/lib/resulttool/store.py
+++ b/scripts/lib/resulttool/store.py
@@ -65,18 +65,34 @@ def store(args, logger):
 
         for r in revisions:
             results = revisions[r]
+            if args.revision and r[0] != args.revision:
+                logger.info('skipping %s as non-matching' % r[0])
+                continue
             keywords = {'commit': r[0], 'branch': r[1], "commit_count": r[2]}
-            subprocess.check_call(["find", tempdir, "!", "-path", "./.git/*", "-delete"])
+            subprocess.check_call(["find", tempdir, "-name", "testresults.json", "!", "-path", "./.git/*", "-delete"])
             resultutils.save_resultsdata(results, tempdir, ptestlogs=True)
 
         logger.info('Storing test result into git repository %s' % args.git_dir)
 
-        gitarchive.gitarchive(tempdir, args.git_dir, False, False,
+        excludes = []
+        if args.logfile_archive:
+            excludes = ['*.log', "*.log.zst"]
+
+        tagname = gitarchive.gitarchive(tempdir, args.git_dir, False, False,
                               "Results of {branch}:{commit}", "branch: {branch}\ncommit: {commit}", "{branch}",
                               False, "{branch}/{commit_count}-g{commit}/{tag_number}",
                               'Test run #{tag_number} of {branch}:{commit}', '',
-                              [], [], False, keywords, logger)
+                              excludes, [], False, keywords, logger)
 
+        if args.logfile_archive:
+            logdir = args.logfile_archive + "/" + tagname
+            shutil.copytree(tempdir, logdir)
+            for root, dirs, files in os.walk(logdir):
+                for name in files:
+                    if not name.endswith(".log"):
+                        continue
+                    f = os.path.join(root, name)
+                    subprocess.run(["zstd", f, "--rm"], check=True, capture_output=True)
     finally:
         subprocess.check_call(["rm", "-rf", tempdir])
 
@@ -102,3 +118,7 @@ def register_commands(subparsers):
                               help='add executed-by configuration to each result file')
     parser_build.add_argument('-t', '--extra-test-env', default='',
                               help='add extra test environment data to each result file configuration')
+    parser_build.add_argument('-r', '--revision', default='',
+                              help='only store data for the specified revision')
+    parser_build.add_argument('-l', '--logfile-archive', default='',
+                              help='directory to separately archive log files along with a copy of the results')
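
With --logfile-archive set, store() excludes *.log and *.log.zst from the git archive and instead copies the results tree into a per-tag directory, then compresses each .log file with zstd (--rm makes zstd delete the uncompressed original). A condensed sketch of that final step, using placeholder paths rather than the values store() derives from args.logfile_archive, the gitarchive tag name and the tempdir:

import os
import shutil
import subprocess

# Placeholder paths for illustration only.
tempdir = "/tmp/testresults.example"
logdir = "/srv/testresult-logs/master/1234-gabcdef/1"

shutil.copytree(tempdir, logdir)
for root, dirs, files in os.walk(logdir):
    for name in files:
        if not name.endswith(".log"):
            continue
        path = os.path.join(root, name)
        # Produces path + ".zst" and removes the original .log
        subprocess.run(["zstd", path, "--rm"], check=True, capture_output=True)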