summaryrefslogtreecommitdiffstats
path: root/scripts/lib
diff options
context:
space:
mode:
authorAlexis Lothoré <alexis.lothore@bootlin.com>2023-07-21 13:02:06 +0200
committerRichard Purdie <richard.purdie@linuxfoundation.org>2023-07-25 15:27:33 +0100
commitae1d3786936d45307a744186ba3dab743a027134 (patch)
tree6b8ee7b5ca4028ebd83ac781c05d5a46d44887b2 /scripts/lib
parent915d601b1cc4865fe1804670881805cc86c664e0 (diff)
downloadpoky-ae1d3786936d45307a744186ba3dab743a027134.tar.gz
scripts/resulttool: add mention about new detected tests
Some regression reports show a lot of "PASSED->None" transitions. When such a large number of identical transitions is observed, it could be that tests are now failing, but it could also be that some tests have been renamed. To detect such cases, add a log in the regression report stating the number of new tests (i.e.: tests that are present in the target results but not in the base results). This new log also makes it possible to know about newly added test bases (From OE-Core rev: 01b5cefd07e01c7407bc663842b8a8d502358a6d) Signed-off-by: Alexis Lothoré <alexis.lothore@bootlin.com> Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com> Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
Diffstat (limited to 'scripts/lib')
-rw-r--r--scripts/lib/resulttool/regression.py16
1 files changed, 14 insertions, 2 deletions
diff --git a/scripts/lib/resulttool/regression.py b/scripts/lib/resulttool/regression.py
index 1facbcd85e..f80a9182a9 100644
--- a/scripts/lib/resulttool/regression.py
+++ b/scripts/lib/resulttool/regression.py
@@ -178,6 +178,8 @@ def compare_result(logger, base_name, target_name, base_result, target_result):
178 base_result = base_result.get('result') 178 base_result = base_result.get('result')
179 target_result = target_result.get('result') 179 target_result = target_result.get('result')
180 result = {} 180 result = {}
181 new_tests = 0
182
181 if base_result and target_result: 183 if base_result and target_result:
182 for k in base_result: 184 for k in base_result:
183 base_testcase = base_result[k] 185 base_testcase = base_result[k]
@@ -189,6 +191,13 @@ def compare_result(logger, base_name, target_name, base_result, target_result):
189 result[k] = {'base': base_status, 'target': target_status} 191 result[k] = {'base': base_status, 'target': target_status}
190 else: 192 else:
191 logger.error('Failed to retrieved base test case status: %s' % k) 193 logger.error('Failed to retrieved base test case status: %s' % k)
194
195 # Also count new tests that were not present in base results: it
196 # could be newly added tests, but it could also highlights some tests
197 # renames or fixed faulty ptests
198 for k in target_result:
199 if k not in base_result:
200 new_tests += 1
192 if result: 201 if result:
193 new_pass_count = sum(test['target'] is not None and test['target'].startswith("PASS") for test in result.values()) 202 new_pass_count = sum(test['target'] is not None and test['target'].startswith("PASS") for test in result.values())
194 # Print a regression report only if at least one test has a regression status (FAIL, SKIPPED, absent...) 203 # Print a regression report only if at least one test has a regression status (FAIL, SKIPPED, absent...)
@@ -200,10 +209,13 @@ def compare_result(logger, base_name, target_name, base_result, target_result):
200 if new_pass_count > 0: 209 if new_pass_count > 0:
201 resultstring += f' Additionally, {new_pass_count} previously failing test(s) is/are now passing\n' 210 resultstring += f' Additionally, {new_pass_count} previously failing test(s) is/are now passing\n'
202 else: 211 else:
203 resultstring = "Improvement: %s\n %s\n (+%d test(s) passing)" % (base_name, target_name, new_pass_count) 212 resultstring = "Improvement: %s\n %s\n (+%d test(s) passing)\n" % (base_name, target_name, new_pass_count)
204 result = None 213 result = None
205 else: 214 else:
206 resultstring = "Match: %s\n %s" % (base_name, target_name) 215 resultstring = "Match: %s\n %s\n" % (base_name, target_name)
216
217 if new_tests > 0:
218 resultstring += f' Additionally, {new_tests} new test(s) is/are present\n'
207 return result, resultstring 219 return result, resultstring
208 220
209def get_results(logger, source): 221def get_results(logger, source):