Diffstat (limited to 'scripts/lib/resulttool')
-rw-r--r--  scripts/lib/resulttool/__init__.py                          |    0
-rw-r--r--  scripts/lib/resulttool/junit.py                             |   77
-rw-r--r--  scripts/lib/resulttool/log.py                               |  107
-rwxr-xr-x  scripts/lib/resulttool/manualexecution.py                   |  235
-rw-r--r--  scripts/lib/resulttool/merge.py                             |   46
-rw-r--r--  scripts/lib/resulttool/regression.py                        |  450
-rw-r--r--  scripts/lib/resulttool/report.py                            |  315
-rw-r--r--  scripts/lib/resulttool/resultutils.py                       |  274
-rw-r--r--  scripts/lib/resulttool/store.py                             |  125
-rw-r--r--  scripts/lib/resulttool/template/test_report_full_text.txt   |   79
10 files changed, 0 insertions(+), 1708 deletions(-)
diff --git a/scripts/lib/resulttool/__init__.py b/scripts/lib/resulttool/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/scripts/lib/resulttool/__init__.py
+++ /dev/null
diff --git a/scripts/lib/resulttool/junit.py b/scripts/lib/resulttool/junit.py
deleted file mode 100644
index c7a53dc550..0000000000
--- a/scripts/lib/resulttool/junit.py
+++ /dev/null
@@ -1,77 +0,0 @@
1# resulttool - report test results in JUnit XML format
2#
3# Copyright (c) 2024, Siemens AG.
4#
5# SPDX-License-Identifier: GPL-2.0-only
6#
7
8import os
9import re
10import xml.etree.ElementTree as ET
11import resulttool.resultutils as resultutils
12
13def junit(args, logger):
14 testresults = resultutils.load_resultsdata(args.json_file, configmap=resultutils.store_map)
15
16 total_time = 0
17 skipped = 0
18 failures = 0
19 errors = 0
20
21 for tests in testresults.values():
22 results = tests[next(reversed(tests))].get("result", {})
23
24 for result_id, result in results.items():
25 # filter out ptestresult.rawlogs and ptestresult.sections
26 if re.search(r'\.test_', result_id):
27 total_time += result.get("duration", 0)
28
29 if result['status'] == "FAILED":
30 failures += 1
31 elif result['status'] == "ERROR":
32 errors += 1
33 elif result['status'] == "SKIPPED":
34 skipped += 1
35
36 testsuites_node = ET.Element("testsuites")
37 testsuites_node.set("time", "%s" % total_time)
38 testsuite_node = ET.SubElement(testsuites_node, "testsuite")
39 testsuite_node.set("name", "Testimage")
40 testsuite_node.set("time", "%s" % total_time)
41 testsuite_node.set("tests", "%s" % len(results))
42 testsuite_node.set("failures", "%s" % failures)
43 testsuite_node.set("errors", "%s" % errors)
44 testsuite_node.set("skipped", "%s" % skipped)
45
46 for result_id, result in results.items():
47 if re.search(r'\.test_', result_id):
48 testcase_node = ET.SubElement(testsuite_node, "testcase", {
49 "name": result_id,
50 "classname": "Testimage",
51 "time": str(result['duration'])
52 })
53 if result['status'] == "SKIPPED":
54 ET.SubElement(testcase_node, "skipped", message=result['log'])
55 elif result['status'] == "FAILED":
56 ET.SubElement(testcase_node, "failure", message=result['log'])
57 elif result['status'] == "ERROR":
58 ET.SubElement(testcase_node, "error", message=result['log'])
59
60 tree = ET.ElementTree(testsuites_node)
61
62 if args.junit_xml_path is None:
63 args.junit_xml_path = os.environ['BUILDDIR'] + '/tmp/log/oeqa/junit.xml'
64 tree.write(args.junit_xml_path, encoding='UTF-8', xml_declaration=True)
65
66 logger.info('Saved JUnit XML report as %s' % args.junit_xml_path)
67
68def register_commands(subparsers):
69 """Register subcommands from this plugin"""
70 parser_build = subparsers.add_parser('junit', help='create test report in JUnit XML format',
71 description='generate unit test report in JUnit XML format based on the latest test results in the testresults.json.',
72 group='analysis')
73 parser_build.set_defaults(func=junit)
74 parser_build.add_argument('json_file',
75 help='json file should point to the testresults.json')
76 parser_build.add_argument('-j', '--junit_xml_path',
77                              help='path of the generated JUnit XML test report. The default location is <build_dir>/tmp/log/oeqa/junit.xml')
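For reference, a minimal standalone sketch of the XML layout that the junit() function above builds. The element and attribute names (testsuites, testsuite "Testimage", testcase, failure) come from the code; the test id, status, duration and message below are invented for illustration.

import xml.etree.ElementTree as ET

# Hypothetical single test result (placeholder values)
result_id, status, duration, log = "selftest.example.test_case", "FAILED", 1.234, "assertion failed"

testsuites = ET.Element("testsuites", time=str(duration))
testsuite = ET.SubElement(testsuites, "testsuite", name="Testimage", time=str(duration),
                          tests="1", failures="1", errors="0", skipped="0")
testcase = ET.SubElement(testsuite, "testcase", name=result_id,
                         classname="Testimage", time=str(duration))
if status == "FAILED":
    ET.SubElement(testcase, "failure", message=log)

print(ET.tostring(testsuites, encoding="unicode"))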
diff --git a/scripts/lib/resulttool/log.py b/scripts/lib/resulttool/log.py
deleted file mode 100644
index 15148ca288..0000000000
--- a/scripts/lib/resulttool/log.py
+++ /dev/null
@@ -1,107 +0,0 @@
1# resulttool - Show logs
2#
3# Copyright (c) 2019 Garmin International
4#
5# SPDX-License-Identifier: GPL-2.0-only
6#
7import os
8import resulttool.resultutils as resultutils
9
10def show_ptest(result, ptest, logger):
11 logdata = resultutils.ptestresult_get_log(result, ptest)
12 if logdata is not None:
13 print(logdata)
14 return 0
15
16 print("ptest '%s' log not found" % ptest)
17 return 1
18
19def show_reproducible(result, reproducible, logger):
20 try:
21 print(result['reproducible'][reproducible]['diffoscope.text'])
22 return 0
23
24 except KeyError:
25 print("reproducible '%s' not found" % reproducible)
26 return 1
27
28def log(args, logger):
29 results = resultutils.load_resultsdata(args.source)
30
31 for _, run_name, _, r in resultutils.test_run_results(results):
32 if args.list_ptest:
33 print('\n'.join(sorted(r['ptestresult.sections'].keys())))
34
35 if args.dump_ptest:
36 for sectname in ['ptestresult.sections', 'ltpposixresult.sections', 'ltpresult.sections']:
37 if sectname in r:
38 for name, ptest in r[sectname].items():
39 logdata = resultutils.generic_get_log(sectname, r, name)
40 if logdata is not None:
41 dest_dir = args.dump_ptest
42 if args.prepend_run:
43 dest_dir = os.path.join(dest_dir, run_name)
44 if not sectname.startswith("ptest"):
45 dest_dir = os.path.join(dest_dir, sectname.split(".")[0])
46
47 os.makedirs(dest_dir, exist_ok=True)
48 dest = os.path.join(dest_dir, '%s.log' % name)
49 if os.path.exists(dest):
50 print("Overlapping ptest logs found, skipping %s. The '--prepend-run' option would avoid this" % name)
51 continue
52 print(dest)
53 with open(dest, 'w') as f:
54 f.write(logdata)
55
56 if args.raw_ptest:
57 found = False
58 for sectname in ['ptestresult.rawlogs', 'ltpposixresult.rawlogs', 'ltpresult.rawlogs']:
59 rawlog = resultutils.generic_get_rawlogs(sectname, r)
60 if rawlog is not None:
61 print(rawlog)
62 found = True
63 if not found:
64 print('Raw ptest logs not found')
65 return 1
66
67 if args.raw_reproducible:
68 if 'reproducible.rawlogs' in r:
69 print(r['reproducible.rawlogs']['log'])
70 else:
71 print('Raw reproducible logs not found')
72 return 1
73
74 for ptest in args.ptest:
75 if not show_ptest(r, ptest, logger):
76 return 1
77
78 for reproducible in args.reproducible:
79 if not show_reproducible(r, reproducible, logger):
80 return 1
81
82def register_commands(subparsers):
83 """Register subcommands from this plugin"""
84 parser = subparsers.add_parser('log', help='show logs',
85 description='show the logs from test results',
86 group='analysis')
87 parser.set_defaults(func=log)
88 parser.add_argument('source',
89 help='the results file/directory/URL to import')
90 parser.add_argument('--list-ptest', action='store_true',
91 help='list the ptest test names')
92 parser.add_argument('--ptest', action='append', default=[],
93 help='show logs for a ptest')
94 parser.add_argument('--dump-ptest', metavar='DIR',
95 help='Dump all ptest log files to the specified directory.')
96 parser.add_argument('--reproducible', action='append', default=[],
97 help='show logs for a reproducible test')
98 parser.add_argument('--prepend-run', action='store_true',
99 help='''Dump ptest results to a subdirectory named after the test run when using --dump-ptest.
100 Required if more than one test run is present in the result file''')
101 parser.add_argument('--raw', action='store_true',
102 help='show raw (ptest) logs. Deprecated. Alias for "--raw-ptest"', dest='raw_ptest')
103 parser.add_argument('--raw-ptest', action='store_true',
104 help='show raw ptest log')
105 parser.add_argument('--raw-reproducible', action='store_true',
106 help='show raw reproducible build logs')
107
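For orientation, a rough sketch of the per-run result dictionary that log() above walks. The key names ('ptestresult.sections', 'reproducible.rawlogs', 'log') are taken from the code; the suite name and log contents are invented placeholders, and real results may store the log zlib-compressed (see decode_log() in resultutils.py).

example_run = {
    "ptestresult.sections": {
        "glib-2.0": {"log": "PASS: test_foo\nFAIL: test_bar\n", "duration": "12"},
    },
    "reproducible.rawlogs": {"log": "diffoscope output ..."},
}

# Roughly what '--list-ptest' prints for such a run:
print("\n".join(sorted(example_run["ptestresult.sections"].keys())))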
diff --git a/scripts/lib/resulttool/manualexecution.py b/scripts/lib/resulttool/manualexecution.py
deleted file mode 100755
index ae0861ac6b..0000000000
--- a/scripts/lib/resulttool/manualexecution.py
+++ /dev/null
@@ -1,235 +0,0 @@
1# test case management tool - manual execution from testopia test cases
2#
3# Copyright (c) 2018, Intel Corporation.
4#
5# SPDX-License-Identifier: GPL-2.0-only
6#
7
8import argparse
9import json
10import os
11import sys
12import datetime
13import re
14import copy
15from oeqa.core.runner import OETestResultJSONHelper
16
17
18def load_json_file(f):
19 with open(f, "r") as filedata:
20 return json.load(filedata)
21
22def write_json_file(f, json_data):
23 os.makedirs(os.path.dirname(f), exist_ok=True)
24 with open(f, 'w') as filedata:
25 filedata.write(json.dumps(json_data, sort_keys=True, indent=1))
26
27class ManualTestRunner(object):
28
29 def _get_test_module(self, case_file):
30 return os.path.basename(case_file).split('.')[0]
31
32 def _get_input(self, config):
33 while True:
34 output = input('{} = '.format(config))
35 if re.match('^[a-z0-9-.]+$', output):
36 break
37 print('Only lowercase alphanumeric, hyphen and dot are allowed. Please try again')
38 return output
39
40 def _get_available_config_options(self, config_options, test_module, target_config):
41 avail_config_options = None
42 if test_module in config_options:
43 avail_config_options = config_options[test_module].get(target_config)
44 return avail_config_options
45
46 def _choose_config_option(self, options):
47 while True:
48 output = input('{} = '.format('Option index number'))
49 if output in options:
50 break
51 print('Only integer index inputs from above available configuration options are allowed. Please try again.')
52 return options[output]
53
54 def _get_config(self, config_options, test_module):
55 from oeqa.utils.metadata import get_layers
56 from oeqa.utils.commands import get_bb_var
57 from resulttool.resultutils import store_map
58
59 layers = get_layers(get_bb_var('BBLAYERS'))
60 configurations = {}
61 configurations['LAYERS'] = layers
62 configurations['STARTTIME'] = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
63 configurations['TEST_TYPE'] = 'manual'
64 configurations['TEST_MODULE'] = test_module
65
66 extra_config = set(store_map['manual']) - set(configurations)
67 for config in sorted(extra_config):
68 avail_config_options = self._get_available_config_options(config_options, test_module, config)
69 if avail_config_options:
70 print('---------------------------------------------')
71 print('These are available configuration #%s options:' % config)
72 print('---------------------------------------------')
73 for option, _ in sorted(avail_config_options.items(), key=lambda x: int(x[0])):
74 print('%s: %s' % (option, avail_config_options[option]))
75 print('Please select configuration option, enter the integer index number.')
76 value_conf = self._choose_config_option(avail_config_options)
77 print('---------------------------------------------\n')
78 else:
79 print('---------------------------------------------')
80                print('This is configuration #%s. Please provide a configuration value (use "None" if not applicable).' % config)
81 print('---------------------------------------------')
82 value_conf = self._get_input('Configuration Value')
83 print('---------------------------------------------\n')
84 configurations[config] = value_conf
85 return configurations
86
87 def _execute_test_steps(self, case):
88 test_result = {}
89 print('------------------------------------------------------------------------')
90 print('Executing test case: %s' % case['test']['@alias'])
91 print('------------------------------------------------------------------------')
92 print('You have total %s test steps to be executed.' % len(case['test']['execution']))
93 print('------------------------------------------------------------------------\n')
94 for step, _ in sorted(case['test']['execution'].items(), key=lambda x: int(x[0])):
95 print('Step %s: %s' % (step, case['test']['execution'][step]['action']))
96 expected_output = case['test']['execution'][step]['expected_results']
97 if expected_output:
98 print('Expected output: %s' % expected_output)
99 while True:
100 done = input('\nPlease provide test results: (P)assed/(F)ailed/(B)locked/(S)kipped? \n').lower()
101 result_types = {'p':'PASSED',
102 'f':'FAILED',
103 'b':'BLOCKED',
104 's':'SKIPPED'}
105 if done in result_types:
106 for r in result_types:
107 if done == r:
108 res = result_types[r]
109 if res == 'FAILED':
110 log_input = input('\nPlease enter the error and the description of the log: (Ex:log:211 Error Bitbake)\n')
111 test_result.update({case['test']['@alias']: {'status': '%s' % res, 'log': '%s' % log_input}})
112 else:
113 test_result.update({case['test']['@alias']: {'status': '%s' % res}})
114 break
115 print('Invalid input!')
116 return test_result
117
118 def _get_write_dir(self):
119 return os.environ['BUILDDIR'] + '/tmp/log/manual/'
120
121 def run_test(self, case_file, config_options_file, testcase_config_file):
122 test_module = self._get_test_module(case_file)
123 cases = load_json_file(case_file)
124 config_options = {}
125 if config_options_file:
126 config_options = load_json_file(config_options_file)
127 configurations = self._get_config(config_options, test_module)
128 result_id = 'manual_%s_%s' % (test_module, configurations['STARTTIME'])
129 test_results = {}
130 if testcase_config_file:
131 test_case_config = load_json_file(testcase_config_file)
132 test_case_to_execute = test_case_config['testcases']
133 for case in copy.deepcopy(cases) :
134 if case['test']['@alias'] not in test_case_to_execute:
135 cases.remove(case)
136
137 print('\nTotal number of test cases in this test suite: %s\n' % len(cases))
138 for c in cases:
139 test_result = self._execute_test_steps(c)
140 test_results.update(test_result)
141 return configurations, result_id, self._get_write_dir(), test_results
142
143 def _get_true_false_input(self, input_message):
144 yes_list = ['Y', 'YES']
145 no_list = ['N', 'NO']
146 while True:
147 more_config_option = input(input_message).upper()
148 if more_config_option in yes_list or more_config_option in no_list:
149 break
150 print('Invalid input!')
151 if more_config_option in no_list:
152 return False
153 return True
154
155 def make_config_option_file(self, logger, case_file, config_options_file):
156 config_options = {}
157 if config_options_file:
158 config_options = load_json_file(config_options_file)
159 new_test_module = self._get_test_module(case_file)
160 print('Creating configuration options file for test module: %s' % new_test_module)
161 new_config_options = {}
162
163 while True:
164 config_name = input('\nPlease provide test configuration to create:\n').upper()
165 new_config_options[config_name] = {}
166 while True:
167 config_value = self._get_input('Configuration possible option value')
168 config_option_index = len(new_config_options[config_name]) + 1
169 new_config_options[config_name][config_option_index] = config_value
170 more_config_option = self._get_true_false_input('\nIs there more configuration option input: (Y)es/(N)o\n')
171 if not more_config_option:
172 break
173 more_config = self._get_true_false_input('\nIs there more configuration to create: (Y)es/(N)o\n')
174 if not more_config:
175 break
176
177 if new_config_options:
178 config_options[new_test_module] = new_config_options
179 if not config_options_file:
180 config_options_file = os.path.join(self._get_write_dir(), 'manual_config_options.json')
181 write_json_file(config_options_file, config_options)
182 logger.info('Configuration option file created at %s' % config_options_file)
183
184 def make_testcase_config_file(self, logger, case_file, testcase_config_file):
185 if testcase_config_file:
186 if os.path.exists(testcase_config_file):
187 print('\nTest configuration file with name %s already exists. Please provide a unique file name' % (testcase_config_file))
188 return 0
189
190 if not testcase_config_file:
191 testcase_config_file = os.path.join(self._get_write_dir(), "testconfig_new.json")
192
193 testcase_config = {}
194 cases = load_json_file(case_file)
195 new_test_module = self._get_test_module(case_file)
196 new_testcase_config = {}
197 new_testcase_config['testcases'] = []
198
199 print('\nAdd testcases for this configuration file:')
200 for case in cases:
201 print('\n' + case['test']['@alias'])
202 add_tc_config = self._get_true_false_input('\nDo you want to add this test case to test configuration : (Y)es/(N)o\n')
203 if add_tc_config:
204 new_testcase_config['testcases'].append(case['test']['@alias'])
205 write_json_file(testcase_config_file, new_testcase_config)
206 logger.info('Testcase Configuration file created at %s' % testcase_config_file)
207
208def manualexecution(args, logger):
209 testrunner = ManualTestRunner()
210 if args.make_config_options_file:
211 testrunner.make_config_option_file(logger, args.file, args.config_options_file)
212 return 0
213 if args.make_testcase_config_file:
214 testrunner.make_testcase_config_file(logger, args.file, args.testcase_config_file)
215 return 0
216 configurations, result_id, write_dir, test_results = testrunner.run_test(args.file, args.config_options_file, args.testcase_config_file)
217 resultjsonhelper = OETestResultJSONHelper()
218 resultjsonhelper.dump_testresult_file(write_dir, configurations, result_id, test_results)
219 return 0
220
221def register_commands(subparsers):
222 """Register subcommands from this plugin"""
223 parser_build = subparsers.add_parser('manualexecution', help='helper script for results populating during manual test execution.',
224 description='helper script for results populating during manual test execution. You can find manual test case JSON file in meta/lib/oeqa/manual/',
225 group='manualexecution')
226 parser_build.set_defaults(func=manualexecution)
227    parser_build.add_argument('file', help='specify the path to the manual test case JSON file. Note: please use "" to encapsulate the file path.')
228 parser_build.add_argument('-c', '--config-options-file', default='',
229 help='the config options file to import and used as available configuration option selection or make config option file')
230 parser_build.add_argument('-m', '--make-config-options-file', action='store_true',
231 help='make the configuration options file based on provided inputs')
232 parser_build.add_argument('-t', '--testcase-config-file', default='',
233 help='the testcase configuration file to enable user to run a selected set of test case or make a testcase configuration file')
234 parser_build.add_argument('-d', '--make-testcase-config-file', action='store_true',
235                              help='make the testcase configuration file to run a set of test cases based on user selection')
\ No newline at end of file
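As an illustration of the two helper JSON files handled above, a small sketch of their layout: the nesting follows make_config_option_file() and make_testcase_config_file(), while the module name, configuration values and test case alias are invented.

import json

config_options = {                  # passed via -c/--config-options-file
    "example_module": {             # test module (case file basename)
        "IMAGE_BASENAME": {"1": "core-image-minimal", "2": "core-image-sato"},
    }
}
testcase_config = {                 # passed via -t/--testcase-config-file
    "testcases": ["example_module.Example.some_manual_test"]
}

print(json.dumps(config_options, sort_keys=True, indent=1))
print(json.dumps(testcase_config, sort_keys=True, indent=1))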
diff --git a/scripts/lib/resulttool/merge.py b/scripts/lib/resulttool/merge.py
deleted file mode 100644
index 18b4825a18..0000000000
--- a/scripts/lib/resulttool/merge.py
+++ /dev/null
@@ -1,46 +0,0 @@
1# resulttool - merge multiple testresults.json files into a file or directory
2#
3# Copyright (c) 2019, Intel Corporation.
4# Copyright (c) 2019, Linux Foundation
5#
6# SPDX-License-Identifier: GPL-2.0-only
7#
8
9import os
10import json
11import resulttool.resultutils as resultutils
12
13def merge(args, logger):
14 configvars = {}
15 if not args.not_add_testseries:
16 configvars = resultutils.extra_configvars.copy()
17 if args.executed_by:
18 configvars['EXECUTED_BY'] = args.executed_by
19 if resultutils.is_url(args.target_results) or os.path.isdir(args.target_results):
20 results = resultutils.load_resultsdata(args.target_results, configmap=resultutils.store_map, configvars=configvars)
21 resultutils.append_resultsdata(results, args.base_results, configmap=resultutils.store_map, configvars=configvars)
22 resultutils.save_resultsdata(results, args.target_results)
23 else:
24 results = resultutils.load_resultsdata(args.base_results, configmap=resultutils.flatten_map, configvars=configvars)
25 if os.path.exists(args.target_results):
26 resultutils.append_resultsdata(results, args.target_results, configmap=resultutils.flatten_map, configvars=configvars)
27 resultutils.save_resultsdata(results, os.path.dirname(args.target_results), fn=os.path.basename(args.target_results))
28
29 logger.info('Merged results to %s' % os.path.dirname(args.target_results))
30
31 return 0
32
33def register_commands(subparsers):
34 """Register subcommands from this plugin"""
35 parser_build = subparsers.add_parser('merge', help='merge test result files/directories/URLs',
36 description='merge the results from multiple files/directories/URLs into the target file or directory',
37 group='setup')
38 parser_build.set_defaults(func=merge)
39 parser_build.add_argument('base_results',
40 help='the results file/directory/URL to import')
41 parser_build.add_argument('target_results',
42 help='the target file or directory to merge the base_results with')
43 parser_build.add_argument('-t', '--not-add-testseries', action='store_true',
44 help='do not add testseries configuration to results')
45 parser_build.add_argument('-x', '--executed-by', default='',
46 help='add executed-by configuration to each result file')
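In outline, the two merge modes above behave as follows; a minimal sketch with an invented target path (the resultutils helpers named in the code are not re-implemented here).

import os

# 1. target is a directory or URL -> load it with store_map, append the base
#    results, and save one testresults.json per configuration bucket.
# 2. target is a single file      -> load the base with flatten_map (one flat
#    bucket), append the existing target file if present, and rewrite it.
target = "build/tmp/log/oeqa/testresults.json"      # hypothetical path
mode = "per-configuration tree" if os.path.isdir(target) else "single flat file"
print("merging into a", mode)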
diff --git a/scripts/lib/resulttool/regression.py b/scripts/lib/resulttool/regression.py
deleted file mode 100644
index 33b3119c54..0000000000
--- a/scripts/lib/resulttool/regression.py
+++ /dev/null
@@ -1,450 +0,0 @@
1# resulttool - regression analysis
2#
3# Copyright (c) 2019, Intel Corporation.
4# Copyright (c) 2019, Linux Foundation
5#
6# SPDX-License-Identifier: GPL-2.0-only
7#
8
9import resulttool.resultutils as resultutils
10
11from oeqa.utils.git import GitRepo
12import oeqa.utils.gitarchive as gitarchive
13
14METADATA_MATCH_TABLE = {
15 "oeselftest": "OESELFTEST_METADATA"
16}
17
18OESELFTEST_METADATA_GUESS_TABLE={
19 "trigger-build-posttrigger": {
20 "run_all_tests": False,
21 "run_tests":["buildoptions.SourceMirroring.test_yocto_source_mirror"],
22 "skips": None,
23 "machine": None,
24 "select_tags":None,
25 "exclude_tags": None
26 },
27 "reproducible": {
28 "run_all_tests": False,
29 "run_tests":["reproducible"],
30 "skips": None,
31 "machine": None,
32 "select_tags":None,
33 "exclude_tags": None
34 },
35 "arch-qemu-quick": {
36 "run_all_tests": True,
37 "run_tests":None,
38 "skips": None,
39 "machine": None,
40 "select_tags":["machine"],
41 "exclude_tags": None
42 },
43 "arch-qemu-full-x86-or-x86_64": {
44 "run_all_tests": True,
45 "run_tests":None,
46 "skips": None,
47 "machine": None,
48 "select_tags":["machine", "toolchain-system"],
49 "exclude_tags": None
50 },
51 "arch-qemu-full-others": {
52 "run_all_tests": True,
53 "run_tests":None,
54 "skips": None,
55 "machine": None,
56 "select_tags":["machine", "toolchain-user"],
57 "exclude_tags": None
58 },
59 "selftest": {
60 "run_all_tests": True,
61 "run_tests":None,
62 "skips": ["distrodata.Distrodata.test_checkpkg", "buildoptions.SourceMirroring.test_yocto_source_mirror", "reproducible"],
63 "machine": None,
64 "select_tags":None,
65 "exclude_tags": ["machine", "toolchain-system", "toolchain-user"]
66 },
67 "bringup": {
68 "run_all_tests": True,
69 "run_tests":None,
70 "skips": ["distrodata.Distrodata.test_checkpkg", "buildoptions.SourceMirroring.test_yocto_source_mirror"],
71 "machine": None,
72 "select_tags":None,
73 "exclude_tags": ["machine", "toolchain-system", "toolchain-user"]
74 }
75}
76
77STATUS_STRINGS = {
78 "None": "No matching test result"
79}
80
81REGRESSIONS_DISPLAY_LIMIT=50
82
83MISSING_TESTS_BANNER = "-------------------------- Missing tests --------------------------"
84ADDITIONAL_DATA_BANNER = "--------------------- Matches and improvements --------------------"
85
86def test_has_at_least_one_matching_tag(test, tag_list):
87 return "oetags" in test and any(oetag in tag_list for oetag in test["oetags"])
88
89def all_tests_have_at_least_one_matching_tag(results, tag_list):
90 return all(test_has_at_least_one_matching_tag(test_result, tag_list) or test_name.startswith("ptestresult") for (test_name, test_result) in results.items())
91
92def any_test_have_any_matching_tag(results, tag_list):
93 return any(test_has_at_least_one_matching_tag(test, tag_list) for test in results.values())
94
95def have_skipped_test(result, test_prefix):
96 return all( result[test]['status'] == "SKIPPED" for test in result if test.startswith(test_prefix))
97
98def have_all_tests_skipped(result, test_prefixes_list):
99 return all(have_skipped_test(result, test_prefix) for test_prefix in test_prefixes_list)
100
101def guess_oeselftest_metadata(results):
102 """
103    When an oeselftest test result lacks OESELFTEST_METADATA, we can try to guess it from the results content.
104    Check the results for specific values (absence/presence of oetags, number and names of executed tests...),
105    and if they match one of the known autobuilder configurations, apply the guessed OESELFTEST_METADATA
106    to allow proper test filtering.
107    This guessing process is tightly coupled to config.json in the autobuilder. It should trigger less and less,
108    as new tests will have OESELFTEST_METADATA properly appended at test reporting time.
109 """
110
111 if len(results) == 1 and "buildoptions.SourceMirroring.test_yocto_source_mirror" in results:
112 return OESELFTEST_METADATA_GUESS_TABLE['trigger-build-posttrigger']
113 elif all(result.startswith("reproducible") for result in results):
114 return OESELFTEST_METADATA_GUESS_TABLE['reproducible']
115 elif all_tests_have_at_least_one_matching_tag(results, ["machine"]):
116 return OESELFTEST_METADATA_GUESS_TABLE['arch-qemu-quick']
117 elif all_tests_have_at_least_one_matching_tag(results, ["machine", "toolchain-system"]):
118 return OESELFTEST_METADATA_GUESS_TABLE['arch-qemu-full-x86-or-x86_64']
119 elif all_tests_have_at_least_one_matching_tag(results, ["machine", "toolchain-user"]):
120 return OESELFTEST_METADATA_GUESS_TABLE['arch-qemu-full-others']
121 elif not any_test_have_any_matching_tag(results, ["machine", "toolchain-user", "toolchain-system"]):
122 if have_all_tests_skipped(results, ["distrodata.Distrodata.test_checkpkg", "buildoptions.SourceMirroring.test_yocto_source_mirror", "reproducible"]):
123 return OESELFTEST_METADATA_GUESS_TABLE['selftest']
124 elif have_all_tests_skipped(results, ["distrodata.Distrodata.test_checkpkg", "buildoptions.SourceMirroring.test_yocto_source_mirror"]):
125 return OESELFTEST_METADATA_GUESS_TABLE['bringup']
126
127 return None
128
129
130def metadata_matches(base_configuration, target_configuration):
131 """
132 For passed base and target, check test type. If test type matches one of
133 properties described in METADATA_MATCH_TABLE, compare metadata if it is
134 present in base. Return true if metadata matches, or if base lacks some
135 data (either TEST_TYPE or the corresponding metadata)
136 """
137 test_type = base_configuration.get('TEST_TYPE')
138 if test_type not in METADATA_MATCH_TABLE:
139 return True
140
141 metadata_key = METADATA_MATCH_TABLE.get(test_type)
142 if target_configuration.get(metadata_key) != base_configuration.get(metadata_key):
143 return False
144
145 return True
146
147
148def machine_matches(base_configuration, target_configuration):
149 return base_configuration.get('MACHINE') == target_configuration.get('MACHINE')
150
151
152def can_be_compared(logger, base, target):
153 """
154    Some tests are not relevant to compare, for example oeselftest runs
155    with different test sets or parameters. Return true if the tests can be
156    compared.
157 """
158 ret = True
159 base_configuration = base['configuration']
160 target_configuration = target['configuration']
161
162 # Older test results lack proper OESELFTEST_METADATA: if not present, try to guess it based on tests results.
163 if base_configuration.get('TEST_TYPE') == 'oeselftest' and 'OESELFTEST_METADATA' not in base_configuration:
164 guess = guess_oeselftest_metadata(base['result'])
165 if guess is None:
166 logger.error(f"ERROR: did not manage to guess oeselftest metadata for {base_configuration['STARTTIME']}")
167 else:
168 logger.debug(f"Enriching {base_configuration['STARTTIME']} with {guess}")
169 base_configuration['OESELFTEST_METADATA'] = guess
170 if target_configuration.get('TEST_TYPE') == 'oeselftest' and 'OESELFTEST_METADATA' not in target_configuration:
171 guess = guess_oeselftest_metadata(target['result'])
172 if guess is None:
173 logger.error(f"ERROR: did not manage to guess oeselftest metadata for {target_configuration['STARTTIME']}")
174 else:
175 logger.debug(f"Enriching {target_configuration['STARTTIME']} with {guess}")
176 target_configuration['OESELFTEST_METADATA'] = guess
177
178 # Test runs with LTP results in should only be compared with other runs with LTP tests in them
179 if base_configuration.get('TEST_TYPE') == 'runtime' and any(result.startswith("ltpresult") for result in base['result']):
180 ret = target_configuration.get('TEST_TYPE') == 'runtime' and any(result.startswith("ltpresult") for result in target['result'])
181
182 return ret and metadata_matches(base_configuration, target_configuration) \
183 and machine_matches(base_configuration, target_configuration)
184
185def get_status_str(raw_status):
186 raw_status_lower = raw_status.lower() if raw_status else "None"
187 return STATUS_STRINGS.get(raw_status_lower, raw_status)
188
189def get_additional_info_line(new_pass_count, new_tests):
190 result=[]
191 if new_tests:
192 result.append(f'+{new_tests} test(s) present')
193 if new_pass_count:
194 result.append(f'+{new_pass_count} test(s) now passing')
195
196 if not result:
197 return ""
198
199 return ' -> ' + ', '.join(result) + '\n'
200
201def compare_result(logger, base_name, target_name, base_result, target_result, display_limit=None):
202 base_result = base_result.get('result')
203 target_result = target_result.get('result')
204 result = {}
205 new_tests = 0
206 regressions = {}
207 resultstring = ""
208 new_tests = 0
209 new_pass_count = 0
210
211 display_limit = int(display_limit) if display_limit else REGRESSIONS_DISPLAY_LIMIT
212
213 if base_result and target_result:
214 for k in base_result:
215 if k in ['ptestresult.rawlogs', 'ptestresult.sections']:
216 continue
217 base_testcase = base_result[k]
218 base_status = base_testcase.get('status')
219 if base_status:
220 target_testcase = target_result.get(k, {})
221 target_status = target_testcase.get('status')
222 if base_status != target_status:
223 result[k] = {'base': base_status, 'target': target_status}
224 else:
225                logger.error('Failed to retrieve base test case status: %s' % k)
226
227        # Also count new tests that were not present in the base results: these
228        # could be newly added tests, but they could also highlight test
229        # renames or fixed faulty ptests
230 for k in target_result:
231 if k not in base_result:
232 new_tests += 1
233 if result:
234 new_pass_count = sum(test['target'] is not None and test['target'].startswith("PASS") for test in result.values())
235 # Print a regression report only if at least one test has a regression status (FAIL, SKIPPED, absent...)
236 if new_pass_count < len(result):
237 resultstring = "Regression: %s\n %s\n" % (base_name, target_name)
238 for k in sorted(result):
239 if not result[k]['target'] or not result[k]['target'].startswith("PASS"):
240 # Differentiate each ptest kind when listing regressions
241 key_parts = k.split('.')
242 key = '.'.join(key_parts[:2]) if k.startswith('ptest') else key_parts[0]
243 # Append new regression to corresponding test family
244 regressions[key] = regressions.setdefault(key, []) + [' %s: %s -> %s\n' % (k, get_status_str(result[k]['base']), get_status_str(result[k]['target']))]
245 resultstring += f" Total: {sum([len(regressions[r]) for r in regressions])} new regression(s):\n"
246 for k in regressions:
247 resultstring += f" {len(regressions[k])} regression(s) for {k}\n"
248 count_to_print=min([display_limit, len(regressions[k])]) if display_limit > 0 else len(regressions[k])
249 resultstring += ''.join(regressions[k][:count_to_print])
250 if count_to_print < len(regressions[k]):
251 resultstring+=' [...]\n'
252 if new_pass_count > 0:
253 resultstring += f' Additionally, {new_pass_count} previously failing test(s) is/are now passing\n'
254 if new_tests > 0:
255 resultstring += f' Additionally, {new_tests} new test(s) is/are present\n'
256 else:
257 resultstring = "%s\n%s\n" % (base_name, target_name)
258 result = None
259 else:
260 resultstring = "%s\n%s\n" % (base_name, target_name)
261
262 if not result:
263 additional_info = get_additional_info_line(new_pass_count, new_tests)
264 if additional_info:
265 resultstring += additional_info
266
267 return result, resultstring
268
269def get_results(logger, source):
270 return resultutils.load_resultsdata(source, configmap=resultutils.regression_map)
271
272def regression(args, logger):
273 base_results = get_results(logger, args.base_result)
274 target_results = get_results(logger, args.target_result)
275
276 regression_common(args, logger, base_results, target_results)
277
278# Some test case naming is poor and contains random strings, particularly lttng/babeltrace.
279# Truncating the test names works since they contain file and line number identifiers
280# which allows us to match them without the random components.
281def fixup_ptest_names(results, logger):
282 for r in results:
283 for i in results[r]:
284 tests = list(results[r][i]['result'].keys())
285 for test in tests:
286 new = None
287 if test.startswith(("ptestresult.lttng-tools.", "ptestresult.babeltrace.", "ptestresult.babeltrace2")) and "_-_" in test:
288 new = test.split("_-_")[0]
289 elif test.startswith(("ptestresult.curl.")) and "__" in test:
290 new = test.split("__")[0]
291 elif test.startswith(("ptestresult.dbus.")) and "__" in test:
292 new = test.split("__")[0]
293 elif test.startswith("ptestresult.binutils") and "build-st-" in test:
294 new = test.split(" ")[0]
295 elif test.startswith("ptestresult.gcc") and "/tmp/runtest." in test:
296 new = ".".join(test.split(".")[:2])
297 if new:
298 results[r][i]['result'][new] = results[r][i]['result'][test]
299 del results[r][i]['result'][test]
300
301def regression_common(args, logger, base_results, target_results):
302 if args.base_result_id:
303 base_results = resultutils.filter_resultsdata(base_results, args.base_result_id)
304 if args.target_result_id:
305 target_results = resultutils.filter_resultsdata(target_results, args.target_result_id)
306
307 fixup_ptest_names(base_results, logger)
308 fixup_ptest_names(target_results, logger)
309
310 matches = []
311 regressions = []
312 notfound = []
313
314 for a in base_results:
315 if a in target_results:
316 base = list(base_results[a].keys())
317 target = list(target_results[a].keys())
318 # We may have multiple base/targets which are for different configurations. Start by
319 # removing any pairs which match
320 for c in base.copy():
321 for b in target.copy():
322 if not can_be_compared(logger, base_results[a][c], target_results[a][b]):
323 continue
324 res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b], args.limit)
325 if not res:
326 matches.append(resstr)
327 base.remove(c)
328 target.remove(b)
329 break
330        # We should only see regressions now; we may not be able to match multiple pairs directly
331 for c in base:
332 for b in target:
333 if not can_be_compared(logger, base_results[a][c], target_results[a][b]):
334 continue
335 res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b], args.limit)
336 if res:
337 regressions.append(resstr)
338 else:
339 notfound.append("%s not found in target" % a)
340 print("\n".join(sorted(regressions)))
341 print("\n" + MISSING_TESTS_BANNER + "\n")
342 print("\n".join(sorted(notfound)))
343 print("\n" + ADDITIONAL_DATA_BANNER + "\n")
344 print("\n".join(sorted(matches)))
345 return 0
346
347def regression_git(args, logger):
348 base_results = {}
349 target_results = {}
350
351 tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
352 repo = GitRepo(args.repo)
353
354 revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=args.branch)
355
356 if args.branch2:
357 revs2 = gitarchive.get_test_revs(logger, repo, tag_name, branch=args.branch2)
358 if not len(revs2):
359 logger.error("No revisions found to compare against")
360 return 1
361 if not len(revs):
362 logger.error("No revision to report on found")
363 return 1
364 else:
365 if len(revs) < 2:
366 logger.error("Only %d tester revisions found, unable to generate report" % len(revs))
367 return 1
368
369 # Pick revisions
370 if args.commit:
371 if args.commit_number:
372 logger.warning("Ignoring --commit-number as --commit was specified")
373 index1 = gitarchive.rev_find(revs, 'commit', args.commit)
374 elif args.commit_number:
375 index1 = gitarchive.rev_find(revs, 'commit_number', args.commit_number)
376 else:
377 index1 = len(revs) - 1
378
379 if args.branch2:
380 revs2.append(revs[index1])
381 index1 = len(revs2) - 1
382 revs = revs2
383
384 if args.commit2:
385 if args.commit_number2:
386 logger.warning("Ignoring --commit-number2 as --commit2 was specified")
387 index2 = gitarchive.rev_find(revs, 'commit', args.commit2)
388 elif args.commit_number2:
389 index2 = gitarchive.rev_find(revs, 'commit_number', args.commit_number2)
390 else:
391 if index1 > 0:
392 index2 = index1 - 1
393                # Find the closest matching commit number for comparison
394                # In future we could check the commit is a common ancestor and
395                # continue back if not, but this is good enough for now
396 while index2 > 0 and revs[index2].commit_number > revs[index1].commit_number:
397 index2 = index2 - 1
398 else:
399 logger.error("Unable to determine the other commit, use "
400 "--commit2 or --commit-number2 to specify it")
401 return 1
402
403 logger.info("Comparing:\n%s\nto\n%s\n" % (revs[index1], revs[index2]))
404
405 base_results = resultutils.git_get_result(repo, revs[index1][2])
406 target_results = resultutils.git_get_result(repo, revs[index2][2])
407
408 regression_common(args, logger, base_results, target_results)
409
410 return 0
411
412def register_commands(subparsers):
413 """Register subcommands from this plugin"""
414
415 parser_build = subparsers.add_parser('regression', help='regression file/directory analysis',
416 description='regression analysis comparing the base set of results to the target results',
417 group='analysis')
418 parser_build.set_defaults(func=regression)
419 parser_build.add_argument('base_result',
420 help='base result file/directory/URL for the comparison')
421 parser_build.add_argument('target_result',
422 help='target result file/directory/URL to compare with')
423 parser_build.add_argument('-b', '--base-result-id', default='',
424 help='(optional) filter the base results to this result ID')
425 parser_build.add_argument('-t', '--target-result-id', default='',
426 help='(optional) filter the target results to this result ID')
427 parser_build.add_argument('-l', '--limit', default=REGRESSIONS_DISPLAY_LIMIT, help="Maximum number of changes to display per test. Can be set to 0 to print all changes")
428
429 parser_build = subparsers.add_parser('regression-git', help='regression git analysis',
430 description='regression analysis comparing base result set to target '
431 'result set',
432 group='analysis')
433 parser_build.set_defaults(func=regression_git)
434 parser_build.add_argument('repo',
435 help='the git repository containing the data')
436 parser_build.add_argument('-b', '--base-result-id', default='',
437                              help='(optional) filter the base results to this result ID; by default '
438                                   'regressions are selected based on configurations')
439    parser_build.add_argument('-t', '--target-result-id', default='',
440                              help='(optional) filter the target results to this result ID; by default '
441                                   'regressions are selected based on configurations')
442
443 parser_build.add_argument('--branch', '-B', default='master', help="Branch to find commit in")
444    parser_build.add_argument('--branch2', help="Branch to find comparison revisions in")
445 parser_build.add_argument('--commit', help="Revision to search for")
446 parser_build.add_argument('--commit-number', help="Revision number to search for, redundant if --commit is specified")
447 parser_build.add_argument('--commit2', help="Revision to compare with")
448 parser_build.add_argument('--commit-number2', help="Revision number to compare with, redundant if --commit2 is specified")
449 parser_build.add_argument('-l', '--limit', default=REGRESSIONS_DISPLAY_LIMIT, help="Maximum number of changes to display per test. Can be set to 0 to print all changes")
450
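To make the comparison above concrete, here is a small self-contained illustration of the per-test status diff that compare_result() builds: only tests whose status changed are kept, and a regression report is printed only when at least one change is not towards PASSED. The test names and statuses below are invented.

base = {"oescripts.OEScriptTests.test_a": {"status": "PASSED"},
        "oescripts.OEScriptTests.test_b": {"status": "PASSED"}}
target = {"oescripts.OEScriptTests.test_a": {"status": "FAILED"},
          "oescripts.OEScriptTests.test_b": {"status": "PASSED"}}

changed = {k: {"base": base[k]["status"], "target": target.get(k, {}).get("status")}
           for k in base
           if base[k]["status"] != target.get(k, {}).get("status")}
print(changed)
# {'oescripts.OEScriptTests.test_a': {'base': 'PASSED', 'target': 'FAILED'}}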
diff --git a/scripts/lib/resulttool/report.py b/scripts/lib/resulttool/report.py
deleted file mode 100644
index 1c100b00ab..0000000000
--- a/scripts/lib/resulttool/report.py
+++ /dev/null
@@ -1,315 +0,0 @@
1# test result tool - report text based test results
2#
3# Copyright (c) 2019, Intel Corporation.
4# Copyright (c) 2019, Linux Foundation
5#
6# SPDX-License-Identifier: GPL-2.0-only
7#
8
9import os
10import glob
11import json
12import resulttool.resultutils as resultutils
13from oeqa.utils.git import GitRepo
14import oeqa.utils.gitarchive as gitarchive
15
16
17class ResultsTextReport(object):
18 def __init__(self):
19 self.ptests = {}
20 self.ltptests = {}
21 self.ltpposixtests = {}
22 self.result_types = {'passed': ['PASSED', 'passed', 'PASS', 'XFAIL'],
23 'failed': ['FAILED', 'failed', 'FAIL', 'ERROR', 'error', 'UNKNOWN', 'XPASS'],
24 'skipped': ['SKIPPED', 'skipped', 'UNSUPPORTED', 'UNTESTED', 'UNRESOLVED']}
25
26
27 def handle_ptest_result(self, k, status, result, machine):
28 if machine not in self.ptests:
29 self.ptests[machine] = {}
30
31 if k == 'ptestresult.sections':
32 # Ensure tests without any test results still show up on the report
33 for suite in result['ptestresult.sections']:
34 if suite not in self.ptests[machine]:
35 self.ptests[machine][suite] = {
36 'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-',
37 'failed_testcases': [], "testcases": set(),
38 }
39 if 'duration' in result['ptestresult.sections'][suite]:
40 self.ptests[machine][suite]['duration'] = result['ptestresult.sections'][suite]['duration']
41 if 'timeout' in result['ptestresult.sections'][suite]:
42 self.ptests[machine][suite]['duration'] += " T"
43 return True
44
45 # process test result
46 try:
47 _, suite, test = k.split(".", 2)
48 except ValueError:
49 return True
50
51 # Handle 'glib-2.0'
52 if 'ptestresult.sections' in result and suite not in result['ptestresult.sections']:
53 try:
54 _, suite, suite1, test = k.split(".", 3)
55 if suite + "." + suite1 in result['ptestresult.sections']:
56 suite = suite + "." + suite1
57 except ValueError:
58 pass
59
60 if suite not in self.ptests[machine]:
61 self.ptests[machine][suite] = {
62 'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-',
63 'failed_testcases': [], "testcases": set(),
64 }
65
66 # do not process duplicate results
67 if test in self.ptests[machine][suite]["testcases"]:
68            print("Warning: duplicate ptest result '{}.{}' for {}".format(suite, test, machine))
69 return False
70
71 for tk in self.result_types:
72 if status in self.result_types[tk]:
73 self.ptests[machine][suite][tk] += 1
74 self.ptests[machine][suite]["testcases"].add(test)
75 return True
76
77 def handle_ltptest_result(self, k, status, result, machine):
78 if machine not in self.ltptests:
79 self.ltptests[machine] = {}
80
81 if k == 'ltpresult.sections':
82 # Ensure tests without any test results still show up on the report
83 for suite in result['ltpresult.sections']:
84 if suite not in self.ltptests[machine]:
85 self.ltptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
86 if 'duration' in result['ltpresult.sections'][suite]:
87 self.ltptests[machine][suite]['duration'] = result['ltpresult.sections'][suite]['duration']
88 if 'timeout' in result['ltpresult.sections'][suite]:
89 self.ltptests[machine][suite]['duration'] += " T"
90 return
91 try:
92 _, suite, test = k.split(".", 2)
93 except ValueError:
94 return
95 # Handle 'glib-2.0'
96 if 'ltpresult.sections' in result and suite not in result['ltpresult.sections']:
97 try:
98 _, suite, suite1, test = k.split(".", 3)
99 if suite + "." + suite1 in result['ltpresult.sections']:
100 suite = suite + "." + suite1
101 except ValueError:
102 pass
103 if suite not in self.ltptests[machine]:
104 self.ltptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
105 for tk in self.result_types:
106 if status in self.result_types[tk]:
107 self.ltptests[machine][suite][tk] += 1
108
109 def handle_ltpposixtest_result(self, k, status, result, machine):
110 if machine not in self.ltpposixtests:
111 self.ltpposixtests[machine] = {}
112
113 if k == 'ltpposixresult.sections':
114 # Ensure tests without any test results still show up on the report
115 for suite in result['ltpposixresult.sections']:
116 if suite not in self.ltpposixtests[machine]:
117 self.ltpposixtests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
118 if 'duration' in result['ltpposixresult.sections'][suite]:
119 self.ltpposixtests[machine][suite]['duration'] = result['ltpposixresult.sections'][suite]['duration']
120 return
121 try:
122 _, suite, test = k.split(".", 2)
123 except ValueError:
124 return
125 # Handle 'glib-2.0'
126 if 'ltpposixresult.sections' in result and suite not in result['ltpposixresult.sections']:
127 try:
128 _, suite, suite1, test = k.split(".", 3)
129 if suite + "." + suite1 in result['ltpposixresult.sections']:
130 suite = suite + "." + suite1
131 except ValueError:
132 pass
133 if suite not in self.ltpposixtests[machine]:
134 self.ltpposixtests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
135 for tk in self.result_types:
136 if status in self.result_types[tk]:
137 self.ltpposixtests[machine][suite][tk] += 1
138
139 def get_aggregated_test_result(self, logger, testresult, machine):
140 test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
141 result = testresult.get('result', [])
142 for k in result:
143 test_status = result[k].get('status', [])
144 if k.startswith("ptestresult."):
145 if not self.handle_ptest_result(k, test_status, result, machine):
146 continue
147 elif k.startswith("ltpresult."):
148 self.handle_ltptest_result(k, test_status, result, machine)
149 elif k.startswith("ltpposixresult."):
150 self.handle_ltpposixtest_result(k, test_status, result, machine)
151
152 # process result if it was not skipped by a handler
153 for tk in self.result_types:
154 if test_status in self.result_types[tk]:
155 test_count_report[tk] += 1
156 if test_status in self.result_types['failed']:
157 test_count_report['failed_testcases'].append(k)
158 return test_count_report
159
160 def print_test_report(self, template_file_name, test_count_reports):
161 from jinja2 import Environment, FileSystemLoader
162 script_path = os.path.dirname(os.path.realpath(__file__))
163 file_loader = FileSystemLoader(script_path + '/template')
164 env = Environment(loader=file_loader, trim_blocks=True)
165 template = env.get_template(template_file_name)
166 havefailed = False
167 reportvalues = []
168 machines = []
169 cols = ['passed', 'failed', 'skipped']
170 maxlen = {'passed' : 0, 'failed' : 0, 'skipped' : 0, 'result_id': 0, 'testseries' : 0, 'ptest' : 0 ,'ltptest': 0, 'ltpposixtest': 0}
171 for line in test_count_reports:
172 total_tested = line['passed'] + line['failed'] + line['skipped']
173 vals = {}
174 vals['result_id'] = line['result_id']
175 vals['testseries'] = line['testseries']
176 vals['sort'] = line['testseries'] + "_" + line['result_id']
177 vals['failed_testcases'] = line['failed_testcases']
178 for k in cols:
179 if total_tested:
180 vals[k] = "%d (%s%%)" % (line[k], format(line[k] / total_tested * 100, '.0f'))
181 else:
182 vals[k] = "0 (0%)"
183 for k in maxlen:
184 if k in vals and len(vals[k]) > maxlen[k]:
185 maxlen[k] = len(vals[k])
186 reportvalues.append(vals)
187 if line['failed_testcases']:
188 havefailed = True
189 if line['machine'] not in machines:
190 machines.append(line['machine'])
191 reporttotalvalues = {}
192 for k in cols:
193 reporttotalvalues[k] = '%s' % sum([line[k] for line in test_count_reports])
194 reporttotalvalues['count'] = '%s' % len(test_count_reports)
195 for (machine, report) in self.ptests.items():
196 for ptest in self.ptests[machine]:
197 if len(ptest) > maxlen['ptest']:
198 maxlen['ptest'] = len(ptest)
199 for (machine, report) in self.ltptests.items():
200 for ltptest in self.ltptests[machine]:
201 if len(ltptest) > maxlen['ltptest']:
202 maxlen['ltptest'] = len(ltptest)
203 for (machine, report) in self.ltpposixtests.items():
204 for ltpposixtest in self.ltpposixtests[machine]:
205 if len(ltpposixtest) > maxlen['ltpposixtest']:
206 maxlen['ltpposixtest'] = len(ltpposixtest)
207 output = template.render(reportvalues=reportvalues,
208 reporttotalvalues=reporttotalvalues,
209 havefailed=havefailed,
210 machines=machines,
211 ptests=self.ptests,
212 ltptests=self.ltptests,
213 ltpposixtests=self.ltpposixtests,
214 maxlen=maxlen)
215 print(output)
216
217 def view_test_report(self, logger, source_dir, branch, commit, tag, use_regression_map, raw_test, selected_test_case_only):
218 def print_selected_testcase_result(testresults, selected_test_case_only):
219 for testsuite in testresults:
220 for resultid in testresults[testsuite]:
221 result = testresults[testsuite][resultid]['result']
222 test_case_result = result.get(selected_test_case_only, {})
223 if test_case_result.get('status'):
224 print('Found selected test case result for %s from %s' % (selected_test_case_only,
225 resultid))
226 print(test_case_result['status'])
227 else:
228 print('Could not find selected test case result for %s from %s' % (selected_test_case_only,
229 resultid))
230 if test_case_result.get('log'):
231 print(test_case_result['log'])
232 test_count_reports = []
233 configmap = resultutils.store_map
234 if use_regression_map:
235 configmap = resultutils.regression_map
236 if commit:
237 if tag:
238 logger.warning("Ignoring --tag as --commit was specified")
239 tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
240 repo = GitRepo(source_dir)
241 revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=branch)
242 rev_index = gitarchive.rev_find(revs, 'commit', commit)
243 testresults = resultutils.git_get_result(repo, revs[rev_index][2], configmap=configmap)
244 elif tag:
245 repo = GitRepo(source_dir)
246 testresults = resultutils.git_get_result(repo, [tag], configmap=configmap)
247 else:
248 testresults = resultutils.load_resultsdata(source_dir, configmap=configmap)
249 if raw_test:
250 raw_results = {}
251 for testsuite in testresults:
252 result = testresults[testsuite].get(raw_test, {})
253 if result:
254 raw_results[testsuite] = {raw_test: result}
255 if raw_results:
256 if selected_test_case_only:
257 print_selected_testcase_result(raw_results, selected_test_case_only)
258 else:
259 print(json.dumps(raw_results, sort_keys=True, indent=1))
260 else:
261 print('Could not find raw test result for %s' % raw_test)
262 return 0
263 if selected_test_case_only:
264 print_selected_testcase_result(testresults, selected_test_case_only)
265 return 0
266 for testsuite in testresults:
267 for resultid in testresults[testsuite]:
268 skip = False
269 result = testresults[testsuite][resultid]
270 machine = result['configuration']['MACHINE']
271
272 # Check to see if there is already results for these kinds of tests for the machine
273 for key in result['result'].keys():
274 testtype = str(key).split('.')[0]
275                    if ((machine in self.ltptests and testtype == "ltpresult" and self.ltptests[machine]) or
276 (machine in self.ltpposixtests and testtype == "ltpposixresult" and self.ltpposixtests[machine])):
277 print("Already have test results for %s on %s, skipping %s" %(str(key).split('.')[0], machine, resultid))
278 skip = True
279 break
280 if skip:
281 break
282
283 test_count_report = self.get_aggregated_test_result(logger, result, machine)
284 test_count_report['machine'] = machine
285 test_count_report['testseries'] = result['configuration']['TESTSERIES']
286 test_count_report['result_id'] = resultid
287 test_count_reports.append(test_count_report)
288 self.print_test_report('test_report_full_text.txt', test_count_reports)
289
290def report(args, logger):
291 report = ResultsTextReport()
292 report.view_test_report(logger, args.source_dir, args.branch, args.commit, args.tag, args.use_regression_map,
293 args.raw_test_only, args.selected_test_case_only)
294 return 0
295
296def register_commands(subparsers):
297 """Register subcommands from this plugin"""
298 parser_build = subparsers.add_parser('report', help='summarise test results',
299 description='print a text-based summary of the test results',
300 group='analysis')
301 parser_build.set_defaults(func=report)
302 parser_build.add_argument('source_dir',
303 help='source file/directory/URL that contain the test result files to summarise')
304 parser_build.add_argument('--branch', '-B', default='master', help="Branch to find commit in")
305 parser_build.add_argument('--commit', help="Revision to report")
306 parser_build.add_argument('-t', '--tag', default='',
307 help='source_dir is a git repository, report on the tag specified from that repository')
308 parser_build.add_argument('-m', '--use_regression_map', action='store_true',
309 help='instead of the default "store_map", use the "regression_map" for report')
310 parser_build.add_argument('-r', '--raw_test_only', default='',
311 help='output raw test result only for the user provided test result id')
312 parser_build.add_argument('-s', '--selected_test_case_only', default='',
313 help='output selected test case result for the user provided test case id, if both test '
314 'result id and test case id are provided then output the selected test case result '
315 'from the provided test result id')
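A brief standalone sketch of the status bucketing that get_aggregated_test_result() above relies on; the result_types table is copied from the class definition, while the list of statuses fed through it is invented.

result_types = {'passed': ['PASSED', 'passed', 'PASS', 'XFAIL'],
                'failed': ['FAILED', 'failed', 'FAIL', 'ERROR', 'error', 'UNKNOWN', 'XPASS'],
                'skipped': ['SKIPPED', 'skipped', 'UNSUPPORTED', 'UNTESTED', 'UNRESOLVED']}

counts = {'passed': 0, 'failed': 0, 'skipped': 0}
for status in ['PASSED', 'XFAIL', 'SKIPPED', 'ERROR']:      # hypothetical run
    for bucket, statuses in result_types.items():
        if status in statuses:
            counts[bucket] += 1
print(counts)   # -> {'passed': 2, 'failed': 1, 'skipped': 1}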
diff --git a/scripts/lib/resulttool/resultutils.py b/scripts/lib/resulttool/resultutils.py
deleted file mode 100644
index b8fc79a6ac..0000000000
--- a/scripts/lib/resulttool/resultutils.py
+++ /dev/null
@@ -1,274 +0,0 @@
1# resulttool - common library/utility functions
2#
3# Copyright (c) 2019, Intel Corporation.
4# Copyright (c) 2019, Linux Foundation
5#
6# SPDX-License-Identifier: GPL-2.0-only
7#
8
9import os
10import base64
11import zlib
12import json
13import scriptpath
14import copy
15import urllib.request
16import posixpath
17import logging
18scriptpath.add_oe_lib_path()
19
20logger = logging.getLogger('resulttool')
21
22flatten_map = {
23 "oeselftest": [],
24 "runtime": [],
25 "sdk": [],
26 "sdkext": [],
27 "manual": []
28}
29regression_map = {
30 "oeselftest": ['TEST_TYPE', 'MACHINE'],
31 "runtime": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'IMAGE_PKGTYPE', 'DISTRO'],
32 "sdk": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE'],
33 "sdkext": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE'],
34 "manual": ['TEST_TYPE', 'TEST_MODULE', 'IMAGE_BASENAME', 'MACHINE']
35}
36store_map = {
37 "oeselftest": ['TEST_TYPE', 'TESTSERIES', 'MACHINE'],
38 "runtime": ['TEST_TYPE', 'DISTRO', 'MACHINE', 'IMAGE_BASENAME'],
39 "sdk": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME'],
40 "sdkext": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME'],
41 "manual": ['TEST_TYPE', 'TEST_MODULE', 'MACHINE', 'IMAGE_BASENAME']
42}
43
44rawlog_sections = {
45 "ptestresult.rawlogs": "ptest",
46 "ltpresult.rawlogs": "ltp",
47 "ltpposixresult.rawlogs": "ltpposix"
48}
49
50def is_url(p):
51 """
52 Helper for determining if the given path is a URL
53 """
54 return p.startswith('http://') or p.startswith('https://')
55
56extra_configvars = {'TESTSERIES': ''}
57
58#
59# Load the json file and append the results data into the provided results dict
60#
61def append_resultsdata(results, f, configmap=store_map, configvars=extra_configvars):
62 if type(f) is str:
63 if is_url(f):
64 with urllib.request.urlopen(f) as response:
65 data = json.loads(response.read().decode('utf-8'))
66 url = urllib.parse.urlparse(f)
67 testseries = posixpath.basename(posixpath.dirname(url.path))
68 else:
69 with open(f, "r") as filedata:
70 try:
71 data = json.load(filedata)
72 except json.decoder.JSONDecodeError:
73 print("Cannot decode {}. Possible corruption. Skipping.".format(f))
74 data = ""
75 testseries = os.path.basename(os.path.dirname(f))
76 else:
77 data = f
78 for res in data:
79 if "configuration" not in data[res] or "result" not in data[res]:
80 raise ValueError("Test results data without configuration or result section?")
81 for config in configvars:
82 if config == "TESTSERIES" and "TESTSERIES" not in data[res]["configuration"]:
83 data[res]["configuration"]["TESTSERIES"] = testseries
84 continue
85 if config not in data[res]["configuration"]:
86 data[res]["configuration"][config] = configvars[config]
87 testtype = data[res]["configuration"].get("TEST_TYPE")
88 if testtype not in configmap:
89 raise ValueError("Unknown test type %s" % testtype)
90 testpath = "/".join(data[res]["configuration"].get(i) for i in configmap[testtype])
91 if testpath not in results:
92 results[testpath] = {}
93 results[testpath][res] = data[res]
94
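
The configmap decides where each test run lands in the results dictionary: the configuration values named in the map entry for the run's TEST_TYPE are joined with "/" to form the key. A standalone sketch of that bucketing, with a made-up run name and configuration:

    store_map = {
        "runtime": ['TEST_TYPE', 'DISTRO', 'MACHINE', 'IMAGE_BASENAME'],
    }

    # Shape of one entry in a testresults.json file (values invented here).
    data = {
        "runtime_core-image-minimal_qemux86-64_20240101": {
            "configuration": {
                "TEST_TYPE": "runtime",
                "DISTRO": "poky",
                "MACHINE": "qemux86-64",
                "IMAGE_BASENAME": "core-image-minimal",
            },
            "result": {},
        }
    }

    results = {}
    for res in data:
        config = data[res]["configuration"]
        testpath = "/".join(config.get(i) for i in store_map[config["TEST_TYPE"]])
        results.setdefault(testpath, {})[res] = data[res]

    print(list(results))  # ['runtime/poky/qemux86-64/core-image-minimal']
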
95#
96# Walk a directory and find/load results data
97# or load directly from a file
98#
99def load_resultsdata(source, configmap=store_map, configvars=extra_configvars):
100 results = {}
101 if is_url(source) or os.path.isfile(source):
102 append_resultsdata(results, source, configmap, configvars)
103 return results
104 for root, dirs, files in os.walk(source):
105 for name in files:
106 f = os.path.join(root, name)
107 if name == "testresults.json":
108 append_resultsdata(results, f, configmap, configvars)
109 return results
110
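
A hypothetical usage sketch, assuming the poky scripts/lib directory is on sys.path so that resultutils (and the scriptpath module it imports) resolve; the paths below are placeholders:

    import sys
    sys.path.insert(0, "/path/to/poky/scripts/lib")

    import resulttool.resultutils as resultutils

    # Accepts a URL, a single testresults.json file, or a directory to walk.
    results = resultutils.load_resultsdata("/path/to/build/tmp/log/oeqa")
    for testpath, runs in results.items():
        print(testpath, "->", len(runs), "test run(s)")
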
111def filter_resultsdata(results, resultid):
112 newresults = {}
113 for r in results:
114 for i in results[r]:
115            if i == resultid:
116 newresults[r] = {}
117 newresults[r][i] = results[r][i]
118 return newresults
119
120def strip_logs(results):
121 newresults = copy.deepcopy(results)
122 for res in newresults:
123 if 'result' not in newresults[res]:
124 continue
125 for logtype in rawlog_sections:
126 if logtype in newresults[res]['result']:
127 del newresults[res]['result'][logtype]
128 if 'ptestresult.sections' in newresults[res]['result']:
129 for i in newresults[res]['result']['ptestresult.sections']:
130 if 'log' in newresults[res]['result']['ptestresult.sections'][i]:
131 del newresults[res]['result']['ptestresult.sections'][i]['log']
132 return newresults
133
134# Excessive precision in timing numbers isn't useful and just clutters the
135# logs. For numbers over 1, trim to 3 decimal places; for numbers less than 1,
136# trim to 4 significant digits
137def trim_durations(results):
138 for res in results:
139 if 'result' not in results[res]:
140 continue
141 for entry in results[res]['result']:
142 if 'duration' in results[res]['result'][entry]:
143 duration = results[res]['result'][entry]['duration']
144 if duration > 1:
145 results[res]['result'][entry]['duration'] = float("%.3f" % duration)
146 elif duration < 1:
147 results[res]['result'][entry]['duration'] = float("%.4g" % duration)
148 return results
149
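
A quick worked check of what the two format strings produce (sample values chosen for illustration):

    print(float("%.3f" % 12.3456789))   # 12.346    -> 3 decimal places
    print(float("%.3f" % 1.0000004))    # 1.0
    print(float("%.4g" % 0.123456789))  # 0.1235    -> 4 significant digits
    print(float("%.4g" % 0.000123456))  # 0.0001235
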
150def handle_cleanups(results):
151 # Remove pointless path duplication from old format reproducibility results
152 for res2 in results:
153 try:
154 section = results[res2]['result']['reproducible']['files']
155 for pkgtype in section:
156 for filelist in section[pkgtype].copy():
157 if section[pkgtype][filelist] and type(section[pkgtype][filelist][0]) == dict:
158 newlist = []
159 for entry in section[pkgtype][filelist]:
160 newlist.append(entry["reference"].split("/./")[1])
161 section[pkgtype][filelist] = newlist
162
163 except KeyError:
164 pass
165 # Remove pointless duplicate rawlogs data
166 try:
167 del results[res2]['result']['reproducible.rawlogs']
168 except KeyError:
169 pass
170
171def decode_log(logdata):
172 if isinstance(logdata, str):
173 return logdata
174 elif isinstance(logdata, dict):
175 if "compressed" in logdata:
176 data = logdata.get("compressed")
177 data = base64.b64decode(data.encode("utf-8"))
178 data = zlib.decompress(data)
179 return data.decode("utf-8", errors='ignore')
180 return None
181
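
decode_log() accepts either a plain string or a dict whose "compressed" member holds zlib-compressed, base64-encoded UTF-8 text. A small round-trip sketch, independent of the module above, that builds such a dict and then decodes it the same way the function does:

    import base64
    import zlib

    log_text = "ptest-runner: hello\n" * 3

    # Build the compressed representation the decoder understands ...
    logdata = {
        "compressed": base64.b64encode(zlib.compress(log_text.encode("utf-8"))).decode("utf-8")
    }

    # ... and decode it the way decode_log() does.
    data = zlib.decompress(base64.b64decode(logdata["compressed"].encode("utf-8")))
    assert data.decode("utf-8", errors="ignore") == log_text
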
182def generic_get_log(sectionname, results, section):
183 if sectionname not in results:
184 return None
185 if section not in results[sectionname]:
186 return None
187
188 ptest = results[sectionname][section]
189 if 'log' not in ptest:
190 return None
191 return decode_log(ptest['log'])
192
193def ptestresult_get_log(results, section):
194 return generic_get_log('ptestresult.sections', results, section)
195
196def generic_get_rawlogs(sectname, results):
197 if sectname not in results:
198 return None
199 if 'log' not in results[sectname]:
200 return None
201 return decode_log(results[sectname]['log'])
202
203def save_resultsdata(results, destdir, fn="testresults.json", ptestjson=False, ptestlogs=False):
204 for res in results:
205 if res:
206 dst = destdir + "/" + res + "/" + fn
207 else:
208 dst = destdir + "/" + fn
209 os.makedirs(os.path.dirname(dst), exist_ok=True)
210 resultsout = results[res]
211 if not ptestjson:
212 resultsout = strip_logs(results[res])
213 trim_durations(resultsout)
214 handle_cleanups(resultsout)
215 with open(dst, 'w') as f:
216 f.write(json.dumps(resultsout, sort_keys=True, indent=1))
217 for res2 in results[res]:
218 if ptestlogs and 'result' in results[res][res2]:
219 seriesresults = results[res][res2]['result']
220 for logtype in rawlog_sections:
221 logdata = generic_get_rawlogs(logtype, seriesresults)
222 if logdata is not None:
223 logger.info("Extracting " + rawlog_sections[logtype] + "-raw.log")
224 with open(dst.replace(fn, rawlog_sections[logtype] + "-raw.log"), "w+") as f:
225 f.write(logdata)
226 if 'ptestresult.sections' in seriesresults:
227 for i in seriesresults['ptestresult.sections']:
228 sectionlog = ptestresult_get_log(seriesresults, i)
229 if sectionlog is not None:
230 with open(dst.replace(fn, "ptest-%s.log" % i), "w+") as f:
231 f.write(sectionlog)
232
233def git_get_result(repo, tags, configmap=store_map):
234 git_objs = []
235 for tag in tags:
236 files = repo.run_cmd(['ls-tree', "--name-only", "-r", tag]).splitlines()
237 git_objs.extend([tag + ':' + f for f in files if f.endswith("testresults.json")])
238
239 def parse_json_stream(data):
240 """Parse multiple concatenated JSON objects"""
241 objs = []
242 json_d = ""
243 for line in data.splitlines():
244 if line == '}{':
245 json_d += '}'
246 objs.append(json.loads(json_d))
247 json_d = '{'
248 else:
249 json_d += line
250 objs.append(json.loads(json_d))
251 return objs
252
253 # Optimize by reading all data with one git command
254 results = {}
255 for obj in parse_json_stream(repo.run_cmd(['show'] + git_objs + ['--'])):
256 append_resultsdata(results, obj, configmap=configmap)
257
258 return results
259
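
git show prints the requested testresults.json blobs back to back, and save_resultsdata() writes the files without a trailing newline, so the closing '}' of one object and the opening '{' of the next typically share a single '}{' line. A standalone copy of the parsing helper run against synthetic input to show that boundary handling:

    import json

    def parse_json_stream(data):
        # Split concatenated, pretty-printed JSON objects on the '}{' boundary.
        objs = []
        json_d = ""
        for line in data.splitlines():
            if line == '}{':
                json_d += '}'
                objs.append(json.loads(json_d))
                json_d = '{'
            else:
                json_d += line
        objs.append(json.loads(json_d))
        return objs

    # Two pretty-printed objects concatenated with no separator.
    stream = json.dumps({"a": 1}, indent=1) + json.dumps({"b": 2}, indent=1)
    print(parse_json_stream(stream))  # [{'a': 1}, {'b': 2}]
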
260def test_run_results(results):
261 """
262 Convenient generator function that iterates over all test runs that have a
263 result section.
264
265 Generates a tuple of:
266 (result json file path, test run name, test run (dict), test run "results" (dict))
267 for each test run that has a "result" section
268 """
269 for path in results:
270 for run_name, test_run in results[path].items():
271            if 'result' not in test_run:
272 continue
273 yield path, run_name, test_run, test_run['result']
274
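
A short usage sketch for the generator, assuming resultutils is importable as in the earlier sketch; the results dict is hand-built in the shape load_resultsdata() returns, so the snippet runs without a real build tree (all names and values invented):

    import resulttool.resultutils as resultutils

    # { testpath: { run_name: { "configuration": ..., "result": ... } } }
    results = {
        "runtime/poky/qemux86-64/core-image-minimal": {
            "runtime_core-image-minimal_qemux86-64_20240101": {
                "configuration": {"TEST_TYPE": "runtime"},
                "result": {
                    "ping.PingTest.test_ping": {"status": "PASSED", "duration": 0.1},
                    "ssh.SSHTest.test_ssh": {"status": "FAILED", "duration": 2.5},
                },
            }
        }
    }

    for path, run_name, test_run, run_results in resultutils.test_run_results(results):
        failed = [t for t, r in run_results.items() if r["status"] == "FAILED"]
        print(run_name, "failed:", failed)
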
diff --git a/scripts/lib/resulttool/store.py b/scripts/lib/resulttool/store.py
deleted file mode 100644
index b143334e69..0000000000
--- a/scripts/lib/resulttool/store.py
+++ /dev/null
@@ -1,125 +0,0 @@
1# resulttool - store test results
2#
3# Copyright (c) 2019, Intel Corporation.
4# Copyright (c) 2019, Linux Foundation
5#
6# SPDX-License-Identifier: GPL-2.0-only
7#
8
9import tempfile
10import os
11import subprocess
12import json
13import shutil
14import scriptpath
15scriptpath.add_bitbake_lib_path()
16scriptpath.add_oe_lib_path()
17import resulttool.resultutils as resultutils
18import oeqa.utils.gitarchive as gitarchive
19
20
21def store(args, logger):
22 tempdir = tempfile.mkdtemp(prefix='testresults.')
23 try:
24 configvars = resultutils.extra_configvars.copy()
25 if args.executed_by:
26 configvars['EXECUTED_BY'] = args.executed_by
27 if args.extra_test_env:
28 configvars['EXTRA_TEST_ENV'] = args.extra_test_env
29 results = {}
30 logger.info('Reading files from %s' % args.source)
31 if resultutils.is_url(args.source) or os.path.isfile(args.source):
32 resultutils.append_resultsdata(results, args.source, configvars=configvars)
33 else:
34 for root, dirs, files in os.walk(args.source):
35 for name in files:
36 f = os.path.join(root, name)
37 if name == "testresults.json":
38 resultutils.append_resultsdata(results, f, configvars=configvars)
39 elif args.all:
40 dst = f.replace(args.source, tempdir + "/")
41 os.makedirs(os.path.dirname(dst), exist_ok=True)
42 shutil.copyfile(f, dst)
43
44 revisions = {}
45
46 if not results and not args.all:
47 if args.allow_empty:
48 logger.info("No results found to store")
49 return 0
50 logger.error("No results found to store")
51 return 1
52
53 # Find the branch/commit/commit_count and ensure they all match
54 for suite in results:
55 for result in results[suite]:
56 config = results[suite][result]['configuration']['LAYERS']['meta']
57 revision = (config['commit'], config['branch'], str(config['commit_count']))
58 if revision not in revisions:
59 revisions[revision] = {}
60 if suite not in revisions[revision]:
61 revisions[revision][suite] = {}
62 revisions[revision][suite][result] = results[suite][result]
63
64 logger.info("Found %d revisions to store" % len(revisions))
65
66 for r in revisions:
67 results = revisions[r]
68 if args.revision and r[0] != args.revision:
69 logger.info('skipping %s as non-matching' % r[0])
70 continue
71 keywords = {'commit': r[0], 'branch': r[1], "commit_count": r[2]}
72 subprocess.check_call(["find", tempdir, "-name", "testresults.json", "!", "-path", "./.git/*", "-delete"])
73 resultutils.save_resultsdata(results, tempdir, ptestlogs=True)
74
75 logger.info('Storing test result into git repository %s' % args.git_dir)
76
77 excludes = []
78 if args.logfile_archive:
79 excludes = ['*.log', "*.log.zst"]
80
81 tagname = gitarchive.gitarchive(tempdir, args.git_dir, False, False,
82 "Results of {branch}:{commit}", "branch: {branch}\ncommit: {commit}", "{branch}",
83 False, "{branch}/{commit_count}-g{commit}/{tag_number}",
84 'Test run #{tag_number} of {branch}:{commit}', '',
85 excludes, [], False, keywords, logger)
86
87 if args.logfile_archive:
88 logdir = args.logfile_archive + "/" + tagname
89 shutil.copytree(tempdir, logdir)
90 os.chmod(logdir, 0o755)
91 for root, dirs, files in os.walk(logdir):
92 for name in files:
93 if not name.endswith(".log"):
94 continue
95 f = os.path.join(root, name)
96 subprocess.run(["zstd", f, "--rm"], check=True, capture_output=True)
97 finally:
98 subprocess.check_call(["rm", "-rf", tempdir])
99
100 return 0
101
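
The grouping step above keys every suite's runs by the (commit, branch, commit_count) tuple taken from the LAYERS metadata configuration, so one git tag is produced per metadata revision. A reduced sketch of just that regrouping, with invented data:

    results = {
        "runtime/poky/qemux86-64/core-image-minimal": {
            "run-1": {"configuration": {"LAYERS": {"meta": {
                "commit": "abc123", "branch": "master", "commit_count": 61000}}}},
            "run-2": {"configuration": {"LAYERS": {"meta": {
                "commit": "def456", "branch": "master", "commit_count": 61010}}}},
        }
    }

    revisions = {}
    for suite in results:
        for result in results[suite]:
            config = results[suite][result]['configuration']['LAYERS']['meta']
            revision = (config['commit'], config['branch'], str(config['commit_count']))
            revisions.setdefault(revision, {}).setdefault(suite, {})[result] = results[suite][result]

    for (commit, branch, count), suites in revisions.items():
        print(branch, commit, count, "->", sum(len(runs) for runs in suites.values()), "run(s)")
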
102def register_commands(subparsers):
103 """Register subcommands from this plugin"""
104 parser_build = subparsers.add_parser('store', help='store test results into a git repository',
105 description='takes a results file or directory of results files and stores '
106 'them into the destination git repository, splitting out the results '
107 'files as configured',
108 group='setup')
109 parser_build.set_defaults(func=store)
110 parser_build.add_argument('source',
111                              help='source file/directory/URL containing the test result files to be stored')
112 parser_build.add_argument('git_dir',
113 help='the location of the git repository to store the results in')
114 parser_build.add_argument('-a', '--all', action='store_true',
115 help='include all files, not just testresults.json files')
116 parser_build.add_argument('-e', '--allow-empty', action='store_true',
117 help='don\'t error if no results to store are found')
118 parser_build.add_argument('-x', '--executed-by', default='',
119 help='add executed-by configuration to each result file')
120 parser_build.add_argument('-t', '--extra-test-env', default='',
121 help='add extra test environment data to each result file configuration')
122 parser_build.add_argument('-r', '--revision', default='',
123 help='only store data for the specified revision')
124 parser_build.add_argument('-l', '--logfile-archive', default='',
125 help='directory to separately archive log files along with a copy of the results')
diff --git a/scripts/lib/resulttool/template/test_report_full_text.txt b/scripts/lib/resulttool/template/test_report_full_text.txt
deleted file mode 100644
index 2efba2ef6f..0000000000
--- a/scripts/lib/resulttool/template/test_report_full_text.txt
+++ /dev/null
@@ -1,79 +0,0 @@
1==============================================================================================================
2Test Result Status Summary (Counts/Percentages sorted by testseries, ID)
3==============================================================================================================
4--------------------------------------------------------------------------------------------------------------
5{{ 'Test Series'.ljust(maxlen['testseries']) }} | {{ 'ID'.ljust(maxlen['result_id']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }}
6--------------------------------------------------------------------------------------------------------------
7{% for report in reportvalues |sort(attribute='sort') %}
8{{ report.testseries.ljust(maxlen['testseries']) }} | {{ report.result_id.ljust(maxlen['result_id']) }} | {{ (report.passed|string).ljust(maxlen['passed']) }} | {{ (report.failed|string).ljust(maxlen['failed']) }} | {{ (report.skipped|string).ljust(maxlen['skipped']) }}
9{% endfor %}
10--------------------------------------------------------------------------------------------------------------
11{{ 'Total'.ljust(maxlen['testseries']) }} | {{ reporttotalvalues['count'].ljust(maxlen['result_id']) }} | {{ reporttotalvalues['passed'].ljust(maxlen['passed']) }} | {{ reporttotalvalues['failed'].ljust(maxlen['failed']) }} | {{ reporttotalvalues['skipped'].ljust(maxlen['skipped']) }}
12--------------------------------------------------------------------------------------------------------------
13
14{% for machine in machines %}
15{% if ptests[machine] %}
16==============================================================================================================
17{{ machine }} PTest Result Summary
18==============================================================================================================
19--------------------------------------------------------------------------------------------------------------
20{{ 'Recipe'.ljust(maxlen['ptest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
21--------------------------------------------------------------------------------------------------------------
22{% for ptest in ptests[machine] |sort %}
23{{ ptest.ljust(maxlen['ptest']) }} | {{ (ptests[machine][ptest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ptests[machine][ptest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ptests[machine][ptest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ptests[machine][ptest]['duration']|string) }}
24{% endfor %}
25--------------------------------------------------------------------------------------------------------------
26
27{% endif %}
28{% endfor %}
29
30{% for machine in machines %}
31{% if ltptests[machine] %}
32==============================================================================================================
33{{ machine }} Ltp Test Result Summary
34==============================================================================================================
35--------------------------------------------------------------------------------------------------------------
36{{ 'Recipe'.ljust(maxlen['ltptest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
37--------------------------------------------------------------------------------------------------------------
38{% for ltptest in ltptests[machine] |sort %}
39{{ ltptest.ljust(maxlen['ltptest']) }} | {{ (ltptests[machine][ltptest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ltptests[machine][ltptest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ltptests[machine][ltptest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ltptests[machine][ltptest]['duration']|string) }}
40{% endfor %}
41--------------------------------------------------------------------------------------------------------------
42
43{% endif %}
44{% endfor %}
45
46{% for machine in machines %}
47{% if ltpposixtests[machine] %}
48==============================================================================================================
49{{ machine }} Ltp Posix Result Summary
50==============================================================================================================
51--------------------------------------------------------------------------------------------------------------
52{{ 'Recipe'.ljust(maxlen['ltpposixtest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
53--------------------------------------------------------------------------------------------------------------
54{% for ltpposixtest in ltpposixtests[machine] |sort %}
55{{ ltpposixtest.ljust(maxlen['ltpposixtest']) }} | {{ (ltpposixtests[machine][ltpposixtest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ltpposixtests[machine][ltpposixtest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ltpposixtests[machine][ltpposixtest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ltpposixtests[machine][ltpposixtest]['duration']|string) }}
56{% endfor %}
57--------------------------------------------------------------------------------------------------------------
58
59{% endif %}
60{% endfor %}
61
62
63==============================================================================================================
64Failed test cases (sorted by testseries, ID)
65==============================================================================================================
66{% if havefailed %}
67--------------------------------------------------------------------------------------------------------------
68{% for report in reportvalues |sort(attribute='sort') %}
69{% if report.failed_testcases %}
70testseries | result_id : {{ report.testseries }} | {{ report.result_id }}
71{% for testcase in report.failed_testcases %}
72 {{ testcase }}
73{% endfor %}
74{% endif %}
75{% endfor %}
76--------------------------------------------------------------------------------------------------------------
77{% else %}
78There were no test failures
79{% endif %}
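
report.py, removed above, is what renders this Jinja2 template. As a rough, standalone illustration of the context it expects, the sketch below renders the template with an invented, minimal context that only exercises the summary table and the "no failures" branch. It assumes a checkout where the template file still exists and that jinja2 is installed:

    from jinja2 import Environment, FileSystemLoader

    env = Environment(loader=FileSystemLoader("scripts/lib/resulttool/template"))
    template = env.get_template("test_report_full_text.txt")

    context = {
        "maxlen": {"testseries": 20, "result_id": 40, "passed": 10,
                   "failed": 10, "skipped": 10},
        "reportvalues": [{
            "sort": "runtime",
            "testseries": "poky",
            "result_id": "runtime_core-image-minimal_qemux86-64",
            "passed": 100, "failed": 0, "skipped": 2,
            "failed_testcases": [],
        }],
        "reporttotalvalues": {"count": "1", "passed": "100",
                              "failed": "0", "skipped": "2"},
        "machines": [],
        "ptests": {}, "ltptests": {}, "ltpposixtests": {},
        "havefailed": False,
    }
    print(template.render(context))
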