diff options
author | Yeoh Ee Peng <ee.peng.yeoh@intel.com> | 2019-02-14 13:50:37 +0800 |
---|---|---|
committer | Richard Purdie <richard.purdie@linuxfoundation.org> | 2019-02-21 12:34:00 +0000 |
commit | 1fd5ebdb06224489ad056e261962e23ece36fc87 (patch) | |
tree | 790b33a5498a9f97642ead84ce66dfd354bd8626 /scripts/lib/resulttool/regression.py | |
parent | 95bd530b772f97e7329749b403bf9e2dff12ff7f (diff) | |
download | poky-1fd5ebdb06224489ad056e261962e23ece36fc87.tar.gz |
resulttool: enable merge, store, report and regression analysis
OEQA outputs test results into json files and these files were
archived by Autobuilder during QA releases. Example: each oe-selftest
run by Autobuilder for a different host distro generates a
testresults.json file.
These scripts were developed as test result tools to manage
these testresults.json files.
Using the "store" operation, user can store multiple testresults.json
files as well as the pre-configured directories used to hold those files.
Using the "merge" operation, user can merge multiple testresults.json
files to a target file.
Using the "report" operation, user can view the test result summary
for all available testresults.json files inside an ordinary directory
or a git repository.
Using the "regression-file" operation, user can perform regression
analysis on testresults.json files specified. Using the "regression-dir"
and "regression-git" operations, user can perform regression analysis
on directories and git branches respectively.
These resulttool operations expect the testresults.json file to use
the json format below.
{
"<testresult_1>": {
"configuration": {
"<config_name_1>": "<config_value_1>",
"<config_name_2>": "<config_value_2>",
...
"<config_name_n>": "<config_value_n>",
},
"result": {
"<testcase_namespace_1>": {
"status": "<PASSED or FAILED or ERROR or SKIPPED>",
"log": "<failure or error logging>"
},
"<testcase_namespace_2>": {
"status": "<PASSED or FAILED or ERROR or SKIPPED>",
"log": "<failure or error logging>"
},
...
"<testcase_namespace_n>": {
"status": "<PASSED or FAILED or ERROR or SKIPPED>",
"log": "<failure or error logging>"
},
}
},
...
"<testresult_n>": {
"configuration": {
"<config_name_1>": "<config_value_1>",
"<config_name_2>": "<config_value_2>",
...
"<config_name_n>": "<config_value_n>",
},
"result": {
"<testcase_namespace_1>": {
"status": "<PASSED or FAILED or ERROR or SKIPPED>",
"log": "<failure or error logging>"
},
"<testcase_namespace_2>": {
"status": "<PASSED or FAILED or ERROR or SKIPPED>",
"log": "<failure or error logging>"
},
...
"<testcase_namespace_n>": {
"status": "<PASSED or FAILED or ERROR or SKIPPED>",
"log": "<failure or error logging>"
},
}
},
}
To use these scripts, first source oe environment, then run the
entry point script to look for help.
$ resulttool
To store test result from oeqa automated tests, execute the below
$ resulttool store <source_dir> <git_branch>
To merge multiple testresults.json files, execute the below
$ resulttool merge <base_result_file> <target_result_file>
To generate a test report, execute the below
$ resulttool report <source_dir>
To perform regression file analysis, execute the below
$ resulttool regression-file <base_result_file> <target_result_file>
To perform regression dir analysis, execute the below
$ resulttool regression-dir <base_result_dir> <target_result_dir>
To perform regression git analysis, execute the below
$ resulttool regression-git <source_dir> <base_branch> <target_branch>
[YOCTO# 13012]
[YOCTO# 12654]
(From OE-Core rev: 78a322d7be402a5b9b5abf26ad35670a8535408a)
Signed-off-by: Yeoh Ee Peng <ee.peng.yeoh@intel.com>
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
Diffstat (limited to 'scripts/lib/resulttool/regression.py')
-rw-r--r-- | scripts/lib/resulttool/regression.py | 208 |
1 files changed, 208 insertions, 0 deletions
diff --git a/scripts/lib/resulttool/regression.py b/scripts/lib/resulttool/regression.py new file mode 100644 index 0000000000..bee3fb011a --- /dev/null +++ b/scripts/lib/resulttool/regression.py | |||
@@ -0,0 +1,208 @@ | |||
1 | # test result tool - regression analysis | ||
2 | # | ||
3 | # Copyright (c) 2019, Intel Corporation. | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify it | ||
6 | # under the terms and conditions of the GNU General Public License, | ||
7 | # version 2, as published by the Free Software Foundation. | ||
8 | # | ||
9 | # This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | # more details. | ||
13 | # | ||
14 | from resulttool.resultsutils import load_json_file, get_dict_value, pop_dict_element | ||
15 | import json | ||
16 | |||
17 | class ResultsRegressionSelector(object): | ||
18 | |||
19 | def get_results_unique_configurations(self, logger, results): | ||
20 | unique_configurations_map = {"oeselftest": ['TEST_TYPE', 'HOST_DISTRO', 'MACHINE'], | ||
21 | "runtime": ['TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE'], | ||
22 | "sdk": ['TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE'], | ||
23 | "sdkext": ['TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE']} | ||
24 | results_unique_configs = {} | ||
25 | for k in results: | ||
26 | result = results[k] | ||
27 | result_configs = get_dict_value(logger, result, 'configuration') | ||
28 | result_test_type = get_dict_value(logger, result_configs, 'TEST_TYPE') | ||
29 | unique_configuration_keys = get_dict_value(logger, unique_configurations_map, result_test_type) | ||
30 | result_unique_config = {} | ||
31 | for ck in unique_configuration_keys: | ||
32 | config_value = get_dict_value(logger, result_configs, ck) | ||
33 | if config_value: | ||
34 | result_unique_config[ck] = config_value | ||
35 | results_unique_configs[k] = result_unique_config | ||
36 | return results_unique_configs | ||
37 | |||
38 | def get_regression_base_target_pair(self, logger, base_results, target_results): | ||
39 | base_configs = self.get_results_unique_configurations(logger, base_results) | ||
40 | logger.debug('Retrieved base configuration: config=%s' % base_configs) | ||
41 | target_configs = self.get_results_unique_configurations(logger, target_results) | ||
42 | logger.debug('Retrieved target configuration: config=%s' % target_configs) | ||
43 | regression_pair = {} | ||
44 | for bk in base_configs: | ||
45 | base_config = base_configs[bk] | ||
46 | for tk in target_configs: | ||
47 | target_config = target_configs[tk] | ||
48 | if base_config == target_config: | ||
49 | if bk in regression_pair: | ||
50 | regression_pair[bk].append(tk) | ||
51 | else: | ||
52 | regression_pair[bk] = [tk] | ||
53 | return regression_pair | ||
54 | |||
55 | def run_regression_with_regression_pairing(self, logger, regression_pair, base_results, target_results): | ||
56 | regression = ResultsRegression() | ||
57 | for base in regression_pair: | ||
58 | for target in regression_pair[base]: | ||
59 | print('Getting regression for base=%s target=%s' % (base, target)) | ||
60 | regression.run(logger, base_results[base], target_results[target]) | ||
61 | |||
62 | class ResultsRegression(object): | ||
63 | |||
64 | def print_regression_result(self, result): | ||
65 | if result: | ||
66 | print('============================Start Regression============================') | ||
67 | print('Only print regression if base status not equal target') | ||
68 | print('<test case> : <base status> -> <target status>') | ||
69 | print('========================================================================') | ||
70 | for k in result: | ||
71 | print(k, ':', result[k]['base'], '->', result[k]['target']) | ||
72 | print('==============================End Regression==============================') | ||
73 | |||
74 | def get_regression_result(self, logger, base_result, target_result): | ||
75 | base_result = get_dict_value(logger, base_result, 'result') | ||
76 | target_result = get_dict_value(logger, target_result, 'result') | ||
77 | result = {} | ||
78 | if base_result and target_result: | ||
79 | logger.debug('Getting regression result') | ||
80 | for k in base_result: | ||
81 | base_testcase = base_result[k] | ||
82 | base_status = get_dict_value(logger, base_testcase, 'status') | ||
83 | if base_status: | ||
84 | target_testcase = get_dict_value(logger, target_result, k) | ||
85 | target_status = get_dict_value(logger, target_testcase, 'status') | ||
86 | if base_status != target_status: | ||
87 | result[k] = {'base': base_status, 'target': target_status} | ||
88 | else: | ||
89 | logger.error('Failed to retrieved base test case status: %s' % k) | ||
90 | return result | ||
91 | |||
92 | def run(self, logger, base_result, target_result): | ||
93 | if base_result and target_result: | ||
94 | result = self.get_regression_result(logger, base_result, target_result) | ||
95 | logger.debug('Retrieved regression result =%s' % result) | ||
96 | self.print_regression_result(result) | ||
97 | else: | ||
98 | logger.error('Input data objects must not be empty (base_result=%s, target_result=%s)' % | ||
99 | (base_result, target_result)) | ||
100 | |||
101 | def get_results_from_directory(logger, source_dir): | ||
102 | from resulttool.merge import ResultsMerge | ||
103 | from resulttool.resultsutils import get_directory_files | ||
104 | result_files = get_directory_files(source_dir, ['.git'], 'testresults.json') | ||
105 | base_results = {} | ||
106 | for file in result_files: | ||
107 | merge = ResultsMerge() | ||
108 | results = merge.get_test_results(logger, file, '') | ||
109 | base_results = merge.merge_results(base_results, results) | ||
110 | return base_results | ||
111 | |||
112 | def remove_testcases_to_optimize_regression_runtime(logger, results): | ||
113 | test_case_removal = ['ptestresult.rawlogs', 'ptestresult.sections'] | ||
114 | for r in test_case_removal: | ||
115 | for k in results: | ||
116 | result = get_dict_value(logger, results[k], 'result') | ||
117 | pop_dict_element(logger, result, r) | ||
118 | |||
119 | def regression_file(args, logger): | ||
120 | base_results = load_json_file(args.base_result_file) | ||
121 | print('Successfully loaded base test results from: %s' % args.base_result_file) | ||
122 | target_results = load_json_file(args.target_result_file) | ||
123 | print('Successfully loaded target test results from: %s' % args.target_result_file) | ||
124 | remove_testcases_to_optimize_regression_runtime(logger, base_results) | ||
125 | remove_testcases_to_optimize_regression_runtime(logger, target_results) | ||
126 | if args.base_result_id and args.target_result_id: | ||
127 | base_result = get_dict_value(logger, base_results, base_result_id) | ||
128 | print('Getting base test result with result_id=%s' % base_result_id) | ||
129 | target_result = get_dict_value(logger, target_results, target_result_id) | ||
130 | print('Getting target test result with result_id=%s' % target_result_id) | ||
131 | regression = ResultsRegression() | ||
132 | regression.run(logger, base_result, target_result) | ||
133 | else: | ||
134 | regression = ResultsRegressionSelector() | ||
135 | regression_pair = regression.get_regression_base_target_pair(logger, base_results, target_results) | ||
136 | logger.debug('Retrieved regression pair=%s' % regression_pair) | ||
137 | regression.run_regression_with_regression_pairing(logger, regression_pair, base_results, target_results) | ||
138 | return 0 | ||
139 | |||
140 | def regression_directory(args, logger): | ||
141 | base_results = get_results_from_directory(logger, args.base_result_directory) | ||
142 | target_results = get_results_from_directory(logger, args.target_result_directory) | ||
143 | remove_testcases_to_optimize_regression_runtime(logger, base_results) | ||
144 | remove_testcases_to_optimize_regression_runtime(logger, target_results) | ||
145 | regression = ResultsRegressionSelector() | ||
146 | regression_pair = regression.get_regression_base_target_pair(logger, base_results, target_results) | ||
147 | logger.debug('Retrieved regression pair=%s' % regression_pair) | ||
148 | regression.run_regression_with_regression_pairing(logger, regression_pair, base_results, target_results) | ||
149 | return 0 | ||
150 | |||
151 | def regression_git(args, logger): | ||
152 | from resulttool.resultsutils import checkout_git_dir | ||
153 | base_results = {} | ||
154 | target_results = {} | ||
155 | if checkout_git_dir(args.source_dir, args.base_git_branch): | ||
156 | base_results = get_results_from_directory(logger, args.source_dir) | ||
157 | if checkout_git_dir(args.source_dir, args.target_git_branch): | ||
158 | target_results = get_results_from_directory(logger, args.source_dir) | ||
159 | if base_results and target_results: | ||
160 | remove_testcases_to_optimize_regression_runtime(logger, base_results) | ||
161 | remove_testcases_to_optimize_regression_runtime(logger, target_results) | ||
162 | regression = ResultsRegressionSelector() | ||
163 | regression_pair = regression.get_regression_base_target_pair(logger, base_results, target_results) | ||
164 | logger.debug('Retrieved regression pair=%s' % regression_pair) | ||
165 | regression.run_regression_with_regression_pairing(logger, regression_pair, base_results, target_results) | ||
166 | return 0 | ||
167 | |||
168 | def register_commands(subparsers): | ||
169 | """Register subcommands from this plugin""" | ||
170 | parser_build = subparsers.add_parser('regression-file', help='regression file analysis', | ||
171 | description='regression analysis comparing base result set to target ' | ||
172 | 'result set', | ||
173 | group='analysis') | ||
174 | parser_build.set_defaults(func=regression_file) | ||
175 | parser_build.add_argument('base_result_file', | ||
176 | help='base result file provide the base result set') | ||
177 | parser_build.add_argument('target_result_file', | ||
178 | help='target result file provide the target result set for comparison with base result') | ||
179 | parser_build.add_argument('-b', '--base-result-id', default='', | ||
180 | help='(optional) default select regression based on configurations unless base result ' | ||
181 | 'id was provided') | ||
182 | parser_build.add_argument('-t', '--target-result-id', default='', | ||
183 | help='(optional) default select regression based on configurations unless target result ' | ||
184 | 'id was provided') | ||
185 | |||
186 | parser_build = subparsers.add_parser('regression-dir', help='regression directory analysis', | ||
187 | description='regression analysis comparing base result set to target ' | ||
188 | 'result set', | ||
189 | group='analysis') | ||
190 | parser_build.set_defaults(func=regression_directory) | ||
191 | parser_build.add_argument('base_result_directory', | ||
192 | help='base result directory provide the files for base result set') | ||
193 | parser_build.add_argument('target_result_directory', | ||
194 | help='target result file provide the files for target result set for comparison with ' | ||
195 | 'base result') | ||
196 | |||
197 | parser_build = subparsers.add_parser('regression-git', help='regression git analysis', | ||
198 | description='regression analysis comparing base result set to target ' | ||
199 | 'result set', | ||
200 | group='analysis') | ||
201 | parser_build.set_defaults(func=regression_git) | ||
202 | parser_build.add_argument('source_dir', | ||
203 | help='source directory that contain the git repository with test result files') | ||
204 | parser_build.add_argument('base_git_branch', | ||
205 | help='base git branch that provide the files for base result set') | ||
206 | parser_build.add_argument('target_git_branch', | ||
207 | help='target git branch that provide the files for target result set for comparison with ' | ||
208 | 'base result') | ||