Diffstat (limited to 'scripts')
-rw-r--r-- | scripts/lib/resulttool/__init__.py                        |   0
-rw-r--r-- | scripts/lib/resulttool/merge.py                           |  71
-rw-r--r-- | scripts/lib/resulttool/regression.py                      | 208
-rw-r--r-- | scripts/lib/resulttool/report.py                          | 113
-rw-r--r-- | scripts/lib/resulttool/resultsutils.py                    |  67
-rw-r--r-- | scripts/lib/resulttool/store.py                           | 110
-rw-r--r-- | scripts/lib/resulttool/template/test_report_full_text.txt |  35
-rwxr-xr-x | scripts/resulttool                                        |  84
8 files changed, 688 insertions, 0 deletions
diff --git a/scripts/lib/resulttool/__init__.py b/scripts/lib/resulttool/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/scripts/lib/resulttool/__init__.py
diff --git a/scripts/lib/resulttool/merge.py b/scripts/lib/resulttool/merge.py
new file mode 100644
index 0000000000..1d9cfafd41
--- /dev/null
+++ b/scripts/lib/resulttool/merge.py
@@ -0,0 +1,71 @@
1 | # test result tool - merge multiple testresults.json files | ||
2 | # | ||
3 | # Copyright (c) 2019, Intel Corporation. | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify it | ||
6 | # under the terms and conditions of the GNU General Public License, | ||
7 | # version 2, as published by the Free Software Foundation. | ||
8 | # | ||
9 | # This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | # more details. | ||
13 | # | ||
14 | from resulttool.resultsutils import load_json_file, get_dict_value, dump_json_data | ||
15 | import os | ||
16 | import json | ||
17 | |||
18 | class ResultsMerge(object): | ||
19 | |||
20 | def get_test_results(self, logger, file, result_id): | ||
21 | results = load_json_file(file) | ||
22 | if result_id: | ||
23 | result = get_dict_value(logger, results, result_id) | ||
24 | if result: | ||
25 | return {result_id: result} | ||
26 | return result | ||
27 | return results | ||
28 | |||
29 | def merge_results(self, base_results, target_results): | ||
30 | for k in target_results: | ||
31 | base_results[k] = target_results[k] | ||
32 | return base_results | ||
33 | |||
34 | def _get_write_dir(self): | ||
35 | basepath = os.environ['BUILDDIR'] | ||
36 | return basepath + '/tmp/' | ||
37 | |||
38 | def dump_merged_results(self, results, output_dir): | ||
39 | file_output_dir = output_dir if output_dir else self._get_write_dir() | ||
40 | dump_json_data(file_output_dir, 'testresults.json', results) | ||
41 | print('Successfully merged results to: %s' % os.path.join(file_output_dir, 'testresults.json')) | ||
42 | |||
43 | def run(self, logger, base_result_file, target_result_file, target_result_id, output_dir): | ||
44 | base_results = self.get_test_results(logger, base_result_file, '') | ||
45 | target_results = self.get_test_results(logger, target_result_file, target_result_id) | ||
46 | if base_results and target_results: | ||
47 | merged_results = self.merge_results(base_results, target_results) | ||
48 | self.dump_merged_results(merged_results, output_dir) | ||
49 | |||
50 | def merge(args, logger): | ||
51 | merge = ResultsMerge() | ||
52 | merge.run(logger, args.base_result_file, args.target_result_file, args.target_result_id, args.output_dir) | ||
53 | return 0 | ||
54 | |||
55 | def register_commands(subparsers): | ||
56 | """Register subcommands from this plugin""" | ||
57 | parser_build = subparsers.add_parser('merge', help='merge test results', | ||
58 | description='merge results from multiple files', | ||
59 | group='setup') | ||
60 | parser_build.set_defaults(func=merge) | ||
61 | parser_build.add_argument('base_result_file', | ||
62 | help='base result file providing the base result set') | ||
63 | parser_build.add_argument('target_result_file', | ||
64 | help='target result file providing the target result set to be merged into the ' | ||
65 | 'base result set') | ||
66 | parser_build.add_argument('-t', '--target-result-id', default='', | ||
67 | help='(optional) merge only the target result set with this result id; by default all ' | ||
68 | 'result sets from the target file are merged into the base') | ||
69 | parser_build.add_argument('-o', '--output-dir', default='', | ||
70 | help='(optional) write the merged results to this directory; by default they are ' | ||
71 | 'written to <poky>/build/tmp/') | ||
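
For orientation, a minimal sketch of what ResultsMerge.merge_results() above amounts to: a testresults.json file maps result ids to result sets, and merging simply lets target entries overwrite base entries with the same id. The result ids and payloads below are illustrative only.

    # Illustrative result sets; real files are produced by the oeqa test runners.
    base = {
        "runtime_core-image-sato_qemux86": {
            "configuration": {"TEST_TYPE": "runtime", "MACHINE": "qemux86"},
            "result": {"ping.PingTest.test_ping": {"status": "PASSED"}},
        }
    }
    target = {
        "oeselftest_fedora-29_qemux86": {
            "configuration": {"TEST_TYPE": "oeselftest", "MACHINE": "qemux86"},
            "result": {"devtool.DevtoolTests.test_create_workspace": {"status": "PASSED"}},
        }
    }

    # merge_results(base, target) copies every target key into base, so it behaves
    # like a plain dict update keyed on the result id.
    merged = dict(base)
    merged.update(target)
    assert set(merged) == set(base) | set(target)
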
diff --git a/scripts/lib/resulttool/regression.py b/scripts/lib/resulttool/regression.py
new file mode 100644
index 0000000000..bee3fb011a
--- /dev/null
+++ b/scripts/lib/resulttool/regression.py
@@ -0,0 +1,208 @@
1 | # test result tool - regression analysis | ||
2 | # | ||
3 | # Copyright (c) 2019, Intel Corporation. | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify it | ||
6 | # under the terms and conditions of the GNU General Public License, | ||
7 | # version 2, as published by the Free Software Foundation. | ||
8 | # | ||
9 | # This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | # more details. | ||
13 | # | ||
14 | from resulttool.resultsutils import load_json_file, get_dict_value, pop_dict_element | ||
15 | import json | ||
16 | |||
17 | class ResultsRegressionSelector(object): | ||
18 | |||
19 | def get_results_unique_configurations(self, logger, results): | ||
20 | unique_configurations_map = {"oeselftest": ['TEST_TYPE', 'HOST_DISTRO', 'MACHINE'], | ||
21 | "runtime": ['TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE'], | ||
22 | "sdk": ['TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE'], | ||
23 | "sdkext": ['TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE']} | ||
24 | results_unique_configs = {} | ||
25 | for k in results: | ||
26 | result = results[k] | ||
27 | result_configs = get_dict_value(logger, result, 'configuration') | ||
28 | result_test_type = get_dict_value(logger, result_configs, 'TEST_TYPE') | ||
29 | unique_configuration_keys = get_dict_value(logger, unique_configurations_map, result_test_type) | ||
30 | result_unique_config = {} | ||
31 | for ck in unique_configuration_keys: | ||
32 | config_value = get_dict_value(logger, result_configs, ck) | ||
33 | if config_value: | ||
34 | result_unique_config[ck] = config_value | ||
35 | results_unique_configs[k] = result_unique_config | ||
36 | return results_unique_configs | ||
37 | |||
38 | def get_regression_base_target_pair(self, logger, base_results, target_results): | ||
39 | base_configs = self.get_results_unique_configurations(logger, base_results) | ||
40 | logger.debug('Retrieved base configuration: config=%s' % base_configs) | ||
41 | target_configs = self.get_results_unique_configurations(logger, target_results) | ||
42 | logger.debug('Retrieved target configuration: config=%s' % target_configs) | ||
43 | regression_pair = {} | ||
44 | for bk in base_configs: | ||
45 | base_config = base_configs[bk] | ||
46 | for tk in target_configs: | ||
47 | target_config = target_configs[tk] | ||
48 | if base_config == target_config: | ||
49 | if bk in regression_pair: | ||
50 | regression_pair[bk].append(tk) | ||
51 | else: | ||
52 | regression_pair[bk] = [tk] | ||
53 | return regression_pair | ||
54 | |||
55 | def run_regression_with_regression_pairing(self, logger, regression_pair, base_results, target_results): | ||
56 | regression = ResultsRegression() | ||
57 | for base in regression_pair: | ||
58 | for target in regression_pair[base]: | ||
59 | print('Getting regression for base=%s target=%s' % (base, target)) | ||
60 | regression.run(logger, base_results[base], target_results[target]) | ||
61 | |||
62 | class ResultsRegression(object): | ||
63 | |||
64 | def print_regression_result(self, result): | ||
65 | if result: | ||
66 | print('============================Start Regression============================') | ||
67 | print('Only listing test cases whose base status differs from the target status') | ||
68 | print('<test case> : <base status> -> <target status>') | ||
69 | print('========================================================================') | ||
70 | for k in result: | ||
71 | print(k, ':', result[k]['base'], '->', result[k]['target']) | ||
72 | print('==============================End Regression==============================') | ||
73 | |||
74 | def get_regression_result(self, logger, base_result, target_result): | ||
75 | base_result = get_dict_value(logger, base_result, 'result') | ||
76 | target_result = get_dict_value(logger, target_result, 'result') | ||
77 | result = {} | ||
78 | if base_result and target_result: | ||
79 | logger.debug('Getting regression result') | ||
80 | for k in base_result: | ||
81 | base_testcase = base_result[k] | ||
82 | base_status = get_dict_value(logger, base_testcase, 'status') | ||
83 | if base_status: | ||
84 | target_testcase = get_dict_value(logger, target_result, k) | ||
85 | target_status = get_dict_value(logger, target_testcase, 'status') | ||
86 | if base_status != target_status: | ||
87 | result[k] = {'base': base_status, 'target': target_status} | ||
88 | else: | ||
89 | logger.error('Failed to retrieve base test case status: %s' % k) | ||
90 | return result | ||
91 | |||
92 | def run(self, logger, base_result, target_result): | ||
93 | if base_result and target_result: | ||
94 | result = self.get_regression_result(logger, base_result, target_result) | ||
95 | logger.debug('Retrieved regression result =%s' % result) | ||
96 | self.print_regression_result(result) | ||
97 | else: | ||
98 | logger.error('Input data objects must not be empty (base_result=%s, target_result=%s)' % | ||
99 | (base_result, target_result)) | ||
100 | |||
101 | def get_results_from_directory(logger, source_dir): | ||
102 | from resulttool.merge import ResultsMerge | ||
103 | from resulttool.resultsutils import get_directory_files | ||
104 | result_files = get_directory_files(source_dir, ['.git'], 'testresults.json') | ||
105 | base_results = {} | ||
106 | for file in result_files: | ||
107 | merge = ResultsMerge() | ||
108 | results = merge.get_test_results(logger, file, '') | ||
109 | base_results = merge.merge_results(base_results, results) | ||
110 | return base_results | ||
111 | |||
112 | def remove_testcases_to_optimize_regression_runtime(logger, results): | ||
113 | test_case_removal = ['ptestresult.rawlogs', 'ptestresult.sections'] | ||
114 | for r in test_case_removal: | ||
115 | for k in results: | ||
116 | result = get_dict_value(logger, results[k], 'result') | ||
117 | pop_dict_element(logger, result, r) | ||
118 | |||
119 | def regression_file(args, logger): | ||
120 | base_results = load_json_file(args.base_result_file) | ||
121 | print('Successfully loaded base test results from: %s' % args.base_result_file) | ||
122 | target_results = load_json_file(args.target_result_file) | ||
123 | print('Successfully loaded target test results from: %s' % args.target_result_file) | ||
124 | remove_testcases_to_optimize_regression_runtime(logger, base_results) | ||
125 | remove_testcases_to_optimize_regression_runtime(logger, target_results) | ||
126 | if args.base_result_id and args.target_result_id: | ||
127 | base_result = get_dict_value(logger, base_results, args.base_result_id) | ||
128 | print('Getting base test result with result_id=%s' % args.base_result_id) | ||
129 | target_result = get_dict_value(logger, target_results, args.target_result_id) | ||
130 | print('Getting target test result with result_id=%s' % args.target_result_id) | ||
131 | regression = ResultsRegression() | ||
132 | regression.run(logger, base_result, target_result) | ||
133 | else: | ||
134 | regression = ResultsRegressionSelector() | ||
135 | regression_pair = regression.get_regression_base_target_pair(logger, base_results, target_results) | ||
136 | logger.debug('Retrieved regression pair=%s' % regression_pair) | ||
137 | regression.run_regression_with_regression_pairing(logger, regression_pair, base_results, target_results) | ||
138 | return 0 | ||
139 | |||
140 | def regression_directory(args, logger): | ||
141 | base_results = get_results_from_directory(logger, args.base_result_directory) | ||
142 | target_results = get_results_from_directory(logger, args.target_result_directory) | ||
143 | remove_testcases_to_optimize_regression_runtime(logger, base_results) | ||
144 | remove_testcases_to_optimize_regression_runtime(logger, target_results) | ||
145 | regression = ResultsRegressionSelector() | ||
146 | regression_pair = regression.get_regression_base_target_pair(logger, base_results, target_results) | ||
147 | logger.debug('Retrieved regression pair=%s' % regression_pair) | ||
148 | regression.run_regression_with_regression_pairing(logger, regression_pair, base_results, target_results) | ||
149 | return 0 | ||
150 | |||
151 | def regression_git(args, logger): | ||
152 | from resulttool.resultsutils import checkout_git_dir | ||
153 | base_results = {} | ||
154 | target_results = {} | ||
155 | if checkout_git_dir(args.source_dir, args.base_git_branch): | ||
156 | base_results = get_results_from_directory(logger, args.source_dir) | ||
157 | if checkout_git_dir(args.source_dir, args.target_git_branch): | ||
158 | target_results = get_results_from_directory(logger, args.source_dir) | ||
159 | if base_results and target_results: | ||
160 | remove_testcases_to_optimize_regression_runtime(logger, base_results) | ||
161 | remove_testcases_to_optimize_regression_runtime(logger, target_results) | ||
162 | regression = ResultsRegressionSelector() | ||
163 | regression_pair = regression.get_regression_base_target_pair(logger, base_results, target_results) | ||
164 | logger.debug('Retrieved regression pair=%s' % regression_pair) | ||
165 | regression.run_regression_with_regression_pairing(logger, regression_pair, base_results, target_results) | ||
166 | return 0 | ||
167 | |||
168 | def register_commands(subparsers): | ||
169 | """Register subcommands from this plugin""" | ||
170 | parser_build = subparsers.add_parser('regression-file', help='regression file analysis', | ||
171 | description='regression analysis comparing base result set to target ' | ||
172 | 'result set', | ||
173 | group='analysis') | ||
174 | parser_build.set_defaults(func=regression_file) | ||
175 | parser_build.add_argument('base_result_file', | ||
176 | help='base result file providing the base result set') | ||
177 | parser_build.add_argument('target_result_file', | ||
178 | help='target result file providing the target result set for comparison with the base result set') | ||
179 | parser_build.add_argument('-b', '--base-result-id', default='', | ||
180 | help='(optional) compare only the base result set with this result id; by default result ' | ||
181 | 'sets are paired up by their configurations') | ||
182 | parser_build.add_argument('-t', '--target-result-id', default='', | ||
183 | help='(optional) compare only the target result set with this result id; by default result ' | ||
184 | 'sets are paired up by their configurations') | ||
185 | |||
186 | parser_build = subparsers.add_parser('regression-dir', help='regression directory analysis', | ||
187 | description='regression analysis comparing base result set to target ' | ||
188 | 'result set', | ||
189 | group='analysis') | ||
190 | parser_build.set_defaults(func=regression_directory) | ||
191 | parser_build.add_argument('base_result_directory', | ||
192 | help='base result directory providing the files for the base result set') | ||
193 | parser_build.add_argument('target_result_directory', | ||
194 | help='target result directory providing the files for the target result set to compare ' | ||
195 | 'against the base result set') | ||
196 | |||
197 | parser_build = subparsers.add_parser('regression-git', help='regression git analysis', | ||
198 | description='regression analysis comparing base result set to target ' | ||
199 | 'result set', | ||
200 | group='analysis') | ||
201 | parser_build.set_defaults(func=regression_git) | ||
202 | parser_build.add_argument('source_dir', | ||
203 | help='source directory that contains the git repository with the test result files') | ||
204 | parser_build.add_argument('base_git_branch', | ||
205 | help='base git branch that provides the files for the base result set') | ||
206 | parser_build.add_argument('target_git_branch', | ||
207 | help='target git branch that provides the files for the target result set to compare ' | ||
208 | 'against the base result set') | ||
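
A small self-contained sketch of the pairing rule used by ResultsRegressionSelector.get_regression_base_target_pair() above: base and target result sets are compared only when the configuration values selected for their TEST_TYPE match exactly. The ids and configurations below are illustrative.

    base_configs = {
        "base_a": {"TEST_TYPE": "runtime", "IMAGE_BASENAME": "core-image-sato", "MACHINE": "qemux86"},
    }
    target_configs = {
        "target_1": {"TEST_TYPE": "runtime", "IMAGE_BASENAME": "core-image-sato", "MACHINE": "qemux86"},
        "target_2": {"TEST_TYPE": "runtime", "IMAGE_BASENAME": "core-image-sato", "MACHINE": "qemuarm"},
    }

    # Same nested-loop comparison as in get_regression_base_target_pair().
    regression_pair = {}
    for bk, base_config in base_configs.items():
        for tk, target_config in target_configs.items():
            if base_config == target_config:
                regression_pair.setdefault(bk, []).append(tk)

    # Only the matching machine is paired; the qemuarm run has no base to compare against.
    assert regression_pair == {"base_a": ["target_1"]}
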
diff --git a/scripts/lib/resulttool/report.py b/scripts/lib/resulttool/report.py
new file mode 100644
index 0000000000..ab5de1f3a7
--- /dev/null
+++ b/scripts/lib/resulttool/report.py
@@ -0,0 +1,113 @@
1 | # test result tool - report text based test results | ||
2 | # | ||
3 | # Copyright (c) 2019, Intel Corporation. | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify it | ||
6 | # under the terms and conditions of the GNU General Public License, | ||
7 | # version 2, as published by the Free Software Foundation. | ||
8 | # | ||
9 | # This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | # more details. | ||
13 | # | ||
14 | import os | ||
15 | import glob | ||
16 | import json | ||
17 | from resulttool.resultsutils import checkout_git_dir, load_json_file, get_dict_value, get_directory_files | ||
18 | |||
19 | class ResultsTextReport(object): | ||
20 | |||
21 | def get_aggregated_test_result(self, logger, testresult): | ||
22 | test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []} | ||
23 | result_types = {'passed': ['PASSED', 'passed'], | ||
24 | 'failed': ['FAILED', 'failed', 'ERROR', 'error', 'UNKNOWN'], | ||
25 | 'skipped': ['SKIPPED', 'skipped']} | ||
26 | result = get_dict_value(logger, testresult, 'result') | ||
27 | for k in result: | ||
28 | test_status = get_dict_value(logger, result[k], 'status') | ||
29 | for tk in result_types: | ||
30 | if test_status in result_types[tk]: | ||
31 | test_count_report[tk] += 1 | ||
32 | if test_status in result_types['failed']: | ||
33 | test_count_report['failed_testcases'].append(k) | ||
34 | return test_count_report | ||
35 | |||
36 | def get_test_result_percentage(self, test_result_count): | ||
37 | total_tested = test_result_count['passed'] + test_result_count['failed'] + test_result_count['skipped'] | ||
38 | test_percent_report = {'passed': 0, 'failed': 0, 'skipped': 0} | ||
39 | for k in test_percent_report: | ||
40 | test_percent_report[k] = format(test_result_count[k] / total_tested * 100, '.2f') | ||
41 | return test_percent_report | ||
42 | |||
43 | def add_test_configurations(self, test_report, source_dir, file, result_id): | ||
44 | test_report['file_dir'] = self._get_short_file_dir(source_dir, file) | ||
45 | test_report['result_id'] = result_id | ||
46 | test_report['test_file_dir_result_id'] = '%s_%s' % (test_report['file_dir'], test_report['result_id']) | ||
47 | |||
48 | def _get_short_file_dir(self, source_dir, file): | ||
49 | file_dir = os.path.dirname(file) | ||
50 | source_dir = source_dir[:-1] if source_dir[-1] == '/' else source_dir | ||
51 | if file_dir == source_dir: | ||
52 | return 'None' | ||
53 | return file_dir.replace(source_dir, '') | ||
54 | |||
55 | def get_max_string_len(self, test_result_list, key, default_max_len): | ||
56 | max_len = default_max_len | ||
57 | for test_result in test_result_list: | ||
58 | value_len = len(test_result[key]) | ||
59 | if value_len > max_len: | ||
60 | max_len = value_len | ||
61 | return max_len | ||
62 | |||
63 | def print_test_report(self, template_file_name, test_count_reports, test_percent_reports, | ||
64 | max_len_dir, max_len_result_id): | ||
65 | from jinja2 import Environment, FileSystemLoader | ||
66 | script_path = os.path.dirname(os.path.realpath(__file__)) | ||
67 | file_loader = FileSystemLoader(script_path + '/template') | ||
68 | env = Environment(loader=file_loader, trim_blocks=True) | ||
69 | template = env.get_template(template_file_name) | ||
70 | output = template.render(test_count_reports=test_count_reports, | ||
71 | test_percent_reports=test_percent_reports, | ||
72 | max_len_dir=max_len_dir, | ||
73 | max_len_result_id=max_len_result_id) | ||
74 | print('Printing text-based test report:') | ||
75 | print(output) | ||
76 | |||
77 | def view_test_report(self, logger, source_dir, git_branch): | ||
78 | if git_branch: | ||
79 | checkout_git_dir(source_dir, git_branch) | ||
80 | test_count_reports = [] | ||
81 | test_percent_reports = [] | ||
82 | for file in get_directory_files(source_dir, ['.git'], 'testresults.json'): | ||
83 | logger.debug('Computing result for test result file: %s' % file) | ||
84 | testresults = load_json_file(file) | ||
85 | for k in testresults: | ||
86 | test_count_report = self.get_aggregated_test_result(logger, testresults[k]) | ||
87 | test_percent_report = self.get_test_result_percentage(test_count_report) | ||
88 | self.add_test_configurations(test_count_report, source_dir, file, k) | ||
89 | self.add_test_configurations(test_percent_report, source_dir, file, k) | ||
90 | test_count_reports.append(test_count_report) | ||
91 | test_percent_reports.append(test_percent_report) | ||
92 | max_len_dir = self.get_max_string_len(test_count_reports, 'file_dir', len('file_dir')) | ||
93 | max_len_result_id = self.get_max_string_len(test_count_reports, 'result_id', len('result_id')) | ||
94 | self.print_test_report('test_report_full_text.txt', test_count_reports, test_percent_reports, | ||
95 | max_len_dir, max_len_result_id) | ||
96 | |||
97 | def report(args, logger): | ||
98 | report = ResultsTextReport() | ||
99 | report.view_test_report(logger, args.source_dir, args.git_branch) | ||
100 | return 0 | ||
101 | |||
102 | def register_commands(subparsers): | ||
103 | """Register subcommands from this plugin""" | ||
104 | parser_build = subparsers.add_parser('report', help='report test result summary', | ||
105 | description='report text-based test result summary from the source directory', | ||
106 | group='analysis') | ||
107 | parser_build.set_defaults(func=report) | ||
108 | parser_build.add_argument('source_dir', | ||
109 | help='source directory that contains the test result files for reporting') | ||
110 | parser_build.add_argument('-b', '--git-branch', default='', | ||
111 | help='(optional) check out this git branch in the source directory (assuming it is a ' | ||
112 | 'git repository) before reporting; by default the source directory is assumed to ' | ||
113 | 'already contain all files needed for reporting') | ||
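
A short sketch of the aggregation performed by get_aggregated_test_result() above, using made-up test case names and statuses: each status is bucketed into passed/failed/skipped and the names of failing cases are collected for the report.

    testresult = {"result": {
        "ptestresult.glibc.math": {"status": "PASSED"},
        "ptestresult.busybox.head": {"status": "FAILED"},
        "ptestresult.busybox.tail": {"status": "SKIPPED"},
    }}

    counts = {"passed": 0, "failed": 0, "skipped": 0, "failed_testcases": []}
    buckets = {"passed": ["PASSED", "passed"],
               "failed": ["FAILED", "failed", "ERROR", "error", "UNKNOWN"],
               "skipped": ["SKIPPED", "skipped"]}
    for name, data in testresult["result"].items():
        for bucket, statuses in buckets.items():
            if data["status"] in statuses:
                counts[bucket] += 1
        if data["status"] in buckets["failed"]:
            counts["failed_testcases"].append(name)

    assert counts["passed"] == counts["failed"] == counts["skipped"] == 1
    assert counts["failed_testcases"] == ["ptestresult.busybox.head"]
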
diff --git a/scripts/lib/resulttool/resultsutils.py b/scripts/lib/resulttool/resultsutils.py
new file mode 100644
index 0000000000..368786922c
--- /dev/null
+++ b/scripts/lib/resulttool/resultsutils.py
@@ -0,0 +1,67 @@
1 | # test result tool - utilities | ||
2 | # | ||
3 | # Copyright (c) 2019, Intel Corporation. | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify it | ||
6 | # under the terms and conditions of the GNU General Public License, | ||
7 | # version 2, as published by the Free Software Foundation. | ||
8 | # | ||
9 | # This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | # more details. | ||
13 | # | ||
14 | import os | ||
15 | import json | ||
16 | import scriptpath | ||
17 | scriptpath.add_oe_lib_path() | ||
18 | from oeqa.utils.git import GitRepo, GitError | ||
19 | |||
20 | def load_json_file(file): | ||
21 | with open(file, "r") as f: | ||
22 | return json.load(f) | ||
23 | |||
24 | def dump_json_data(write_dir, file_name, json_data): | ||
25 | file_content = json.dumps(json_data, sort_keys=True, indent=4) | ||
26 | file_path = os.path.join(write_dir, file_name) | ||
27 | with open(file_path, 'w') as the_file: | ||
28 | the_file.write(file_content) | ||
29 | |||
30 | def get_dict_value(logger, dict, key): | ||
31 | try: | ||
32 | return dict[key] | ||
33 | except KeyError: | ||
34 | if logger: | ||
35 | logger.debug('Faced KeyError exception: dict=%s: key=%s' % (dict, key)) | ||
36 | return None | ||
37 | except TypeError: | ||
38 | if logger: | ||
39 | logger.debug('Faced TypeError exception: dict=%s: key=%s' % (dict, key)) | ||
40 | return None | ||
41 | |||
42 | def pop_dict_element(logger, dict, key): | ||
43 | try: | ||
44 | dict.pop(key) | ||
45 | except KeyError: | ||
46 | if logger: | ||
47 | logger.debug('Faced KeyError exception: dict=%s: key=%s' % (dict, key)) | ||
48 | except AttributeError: | ||
49 | if logger: | ||
50 | logger.debug('Faced AttributeError exception: dict=%s: key=%s' % (dict, key)) | ||
51 | |||
52 | def checkout_git_dir(git_dir, git_branch): | ||
53 | try: | ||
54 | repo = GitRepo(git_dir, is_topdir=True) | ||
55 | repo.run_cmd('checkout %s' % git_branch) | ||
56 | return True | ||
57 | except GitError: | ||
58 | return False | ||
59 | |||
60 | def get_directory_files(source_dir, excludes, file): | ||
61 | files_in_dir = [] | ||
62 | for root, dirs, files in os.walk(source_dir, topdown=True): | ||
63 | [dirs.remove(d) for d in list(dirs) if d in excludes] | ||
64 | for name in files: | ||
65 | if name == file: | ||
66 | files_in_dir.append(os.path.join(root, name)) | ||
67 | return files_in_dir \ No newline at end of file | ||
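
A self-contained sketch of what get_directory_files() above returns: it walks the source directory top-down, prunes the excluded subdirectories (such as .git) so they are never descended into, and collects every file with the requested name. The helper is repeated here verbatim so the example runs on its own; the directory layout is made up.

    import os
    import tempfile

    def get_directory_files(source_dir, excludes, file):
        files_in_dir = []
        for root, dirs, files in os.walk(source_dir, topdown=True):
            [dirs.remove(d) for d in list(dirs) if d in excludes]
            for name in files:
                if name == file:
                    files_in_dir.append(os.path.join(root, name))
        return files_in_dir

    with tempfile.TemporaryDirectory() as top:
        for sub in ('runtime', '.git'):
            os.makedirs(os.path.join(top, sub))
            with open(os.path.join(top, sub, 'testresults.json'), 'w') as f:
                f.write('{}')
        found = get_directory_files(top, ['.git'], 'testresults.json')
        # The copy under .git is pruned; only the real result file is returned.
        assert [os.path.relpath(p, top) for p in found] == ['runtime/testresults.json']
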
diff --git a/scripts/lib/resulttool/store.py b/scripts/lib/resulttool/store.py
new file mode 100644
index 0000000000..2c6fd8492c
--- /dev/null
+++ b/scripts/lib/resulttool/store.py
@@ -0,0 +1,110 @@
1 | # test result tool - store test results | ||
2 | # | ||
3 | # Copyright (c) 2019, Intel Corporation. | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify it | ||
6 | # under the terms and conditions of the GNU General Public License, | ||
7 | # version 2, as published by the Free Software Foundation. | ||
8 | # | ||
9 | # This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | # more details. | ||
13 | # | ||
14 | import datetime | ||
15 | import tempfile | ||
16 | import os | ||
17 | import subprocess | ||
18 | import scriptpath | ||
19 | scriptpath.add_bitbake_lib_path() | ||
20 | scriptpath.add_oe_lib_path() | ||
21 | from resulttool.resultsutils import checkout_git_dir | ||
22 | try: | ||
23 | import bb | ||
24 | except ImportError: | ||
25 | pass | ||
26 | |||
27 | class ResultsGitStore(object): | ||
28 | |||
29 | def _get_output_dir(self): | ||
30 | basepath = os.environ['BUILDDIR'] | ||
31 | return basepath + '/testresults_%s/' % datetime.datetime.now().strftime("%Y%m%d%H%M%S") | ||
32 | |||
33 | def _create_temporary_workspace_dir(self): | ||
34 | return tempfile.mkdtemp(prefix='testresults.') | ||
35 | |||
36 | def _remove_temporary_workspace_dir(self, workspace_dir): | ||
37 | return subprocess.run(["rm", "-rf", workspace_dir]) | ||
38 | |||
39 | def _oe_copy_files(self, source_dir, destination_dir): | ||
40 | from oe.path import copytree | ||
41 | copytree(source_dir, destination_dir) | ||
42 | |||
43 | def _copy_files(self, source_dir, destination_dir, copy_ignore=None): | ||
44 | from shutil import copytree | ||
45 | copytree(source_dir, destination_dir, ignore=copy_ignore) | ||
46 | |||
47 | def _store_files_to_git(self, logger, file_dir, git_dir, git_branch, commit_msg_subject, commit_msg_body): | ||
48 | logger.debug('Storing test result into git repository (%s) and branch (%s)' | ||
49 | % (git_dir, git_branch)) | ||
50 | return subprocess.run(["oe-git-archive", | ||
51 | file_dir, | ||
52 | "-g", git_dir, | ||
53 | "-b", git_branch, | ||
54 | "--commit-msg-subject", commit_msg_subject, | ||
55 | "--commit-msg-body", commit_msg_body]) | ||
56 | |||
57 | def store_to_existing(self, logger, source_dir, git_dir, git_branch): | ||
58 | logger.debug('Storing files to existing git repository and branch') | ||
59 | from shutil import ignore_patterns | ||
60 | dest_dir = self._create_temporary_workspace_dir() | ||
61 | dest_top_dir = os.path.join(dest_dir, 'top_dir') | ||
62 | self._copy_files(git_dir, dest_top_dir, copy_ignore=ignore_patterns('.git')) | ||
63 | self._oe_copy_files(source_dir, dest_top_dir) | ||
64 | self._store_files_to_git(logger, dest_top_dir, git_dir, git_branch, | ||
65 | 'Store as existing git and branch', 'Store as existing git repository and branch') | ||
66 | self._remove_temporary_workspace_dir(dest_dir) | ||
67 | return git_dir | ||
68 | |||
69 | def store_to_existing_with_new_branch(self, logger, source_dir, git_dir, git_branch): | ||
70 | logger.debug('Storing files to existing git repository with new branch') | ||
71 | self._store_files_to_git(logger, source_dir, git_dir, git_branch, | ||
72 | 'Store as existing git with new branch', | ||
73 | 'Store as existing git repository with new branch') | ||
74 | return git_dir | ||
75 | |||
76 | def store_to_new(self, logger, source_dir, git_branch): | ||
77 | logger.debug('Storing files to new git repository') | ||
78 | output_dir = self._get_output_dir() | ||
79 | self._store_files_to_git(logger, source_dir, output_dir, git_branch, | ||
80 | 'Store as new', 'Store as new git repository') | ||
81 | return output_dir | ||
82 | |||
83 | def store(self, logger, source_dir, git_dir, git_branch): | ||
84 | if git_dir: | ||
85 | if checkout_git_dir(git_dir, git_branch): | ||
86 | self.store_to_existing(logger, source_dir, git_dir, git_branch) | ||
87 | else: | ||
88 | self.store_to_existing_with_new_branch(logger, source_dir, git_dir, git_branch) | ||
89 | else: | ||
90 | self.store_to_new(logger, source_dir, git_branch) | ||
91 | |||
92 | def store(args, logger): | ||
93 | gitstore = ResultsGitStore() | ||
94 | gitstore.store(logger, args.source_dir, args.git_dir, args.git_branch) | ||
95 | return 0 | ||
96 | |||
97 | def register_commands(subparsers): | ||
98 | """Register subcommands from this plugin""" | ||
99 | parser_build = subparsers.add_parser('store', help='store test result files and directories into git repository', | ||
100 | description='store the testresults.json files and related directories ' | ||
101 | 'from the source directory into the destination git repository ' | ||
102 | 'with the given git branch', | ||
103 | group='setup') | ||
104 | parser_build.set_defaults(func=store) | ||
105 | parser_build.add_argument('source_dir', | ||
106 | help='source directory that contains the test result files and directories to be stored') | ||
107 | parser_build.add_argument('git_branch', help='git branch used for the store') | ||
108 | parser_build.add_argument('-d', '--git-dir', default='', | ||
109 | help='(optional) store into this existing git repository; by default a new ' | ||
110 | '<build_dir>/testresults_<datetime> repository is created') | ||
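
For clarity, a self-contained sketch of the dispatch in ResultsGitStore.store() above: an existing repository whose branch checks out cleanly is updated in place, an existing repository without that branch gets the branch created via oe-git-archive, and omitting --git-dir creates a fresh repository under BUILDDIR. The helper and paths below are invented for illustration and are not part of the module.

    def choose_store_action(git_dir, branch_checkout_ok):
        # Mirrors the if/else chain in ResultsGitStore.store().
        if git_dir:
            if branch_checkout_ok:
                return 'store_to_existing'
            return 'store_to_existing_with_new_branch'
        return 'store_to_new'

    assert choose_store_action('/srv/testresults', True) == 'store_to_existing'
    assert choose_store_action('/srv/testresults', False) == 'store_to_existing_with_new_branch'
    assert choose_store_action('', False) == 'store_to_new'
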
diff --git a/scripts/lib/resulttool/template/test_report_full_text.txt b/scripts/lib/resulttool/template/test_report_full_text.txt
new file mode 100644
index 0000000000..bc4874ba4b
--- /dev/null
+++ b/scripts/lib/resulttool/template/test_report_full_text.txt
@@ -0,0 +1,35 @@
1 | ============================================================================================================== | ||
2 | Test Report (Count of passed, failed, skipped group by file_dir, result_id) | ||
3 | ============================================================================================================== | ||
4 | -------------------------------------------------------------------------------------------------------------- | ||
5 | {{ 'file_dir'.ljust(max_len_dir) }} | {{ 'result_id'.ljust(max_len_result_id) }} | {{ 'passed'.ljust(10) }} | {{ 'failed'.ljust(10) }} | {{ 'skipped'.ljust(10) }} | ||
6 | -------------------------------------------------------------------------------------------------------------- | ||
7 | {% for report in test_count_reports |sort(attribute='test_file_dir_result_id') %} | ||
8 | {{ report.file_dir.ljust(max_len_dir) }} | {{ report.result_id.ljust(max_len_result_id) }} | {{ (report.passed|string).ljust(10) }} | {{ (report.failed|string).ljust(10) }} | {{ (report.skipped|string).ljust(10) }} | ||
9 | {% endfor %} | ||
10 | -------------------------------------------------------------------------------------------------------------- | ||
11 | |||
12 | ============================================================================================================== | ||
13 | Test Report (Percent of passed, failed, skipped group by file_dir, result_id) | ||
14 | ============================================================================================================== | ||
15 | -------------------------------------------------------------------------------------------------------------- | ||
16 | {{ 'file_dir'.ljust(max_len_dir) }} | {{ 'result_id'.ljust(max_len_result_id) }} | {{ 'passed_%'.ljust(10) }} | {{ 'failed_%'.ljust(10) }} | {{ 'skipped_%'.ljust(10) }} | ||
17 | -------------------------------------------------------------------------------------------------------------- | ||
18 | {% for report in test_percent_reports |sort(attribute='test_file_dir_result_id') %} | ||
19 | {{ report.file_dir.ljust(max_len_dir) }} | {{ report.result_id.ljust(max_len_result_id) }} | {{ (report.passed|string).ljust(10) }} | {{ (report.failed|string).ljust(10) }} | {{ (report.skipped|string).ljust(10) }} | ||
20 | {% endfor %} | ||
21 | -------------------------------------------------------------------------------------------------------------- | ||
22 | |||
23 | ============================================================================================================== | ||
24 | Test Report (Failed test cases group by file_dir, result_id) | ||
25 | ============================================================================================================== | ||
26 | -------------------------------------------------------------------------------------------------------------- | ||
27 | {% for report in test_count_reports |sort(attribute='test_file_dir_result_id') %} | ||
28 | {% if report.failed_testcases %} | ||
29 | file_dir | result_id : {{ report.file_dir }} | {{ report.result_id }} | ||
30 | {% for testcase in report.failed_testcases %} | ||
31 | {{ testcase }} | ||
32 | {% endfor %} | ||
33 | {% endif %} | ||
34 | {% endfor %} | ||
35 | -------------------------------------------------------------------------------------------------------------- \ No newline at end of file | ||
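
The template above is rendered by report.py through Jinja2. A rough sketch of that call, assuming jinja2 is installed and the working directory is the top of the tree so the template path resolves; the single report entry is invented for illustration.

    from jinja2 import Environment, FileSystemLoader

    env = Environment(loader=FileSystemLoader('scripts/lib/resulttool/template'),
                      trim_blocks=True)
    template = env.get_template('test_report_full_text.txt')
    print(template.render(
        test_count_reports=[{'file_dir': '/runtime', 'result_id': 'runtime_qemux86',
                             'test_file_dir_result_id': '/runtime_runtime_qemux86',
                             'passed': 10, 'failed': 1, 'skipped': 2,
                             'failed_testcases': ['ping.PingTest.test_ping']}],
        test_percent_reports=[{'file_dir': '/runtime', 'result_id': 'runtime_qemux86',
                               'test_file_dir_result_id': '/runtime_runtime_qemux86',
                               'passed': '76.92', 'failed': '7.69', 'skipped': '15.38'}],
        max_len_dir=len('file_dir'), max_len_result_id=len('runtime_qemux86')))
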
diff --git a/scripts/resulttool b/scripts/resulttool
new file mode 100755
index 0000000000..ebb5fc81c9
--- /dev/null
+++ b/scripts/resulttool
@@ -0,0 +1,84 @@
1 | #!/usr/bin/env python3 | ||
2 | # | ||
3 | # test results tool - tool for testresults.json (merge test results, regression analysis) | ||
4 | # | ||
5 | # To view the help information: | ||
6 | # $ resulttool | ||
7 | # | ||
8 | # To store test results from the oeqa automated tests, execute the below | ||
9 | # $ resulttool store <source_dir> <git_branch> | ||
10 | # | ||
11 | # To merge test results, execute the below | ||
12 | # $ resulttool merge <base_result_file> <target_result_file> | ||
13 | # | ||
14 | # To generate a test report, execute the below | ||
15 | # $ resulttool report <source_dir> | ||
16 | # | ||
17 | # To perform regression file analysis, execute the below | ||
18 | # $ resulttool regression-file <base_result_file> <target_result_file> | ||
19 | # | ||
20 | # Copyright (c) 2019, Intel Corporation. | ||
21 | # | ||
22 | # This program is free software; you can redistribute it and/or modify it | ||
23 | # under the terms and conditions of the GNU General Public License, | ||
24 | # version 2, as published by the Free Software Foundation. | ||
25 | # | ||
26 | # This program is distributed in the hope it will be useful, but WITHOUT | ||
27 | # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
28 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
29 | # more details. | ||
30 | # | ||
31 | |||
32 | import os | ||
33 | import sys | ||
34 | import argparse | ||
35 | import logging | ||
36 | script_path = os.path.dirname(os.path.realpath(__file__)) | ||
37 | lib_path = script_path + '/lib' | ||
38 | sys.path = sys.path + [lib_path] | ||
39 | import argparse_oe | ||
40 | import scriptutils | ||
41 | import resulttool.merge | ||
42 | import resulttool.store | ||
43 | import resulttool.regression | ||
44 | import resulttool.report | ||
45 | logger = scriptutils.logger_create('resulttool') | ||
46 | |||
47 | def _validate_user_input_arguments(args): | ||
48 | if hasattr(args, "source_dir"): | ||
49 | if not os.path.isdir(args.source_dir): | ||
50 | logger.error('source_dir argument needs to be a directory: %s' % args.source_dir) | ||
51 | return False | ||
52 | return True | ||
53 | |||
54 | def main(): | ||
55 | parser = argparse_oe.ArgumentParser(description="OpenEmbedded test results tool.", | ||
56 | epilog="Use %(prog)s <subcommand> --help to get help on a specific command") | ||
57 | parser.add_argument('-d', '--debug', help='enable debug output', action='store_true') | ||
58 | parser.add_argument('-q', '--quiet', help='print only errors', action='store_true') | ||
59 | subparsers = parser.add_subparsers(dest="subparser_name", title='subcommands', metavar='<subcommand>') | ||
60 | subparsers.required = True | ||
61 | subparsers.add_subparser_group('setup', 'setup', 200) | ||
62 | resulttool.merge.register_commands(subparsers) | ||
63 | resulttool.store.register_commands(subparsers) | ||
64 | subparsers.add_subparser_group('analysis', 'analysis', 100) | ||
65 | resulttool.regression.register_commands(subparsers) | ||
66 | resulttool.report.register_commands(subparsers) | ||
67 | |||
68 | args = parser.parse_args() | ||
69 | if args.debug: | ||
70 | logger.setLevel(logging.DEBUG) | ||
71 | elif args.quiet: | ||
72 | logger.setLevel(logging.ERROR) | ||
73 | |||
74 | if not _validate_user_input_arguments(args): | ||
75 | return -1 | ||
76 | |||
77 | try: | ||
78 | ret = args.func(args, logger) | ||
79 | except argparse_oe.ArgumentUsageError as ae: | ||
80 | parser.error_subcommand(ae.message, ae.subcommand) | ||
81 | return ret | ||
82 | |||
83 | if __name__ == "__main__": | ||
84 | sys.exit(main()) | ||
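
As a rough, self-contained sketch of the plugin pattern the wrapper above relies on (argparse_oe adds subcommand grouping and friendlier usage errors on top of plain argparse, which this sketch omits): every resulttool module exposes register_commands() and a handler taking (args, logger). The module name and handler below are invented for illustration.

    import argparse
    import logging
    import sys

    def merge_handler(args, logger):
        logger.info('would merge %s into %s', args.target_result_file, args.base_result_file)
        return 0

    def register_commands(subparsers):
        # Same shape as resulttool.merge.register_commands(), minus the group= keyword.
        parser = subparsers.add_parser('merge', help='merge test results')
        parser.set_defaults(func=merge_handler)
        parser.add_argument('base_result_file')
        parser.add_argument('target_result_file')

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger('resulttool-demo')
    parser = argparse.ArgumentParser(description='plain-argparse sketch of the resulttool wrapper')
    subparsers = parser.add_subparsers(dest='subparser_name')
    subparsers.required = True
    register_commands(subparsers)

    args = parser.parse_args(['merge', 'base.json', 'target.json'])
    sys.exit(args.func(args, logger))
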