diff options
author | Richard Purdie <richard.purdie@linuxfoundation.org> | 2019-02-16 18:13:00 +0000
---|---|---
committer | Richard Purdie <richard.purdie@linuxfoundation.org> | 2019-02-21 12:34:00 +0000
commit | 47eb3d00e9d6a66aee1283dab29af8117a006d6d (patch)
tree | 9d574720d23da3edeab5648f05fb5d2ee95c9cd2 /scripts
parent | beed7523b667affea71d37d88d2f5c19c935d159 (diff)
download | poky-47eb3d00e9d6a66aee1283dab29af8117a006d6d.tar.gz
resulttool: Improvements to allow integration into the autobuilder
This is a combined patch of the various tweaks and improvements I
made to resulttool:
* Avoid subprocess.run() as it's a python 3.6 feature and we
have autobuilder workers with 3.5.
* Avoid python keywords as variable names
* Simplify dict accesses using .get()
* Rename resultsutils -> resultutils to match the resultstool ->
resulttool rename
* Formalised the handling of "file_name" as "TESTSERIES", which the code
will now add into the json configuration data if it's not present, based
on the directory name.
* When we don't have failed test cases, print something saying so
instead of an empty table
* Tweak the table headers in the report to be more readable (reference
"Test Series" instead of file_id and ID instead of results_id)
* Improve/simplify the max string length handling
* Merge the counts and percentage data into one table in the report
since printing two reports of the same data confuses the user
* Removed the confusing header in the regression report
* Show matches, then regressions, then unmatched runs in the regression
report; also remove chatty, unneeded output
* Try harder to "pair" up matching configurations to reduce noise in
the regression report
* Abstracted the "mapping" table concept used for pairing in the
regression code into general code in resultutils (see the sketch after
this list)
* Created multiple mappings for results analysis, results storage and
'flattening' results data in a merge
* Simplify the merge command to take a source and a destination,
letting the destination be a directory or a file, removing the need for
an output directory parameter
* Add the 'IMAGE_PKGTYPE' and 'DISTRO' config options to the regression
mappings
* Have the store command place the testresults files in a layout from
the mapping, making commits into the git repo for results storage more
useful for simple comparison purposes
* Set the oe-git-archive tag format appropriately for oeqa results
storage (and simplify the commit messages closer to their defaults)
* Fix oe-git-archive to use the commit/branch data from the results file
* Cleaned up the command option help to match other changes
* Follow the model of git branch/tag processing used by oe-build-perf-report
and use that to read the data using git show, avoiding the need to change branches
* Add ptest summary to the report command
* Update the tests to match the above changes
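To illustrate the mapping idea, here is a minimal sketch of how one of the
new mapping tables turns a result's configuration into a storage/comparison
path (the store_map entries are abridged from the new resultutils module
below; the configuration values shown are hypothetical):

    # Map each TEST_TYPE to the configuration keys that identify a result
    store_map = {
        "oeselftest": ['TEST_TYPE'],
        "runtime": ['TEST_TYPE', 'DISTRO', 'MACHINE', 'IMAGE_BASENAME'],
    }

    # A hypothetical "configuration" section from a testresults.json entry
    config = {"TEST_TYPE": "runtime", "DISTRO": "poky",
              "MACHINE": "qemux86", "IMAGE_BASENAME": "core-image-sato"}

    # Results are filed under a path built from the mapped keys,
    # here "runtime/poky/qemux86/core-image-sato"
    testpath = "/".join(config.get(i) for i in store_map[config["TEST_TYPE"]])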
(From OE-Core rev: ff2c029b568f70aa9960dde04ddd207829812ea0)
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
Diffstat (limited to 'scripts')
-rwxr-xr-x | scripts/lib/resulttool/manualexecution.py | 8
-rw-r--r-- | scripts/lib/resulttool/merge.py | 69
-rw-r--r-- | scripts/lib/resulttool/regression.py | 328
-rw-r--r-- | scripts/lib/resulttool/report.py | 157
-rw-r--r-- | scripts/lib/resulttool/resultsutils.py | 67
-rw-r--r-- | scripts/lib/resulttool/resultutils.py | 127
-rw-r--r-- | scripts/lib/resulttool/store.py | 136
-rw-r--r-- | scripts/lib/resulttool/template/test_report_full_text.txt | 33
-rwxr-xr-x | scripts/resulttool | 7
9 files changed, 482 insertions(+), 450 deletions(-)
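Before the per-file diffs, a minimal sketch of how the reworked merge flow
composes the new utility functions (this mirrors merge.py below when the
destination is a directory; it assumes poky's scripts/lib is on sys.path and
the paths shown are hypothetical):

    import resulttool.resultutils as resultutils

    # Load whatever is already filed in the destination directory under the
    # store_map layout, fold in an incoming results file, then write the
    # combined set back out.
    results = resultutils.load_resultsdata("testresults-dir",
                                           configmap=resultutils.store_map)
    resultutils.append_resultsdata(results, "incoming/testresults.json",
                                   configmap=resultutils.store_map)
    resultutils.save_resultsdata(results, "testresults-dir")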
diff --git a/scripts/lib/resulttool/manualexecution.py b/scripts/lib/resulttool/manualexecution.py
index 64ec581a9f..ecdc4e7a7a 100755
--- a/scripts/lib/resulttool/manualexecution.py
+++ b/scripts/lib/resulttool/manualexecution.py
@@ -18,7 +18,11 @@ import sys
18 | import datetime | 18 | import datetime |
19 | import re | 19 | import re |
20 | from oeqa.core.runner import OETestResultJSONHelper | 20 | from oeqa.core.runner import OETestResultJSONHelper |
21 | from resulttool.resultsutils import load_json_file | 21 | |
22 | def load_json_file(file): | ||
23 | with open(file, "r") as f: | ||
24 | return json.load(f) | ||
25 | |||
22 | 26 | ||
23 | class ManualTestRunner(object): | 27 | class ManualTestRunner(object): |
24 | def __init__(self): | 28 | def __init__(self): |
@@ -134,4 +138,4 @@ def register_commands(subparsers):
134 | description='helper script for populating results during manual test execution. You can find manual test case JSON file in meta/lib/oeqa/manual/', | 138 | description='helper script for populating results during manual test execution. You can find manual test case JSON file in meta/lib/oeqa/manual/', |
135 | group='manualexecution') | 139 | group='manualexecution') |
136 | parser_build.set_defaults(func=manualexecution) | 140 | parser_build.set_defaults(func=manualexecution) |
137 | parser_build.add_argument('file', help='specify path to manual test case JSON file. Note: Please use \"\" to encapsulate the file path.') \ No newline at end of file | 141 | parser_build.add_argument('file', help='specify path to manual test case JSON file. Note: Please use \"\" to encapsulate the file path.')
diff --git a/scripts/lib/resulttool/merge.py b/scripts/lib/resulttool/merge.py
index 1d9cfafd41..3e4b7a38ad 100644
--- a/scripts/lib/resulttool/merge.py
+++ b/scripts/lib/resulttool/merge.py
@@ -1,6 +1,7 @@
1 | # test result tool - merge multiple testresults.json files | 1 | # resulttool - merge multiple testresults.json files into a file or directory |
2 | # | 2 | # |
3 | # Copyright (c) 2019, Intel Corporation. | 3 | # Copyright (c) 2019, Intel Corporation. |
4 | # Copyright (c) 2019, Linux Foundation | ||
4 | # | 5 | # |
5 | # This program is free software; you can redistribute it and/or modify it | 6 | # This program is free software; you can redistribute it and/or modify it |
6 | # under the terms and conditions of the GNU General Public License, | 7 | # under the terms and conditions of the GNU General Public License, |
@@ -11,61 +12,31 @@
11 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 12 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
12 | # more details. | 13 | # more details. |
13 | # | 14 | # |
14 | from resulttool.resultsutils import load_json_file, get_dict_value, dump_json_data | ||
15 | import os | 15 | import os |
16 | import json | 16 | import json |
17 | 17 | import resulttool.resultutils as resultutils | |
18 | class ResultsMerge(object): | ||
19 | |||
20 | def get_test_results(self, logger, file, result_id): | ||
21 | results = load_json_file(file) | ||
22 | if result_id: | ||
23 | result = get_dict_value(logger, results, result_id) | ||
24 | if result: | ||
25 | return {result_id: result} | ||
26 | return result | ||
27 | return results | ||
28 | |||
29 | def merge_results(self, base_results, target_results): | ||
30 | for k in target_results: | ||
31 | base_results[k] = target_results[k] | ||
32 | return base_results | ||
33 | |||
34 | def _get_write_dir(self): | ||
35 | basepath = os.environ['BUILDDIR'] | ||
36 | return basepath + '/tmp/' | ||
37 | |||
38 | def dump_merged_results(self, results, output_dir): | ||
39 | file_output_dir = output_dir if output_dir else self._get_write_dir() | ||
40 | dump_json_data(file_output_dir, 'testresults.json', results) | ||
41 | print('Successfully merged results to: %s' % os.path.join(file_output_dir, 'testresults.json')) | ||
42 | |||
43 | def run(self, logger, base_result_file, target_result_file, target_result_id, output_dir): | ||
44 | base_results = self.get_test_results(logger, base_result_file, '') | ||
45 | target_results = self.get_test_results(logger, target_result_file, target_result_id) | ||
46 | if base_results and target_results: | ||
47 | merged_results = self.merge_results(base_results, target_results) | ||
48 | self.dump_merged_results(merged_results, output_dir) | ||
49 | 18 | ||
50 | def merge(args, logger): | 19 | def merge(args, logger): |
51 | merge = ResultsMerge() | 20 | if os.path.isdir(args.target_results): |
52 | merge.run(logger, args.base_result_file, args.target_result_file, args.target_result_id, args.output_dir) | 21 | results = resultutils.load_resultsdata(args.target_results, configmap=resultutils.store_map) |
22 | resultutils.append_resultsdata(results, args.base_results, configmap=resultutils.store_map) | ||
23 | resultutils.save_resultsdata(results, args.target_results) | ||
24 | else: | ||
25 | results = resultutils.load_resultsdata(args.base_results, configmap=resultutils.flatten_map) | ||
26 | if os.path.exists(args.target_results): | ||
27 | resultutils.append_resultsdata(results, args.target_results, configmap=resultutils.flatten_map) | ||
28 | resultutils.save_resultsdata(results, os.path.dirname(args.target_results), fn=os.path.basename(args.target_results)) | ||
29 | |||
53 | return 0 | 30 | return 0 |
54 | 31 | ||
55 | def register_commands(subparsers): | 32 | def register_commands(subparsers): |
56 | """Register subcommands from this plugin""" | 33 | """Register subcommands from this plugin""" |
57 | parser_build = subparsers.add_parser('merge', help='merge test results', | 34 | parser_build = subparsers.add_parser('merge', help='merge test result files/directories', |
58 | description='merge results from multiple files', | 35 | description='merge the results from multiple files/directories into the target file or directory', |
59 | group='setup') | 36 | group='setup') |
60 | parser_build.set_defaults(func=merge) | 37 | parser_build.set_defaults(func=merge) |
61 | parser_build.add_argument('base_result_file', | 38 | parser_build.add_argument('base_results', |
62 | help='base result file provide the base result set') | 39 | help='the results file/directory to import') |
63 | parser_build.add_argument('target_result_file', | 40 | parser_build.add_argument('target_results', |
64 | help='target result file provide the target result set for merging into the ' | 41 | help='the target file or directory to merge the base_results with') |
65 | 'base result set') | 42 | |
66 | parser_build.add_argument('-t', '--target-result-id', default='', | ||
67 | help='(optional) default merge all result sets available from target to base ' | ||
68 | 'unless specific target result id was provided') | ||
69 | parser_build.add_argument('-o', '--output-dir', default='', | ||
70 | help='(optional) default write merged results to <poky>/build/tmp/ unless specific ' | ||
71 | 'output directory was provided') | ||
diff --git a/scripts/lib/resulttool/regression.py b/scripts/lib/resulttool/regression.py
index bee3fb011a..ff77332fa9 100644
--- a/scripts/lib/resulttool/regression.py
+++ b/scripts/lib/resulttool/regression.py
@@ -1,6 +1,7 @@
1 | # test result tool - regression analysis | 1 | # resulttool - regression analysis |
2 | # | 2 | # |
3 | # Copyright (c) 2019, Intel Corporation. | 3 | # Copyright (c) 2019, Intel Corporation. |
4 | # Copyright (c) 2019, Linux Foundation | ||
4 | # | 5 | # |
5 | # This program is free software; you can redistribute it and/or modify it | 6 | # This program is free software; you can redistribute it and/or modify it |
6 | # under the terms and conditions of the GNU General Public License, | 7 | # under the terms and conditions of the GNU General Public License, |
@@ -11,171 +12,170 @@
11 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 12 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
12 | # more details. | 13 | # more details. |
13 | # | 14 | # |
14 | from resulttool.resultsutils import load_json_file, get_dict_value, pop_dict_element | 15 | import resulttool.resultutils as resultutils |
15 | import json | 16 | import json |
16 | 17 | ||
17 | class ResultsRegressionSelector(object): | 18 | from oeqa.utils.git import GitRepo |
18 | 19 | import oeqa.utils.gitarchive as gitarchive | |
19 | def get_results_unique_configurations(self, logger, results): | 20 | |
20 | unique_configurations_map = {"oeselftest": ['TEST_TYPE', 'HOST_DISTRO', 'MACHINE'], | 21 | def compare_result(logger, base_name, target_name, base_result, target_result): |
21 | "runtime": ['TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE'], | 22 | base_result = base_result.get('result') |
22 | "sdk": ['TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE'], | 23 | target_result = target_result.get('result') |
23 | "sdkext": ['TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE']} | 24 | result = {} |
24 | results_unique_configs = {} | 25 | if base_result and target_result: |
25 | for k in results: | 26 | for k in base_result: |
26 | result = results[k] | 27 | base_testcase = base_result[k] |
27 | result_configs = get_dict_value(logger, result, 'configuration') | 28 | base_status = base_testcase.get('status') |
28 | result_test_type = get_dict_value(logger, result_configs, 'TEST_TYPE') | 29 | if base_status: |
29 | unique_configuration_keys = get_dict_value(logger, unique_configurations_map, result_test_type) | 30 | target_testcase = target_result.get(k, {}) |
30 | result_unique_config = {} | 31 | target_status = target_testcase.get('status') |
31 | for ck in unique_configuration_keys: | 32 | if base_status != target_status: |
32 | config_value = get_dict_value(logger, result_configs, ck) | 33 | result[k] = {'base': base_status, 'target': target_status} |
33 | if config_value: | 34 | else: |
34 | result_unique_config[ck] = config_value | 35 | logger.error('Failed to retrieve base test case status: %s' % k)
35 | results_unique_configs[k] = result_unique_config | 36 | if result: |
36 | return results_unique_configs | 37 | resultstring = "Regression: %s\n %s\n" % (base_name, target_name) |
37 | 38 | for k in result: | |
38 | def get_regression_base_target_pair(self, logger, base_results, target_results): | 39 | resultstring += ' %s: %s -> %s\n' % (k, result[k]['base'], result[k]['target']) |
39 | base_configs = self.get_results_unique_configurations(logger, base_results) | ||
40 | logger.debug('Retrieved base configuration: config=%s' % base_configs) | ||
41 | target_configs = self.get_results_unique_configurations(logger, target_results) | ||
42 | logger.debug('Retrieved target configuration: config=%s' % target_configs) | ||
43 | regression_pair = {} | ||
44 | for bk in base_configs: | ||
45 | base_config = base_configs[bk] | ||
46 | for tk in target_configs: | ||
47 | target_config = target_configs[tk] | ||
48 | if base_config == target_config: | ||
49 | if bk in regression_pair: | ||
50 | regression_pair[bk].append(tk) | ||
51 | else: | ||
52 | regression_pair[bk] = [tk] | ||
53 | return regression_pair | ||
54 | |||
55 | def run_regression_with_regression_pairing(self, logger, regression_pair, base_results, target_results): | ||
56 | regression = ResultsRegression() | ||
57 | for base in regression_pair: | ||
58 | for target in regression_pair[base]: | ||
59 | print('Getting regression for base=%s target=%s' % (base, target)) | ||
60 | regression.run(logger, base_results[base], target_results[target]) | ||
61 | |||
62 | class ResultsRegression(object): | ||
63 | |||
64 | def print_regression_result(self, result): | ||
65 | if result: | ||
66 | print('============================Start Regression============================') | ||
67 | print('Only print regression if base status not equal target') | ||
68 | print('<test case> : <base status> -> <target status>') | ||
69 | print('========================================================================') | ||
70 | for k in result: | ||
71 | print(k, ':', result[k]['base'], '->', result[k]['target']) | ||
72 | print('==============================End Regression==============================') | ||
73 | |||
74 | def get_regression_result(self, logger, base_result, target_result): | ||
75 | base_result = get_dict_value(logger, base_result, 'result') | ||
76 | target_result = get_dict_value(logger, target_result, 'result') | ||
77 | result = {} | ||
78 | if base_result and target_result: | ||
79 | logger.debug('Getting regression result') | ||
80 | for k in base_result: | ||
81 | base_testcase = base_result[k] | ||
82 | base_status = get_dict_value(logger, base_testcase, 'status') | ||
83 | if base_status: | ||
84 | target_testcase = get_dict_value(logger, target_result, k) | ||
85 | target_status = get_dict_value(logger, target_testcase, 'status') | ||
86 | if base_status != target_status: | ||
87 | result[k] = {'base': base_status, 'target': target_status} | ||
88 | else: | ||
89 | logger.error('Failed to retrieve base test case status: %s' % k) | ||
90 | return result | ||
91 | |||
92 | def run(self, logger, base_result, target_result): | ||
93 | if base_result and target_result: | ||
94 | result = self.get_regression_result(logger, base_result, target_result) | ||
95 | logger.debug('Retrieved regression result =%s' % result) | ||
96 | self.print_regression_result(result) | ||
97 | else: | ||
98 | logger.error('Input data objects must not be empty (base_result=%s, target_result=%s)' % | ||
99 | (base_result, target_result)) | ||
100 | |||
101 | def get_results_from_directory(logger, source_dir): | ||
102 | from resulttool.merge import ResultsMerge | ||
103 | from resulttool.resultsutils import get_directory_files | ||
104 | result_files = get_directory_files(source_dir, ['.git'], 'testresults.json') | ||
105 | base_results = {} | ||
106 | for file in result_files: | ||
107 | merge = ResultsMerge() | ||
108 | results = merge.get_test_results(logger, file, '') | ||
109 | base_results = merge.merge_results(base_results, results) | ||
110 | return base_results | ||
111 | |||
112 | def remove_testcases_to_optimize_regression_runtime(logger, results): | ||
113 | test_case_removal = ['ptestresult.rawlogs', 'ptestresult.sections'] | ||
114 | for r in test_case_removal: | ||
115 | for k in results: | ||
116 | result = get_dict_value(logger, results[k], 'result') | ||
117 | pop_dict_element(logger, result, r) | ||
118 | |||
119 | def regression_file(args, logger): | ||
120 | base_results = load_json_file(args.base_result_file) | ||
121 | print('Successfully loaded base test results from: %s' % args.base_result_file) | ||
122 | target_results = load_json_file(args.target_result_file) | ||
123 | print('Successfully loaded target test results from: %s' % args.target_result_file) | ||
124 | remove_testcases_to_optimize_regression_runtime(logger, base_results) | ||
125 | remove_testcases_to_optimize_regression_runtime(logger, target_results) | ||
126 | if args.base_result_id and args.target_result_id: | ||
127 | base_result = get_dict_value(logger, base_results, base_result_id) | ||
128 | print('Getting base test result with result_id=%s' % base_result_id) | ||
129 | target_result = get_dict_value(logger, target_results, target_result_id) | ||
130 | print('Getting target test result with result_id=%s' % target_result_id) | ||
131 | regression = ResultsRegression() | ||
132 | regression.run(logger, base_result, target_result) | ||
133 | else: | 40 | else: |
134 | regression = ResultsRegressionSelector() | 41 | resultstring = "Match: %s\n %s" % (base_name, target_name) |
135 | regression_pair = regression.get_regression_base_target_pair(logger, base_results, target_results) | 42 | return result, resultstring |
136 | logger.debug('Retrieved regression pair=%s' % regression_pair) | 43 | |
137 | regression.run_regression_with_regression_pairing(logger, regression_pair, base_results, target_results) | 44 | def get_results(logger, source): |
138 | return 0 | 45 | return resultutils.load_resultsdata(source, configmap=resultutils.regression_map) |
46 | |||
47 | def regression(args, logger): | ||
48 | base_results = get_results(logger, args.base_result) | ||
49 | target_results = get_results(logger, args.target_result) | ||
50 | |||
51 | regression_common(args, logger, base_results, target_results) | ||
52 | |||
53 | def regression_common(args, logger, base_results, target_results): | ||
54 | if args.base_result_id: | ||
55 | base_results = resultutils.filter_resultsdata(base_results, args.base_result_id) | ||
56 | if args.target_result_id: | ||
57 | target_results = resultutils.filter_resultsdata(target_results, args.target_result_id) | ||
58 | |||
59 | matches = [] | ||
60 | regressions = [] | ||
61 | notfound = [] | ||
62 | |||
63 | for a in base_results: | ||
64 | if a in target_results: | ||
65 | base = list(base_results[a].keys()) | ||
66 | target = list(target_results[a].keys()) | ||
67 | # We may have multiple base/targets which are for different configurations. Start by | ||
68 | # removing any pairs which match | ||
69 | for c in base.copy(): | ||
70 | for b in target.copy(): | ||
71 | res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b]) | ||
72 | if not res: | ||
73 | matches.append(resstr) | ||
74 | base.remove(c) | ||
75 | target.remove(b) | ||
76 | break | ||
77 | # Should only now see regressions, we may not be able to match multiple pairs directly | ||
78 | for c in base: | ||
79 | for b in target: | ||
80 | res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b]) | ||
81 | if res: | ||
82 | regressions.append(resstr) | ||
83 | else: | ||
84 | notfound.append("%s not found in target" % a) | ||
85 | print("\n".join(matches)) | ||
86 | print("\n".join(regressions)) | ||
87 | print("\n".join(notfound)) | ||
139 | 88 | ||
140 | def regression_directory(args, logger): | ||
141 | base_results = get_results_from_directory(logger, args.base_result_directory) | ||
142 | target_results = get_results_from_directory(logger, args.target_result_directory) | ||
143 | remove_testcases_to_optimize_regression_runtime(logger, base_results) | ||
144 | remove_testcases_to_optimize_regression_runtime(logger, target_results) | ||
145 | regression = ResultsRegressionSelector() | ||
146 | regression_pair = regression.get_regression_base_target_pair(logger, base_results, target_results) | ||
147 | logger.debug('Retrieved regression pair=%s' % regression_pair) | ||
148 | regression.run_regression_with_regression_pairing(logger, regression_pair, base_results, target_results) | ||
149 | return 0 | 89 | return 0 |
150 | 90 | ||
151 | def regression_git(args, logger): | 91 | def regression_git(args, logger): |
152 | from resulttool.resultsutils import checkout_git_dir | ||
153 | base_results = {} | 92 | base_results = {} |
154 | target_results = {} | 93 | target_results = {} |
155 | if checkout_git_dir(args.source_dir, args.base_git_branch): | 94 | |
156 | base_results = get_results_from_directory(logger, args.source_dir) | 95 | tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}" |
157 | if checkout_git_dir(args.source_dir, args.target_git_branch): | 96 | repo = GitRepo(args.repo) |
158 | target_results = get_results_from_directory(logger, args.source_dir) | 97 | |
159 | if base_results and target_results: | 98 | revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=args.branch) |
160 | remove_testcases_to_optimize_regression_runtime(logger, base_results) | 99 | |
161 | remove_testcases_to_optimize_regression_runtime(logger, target_results) | 100 | if args.branch2: |
162 | regression = ResultsRegressionSelector() | 101 | revs2 = gitarchive.get_test_revs(logger, repo, tag_name, branch=args.branch2) |
163 | regression_pair = regression.get_regression_base_target_pair(logger, base_results, target_results) | 102 | if not len(revs2): |
164 | logger.debug('Retrieved regression pair=%s' % regression_pair) | 103 | logger.error("No revisions found to compare against") |
165 | regression.run_regression_with_regression_pairing(logger, regression_pair, base_results, target_results) | 104 | return 1 |
105 | if not len(revs): | ||
106 | logger.error("No revision to report on found") | ||
107 | return 1 | ||
108 | else: | ||
109 | if len(revs) < 2: | ||
110 | logger.error("Only %d tester revisions found, unable to generate report" % len(revs)) | ||
111 | return 1 | ||
112 | |||
113 | # Pick revisions | ||
114 | if args.commit: | ||
115 | if args.commit_number: | ||
116 | logger.warning("Ignoring --commit-number as --commit was specified") | ||
117 | index1 = gitarchive.rev_find(revs, 'commit', args.commit) | ||
118 | elif args.commit_number: | ||
119 | index1 = gitarchive.rev_find(revs, 'commit_number', args.commit_number) | ||
120 | else: | ||
121 | index1 = len(revs) - 1 | ||
122 | |||
123 | if args.branch2: | ||
124 | revs2.append(revs[index1]) | ||
125 | index1 = len(revs2) - 1 | ||
126 | revs = revs2 | ||
127 | |||
128 | if args.commit2: | ||
129 | if args.commit_number2: | ||
130 | logger.warning("Ignoring --commit-number2 as --commit2 was specified") | ||
131 | index2 = gitarchive.rev_find(revs, 'commit', args.commit2) | ||
132 | elif args.commit_number2: | ||
133 | index2 = gitarchive.rev_find(revs, 'commit_number', args.commit_number2) | ||
134 | else: | ||
135 | if index1 > 0: | ||
136 | index2 = index1 - 1 | ||
137 | # Find the closest matching commit number for comparison | ||
138 | # In future we could check the commit is a common ancestor and | ||
139 | # continue back if not, but this is good enough for now | ||
140 | while index2 > 0 and revs[index2].commit_number > revs[index1].commit_number: | ||
141 | index2 = index2 - 1 | ||
142 | else: | ||
143 | logger.error("Unable to determine the other commit, use " | ||
144 | "--commit2 or --commit-number2 to specify it") | ||
145 | return 1 | ||
146 | |||
147 | logger.info("Comparing:\n%s\nto\n%s\n" % (revs[index1], revs[index2])) | ||
148 | |||
149 | base_results = resultutils.git_get_result(repo, revs[index1][2]) | ||
150 | target_results = resultutils.git_get_result(repo, revs[index2][2]) | ||
151 | |||
152 | regression_common(args, logger, base_results, target_results) | ||
153 | |||
166 | return 0 | 154 | return 0 |
167 | 155 | ||
168 | def register_commands(subparsers): | 156 | def register_commands(subparsers): |
169 | """Register subcommands from this plugin""" | 157 | """Register subcommands from this plugin""" |
170 | parser_build = subparsers.add_parser('regression-file', help='regression file analysis', | 158 | |
159 | parser_build = subparsers.add_parser('regression', help='regression file/directory analysis', | ||
160 | description='regression analysis comparing the base set of results to the target results', | ||
161 | group='analysis') | ||
162 | parser_build.set_defaults(func=regression) | ||
163 | parser_build.add_argument('base_result', | ||
164 | help='base result file/directory for the comparison') | ||
165 | parser_build.add_argument('target_result', | ||
166 | help='target result file/directory to compare with') | ||
167 | parser_build.add_argument('-b', '--base-result-id', default='', | ||
168 | help='(optional) filter the base results to this result ID') | ||
169 | parser_build.add_argument('-t', '--target-result-id', default='', | ||
170 | help='(optional) filter the target results to this result ID') | ||
171 | |||
172 | parser_build = subparsers.add_parser('regression-git', help='regression git analysis', | ||
171 | description='regression analysis comparing base result set to target ' | 173 | description='regression analysis comparing base result set to target ' |
172 | 'result set', | 174 | 'result set', |
173 | group='analysis') | 175 | group='analysis') |
174 | parser_build.set_defaults(func=regression_file) | 176 | parser_build.set_defaults(func=regression_git) |
175 | parser_build.add_argument('base_result_file', | 177 | parser_build.add_argument('repo', |
176 | help='base result file provide the base result set') | 178 | help='the git repository containing the data') |
177 | parser_build.add_argument('target_result_file', | ||
178 | help='target result file provide the target result set for comparison with base result') | ||
179 | parser_build.add_argument('-b', '--base-result-id', default='', | 179 | parser_build.add_argument('-b', '--base-result-id', default='', |
180 | help='(optional) default select regression based on configurations unless base result ' | 180 | help='(optional) default select regression based on configurations unless base result ' |
181 | 'id was provided') | 181 | 'id was provided') |
@@ -183,26 +183,10 @@ def register_commands(subparsers):
183 | help='(optional) default select regression based on configurations unless target result ' | 183 | help='(optional) default select regression based on configurations unless target result ' |
184 | 'id was provided') | 184 | 'id was provided') |
185 | 185 | ||
186 | parser_build = subparsers.add_parser('regression-dir', help='regression directory analysis', | 186 | parser_build.add_argument('--branch', '-B', default='master', help="Branch to find commit in") |
187 | description='regression analysis comparing base result set to target ' | 187 | parser_build.add_argument('--branch2', help="Branch to find comparison revisions in")
188 | 'result set', | 188 | parser_build.add_argument('--commit', help="Revision to search for") |
189 | group='analysis') | 189 | parser_build.add_argument('--commit-number', help="Revision number to search for, redundant if --commit is specified") |
190 | parser_build.set_defaults(func=regression_directory) | 190 | parser_build.add_argument('--commit2', help="Revision to compare with") |
191 | parser_build.add_argument('base_result_directory', | 191 | parser_build.add_argument('--commit-number2', help="Revision number to compare with, redundant if --commit2 is specified") |
192 | help='base result directory provide the files for base result set') | ||
193 | parser_build.add_argument('target_result_directory', | ||
194 | help='target result file provide the files for target result set for comparison with ' | ||
195 | 'base result') | ||
196 | 192 | ||
197 | parser_build = subparsers.add_parser('regression-git', help='regression git analysis', | ||
198 | description='regression analysis comparing base result set to target ' | ||
199 | 'result set', | ||
200 | group='analysis') | ||
201 | parser_build.set_defaults(func=regression_git) | ||
202 | parser_build.add_argument('source_dir', | ||
203 | help='source directory that contain the git repository with test result files') | ||
204 | parser_build.add_argument('base_git_branch', | ||
205 | help='base git branch that provide the files for base result set') | ||
206 | parser_build.add_argument('target_git_branch', | ||
207 | help='target git branch that provide the files for target result set for comparison with ' | ||
208 | 'base result') | ||
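As a toy illustration of the comparison primitive added above (the test names
and statuses are hypothetical; it assumes poky's scripts/lib is on sys.path so
the resulttool and oeqa modules import):

    import logging
    from resulttool.regression import compare_result

    logger = logging.getLogger("regression-demo")
    base = {'result': {'ptest.a': {'status': 'PASSED'},
                       'ptest.b': {'status': 'PASSED'}}}
    target = {'result': {'ptest.a': {'status': 'PASSED'},
                         'ptest.b': {'status': 'FAILED'}}}

    # Only test cases whose status differs are reported; if nothing differs
    # the pair counts as a match and is removed from further consideration
    res, resstr = compare_result(logger, "base-run", "target-run", base, target)
    # res == {'ptest.b': {'base': 'PASSED', 'target': 'FAILED'}}
    # resstr starts with "Regression: base-run"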
diff --git a/scripts/lib/resulttool/report.py b/scripts/lib/resulttool/report.py
index ab5de1f3a7..2f5ea308e2 100644
--- a/scripts/lib/resulttool/report.py
+++ b/scripts/lib/resulttool/report.py
@@ -1,6 +1,7 @@
1 | # test result tool - report text based test results | 1 | # test result tool - report text based test results |
2 | # | 2 | # |
3 | # Copyright (c) 2019, Intel Corporation. | 3 | # Copyright (c) 2019, Intel Corporation. |
4 | # Copyright (c) 2019, Linux Foundation | ||
4 | # | 5 | # |
5 | # This program is free software; you can redistribute it and/or modify it | 6 | # This program is free software; you can redistribute it and/or modify it |
6 | # under the terms and conditions of the GNU General Public License, | 7 | # under the terms and conditions of the GNU General Public License, |
@@ -14,100 +15,120 @@
14 | import os | 15 | import os |
15 | import glob | 16 | import glob |
16 | import json | 17 | import json |
17 | from resulttool.resultsutils import checkout_git_dir, load_json_file, get_dict_value, get_directory_files | 18 | import resulttool.resultutils as resultutils |
19 | from oeqa.utils.git import GitRepo | ||
20 | import oeqa.utils.gitarchive as gitarchive | ||
21 | |||
18 | 22 | ||
19 | class ResultsTextReport(object): | 23 | class ResultsTextReport(object): |
24 | def __init__(self): | ||
25 | self.ptests = {} | ||
26 | self.result_types = {'passed': ['PASSED', 'passed'], | ||
27 | 'failed': ['FAILED', 'failed', 'ERROR', 'error', 'UNKNOWN'], | ||
28 | 'skipped': ['SKIPPED', 'skipped']} | ||
29 | |||
30 | |||
31 | def handle_ptest_result(self, k, status, result): | ||
32 | if k == 'ptestresult.sections': | ||
33 | return | ||
34 | _, suite, test = k.split(".", 2) | ||
35 | # Handle suite names containing a dot, e.g. 'glib-2.0' | ||
36 | if suite not in result['ptestresult.sections']: | ||
37 | try: | ||
38 | _, suite, suite1, test = k.split(".", 3) | ||
39 | if suite + "." + suite1 in result['ptestresult.sections']: | ||
40 | suite = suite + "." + suite1 | ||
41 | except ValueError: | ||
42 | pass | ||
43 | if suite not in self.ptests: | ||
44 | self.ptests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []} | ||
45 | for tk in self.result_types: | ||
46 | if status in self.result_types[tk]: | ||
47 | self.ptests[suite][tk] += 1 | ||
48 | if suite in result['ptestresult.sections']: | ||
49 | if 'duration' in result['ptestresult.sections'][suite]: | ||
50 | self.ptests[suite]['duration'] = result['ptestresult.sections'][suite]['duration'] | ||
51 | if 'timeout' in result['ptestresult.sections'][suite]: | ||
52 | self.ptests[suite]['duration'] += " T" | ||
20 | 53 | ||
21 | def get_aggregated_test_result(self, logger, testresult): | 54 | def get_aggregated_test_result(self, logger, testresult): |
22 | test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []} | 55 | test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []} |
23 | result_types = {'passed': ['PASSED', 'passed'], | 56 | result = testresult.get('result', []) |
24 | 'failed': ['FAILED', 'failed', 'ERROR', 'error', 'UNKNOWN'], | ||
25 | 'skipped': ['SKIPPED', 'skipped']} | ||
26 | result = get_dict_value(logger, testresult, 'result') | ||
27 | for k in result: | 57 | for k in result: |
28 | test_status = get_dict_value(logger, result[k], 'status') | 58 | test_status = result[k].get('status', []) |
29 | for tk in result_types: | 59 | for tk in self.result_types: |
30 | if test_status in result_types[tk]: | 60 | if test_status in self.result_types[tk]: |
31 | test_count_report[tk] += 1 | 61 | test_count_report[tk] += 1 |
32 | if test_status in result_types['failed']: | 62 | if test_status in self.result_types['failed']: |
33 | test_count_report['failed_testcases'].append(k) | 63 | test_count_report['failed_testcases'].append(k) |
64 | if k.startswith("ptestresult."): | ||
65 | self.handle_ptest_result(k, test_status, result) | ||
34 | return test_count_report | 66 | return test_count_report |
35 | 67 | ||
36 | def get_test_result_percentage(self, test_result_count): | 68 | def print_test_report(self, template_file_name, test_count_reports): |
37 | total_tested = test_result_count['passed'] + test_result_count['failed'] + test_result_count['skipped'] | ||
38 | test_percent_report = {'passed': 0, 'failed': 0, 'skipped': 0} | ||
39 | for k in test_percent_report: | ||
40 | test_percent_report[k] = format(test_result_count[k] / total_tested * 100, '.2f') | ||
41 | return test_percent_report | ||
42 | |||
43 | def add_test_configurations(self, test_report, source_dir, file, result_id): | ||
44 | test_report['file_dir'] = self._get_short_file_dir(source_dir, file) | ||
45 | test_report['result_id'] = result_id | ||
46 | test_report['test_file_dir_result_id'] = '%s_%s' % (test_report['file_dir'], test_report['result_id']) | ||
47 | |||
48 | def _get_short_file_dir(self, source_dir, file): | ||
49 | file_dir = os.path.dirname(file) | ||
50 | source_dir = source_dir[:-1] if source_dir[-1] == '/' else source_dir | ||
51 | if file_dir == source_dir: | ||
52 | return 'None' | ||
53 | return file_dir.replace(source_dir, '') | ||
54 | |||
55 | def get_max_string_len(self, test_result_list, key, default_max_len): | ||
56 | max_len = default_max_len | ||
57 | for test_result in test_result_list: | ||
58 | value_len = len(test_result[key]) | ||
59 | if value_len > max_len: | ||
60 | max_len = value_len | ||
61 | return max_len | ||
62 | |||
63 | def print_test_report(self, template_file_name, test_count_reports, test_percent_reports, | ||
64 | max_len_dir, max_len_result_id): | ||
65 | from jinja2 import Environment, FileSystemLoader | 69 | from jinja2 import Environment, FileSystemLoader |
66 | script_path = os.path.dirname(os.path.realpath(__file__)) | 70 | script_path = os.path.dirname(os.path.realpath(__file__)) |
67 | file_loader = FileSystemLoader(script_path + '/template') | 71 | file_loader = FileSystemLoader(script_path + '/template') |
68 | env = Environment(loader=file_loader, trim_blocks=True) | 72 | env = Environment(loader=file_loader, trim_blocks=True) |
69 | template = env.get_template(template_file_name) | 73 | template = env.get_template(template_file_name) |
70 | output = template.render(test_count_reports=test_count_reports, | 74 | havefailed = False |
71 | test_percent_reports=test_percent_reports, | 75 | haveptest = bool(self.ptests) |
72 | max_len_dir=max_len_dir, | 76 | reportvalues = [] |
73 | max_len_result_id=max_len_result_id) | 77 | cols = ['passed', 'failed', 'skipped'] |
74 | print('Printing text-based test report:') | 78 | maxlen = {'passed' : 0, 'failed' : 0, 'skipped' : 0, 'result_id': 0, 'testseries' : 0, 'ptest' : 0 } |
79 | for line in test_count_reports: | ||
80 | total_tested = line['passed'] + line['failed'] + line['skipped'] | ||
81 | vals = {} | ||
82 | vals['result_id'] = line['result_id'] | ||
83 | vals['testseries'] = line['testseries'] | ||
84 | vals['sort'] = line['testseries'] + "_" + line['result_id'] | ||
85 | vals['failed_testcases'] = line['failed_testcases'] | ||
86 | for k in cols: | ||
87 | vals[k] = "%d (%s%%)" % (line[k], format(line[k] / total_tested * 100, '.0f')) | ||
88 | for k in maxlen: | ||
89 | if k in vals and len(vals[k]) > maxlen[k]: | ||
90 | maxlen[k] = len(vals[k]) | ||
91 | reportvalues.append(vals) | ||
92 | if line['failed_testcases']: | ||
93 | havefailed = True | ||
94 | for ptest in self.ptests: | ||
95 | if len(ptest) > maxlen['ptest']: | ||
96 | maxlen['ptest'] = len(ptest) | ||
97 | output = template.render(reportvalues=reportvalues, | ||
98 | havefailed=havefailed, | ||
99 | haveptest=haveptest, | ||
100 | ptests=self.ptests, | ||
101 | maxlen=maxlen) | ||
75 | print(output) | 102 | print(output) |
76 | 103 | ||
77 | def view_test_report(self, logger, source_dir, git_branch): | 104 | def view_test_report(self, logger, source_dir, tag): |
78 | if git_branch: | ||
79 | checkout_git_dir(source_dir, git_branch) | ||
80 | test_count_reports = [] | 105 | test_count_reports = [] |
81 | test_percent_reports = [] | 106 | if tag: |
82 | for file in get_directory_files(source_dir, ['.git'], 'testresults.json'): | 107 | repo = GitRepo(source_dir) |
83 | logger.debug('Computing result for test result file: %s' % file) | 108 | testresults = resultutils.git_get_result(repo, [tag]) |
84 | testresults = load_json_file(file) | 109 | else: |
85 | for k in testresults: | 110 | testresults = resultutils.load_resultsdata(source_dir) |
86 | test_count_report = self.get_aggregated_test_result(logger, testresults[k]) | 111 | for testsuite in testresults: |
87 | test_percent_report = self.get_test_result_percentage(test_count_report) | 112 | for resultid in testresults[testsuite]: |
88 | self.add_test_configurations(test_count_report, source_dir, file, k) | 113 | result = testresults[testsuite][resultid] |
89 | self.add_test_configurations(test_percent_report, source_dir, file, k) | 114 | test_count_report = self.get_aggregated_test_result(logger, result) |
115 | test_count_report['testseries'] = result['configuration']['TESTSERIES'] | ||
116 | test_count_report['result_id'] = resultid | ||
90 | test_count_reports.append(test_count_report) | 117 | test_count_reports.append(test_count_report) |
91 | test_percent_reports.append(test_percent_report) | 118 | self.print_test_report('test_report_full_text.txt', test_count_reports) |
92 | max_len_dir = self.get_max_string_len(test_count_reports, 'file_dir', len('file_dir')) | ||
93 | max_len_result_id = self.get_max_string_len(test_count_reports, 'result_id', len('result_id')) | ||
94 | self.print_test_report('test_report_full_text.txt', test_count_reports, test_percent_reports, | ||
95 | max_len_dir, max_len_result_id) | ||
96 | 119 | ||
97 | def report(args, logger): | 120 | def report(args, logger): |
98 | report = ResultsTextReport() | 121 | report = ResultsTextReport() |
99 | report.view_test_report(logger, args.source_dir, args.git_branch) | 122 | report.view_test_report(logger, args.source_dir, args.tag) |
100 | return 0 | 123 | return 0 |
101 | 124 | ||
102 | def register_commands(subparsers): | 125 | def register_commands(subparsers): |
103 | """Register subcommands from this plugin""" | 126 | """Register subcommands from this plugin""" |
104 | parser_build = subparsers.add_parser('report', help='report test result summary', | 127 | parser_build = subparsers.add_parser('report', help='summarise test results', |
105 | description='report text-based test result summary from the source directory', | 128 | description='print a text-based summary of the test results', |
106 | group='analysis') | 129 | group='analysis') |
107 | parser_build.set_defaults(func=report) | 130 | parser_build.set_defaults(func=report) |
108 | parser_build.add_argument('source_dir', | 131 | parser_build.add_argument('source_dir', |
109 | help='source directory that contain the test result files for reporting') | 132 | help='source file/directory that contain the test result files to summarise') |
110 | parser_build.add_argument('-b', '--git-branch', default='', | 133 | parser_build.add_argument('-t', '--tag', default='', |
111 | help='(optional) default assume source directory contains all available files for ' | 134 | help='source_dir is a git repository, report on the tag specified from that repository') |
112 | 'reporting unless a git branch was provided where it will try to checkout ' | ||
113 | 'the provided git branch assuming source directory was a git repository') | ||
diff --git a/scripts/lib/resulttool/resultsutils.py b/scripts/lib/resulttool/resultsutils.py
deleted file mode 100644
index 368786922c..0000000000
--- a/scripts/lib/resulttool/resultsutils.py
+++ /dev/null
@@ -1,67 +0,0 @@
1 | # test result tool - utilities | ||
2 | # | ||
3 | # Copyright (c) 2019, Intel Corporation. | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify it | ||
6 | # under the terms and conditions of the GNU General Public License, | ||
7 | # version 2, as published by the Free Software Foundation. | ||
8 | # | ||
9 | # This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | # more details. | ||
13 | # | ||
14 | import os | ||
15 | import json | ||
16 | import scriptpath | ||
17 | scriptpath.add_oe_lib_path() | ||
18 | from oeqa.utils.git import GitRepo, GitError | ||
19 | |||
20 | def load_json_file(file): | ||
21 | with open(file, "r") as f: | ||
22 | return json.load(f) | ||
23 | |||
24 | def dump_json_data(write_dir, file_name, json_data): | ||
25 | file_content = json.dumps(json_data, sort_keys=True, indent=4) | ||
26 | file_path = os.path.join(write_dir, file_name) | ||
27 | with open(file_path, 'w') as the_file: | ||
28 | the_file.write(file_content) | ||
29 | |||
30 | def get_dict_value(logger, dict, key): | ||
31 | try: | ||
32 | return dict[key] | ||
33 | except KeyError: | ||
34 | if logger: | ||
35 | logger.debug('Faced KeyError exception: dict=%s: key=%s' % (dict, key)) | ||
36 | return None | ||
37 | except TypeError: | ||
38 | if logger: | ||
39 | logger.debug('Faced TypeError exception: dict=%s: key=%s' % (dict, key)) | ||
40 | return None | ||
41 | |||
42 | def pop_dict_element(logger, dict, key): | ||
43 | try: | ||
44 | dict.pop(key) | ||
45 | except KeyError: | ||
46 | if logger: | ||
47 | logger.debug('Faced KeyError exception: dict=%s: key=%s' % (dict, key)) | ||
48 | except AttributeError: | ||
49 | if logger: | ||
50 | logger.debug('Faced AttributeError exception: dict=%s: key=%s' % (dict, key)) | ||
51 | |||
52 | def checkout_git_dir(git_dir, git_branch): | ||
53 | try: | ||
54 | repo = GitRepo(git_dir, is_topdir=True) | ||
55 | repo.run_cmd('checkout %s' % git_branch) | ||
56 | return True | ||
57 | except GitError: | ||
58 | return False | ||
59 | |||
60 | def get_directory_files(source_dir, excludes, file): | ||
61 | files_in_dir = [] | ||
62 | for root, dirs, files in os.walk(source_dir, topdown=True): | ||
63 | [dirs.remove(d) for d in list(dirs) if d in excludes] | ||
64 | for name in files: | ||
65 | if name == file: | ||
66 | files_in_dir.append(os.path.join(root, name)) | ||
67 | return files_in_dir \ No newline at end of file | ||
diff --git a/scripts/lib/resulttool/resultutils.py b/scripts/lib/resulttool/resultutils.py
new file mode 100644
index 0000000000..06cceef796
--- /dev/null
+++ b/scripts/lib/resulttool/resultutils.py
@@ -0,0 +1,127 @@
1 | # resulttool - common library/utility functions | ||
2 | # | ||
3 | # Copyright (c) 2019, Intel Corporation. | ||
4 | # Copyright (c) 2019, Linux Foundation | ||
5 | # | ||
6 | # This program is free software; you can redistribute it and/or modify it | ||
7 | # under the terms and conditions of the GNU General Public License, | ||
8 | # version 2, as published by the Free Software Foundation. | ||
9 | # | ||
10 | # This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | # more details. | ||
14 | # | ||
15 | import os | ||
16 | import json | ||
17 | import scriptpath | ||
18 | scriptpath.add_oe_lib_path() | ||
19 | |||
20 | flatten_map = { | ||
21 | "oeselftest": [], | ||
22 | "runtime": [], | ||
23 | "sdk": [], | ||
24 | "sdkext": [] | ||
25 | } | ||
26 | regression_map = { | ||
27 | "oeselftest": ['TEST_TYPE', 'MACHINE'], | ||
28 | "runtime": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'IMAGE_PKGTYPE', 'DISTRO'], | ||
29 | "sdk": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE'], | ||
30 | "sdkext": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE'] | ||
31 | } | ||
32 | store_map = { | ||
33 | "oeselftest": ['TEST_TYPE'], | ||
34 | "runtime": ['TEST_TYPE', 'DISTRO', 'MACHINE', 'IMAGE_BASENAME'], | ||
35 | "sdk": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME'], | ||
36 | "sdkext": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME'] | ||
37 | } | ||
38 | |||
39 | # | ||
40 | # Load the json file and append the results data into the provided results dict | ||
41 | # | ||
42 | def append_resultsdata(results, f, configmap=store_map): | ||
43 | if type(f) is str: | ||
44 | with open(f, "r") as filedata: | ||
45 | data = json.load(filedata) | ||
46 | else: | ||
47 | data = f | ||
48 | for res in data: | ||
49 | if "configuration" not in data[res] or "result" not in data[res]: | ||
50 | raise ValueError("Test results data without configuration or result section?") | ||
51 | if "TESTSERIES" not in data[res]["configuration"]: | ||
52 | data[res]["configuration"]["TESTSERIES"] = os.path.basename(os.path.dirname(f)) | ||
53 | testtype = data[res]["configuration"].get("TEST_TYPE") | ||
54 | if testtype not in configmap: | ||
55 | raise ValueError("Unknown test type %s" % testtype) | ||
56 | configvars = configmap[testtype] | ||
57 | testpath = "/".join(data[res]["configuration"].get(i) for i in configmap[testtype]) | ||
58 | if testpath not in results: | ||
59 | results[testpath] = {} | ||
60 | if 'ptestresult.rawlogs' in data[res]['result']: | ||
61 | del data[res]['result']['ptestresult.rawlogs'] | ||
62 | if 'ptestresult.sections' in data[res]['result']: | ||
63 | for i in data[res]['result']['ptestresult.sections']: | ||
64 | del data[res]['result']['ptestresult.sections'][i]['log'] | ||
65 | results[testpath][res] = data[res] | ||
66 | |||
67 | # | ||
68 | # Walk a directory and find/load results data | ||
69 | # or load directly from a file | ||
70 | # | ||
71 | def load_resultsdata(source, configmap=store_map): | ||
72 | results = {} | ||
73 | if os.path.isfile(source): | ||
74 | append_resultsdata(results, source, configmap) | ||
75 | return results | ||
76 | for root, dirs, files in os.walk(source): | ||
77 | for name in files: | ||
78 | f = os.path.join(root, name) | ||
79 | if name == "testresults.json": | ||
80 | append_resultsdata(results, f, configmap) | ||
81 | return results | ||
82 | |||
83 | def filter_resultsdata(results, resultid): | ||
84 | newresults = {} | ||
85 | for r in results: | ||
86 | for i in results[r]: | ||
87 | if i == resultid: | ||
88 | newresults[r] = {} | ||
89 | newresults[r][i] = results[r][i] | ||
90 | return newresults | ||
91 | |||
92 | def save_resultsdata(results, destdir, fn="testresults.json"): | ||
93 | for res in results: | ||
94 | if res: | ||
95 | dst = destdir + "/" + res + "/" + fn | ||
96 | else: | ||
97 | dst = destdir + "/" + fn | ||
98 | os.makedirs(os.path.dirname(dst), exist_ok=True) | ||
99 | with open(dst, 'w') as f: | ||
100 | f.write(json.dumps(results[res], sort_keys=True, indent=4)) | ||
101 | |||
102 | def git_get_result(repo, tags): | ||
103 | git_objs = [] | ||
104 | for tag in tags: | ||
105 | files = repo.run_cmd(['ls-tree', "--name-only", "-r", tag]).splitlines() | ||
106 | git_objs.extend([tag + ':' + f for f in files if f.endswith("testresults.json")]) | ||
107 | |||
108 | def parse_json_stream(data): | ||
109 | """Parse multiple concatenated JSON objects""" | ||
110 | objs = [] | ||
111 | json_d = "" | ||
112 | for line in data.splitlines(): | ||
113 | if line == '}{': | ||
114 | json_d += '}' | ||
115 | objs.append(json.loads(json_d)) | ||
116 | json_d = '{' | ||
117 | else: | ||
118 | json_d += line | ||
119 | objs.append(json.loads(json_d)) | ||
120 | return objs | ||
121 | |||
122 | # Optimize by reading all data with one git command | ||
123 | results = {} | ||
124 | for obj in parse_json_stream(repo.run_cmd(['show'] + git_objs + ['--'])): | ||
125 | append_resultsdata(results, obj) | ||
126 | |||
127 | return results | ||
diff --git a/scripts/lib/resulttool/store.py b/scripts/lib/resulttool/store.py
index 2c6fd8492c..6744fb3c05 100644
--- a/scripts/lib/resulttool/store.py
+++ b/scripts/lib/resulttool/store.py
@@ -1,6 +1,7 @@
1 | # test result tool - store test results | 1 | # resulttool - store test results |
2 | # | 2 | # |
3 | # Copyright (c) 2019, Intel Corporation. | 3 | # Copyright (c) 2019, Intel Corporation. |
4 | # Copyright (c) 2019, Linux Foundation | ||
4 | # | 5 | # |
5 | # This program is free software; you can redistribute it and/or modify it | 6 | # This program is free software; you can redistribute it and/or modify it |
6 | # under the terms and conditions of the GNU General Public License, | 7 | # under the terms and conditions of the GNU General Public License, |
@@ -11,100 +12,81 @@
11 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 12 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
12 | # more details. | 13 | # more details. |
13 | # | 14 | # |
14 | import datetime | ||
15 | import tempfile | 15 | import tempfile |
16 | import os | 16 | import os |
17 | import subprocess | 17 | import subprocess |
18 | import json | ||
19 | import shutil | ||
18 | import scriptpath | 20 | import scriptpath |
19 | scriptpath.add_bitbake_lib_path() | 21 | scriptpath.add_bitbake_lib_path() |
20 | scriptpath.add_oe_lib_path() | 22 | scriptpath.add_oe_lib_path() |
21 | from resulttool.resultsutils import checkout_git_dir | 23 | import resulttool.resultutils as resultutils |
22 | try: | 24 | import oeqa.utils.gitarchive as gitarchive |
23 | import bb | ||
24 | except ImportError: | ||
25 | pass | ||
26 | 25 | ||
27 | class ResultsGitStore(object): | ||
28 | 26 | ||
29 | def _get_output_dir(self): | 27 | def store(args, logger): |
30 | basepath = os.environ['BUILDDIR'] | 28 | tempdir = tempfile.mkdtemp(prefix='testresults.') |
31 | return basepath + '/testresults_%s/' % datetime.datetime.now().strftime("%Y%m%d%H%M%S") | 29 | try: |
32 | 30 | results = {} | |
33 | def _create_temporary_workspace_dir(self): | 31 | logger.info('Reading files from %s' % args.source) |
34 | return tempfile.mkdtemp(prefix='testresults.') | 32 | for root, dirs, files in os.walk(args.source): |
35 | 33 | for name in files: | |
36 | def _remove_temporary_workspace_dir(self, workspace_dir): | 34 | f = os.path.join(root, name) |
37 | return subprocess.run(["rm", "-rf", workspace_dir]) | 35 | if name == "testresults.json": |
38 | 36 | resultutils.append_resultsdata(results, f) | |
39 | def _oe_copy_files(self, source_dir, destination_dir): | 37 | elif args.all: |
40 | from oe.path import copytree | 38 | dst = f.replace(args.source, tempdir + "/") |
41 | copytree(source_dir, destination_dir) | 39 | os.makedirs(os.path.dirname(dst), exist_ok=True) |
40 | shutil.copyfile(f, dst) | ||
41 | resultutils.save_resultsdata(results, tempdir) | ||
42 | 42 | ||
43 | def _copy_files(self, source_dir, destination_dir, copy_ignore=None): | 43 | if not results and not args.all: |
44 | from shutil import copytree | 44 | if args.allow_empty: |
45 | copytree(source_dir, destination_dir, ignore=copy_ignore) | 45 | logger.info("No results found to store") |
46 | return 0 | ||
47 | logger.error("No results found to store") | ||
48 | return 1 | ||
46 | 49 | ||
47 | def _store_files_to_git(self, logger, file_dir, git_dir, git_branch, commit_msg_subject, commit_msg_body): | 50 | keywords = {'branch': None, 'commit': None, 'commit_count': None} |
48 | logger.debug('Storing test result into git repository (%s) and branch (%s)' | ||
49 | % (git_dir, git_branch)) | ||
50 | return subprocess.run(["oe-git-archive", | ||
51 | file_dir, | ||
52 | "-g", git_dir, | ||
53 | "-b", git_branch, | ||
54 | "--commit-msg-subject", commit_msg_subject, | ||
55 | "--commit-msg-body", commit_msg_body]) | ||
56 | 51 | ||
57 | def store_to_existing(self, logger, source_dir, git_dir, git_branch): | 52 | # Find the branch/commit/commit_count and ensure they all match |
58 | logger.debug('Storing files to existing git repository and branch') | 53 | for suite in results: |
59 | from shutil import ignore_patterns | 54 | for result in results[suite]: |
60 | dest_dir = self._create_temporary_workspace_dir() | 55 | config = results[suite][result]['configuration']['LAYERS']['meta'] |
61 | dest_top_dir = os.path.join(dest_dir, 'top_dir') | 56 | for k in keywords: |
62 | self._copy_files(git_dir, dest_top_dir, copy_ignore=ignore_patterns('.git')) | 57 | if keywords[k] is None: |
63 | self._oe_copy_files(source_dir, dest_top_dir) | 58 | keywords[k] = config.get(k) |
64 | self._store_files_to_git(logger, dest_top_dir, git_dir, git_branch, | 59 | if config.get(k) != keywords[k]: |
65 | 'Store as existing git and branch', 'Store as existing git repository and branch') | 60 | logger.error("Mismatched source commit/branch/count: %s vs %s" % (config.get(k), keywords[k])) |
66 | self._remove_temporary_workspace_dir(dest_dir) | 61 | return 1 |
67 | return git_dir | ||
68 | 62 | ||
69 | def store_to_existing_with_new_branch(self, logger, source_dir, git_dir, git_branch): | 63 | logger.info('Storing test result into git repository %s' % args.git_dir) |
70 | logger.debug('Storing files to existing git repository with new branch') | ||
71 | self._store_files_to_git(logger, source_dir, git_dir, git_branch, | ||
72 | 'Store as existing git with new branch', | ||
73 | 'Store as existing git repository with new branch') | ||
74 | return git_dir | ||
75 | 64 | ||
76 | def store_to_new(self, logger, source_dir, git_branch): | 65 | gitarchive.gitarchive(tempdir, args.git_dir, False, False, |
77 | logger.debug('Storing files to new git repository') | 66 | "Results of {branch}:{commit}", "branch: {branch}\ncommit: {commit}", "{branch}", |
78 | output_dir = self._get_output_dir() | 67 | False, "{branch}/{commit_count}-g{commit}/{tag_number}", |
79 | self._store_files_to_git(logger, source_dir, output_dir, git_branch, | 68 | 'Test run #{tag_number} of {branch}:{commit}', '', |
80 | 'Store as new', 'Store as new git repository') | 69 | [], [], False, keywords, logger) |
81 | return output_dir | ||
82 | 70 | ||
83 | def store(self, logger, source_dir, git_dir, git_branch): | 71 | finally: |
84 | if git_dir: | 72 | subprocess.check_call(["rm", "-rf", tempdir]) |
85 | if checkout_git_dir(git_dir, git_branch): | ||
86 | self.store_to_existing(logger, source_dir, git_dir, git_branch) | ||
87 | else: | ||
88 | self.store_to_existing_with_new_branch(logger, source_dir, git_dir, git_branch) | ||
89 | else: | ||
90 | self.store_to_new(logger, source_dir, git_branch) | ||
91 | 73 | ||
92 | def store(args, logger): | ||
93 | gitstore = ResultsGitStore() | ||
94 | gitstore.store(logger, args.source_dir, args.git_dir, args.git_branch) | ||
95 | return 0 | 74 | return 0 |
96 | 75 | ||
97 | def register_commands(subparsers): | 76 | def register_commands(subparsers): |
98 | """Register subcommands from this plugin""" | 77 | """Register subcommands from this plugin""" |
99 | parser_build = subparsers.add_parser('store', help='store test result files and directories into git repository', | 78 | parser_build = subparsers.add_parser('store', help='store test results into a git repository', |
100 | description='store the testresults.json files and related directories ' | 79 | description='takes a results file or directory of results files and stores ' |
101 | 'from the source directory into the destination git repository ' | 80 | 'them into the destination git repository, splitting out the results ' |
102 | 'with the given git branch', | 81 | 'files as configured', |
103 | group='setup') | 82 | group='setup') |
104 | parser_build.set_defaults(func=store) | 83 | parser_build.set_defaults(func=store) |
105 | parser_build.add_argument('source_dir', | 84 | parser_build.add_argument('source', |
106 | help='source directory that contain the test result files and directories to be stored') | 85 | help='source file or directory that contains the test result files to be stored')
107 | parser_build.add_argument('git_branch', help='git branch used for store') | 86 | parser_build.add_argument('git_dir', |
108 | parser_build.add_argument('-d', '--git-dir', default='', | 87 | help='the location of the git repository to store the results in') |
109 | help='(optional) default store to new <top_dir>/<build>/<testresults_datetime> ' | 88 | parser_build.add_argument('-a', '--all', action='store_true', |
110 | 'directory unless provided with existing git repository as destination') | 89 | help='include all files, not just testresults.json files') |
90 | parser_build.add_argument('-e', '--allow-empty', action='store_true', | ||
91 | help='don\'t error if no results to store are found') | ||
92 | |||
diff --git a/scripts/lib/resulttool/template/test_report_full_text.txt b/scripts/lib/resulttool/template/test_report_full_text.txt
index bc4874ba4b..5081594cf2 100644
--- a/scripts/lib/resulttool/template/test_report_full_text.txt
+++ b/scripts/lib/resulttool/template/test_report_full_text.txt
@@ -1,35 +1,44 @@ | |||
1 | ============================================================================================================== | 1 | ============================================================================================================== |
2 | Test Report (Count of passed, failed, skipped group by file_dir, result_id) | 2 | Test Result Status Summary (Counts/Percentages sorted by testseries, ID) |
3 | ============================================================================================================== | 3 | ============================================================================================================== |
4 | -------------------------------------------------------------------------------------------------------------- | 4 | -------------------------------------------------------------------------------------------------------------- |
5 | {{ 'file_dir'.ljust(max_len_dir) }} | {{ 'result_id'.ljust(max_len_result_id) }} | {{ 'passed'.ljust(10) }} | {{ 'failed'.ljust(10) }} | {{ 'skipped'.ljust(10) }} | 5 | {{ 'Test Series'.ljust(maxlen['testseries']) }} | {{ 'ID'.ljust(maxlen['result_id']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} |
6 | -------------------------------------------------------------------------------------------------------------- | 6 | -------------------------------------------------------------------------------------------------------------- |
7 | {% for report in test_count_reports |sort(attribute='test_file_dir_result_id') %} | 7 | {% for report in reportvalues |sort(attribute='sort') %} |
8 | {{ report.file_dir.ljust(max_len_dir) }} | {{ report.result_id.ljust(max_len_result_id) }} | {{ (report.passed|string).ljust(10) }} | {{ (report.failed|string).ljust(10) }} | {{ (report.skipped|string).ljust(10) }} | 8 | {{ report.testseries.ljust(maxlen['testseries']) }} | {{ report.result_id.ljust(maxlen['result_id']) }} | {{ (report.passed|string).ljust(maxlen['passed']) }} | {{ (report.failed|string).ljust(maxlen['failed']) }} | {{ (report.skipped|string).ljust(maxlen['skipped']) }} |
9 | {% endfor %} | 9 | {% endfor %} |
10 | -------------------------------------------------------------------------------------------------------------- | 10 | -------------------------------------------------------------------------------------------------------------- |
11 | 11 | ||
12 | {% if haveptest %} | ||
12 | ============================================================================================================== | 13 | ============================================================================================================== |
13 | Test Report (Percent of passed, failed, skipped group by file_dir, result_id) | 14 | PTest Result Summary |
14 | ============================================================================================================== | 15 | ============================================================================================================== |
15 | -------------------------------------------------------------------------------------------------------------- | 16 | -------------------------------------------------------------------------------------------------------------- |
16 | {{ 'file_dir'.ljust(max_len_dir) }} | {{ 'result_id'.ljust(max_len_result_id) }} | {{ 'passed_%'.ljust(10) }} | {{ 'failed_%'.ljust(10) }} | {{ 'skipped_%'.ljust(10) }} | 17 | {{ 'Recipe'.ljust(maxlen['ptest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }} |
17 | -------------------------------------------------------------------------------------------------------------- | 18 | -------------------------------------------------------------------------------------------------------------- |
18 | {% for report in test_percent_reports |sort(attribute='test_file_dir_result_id') %} | 19 | {% for ptest in ptests %} |
19 | {{ report.file_dir.ljust(max_len_dir) }} | {{ report.result_id.ljust(max_len_result_id) }} | {{ (report.passed|string).ljust(10) }} | {{ (report.failed|string).ljust(10) }} | {{ (report.skipped|string).ljust(10) }} | 20 | {{ ptest.ljust(maxlen['ptest']) }} | {{ (ptests[ptest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ptests[ptest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ptests[ptest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ptests[ptest]['duration']|string) }} |
20 | {% endfor %} | 21 | {% endfor %} |
21 | -------------------------------------------------------------------------------------------------------------- | 22 | -------------------------------------------------------------------------------------------------------------- |
22 | 23 | ||
24 | {% else %} | ||
25 | There was no ptest data | ||
26 | {% endif %} | ||
27 | |||
23 | ============================================================================================================== | 28 | ============================================================================================================== |
24 | Test Report (Failed test cases group by file_dir, result_id) | 29 | Failed test cases (sorted by testseries, ID) |
25 | ============================================================================================================== | 30 | ============================================================================================================== |
31 | {% if havefailed %} | ||
26 | -------------------------------------------------------------------------------------------------------------- | 32 | -------------------------------------------------------------------------------------------------------------- |
27 | {% for report in test_count_reports |sort(attribute='test_file_dir_result_id') %} | 33 | {% for report in reportvalues |sort(attribute='sort') %} |
28 | {% if report.failed_testcases %} | 34 | {% if report.failed_testcases %} |
29 | file_dir | result_id : {{ report.file_dir }} | {{ report.result_id }} | 35 | testseries | result_id : {{ report.testseries }} | {{ report.result_id }} |
30 | {% for testcase in report.failed_testcases %} | 36 | {% for testcase in report.failed_testcases %} |
31 | {{ testcase }} | 37 | {{ testcase }} |
32 | {% endfor %} | 38 | {% endfor %} |
33 | {% endif %} | 39 | {% endif %} |
34 | {% endfor %} | 40 | {% endfor %} |
35 | -------------------------------------------------------------------------------------------------------------- \ No newline at end of file | 41 | -------------------------------------------------------------------------------------------------------------- |
42 | {% else %} | ||
43 | There were no test failures | ||
44 | {% endif %} | ||
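The reworked template sizes every column from a single maxlen mapping instead of the old mix of per-column variables and hard-coded ljust(10) widths. A hypothetical sketch of how such a mapping can be built before rendering (the helper name and sample rows are illustrative, not resulttool's actual report code):

# Hypothetical sketch of building the maxlen column widths the template
# consumes; the helper and sample data are illustrative only.
def update_maxlen(maxlen, row):
    """Widen each column to fit the longest value seen so far."""
    for key, value in row.items():
        maxlen[key] = max(maxlen.get(key, len(key)), len(str(value)))

# Seed the widths with the header labels the template prints
maxlen = {'testseries': len('Test Series'), 'result_id': len('ID'),
          'passed': len('Passed'), 'failed': len('Failed'), 'skipped': len('Skipped')}
rows = [
    {'testseries': 'qemux86-64', 'result_id': 'oeselftest_fedora', 'passed': 412, 'failed': 0, 'skipped': 3},
    {'testseries': 'beaglebone', 'result_id': 'runtime_core-image-minimal', 'passed': 98, 'failed': 1, 'skipped': 7},
]
for row in rows:
    update_maxlen(maxlen, row)

# Padding then mirrors the template's ljust() calls
print('Test Series'.ljust(maxlen['testseries']), '|', 'ID'.ljust(maxlen['result_id']))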
diff --git a/scripts/resulttool b/scripts/resulttool
index 13430e192a..5a89e1c9be 100755
--- a/scripts/resulttool
+++ b/scripts/resulttool
@@ -1,11 +1,12 @@ | |||
1 | #!/usr/bin/env python3 | 1 | #!/usr/bin/env python3 |
2 | # | 2 | # |
3 | # test results tool - tool for testresults.json (merge test results, regression analysis) | 3 | # test results tool - tool for manipulating OEQA test result json files |
4 | # (merge results, summarise results, regression analysis, generate manual test results file) | ||
4 | # | 5 | # |
5 | # To look for help information. | 6 | # To look for help information. |
6 | # $ resulttool | 7 | # $ resulttool |
7 | # | 8 | # |
8 | # To store test result from oeqa automated tests, execute the below | 9 | # To store test results from oeqa automated tests, execute the below |
9 | # $ resulttool store <source_dir> <git_branch> | 10 | # $ resulttool store <source> <git_dir>
10 | # | 11 | # |
11 | # To merge test results, execute the below | 12 | # To merge test results, execute the below |
@@ -58,7 +59,7 @@ def _validate_user_input_arguments(args): | |||
58 | return True | 59 | return True |
59 | 60 | ||
60 | def main(): | 61 | def main(): |
61 | parser = argparse_oe.ArgumentParser(description="OpenEmbedded test results tool.", | 62 | parser = argparse_oe.ArgumentParser(description="OEQA test result manipulation tool.", |
62 | epilog="Use %(prog)s <subcommand> --help to get help on a specific command") | 63 | epilog="Use %(prog)s <subcommand> --help to get help on a specific command") |
63 | parser.add_argument('-d', '--debug', help='enable debug output', action='store_true') | 64 | parser.add_argument('-d', '--debug', help='enable debug output', action='store_true') |
64 | parser.add_argument('-q', '--quiet', help='print only errors', action='store_true') | 65 | parser.add_argument('-q', '--quiet', help='print only errors', action='store_true') |
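Each resulttool plugin module exposes the register_commands(subparsers) hook shown above, and main() simply dispatches to whatever handler the chosen subparser registered. A self-contained sketch of that pattern using plain argparse (resulttool itself uses the OE argparse_oe wrapper, and the store handler body here is a stand-in):

#!/usr/bin/env python3
# Self-contained sketch of resulttool's subcommand plugin pattern; plain
# argparse stands in for argparse_oe and the handler body is illustrative.
import argparse
import logging
import sys

def store(args, logger):
    logger.info("would store results from %s into %s", args.source, args.git_dir)
    return 0

def register_commands(subparsers):
    """Hook each plugin module exposes to attach its subcommand."""
    parser = subparsers.add_parser('store', help='store test results into a git repository')
    parser.set_defaults(func=store)
    parser.add_argument('source')
    parser.add_argument('git_dir')

def main():
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger('resulttool')
    parser = argparse.ArgumentParser(description="OEQA test result manipulation tool.")
    subparsers = parser.add_subparsers(dest='subcommand')
    register_commands(subparsers)
    args = parser.parse_args()
    if not getattr(args, 'func', None):
        parser.print_help()
        return 1
    return args.func(args, logger)

if __name__ == '__main__':
    sys.exit(main())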