diff options
author | Richard Purdie <richard.purdie@linuxfoundation.org> | 2019-02-16 18:13:00 +0000 |
---|---|---|
committer | Richard Purdie <richard.purdie@linuxfoundation.org> | 2019-03-26 15:38:27 +0000 |
commit | c7eb843d7cfba62b371103e7c13aeb33bb6cb682 (patch) | |
tree | 1f630e358ad8612b569b81b165feb72242249be9 /meta | |
parent | 075cd5e7fe4458563e73cf96177288d68b76e69a (diff) | |
download | poky-c7eb843d7cfba62b371103e7c13aeb33bb6cb682.tar.gz |
resulttool: Improvements to allow integration to the autobuilder
This is a combined patch of the various tweaks and improvements I
made to resulttool:
* Avoid subprocess.run() as it's a python 3.6 feature and we
have autobuilder workers with 3.5.
* Avoid python keywords as variable names
* Simplify dict accesses using .get()
* Rename resultsutils -> resultutils to match the resultstool ->
resulttool rename
* Formalised the handling of "file_name" to "TESTSERIES" which the code
will now add into the json configuration data if it's not present, based
on the directory name.
* When we don't have failed test cases, print something saying so
instead of an empty table
* Tweak the table headers in the report to be more readable (reference
"Test Series" instead of file_id and ID instead of results_id)
* Improve/simplify the max string length handling
* Merge the counts and percentage data into one table in the report
since printing two reports of the same data confuses the user
* Removed the confusing header in the regression report
* Show matches, then regressions, then unmatched runs in the regression
report, also remove chatty unneeded output
* Try harder to "pair" up matching configurations to reduce noise in
the regressions report
* Abstracted the "mapping" table concept used for pairing in the
regression code to general code in resultutils
* Created multiple mappings for results analysis, results storage and
'flattening' results data in a merge
* Simplify the merge command to take a source and a destination,
letting the destination be a directory or a file, removing the need for
an output directory parameter
* Add the 'IMAGE_PKGTYPE' and 'DISTRO' config options to the regression
mappings
* Have the store command place the testresults files in a layout from
the mapping, making commits into the git repo for results storage more
useful for simple comparison purposes
* Set the oe-git-archive tag format appropriately for oeqa results
storage (and simplify the commit messages closer to their defaults)
* Fix oe-git-archive to use the commit/branch data from the results file
* Cleaned up the command option help to match other changes
* Follow the model of git branch/tag processing used by oe-build-perf-report
and use that to read the data using git show to avoid branch change
* Add ptest summary to the report command
* Update the tests to match the above changes
(From OE-Core rev: e4195565d2a50046d4378c97f7a593c41bed51bd)
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
Diffstat (limited to 'meta')
-rw-r--r-- | meta/lib/oeqa/selftest/cases/resulttooltests.py | 106 |
1 files changed, 48 insertions, 58 deletions
diff --git a/meta/lib/oeqa/selftest/cases/resulttooltests.py b/meta/lib/oeqa/selftest/cases/resulttooltests.py index 7bf1ec60c1..0a089c0b7f 100644 --- a/meta/lib/oeqa/selftest/cases/resulttooltests.py +++ b/meta/lib/oeqa/selftest/cases/resulttooltests.py | |||
@@ -4,13 +4,46 @@ basepath = os.path.abspath(os.path.dirname(__file__) + '/../../../../../') | |||
4 | lib_path = basepath + '/scripts/lib' | 4 | lib_path = basepath + '/scripts/lib' |
5 | sys.path = sys.path + [lib_path] | 5 | sys.path = sys.path + [lib_path] |
6 | from resulttool.report import ResultsTextReport | 6 | from resulttool.report import ResultsTextReport |
7 | from resulttool.regression import ResultsRegressionSelector, ResultsRegression | 7 | from resulttool import regression as regression |
8 | from resulttool.merge import ResultsMerge | 8 | from resulttool import resultutils as resultutils |
9 | from resulttool.store import ResultsGitStore | ||
10 | from resulttool.resultsutils import checkout_git_dir | ||
11 | from oeqa.selftest.case import OESelftestTestCase | 9 | from oeqa.selftest.case import OESelftestTestCase |
12 | 10 | ||
13 | class ResultToolTests(OESelftestTestCase): | 11 | class ResultToolTests(OESelftestTestCase): |
12 | base_results_data = {'base_result1': {'configuration': {"TEST_TYPE": "runtime", | ||
13 | "TESTSERIES": "series1", | ||
14 | "IMAGE_BASENAME": "image", | ||
15 | "IMAGE_PKGTYPE": "ipk", | ||
16 | "DISTRO": "mydistro", | ||
17 | "MACHINE": "qemux86"}, | ||
18 | 'result': {}}, | ||
19 | 'base_result2': {'configuration': {"TEST_TYPE": "runtime", | ||
20 | "TESTSERIES": "series1", | ||
21 | "IMAGE_BASENAME": "image", | ||
22 | "IMAGE_PKGTYPE": "ipk", | ||
23 | "DISTRO": "mydistro", | ||
24 | "MACHINE": "qemux86-64"}, | ||
25 | 'result': {}}} | ||
26 | target_results_data = {'target_result1': {'configuration': {"TEST_TYPE": "runtime", | ||
27 | "TESTSERIES": "series1", | ||
28 | "IMAGE_BASENAME": "image", | ||
29 | "IMAGE_PKGTYPE": "ipk", | ||
30 | "DISTRO": "mydistro", | ||
31 | "MACHINE": "qemux86"}, | ||
32 | 'result': {}}, | ||
33 | 'target_result2': {'configuration': {"TEST_TYPE": "runtime", | ||
34 | "TESTSERIES": "series1", | ||
35 | "IMAGE_BASENAME": "image", | ||
36 | "IMAGE_PKGTYPE": "ipk", | ||
37 | "DISTRO": "mydistro", | ||
38 | "MACHINE": "qemux86"}, | ||
39 | 'result': {}}, | ||
40 | 'target_result3': {'configuration': {"TEST_TYPE": "runtime", | ||
41 | "TESTSERIES": "series1", | ||
42 | "IMAGE_BASENAME": "image", | ||
43 | "IMAGE_PKGTYPE": "ipk", | ||
44 | "DISTRO": "mydistro", | ||
45 | "MACHINE": "qemux86-64"}, | ||
46 | 'result': {}}} | ||
14 | 47 | ||
15 | def test_report_can_aggregate_test_result(self): | 48 | def test_report_can_aggregate_test_result(self): |
16 | result_data = {'result': {'test1': {'status': 'PASSED'}, | 49 | result_data = {'result': {'test1': {'status': 'PASSED'}, |
@@ -25,23 +58,12 @@ class ResultToolTests(OESelftestTestCase): | |||
25 | self.assertTrue(result_report['skipped'] == 1, msg="Skipped count not correct:%s" % result_report['skipped']) | 58 | self.assertTrue(result_report['skipped'] == 1, msg="Skipped count not correct:%s" % result_report['skipped']) |
26 | 59 | ||
27 | def test_regression_can_get_regression_base_target_pair(self): | 60 | def test_regression_can_get_regression_base_target_pair(self): |
28 | base_results_data = {'base_result1': {'configuration': {"TEST_TYPE": "oeselftest", | 61 | |
29 | "HOST": "centos-7"}}, | 62 | results = {} |
30 | 'base_result2': {'configuration': {"TEST_TYPE": "oeselftest", | 63 | resultutils.append_resultsdata(results, ResultToolTests.base_results_data) |
31 | "HOST": "centos-7", | 64 | resultutils.append_resultsdata(results, ResultToolTests.target_results_data) |
32 | "MACHINE": "qemux86-64"}}} | 65 | self.assertTrue('target_result1' in results['runtime/mydistro/qemux86/image'], msg="Pair not correct:%s" % results) |
33 | target_results_data = {'target_result1': {'configuration': {"TEST_TYPE": "oeselftest", | 66 | self.assertTrue('target_result3' in results['runtime/mydistro/qemux86-64/image'], msg="Pair not correct:%s" % results) |
34 | "HOST": "centos-7"}}, | ||
35 | 'target_result2': {'configuration': {"TEST_TYPE": "oeselftest", | ||
36 | "HOST": "centos-7", | ||
37 | "MACHINE": "qemux86"}}, | ||
38 | 'target_result3': {'configuration': {"TEST_TYPE": "oeselftest", | ||
39 | "HOST": "centos-7", | ||
40 | "MACHINE": "qemux86-64"}}} | ||
41 | regression = ResultsRegressionSelector() | ||
42 | pair = regression.get_regression_base_target_pair(self.logger, base_results_data, target_results_data) | ||
43 | self.assertTrue('target_result1' in pair['base_result1'], msg="Pair not correct:%s" % pair['base_result1']) | ||
44 | self.assertTrue('target_result3' in pair['base_result2'], msg="Pair not correct:%s" % pair['base_result2']) | ||
45 | 67 | ||
46 | def test_regrresion_can_get_regression_result(self): | 68 | def test_regrresion_can_get_regression_result(self): |
47 | base_result_data = {'result': {'test1': {'status': 'PASSED'}, | 69 | base_result_data = {'result': {'test1': {'status': 'PASSED'}, |
@@ -54,8 +76,7 @@ class ResultToolTests(OESelftestTestCase): | |||
54 | 'test3': {'status': 'PASSED'}, | 76 | 'test3': {'status': 'PASSED'}, |
55 | 'test4': {'status': 'ERROR'}, | 77 | 'test4': {'status': 'ERROR'}, |
56 | 'test5': {'status': 'SKIPPED'}}} | 78 | 'test5': {'status': 'SKIPPED'}}} |
57 | regression = ResultsRegression() | 79 | result, text = regression.compare_result(self.logger, "BaseTestRunName", "TargetTestRunName", base_result_data, target_result_data) |
58 | result = regression.get_regression_result(self.logger, base_result_data, target_result_data) | ||
59 | self.assertTrue(result['test2']['base'] == 'PASSED', | 80 | self.assertTrue(result['test2']['base'] == 'PASSED', |
60 | msg="regression not correct:%s" % result['test2']['base']) | 81 | msg="regression not correct:%s" % result['test2']['base']) |
61 | self.assertTrue(result['test2']['target'] == 'FAILED', | 82 | self.assertTrue(result['test2']['target'] == 'FAILED', |
@@ -66,39 +87,8 @@ class ResultToolTests(OESelftestTestCase): | |||
66 | msg="regression not correct:%s" % result['test3']['target']) | 87 | msg="regression not correct:%s" % result['test3']['target']) |
67 | 88 | ||
68 | def test_merge_can_merged_results(self): | 89 | def test_merge_can_merged_results(self): |
69 | base_results_data = {'base_result1': {}, | 90 | results = {} |
70 | 'base_result2': {}} | 91 | resultutils.append_resultsdata(results, ResultToolTests.base_results_data, configmap=resultutils.flatten_map) |
71 | target_results_data = {'target_result1': {}, | 92 | resultutils.append_resultsdata(results, ResultToolTests.target_results_data, configmap=resultutils.flatten_map) |
72 | 'target_result2': {}, | 93 | self.assertEqual(len(results[''].keys()), 5, msg="Flattened results not correct %s" % str(results)) |
73 | 'target_result3': {}} | ||
74 | |||
75 | merge = ResultsMerge() | ||
76 | results = merge.merge_results(base_results_data, target_results_data) | ||
77 | self.assertTrue(len(results.keys()) == 5, msg="merge not correct:%s" % len(results.keys())) | ||
78 | |||
79 | def test_store_can_store_to_new_git_repository(self): | ||
80 | basepath = os.path.abspath(os.path.dirname(__file__) + '/../../') | ||
81 | source_dir = basepath + '/files/testresults' | ||
82 | git_branch = 'qa-cycle-2.7' | ||
83 | store = ResultsGitStore() | ||
84 | output_dir = store.store_to_new(self.logger, source_dir, git_branch) | ||
85 | self.assertTrue(checkout_git_dir(output_dir, git_branch), msg="store to new git repository failed:%s" % | ||
86 | output_dir) | ||
87 | store._remove_temporary_workspace_dir(output_dir) | ||
88 | 94 | ||
89 | def test_store_can_store_to_existing(self): | ||
90 | basepath = os.path.abspath(os.path.dirname(__file__) + '/../../') | ||
91 | source_dir = basepath + '/files/testresults' | ||
92 | git_branch = 'qa-cycle-2.6' | ||
93 | store = ResultsGitStore() | ||
94 | output_dir = store.store_to_new(self.logger, source_dir, git_branch) | ||
95 | self.assertTrue(checkout_git_dir(output_dir, git_branch), msg="store to new git repository failed:%s" % | ||
96 | output_dir) | ||
97 | git_branch = 'qa-cycle-2.7' | ||
98 | output_dir = store.store_to_existing_with_new_branch(self.logger, source_dir, output_dir, git_branch) | ||
99 | self.assertTrue(checkout_git_dir(output_dir, git_branch), msg="store to existing git repository failed:%s" % | ||
100 | output_dir) | ||
101 | output_dir = store.store_to_existing(self.logger, source_dir, output_dir, git_branch) | ||
102 | self.assertTrue(checkout_git_dir(output_dir, git_branch), msg="store to existing git repository failed:%s" % | ||
103 | output_dir) | ||
104 | store._remove_temporary_workspace_dir(output_dir) | ||