| author | Richard Purdie <richard.purdie@linuxfoundation.org> | 2018-11-02 13:13:43 +0000 |
| --- | --- | --- |
| committer | Richard Purdie <richard.purdie@linuxfoundation.org> | 2018-11-07 23:08:55 +0000 |
| commit | fcf55f58ae73b4e54ea853c816c0fef0f33ca46c (patch) | |
| tree | ccacfad735ff72dbf2ee5695fd25ea1223229480 | |
| parent | d21cf7ca124eac07f46534d12851ebdd7e8d3bcb (diff) | |
| download | poky-fcf55f58ae73b4e54ea853c816c0fef0f33ca46c.tar.gz | |
oeqa/runtime/ptest: Inject results+logs into stored json results file
This allows the ptest results from ptest-runner, run in an image, to be transferred over to the resulting json results output.
Each test is given a pass/skip/fail status so individual results can be monitored, and the raw log output from ptest-runner is also dumped into the results json file, which makes after-the-fact debugging much easier.
Currently the log output is not split up per test, but that would make a good future enhancement.
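To make the injection concrete, the extra entries end up as flat "ptestresult."-prefixed keys next to a raw log entry. A minimal sketch of their shape (the section and test names below are invented; the key layout follows the code in the diff, while the exact nesting of the final json file is up to OETestResultJSONHelper):

```python
# Illustrative only: the kind of extra results the ptest case injects.
extraresults = {
    # Full ptest-runner console output, kept for after-the-fact debugging.
    'ptestresult.rawlogs': {'log': 'BEGIN: /usr/lib/zlib/ptest\nPASS: zlib\n...'},
    # One entry per test: "ptestresult." + section + "." + sanitised test name.
    'ptestresult.zlib.zlib': {'status': 'PASSED'},
    'ptestresult.busybox.awk_tests': {'status': 'FAILED'},
    'ptestresult.glibc.tst-strtod': {'status': 'SKIPPED'},
}
```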
I attempted to implement this as Python subTests, but that approach failed: the output was too confusing, subTests don't support any kind of log output handling, subTest successes aren't logged, and it was making things far more complex than they needed to be.
We mark ptest-runner as an expected failure since it's unlikely that every ptest will currently pass and we don't want that to fail the whole image test run.
It's assumed there will be later analysis of the json output to determine regressions. We do have to change the test runner code so that an
'unexpectedsuccess' is not counted as a failure.
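For context on why that change is needed: with stock unittest on Python 3, a test decorated with @unittest.expectedFailure that happens to pass is recorded as an unexpected success, and the default TestResult.wasSuccessful() then reports the whole run as unsuccessful. A standalone sketch of that behaviour (not oeqa code):

```python
import unittest

class Demo(unittest.TestCase):
    @unittest.expectedFailure
    def test_ptests(self):
        # If every ptest happened to pass, the oeqa case would land here:
        # the body succeeds even though a failure was "expected".
        self.assertTrue(True)

result = unittest.TestResult()
unittest.defaultTestLoader.loadTestsFromTestCase(Demo).run(result)
print(len(result.unexpectedSuccesses))  # 1
print(result.wasSuccessful())           # False with stock unittest
# The wasSuccessful() override in the diff below only checks failures and
# errors, so an unexpected success no longer marks the run as failed.
```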
Also, the test names are manipulated to replace spaces and brackets with "_", with runs of whitespace collapsed to a single separator.
(From OE-Core rev: a13e088942e2a3c3521e98954a394e61a15234e8)
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
-rw-r--r--  meta/lib/oeqa/core/runner.py           |  8
-rw-r--r--  meta/lib/oeqa/runtime/cases/ptest.py   | 21

2 files changed, 27 insertions(+), 2 deletions(-)
diff --git a/meta/lib/oeqa/core/runner.py b/meta/lib/oeqa/core/runner.py
index 67756c3867..034f223240 100644
--- a/meta/lib/oeqa/core/runner.py
+++ b/meta/lib/oeqa/core/runner.py
@@ -122,7 +122,11 @@ class OETestResult(_TestResult):
 
     def logDetails(self, json_file_dir=None, configuration=None, result_id=None):
         self.tc.logger.info("RESULTS:")
+
         result = {}
+        if hasattr(self.tc, "extraresults"):
+            result = self.tc.extraresults
+
         for case_name in self.tc._registry['cases']:
             case = self.tc._registry['cases'][case_name]
 
@@ -148,6 +152,10 @@ class OETestResult(_TestResult):
             tresultjsonhelper = OETestResultJSONHelper()
             tresultjsonhelper.dump_testresult_file(json_file_dir, configuration, result_id, result)
 
+    def wasSuccessful(self):
+        # Override as we unexpected successes aren't failures for us
+        return (len(self.failures) == len(self.errors) == 0)
+
 class OEListTestsResult(object):
     def wasSuccessful(self):
         return True
diff --git a/meta/lib/oeqa/runtime/cases/ptest.py b/meta/lib/oeqa/runtime/cases/ptest.py
index f60a433d59..77ae7b6b86 100644
--- a/meta/lib/oeqa/runtime/cases/ptest.py
+++ b/meta/lib/oeqa/runtime/cases/ptest.py
@@ -1,3 +1,6 @@
+import unittest
+import pprint
+
 from oeqa.runtime.case import OERuntimeTestCase
 from oeqa.core.decorator.depends import OETestDepends
 from oeqa.core.decorator.oeid import OETestID
@@ -49,6 +52,7 @@ class PtestRunnerTest(OERuntimeTestCase):
     @OETestID(1600)
     @skipIfNotFeature('ptest', 'Test requires ptest to be in DISTRO_FEATURES')
     @OETestDepends(['ssh.SSHTest.test_ssh'])
+    @unittest.expectedFailure
     def test_ptestrunner(self):
         status, output = self.target.run('which ptest-runner', 0)
         if status != 0:
@@ -76,6 +80,11 @@
         # status != 0 is OK since some ptest tests may fail
         self.assertTrue(status != 127, msg="Cannot execute ptest-runner!")
 
+        if not hasattr(self.tc, "extraresults"):
+            self.tc.extraresults = {}
+        extras = self.tc.extraresults
+        extras['ptestresult.rawlogs'] = {'log': output}
+
         # Parse and save results
         parse_result = self.parse_ptest(ptest_runner_log)
         parse_result.log_as_files(ptest_log_dir, test_status = ['pass','fail', 'skip'])
@@ -84,10 +93,18 @@
             os.remove(ptest_log_dir_link)
         os.symlink(os.path.basename(ptest_log_dir), ptest_log_dir_link)
 
+        trans = str.maketrans("()", "__")
+        resmap = {'pass': 'PASSED', 'skip': 'SKIPPED', 'fail': 'FAILED'}
+        for section in parse_result.result_dict:
+            for test, result in parse_result.result_dict[section]:
+                testname = "ptestresult." + section + "." + "_".join(test.translate(trans).split())
+                extras[testname] = {'status': resmap[result]}
+
         failed_tests = {}
         for section in parse_result.result_dict:
-            failed_testcases = [ test for test, result in parse_result.result_dict[section] if result == 'fail' ]
+            failed_testcases = [ "_".join(test.translate(trans).split()) for test, result in parse_result.result_dict[section] if result == 'fail' ]
             if failed_testcases:
                 failed_tests[section] = failed_testcases
 
-        self.assertFalse(failed_tests, msg = "Failed ptests: %s" %(str(failed_tests)))
+        if failed_tests:
+            self.fail("Failed ptests:\n%s" % pprint.pformat(failed_tests))
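Finally, a sketch of the kind of later analysis of the json output that the commit message anticipates. The location of the dumped file and its exact nesting (both determined by OETestResultJSONHelper) are assumptions here, so the helper simply walks the structure looking for the injected ptestresult.* status entries:

```python
import json

def find_failed_ptests(node, failed=None):
    # Recursively collect injected ptest entries recorded as FAILED.
    if failed is None:
        failed = []
    if isinstance(node, dict):
        for key, value in node.items():
            if key.startswith("ptestresult.") and isinstance(value, dict):
                if value.get("status") == "FAILED":
                    failed.append(key)
            else:
                find_failed_ptests(value, failed)
    return failed

# Hypothetical path; point this at wherever the json results were written.
with open("testresults.json") as f:
    print("\n".join(find_failed_ptests(json.load(f))))
```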