author      Mardegan, Alberto <AMardegan@luxoft.com>              2019-04-25 08:03:28 +0000
committer   Richard Purdie <richard.purdie@linuxfoundation.org>  2019-04-26 10:09:08 +0100
commit      e03d103e10dea28e6cc5ed2136e3d6d133d6c0ef (patch)
tree        7cb38c1e7ba042e4c9d04e70d44252a374ae3fe0
parent      9d997dff7b5506a069a886ece15ed433965f4194 (diff)
download    poky-e03d103e10dea28e6cc5ed2136e3d6d133d6c0ef.tar.gz
oeqa/core/runner: dump stdout and stderr of each test case
Some CI pipelines may perform further processing of the test output
(for instance, to plot some metrics into a chart). However, since
`thud` we have switched away from the XML-based JUnit reporting, and at
the same time we lost the ability to collect the stdout and stderr of
the individual tests.

We now restore this functionality by adding `stdout` and `stderr` keys
to the JSON reports. This behavior is off by default; to enable it,
set the `TESTREPORT_FULLLOGS` variable in the bitbake configuration.
(From OE-Core rev: fd0048630ece5b21efb3a79e97046be0ab2a1514)
Signed-off-by: Alberto Mardegan <amardegan@luxoft.com>
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
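
With the variable set (any non-empty value works, since testimage.bbclass passes
the raw result of d.getVar('TESTREPORT_FULLLOGS') straight to dump_streams), each
per-case entry in the JSON report carries optional 'stdout' and 'stderr' keys next
to the existing 'status' and 'log'. A minimal consumer sketch follows; the per-case
keys come from this patch, while the file name "testresults.json" and the exact
nesting of the report are assumptions made for illustration.

    import json

    # Hypothetical report path; the JSON result directory and file name are
    # assumptions, only the per-case keys are defined by this patch.
    with open("testresults.json") as f:
        report = json.load(f)

    def iter_cases(node, name=None):
        # Yield (test_id, per_case_dict) pairs found anywhere in the report,
        # without assuming a particular top-level layout.
        if isinstance(node, dict):
            if 'status' in node:
                yield name, node
            else:
                for key, value in node.items():
                    yield from iter_cases(value, key)

    for test_id, case in iter_cases(report):
        # 'stdout'/'stderr' are present only when TESTREPORT_FULLLOGS was set
        # at report time.
        if case['status'] in ('FAILED', 'ERROR') and 'stdout' in case:
            print("%s failed, captured stdout:\n%s" % (test_id, case['stdout']))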
-rw-r--r--  meta/classes/testimage.bbclass |  3 ++-
-rw-r--r--  meta/lib/oeqa/core/runner.py   | 20 ++++++++++++++++----
2 files changed, 18 insertions(+), 5 deletions(-)
diff --git a/meta/classes/testimage.bbclass b/meta/classes/testimage.bbclass
index ff1c53b93e..9bb5a5cb0b 100644
--- a/meta/classes/testimage.bbclass
+++ b/meta/classes/testimage.bbclass
@@ -316,7 +316,8 @@ def testimage_main(d):
     configuration = get_testimage_configuration(d, 'runtime', machine)
     results.logDetails(get_testimage_json_result_dir(d),
                        configuration,
-                       get_testimage_result_id(configuration))
+                       get_testimage_result_id(configuration),
+                       dump_streams=d.getVar('TESTREPORT_FULLLOGS'))
     results.logSummary(pn)
     if not results.wasSuccessful():
         bb.fatal('%s - FAILED - check the task log and the ssh log' % pn, forcelog=True)
diff --git a/meta/lib/oeqa/core/runner.py b/meta/lib/oeqa/core/runner.py
index df88b85f1c..478b7b6683 100644
--- a/meta/lib/oeqa/core/runner.py
+++ b/meta/lib/oeqa/core/runner.py
@@ -7,6 +7,7 @@ import unittest
 import logging
 import re
 import json
+import sys

 from unittest import TextTestResult as _TestResult
 from unittest import TextTestRunner as _TestRunner
@@ -45,6 +46,9 @@ class OETestResult(_TestResult):

         self.tc = tc

+        # stdout and stderr for each test case
+        self.logged_output = {}
+
     def startTest(self, test):
         # May have been set by concurrencytest
         if test.id() not in self.starttime:
@@ -53,6 +57,9 @@ class OETestResult(_TestResult):

     def stopTest(self, test):
         self.endtime[test.id()] = time.time()
+        if self.buffer:
+            self.logged_output[test.id()] = (
+                    sys.stdout.getvalue(), sys.stderr.getvalue())
         super(OETestResult, self).stopTest(test)
         if test.id() in self.progressinfo:
             self.tc.logger.info(self.progressinfo[test.id()])
@@ -118,7 +125,8 @@
             self.successes.append((test, None))
         super(OETestResult, self).addSuccess(test)

-    def logDetails(self, json_file_dir=None, configuration=None, result_id=None):
+    def logDetails(self, json_file_dir=None, configuration=None, result_id=None,
+                   dump_streams=False):
         self.tc.logger.info("RESULTS:")

         result = {}
@@ -144,10 +152,14 @@
             if status not in logs:
                 logs[status] = []
             logs[status].append("RESULTS - %s - Testcase %s: %s%s" % (case.id(), oeid, status, t))
+            report = {'status': status}
             if log:
-                result[case.id()] = {'status': status, 'log': log}
-            else:
-                result[case.id()] = {'status': status}
+                report['log'] = log
+            if dump_streams and case.id() in self.logged_output:
+                (stdout, stderr) = self.logged_output[case.id()]
+                report['stdout'] = stdout
+                report['stderr'] = stderr
+            result[case.id()] = report

         for i in ['PASSED', 'SKIPPED', 'EXPECTEDFAIL', 'ERROR', 'FAILED', 'UNKNOWN']:
             if i not in logs:
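
The capture above piggybacks on unittest's buffer mode: when self.buffer is true,
the base TestResult swaps sys.stdout/sys.stderr for per-test io.StringIO buffers in
startTest() and only restores (and clears) them in its own stopTest(), which is why
the patch reads getvalue() before calling super(). A self-contained sketch of the
same pattern against stock unittest, with illustrative names that are not part of
this patch:

    import sys
    import unittest

    class CapturingResult(unittest.TextTestResult):
        """Collect the per-test stdout/stderr captured by buffer mode."""

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.logged_output = {}

        def stopTest(self, test):
            # With buffer=True, sys.stdout/sys.stderr are still the per-test
            # StringIO buffers here; super().stopTest() restores the real
            # streams and clears the buffers, so read them first.
            if self.buffer:
                self.logged_output[test.id()] = (
                    sys.stdout.getvalue(), sys.stderr.getvalue())
            super().stopTest(test)

    class Demo(unittest.TestCase):
        def test_prints(self):
            print("hello from the test")

    if __name__ == '__main__':
        runner = unittest.TextTestRunner(buffer=True, resultclass=CapturingResult)
        result = runner.run(unittest.defaultTestLoader.loadTestsFromTestCase(Demo))
        for test_id, (out, err) in result.logged_output.items():
            print("%s captured stdout: %r" % (test_id, out))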