path: root/meta/lib/oeqa
author     Mardegan, Alberto <AMardegan@luxoft.com>    2019-04-25 08:03:28 +0000
committer  Richard Purdie <richard.purdie@linuxfoundation.org>    2019-05-12 09:13:39 +0100
commit     6322fb5b2bb43fc26a6badd621b3832c7f4275a9 (patch)
tree       963eadf2bd8c602a8a495206a1bf56f0a5d38303 /meta/lib/oeqa
parent     e2568f68b728587b5a2abe4c1a000fa7d149ea5f (diff)
download   poky-6322fb5b2bb43fc26a6badd621b3832c7f4275a9.tar.gz
oeqa/core/runner: dump stdout and stderr of each test case
Some CI pipelines might perform further processing of the test output
(for instance, to plot some metrics into a chart). However, since `thud`
we switched away from the XML-based jUnit reporting, and at the same time
we lost the ability to collect the stdout and stderr of the various tests.

We now restore this functionality by adding `stdout` and `stderr` keys to
the JSON reports. This behavior is off by default; to enable it, set the
`TESTREPORT_FULLLOGS` variable in the bitbake configuration.

(From OE-Core rev: fd57b34d7c8a120273d65cd361be208fbdaeff50)

Signed-off-by: Alberto Mardegan <amardegan@luxoft.com>
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
Signed-off-by: Armin Kuster <akuster808@gmail.com>
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
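As an illustration of how a CI step could consume the new keys, below is a
minimal sketch that walks an oeqa JSON report and prints the captured streams
of tests that did not pass. The file name (testresults.json) and the top-level
layout (result id -> {'result': {testcase id: {...}}}) are assumptions for this
sketch, not confirmed by the patch; only the 'status', 'log', 'stdout' and
'stderr' keys come from this change, and 'stdout'/'stderr' are present only
when stream dumping was enabled.

import json
import sys

def dump_failed_output(report_path):
    # Walk every result set in the report and print the captured streams of
    # tests that did not pass. The report layout is assumed, see note above.
    with open(report_path) as f:
        report = json.load(f)
    for result_id, data in report.items():
        for case_id, case in data.get('result', {}).items():
            if case.get('status') not in ('FAILED', 'ERROR'):
                continue
            print("=== %s: %s ===" % (case_id, case['status']))
            # 'stdout'/'stderr' only exist when dump_streams was enabled
            print(case.get('stdout', '<no stdout captured>'))
            print(case.get('stderr', '<no stderr captured>'), file=sys.stderr)

if __name__ == '__main__':
    dump_failed_output(sys.argv[1] if len(sys.argv) > 1 else 'testresults.json')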
Diffstat (limited to 'meta/lib/oeqa')
-rw-r--r--  meta/lib/oeqa/core/runner.py | 20
1 file changed, 16 insertions(+), 4 deletions(-)
diff --git a/meta/lib/oeqa/core/runner.py b/meta/lib/oeqa/core/runner.py
index df88b85f1c..478b7b6683 100644
--- a/meta/lib/oeqa/core/runner.py
+++ b/meta/lib/oeqa/core/runner.py
@@ -7,6 +7,7 @@ import unittest
 import logging
 import re
 import json
+import sys
 
 from unittest import TextTestResult as _TestResult
 from unittest import TextTestRunner as _TestRunner
@@ -45,6 +46,9 @@ class OETestResult(_TestResult):
 
         self.tc = tc
 
+        # stdout and stderr for each test case
+        self.logged_output = {}
+
     def startTest(self, test):
         # May have been set by concurrencytest
         if test.id() not in self.starttime:
@@ -53,6 +57,9 @@ class OETestResult(_TestResult):
 
     def stopTest(self, test):
         self.endtime[test.id()] = time.time()
+        if self.buffer:
+            self.logged_output[test.id()] = (
+                    sys.stdout.getvalue(), sys.stderr.getvalue())
         super(OETestResult, self).stopTest(test)
         if test.id() in self.progressinfo:
             self.tc.logger.info(self.progressinfo[test.id()])
@@ -118,7 +125,8 @@ class OETestResult(_TestResult):
             self.successes.append((test, None))
         super(OETestResult, self).addSuccess(test)
 
-    def logDetails(self, json_file_dir=None, configuration=None, result_id=None):
+    def logDetails(self, json_file_dir=None, configuration=None, result_id=None,
+            dump_streams=False):
         self.tc.logger.info("RESULTS:")
 
         result = {}
@@ -144,10 +152,14 @@ class OETestResult(_TestResult):
             if status not in logs:
                 logs[status] = []
             logs[status].append("RESULTS - %s - Testcase %s: %s%s" % (case.id(), oeid, status, t))
+            report = {'status': status}
             if log:
-                result[case.id()] = {'status': status, 'log': log}
-            else:
-                result[case.id()] = {'status': status}
+                report['log'] = log
+            if dump_streams and case.id() in self.logged_output:
+                (stdout, stderr) = self.logged_output[case.id()]
+                report['stdout'] = stdout
+                report['stderr'] = stderr
+            result[case.id()] = report
 
         for i in ['PASSED', 'SKIPPED', 'EXPECTEDFAIL', 'ERROR', 'FAILED', 'UNKNOWN']:
             if i not in logs:
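Note on the capture mechanism: the new stopTest() code works because unittest's
buffer mode replaces sys.stdout and sys.stderr with io.StringIO objects for the
duration of each test, and the patch reads getvalue() before super().stopTest()
restores the real streams and truncates the buffers. A standalone sketch of the
same pattern follows; the class names here are illustrative, not part of the
patch.

import sys
import unittest

class RecordingResult(unittest.TextTestResult):
    # Illustrative result class mirroring what the patch adds to OETestResult.
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.logged_output = {}

    def stopTest(self, test):
        # With buffer=True, sys.stdout/sys.stderr are still the per-test
        # io.StringIO buffers at this point; super().stopTest() will restore
        # the real streams and truncate the buffers, so capture first.
        if self.buffer:
            self.logged_output[test.id()] = (
                sys.stdout.getvalue(), sys.stderr.getvalue())
        super().stopTest(test)

class Example(unittest.TestCase):
    def test_prints(self):
        print("hello from the test")

if __name__ == '__main__':
    suite = unittest.defaultTestLoader.loadTestsFromTestCase(Example)
    runner = unittest.TextTestRunner(resultclass=RecordingResult, buffer=True)
    result = runner.run(suite)
    for case_id, (out, err) in result.logged_output.items():
        print("%s captured stdout: %r" % (case_id, out))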