author     Armin Kuster <akuster808@gmail.com>                 2019-04-22 06:32:38 -0600
committer  Richard Purdie <richard.purdie@linuxfoundation.org> 2019-04-29 14:16:30 +0100
commit     9c820850369aa0d53298291d3c77526d1817becd (patch)
tree       27da14e82977d05e3387201567cff17651bec513 /scripts/lib/resulttool
parent     244cbcce0ecc4691a9ddfb0a44dc487ff7af0670 (diff)
download   poky-9c820850369aa0d53298291d3c77526d1817becd.tar.gz
resulttool: add ltp test support
(From OE-Core rev: 966795aa2c6960aca11a04e87f415256faf26957)

Signed-off-by: Armin Kuster <akuster808@gmail.com>
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
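For context, the new handle_ltptest_result() in the diff below walks the 'result' dictionary
from a testresults.json file and picks up two kinds of entries: per-suite metadata stored under
the 'ltpresult.sections' key, and individual 'ltpresult.<suite>.<test>' entries carrying a
status. A minimal sketch of the shape it expects (suite names, test names and durations here
are purely illustrative, not taken from a real results file):

    # Hypothetical input sketch for handle_ltptest_result(); all values are made up.
    result = {
        'ltpresult.sections': {
            'math':     {'duration': '12'},
            'syscalls': {'duration': '480', 'timeout': True},  # 'timeout' makes the report append " T"
        },
        'ltpresult.math.abs01':      {'status': 'PASSED'},
        'ltpresult.syscalls.open01': {'status': 'FAILED'},
    }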
Diffstat (limited to 'scripts/lib/resulttool')
-rw-r--r--  scripts/lib/resulttool/report.py                            | 43
-rw-r--r--  scripts/lib/resulttool/template/test_report_full_text.txt   | 17
2 files changed, 59 insertions(+), 1 deletion(-)
diff --git a/scripts/lib/resulttool/report.py b/scripts/lib/resulttool/report.py
index 8ae42728e4..2a749459b4 100644
--- a/scripts/lib/resulttool/report.py
+++ b/scripts/lib/resulttool/report.py
@@ -23,6 +23,7 @@ import oeqa.utils.gitarchive as gitarchive
 class ResultsTextReport(object):
     def __init__(self):
         self.ptests = {}
+        self.ltptests = {}
         self.result_types = {'passed': ['PASSED', 'passed'],
                              'failed': ['FAILED', 'failed', 'ERROR', 'error', 'UNKNOWN'],
                              'skipped': ['SKIPPED', 'skipped']}
@@ -57,6 +58,38 @@ class ResultsTextReport(object):
             if status in self.result_types[tk]:
                 self.ptests[suite][tk] += 1
 
+    def handle_ltptest_result(self, k, status, result):
+        if k == 'ltpresult.sections':
+            # Ensure tests without any test results still show up on the report
+            for suite in result['ltpresult.sections']:
+                if suite not in self.ltptests:
+                    self.ltptests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+                if 'duration' in result['ltpresult.sections'][suite]:
+                    self.ltptests[suite]['duration'] = result['ltpresult.sections'][suite]['duration']
+                if 'timeout' in result['ltpresult.sections'][suite]:
+                    self.ltptests[suite]['duration'] += " T"
+            return
+        try:
+            _, suite, test = k.split(".", 2)
+        except ValueError:
+            return
+        # Handle 'glib-2.0'
+        if 'ltpresult.sections' in result and suite not in result['ltpresult.sections']:
+            try:
+                _, suite, suite1, test = k.split(".", 3)
+                print("split2: %s %s %s" % (suite, suite1, test))
+                if suite + "." + suite1 in result['ltpresult.sections']:
+                    suite = suite + "." + suite1
+            except ValueError:
+                pass
+        if suite not in self.ltptests:
+            self.ltptests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+        for tk in self.result_types:
+            if status in self.result_types[tk]:
+                self.ltptests[suite][tk] += 1
+
+    def get_aggregated_test_result(self, logger, testresult):
+        test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
     def get_aggregated_test_result(self, logger, testresult):
         test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
         result = testresult.get('result', [])
@@ -69,6 +102,8 @@ class ResultsTextReport(object):
                 test_count_report['failed_testcases'].append(k)
             if k.startswith("ptestresult."):
                 self.handle_ptest_result(k, test_status, result)
+            if k.startswith("ltpresult."):
+                self.handle_ltptest_result(k, test_status, result)
         return test_count_report
 
     def print_test_report(self, template_file_name, test_count_reports):
@@ -79,9 +114,10 @@ class ResultsTextReport(object):
         template = env.get_template(template_file_name)
         havefailed = False
         haveptest = bool(self.ptests)
+        haveltp = bool(self.ltptests)
         reportvalues = []
         cols = ['passed', 'failed', 'skipped']
-        maxlen = {'passed' : 0, 'failed' : 0, 'skipped' : 0, 'result_id': 0, 'testseries' : 0, 'ptest' : 0 }
+        maxlen = {'passed' : 0, 'failed' : 0, 'skipped' : 0, 'result_id': 0, 'testseries' : 0, 'ptest' : 0 ,'ltptest': 0}
         for line in test_count_reports:
             total_tested = line['passed'] + line['failed'] + line['skipped']
             vals = {}
@@ -100,10 +136,15 @@ class ResultsTextReport(object):
         for ptest in self.ptests:
             if len(ptest) > maxlen['ptest']:
                 maxlen['ptest'] = len(ptest)
+        for ltptest in self.ltptests:
+            if len(ltptest) > maxlen['ltptest']:
+                maxlen['ltptest'] = len(ltptest)
         output = template.render(reportvalues=reportvalues,
                                  havefailed=havefailed,
                                  haveptest=haveptest,
                                  ptests=self.ptests,
+                                 haveltp=haveltp,
+                                 ltptests=self.ltptests,
                                  maxlen=maxlen)
         print(output)
 
diff --git a/scripts/lib/resulttool/template/test_report_full_text.txt b/scripts/lib/resulttool/template/test_report_full_text.txt
index 590f35c7dd..6ecd5bce59 100644
--- a/scripts/lib/resulttool/template/test_report_full_text.txt
+++ b/scripts/lib/resulttool/template/test_report_full_text.txt
@@ -25,6 +25,23 @@ PTest Result Summary
 There was no ptest data
 {% endif %}
 
+{% if haveltp %}
+==============================================================================================================
+Ltp Test Result Summary
+==============================================================================================================
+--------------------------------------------------------------------------------------------------------------
+{{ 'Recipe'.ljust(maxlen['ltptest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
+--------------------------------------------------------------------------------------------------------------
+{% for ltptest in ltptests |sort %}
+{{ ltptest.ljust(maxlen['ltptest']) }} | {{ (ltptests[ltptest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ltptests[ltptest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ltptests[ltptest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ltptests[ltptest]['duration']|string) }}
+{% endfor %}
+--------------------------------------------------------------------------------------------------------------
+
+{% else %}
+There was no LTP Test data
+{% endif %}
+
+
 ==============================================================================================================
 Failed test cases (sorted by testseries, ID)
 ==============================================================================================================