diff options
| author | Armin Kuster <akuster808@gmail.com> | 2019-04-22 06:32:38 -0600 |
|---|---|---|
| committer | Richard Purdie <richard.purdie@linuxfoundation.org> | 2019-06-18 11:28:58 +0100 |
| commit | 937b52a3107e4acae2f37b3c3401077ab496985c (patch) | |
| tree | 72224d19c0267371b0a5a26c41dda4f7d023680f /scripts | |
| parent | 913df0de3515a9b3dc28b0de1a6c352aba510916 (diff) | |
| download | poky-937b52a3107e4acae2f37b3c3401077ab496985c.tar.gz | |
resulttool: add ltp test support
(From OE-Core rev: 60e5b81d90a3a0bcaad6ea407d9aebdc99d4a80b)
Signed-off-by: Armin Kuster <akuster808@gmail.com>
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
Signed-off-by: Armin Kuster <akuster808@gmail.com>
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
Diffstat (limited to 'scripts')
| -rw-r--r-- | scripts/lib/resulttool/report.py | 43 | ||||
| -rw-r--r-- | scripts/lib/resulttool/template/test_report_full_text.txt | 17 |
2 files changed, 59 insertions, 1 deletion
diff --git a/scripts/lib/resulttool/report.py b/scripts/lib/resulttool/report.py index b1c9d13418..a98f2393e6 100644 --- a/scripts/lib/resulttool/report.py +++ b/scripts/lib/resulttool/report.py | |||
| @@ -17,6 +17,7 @@ import oeqa.utils.gitarchive as gitarchive | |||
| 17 | class ResultsTextReport(object): | 17 | class ResultsTextReport(object): |
| 18 | def __init__(self): | 18 | def __init__(self): |
| 19 | self.ptests = {} | 19 | self.ptests = {} |
| 20 | self.ltptests = {} | ||
| 20 | self.result_types = {'passed': ['PASSED', 'passed'], | 21 | self.result_types = {'passed': ['PASSED', 'passed'], |
| 21 | 'failed': ['FAILED', 'failed', 'ERROR', 'error', 'UNKNOWN'], | 22 | 'failed': ['FAILED', 'failed', 'ERROR', 'error', 'UNKNOWN'], |
| 22 | 'skipped': ['SKIPPED', 'skipped']} | 23 | 'skipped': ['SKIPPED', 'skipped']} |
| @@ -51,6 +52,38 @@ class ResultsTextReport(object): | |||
| 51 | if status in self.result_types[tk]: | 52 | if status in self.result_types[tk]: |
| 52 | self.ptests[suite][tk] += 1 | 53 | self.ptests[suite][tk] += 1 |
| 53 | 54 | ||
| 55 | def handle_ltptest_result(self, k, status, result): | ||
| 56 | if k == 'ltpresult.sections': | ||
| 57 | # Ensure tests without any test results still show up on the report | ||
| 58 | for suite in result['ltpresult.sections']: | ||
| 59 | if suite not in self.ltptests: | ||
| 60 | self.ltptests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []} | ||
| 61 | if 'duration' in result['ltpresult.sections'][suite]: | ||
| 62 | self.ltptests[suite]['duration'] = result['ltpresult.sections'][suite]['duration'] | ||
| 63 | if 'timeout' in result['ltpresult.sections'][suite]: | ||
| 64 | self.ltptests[suite]['duration'] += " T" | ||
| 65 | return | ||
| 66 | try: | ||
| 67 | _, suite, test = k.split(".", 2) | ||
| 68 | except ValueError: | ||
| 69 | return | ||
| 70 | # Handle 'glib-2.0' | ||
| 71 | if 'ltpresult.sections' in result and suite not in result['ltpresult.sections']: | ||
| 72 | try: | ||
| 73 | _, suite, suite1, test = k.split(".", 3) | ||
| 74 | print("split2: %s %s %s" % (suite, suite1, test)) | ||
| 75 | if suite + "." + suite1 in result['ltpresult.sections']: | ||
| 76 | suite = suite + "." + suite1 | ||
| 77 | except ValueError: | ||
| 78 | pass | ||
| 79 | if suite not in self.ltptests: | ||
| 80 | self.ltptests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []} | ||
| 81 | for tk in self.result_types: | ||
| 82 | if status in self.result_types[tk]: | ||
| 83 | self.ltptests[suite][tk] += 1 | ||
| 84 | |||
| 85 | def get_aggregated_test_result(self, logger, testresult): | ||
| 86 | test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []} | ||
| 54 | def get_aggregated_test_result(self, logger, testresult): | 87 | def get_aggregated_test_result(self, logger, testresult): |
| 55 | test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []} | 88 | test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []} |
| 56 | result = testresult.get('result', []) | 89 | result = testresult.get('result', []) |
| @@ -63,6 +96,8 @@ class ResultsTextReport(object): | |||
| 63 | test_count_report['failed_testcases'].append(k) | 96 | test_count_report['failed_testcases'].append(k) |
| 64 | if k.startswith("ptestresult."): | 97 | if k.startswith("ptestresult."): |
| 65 | self.handle_ptest_result(k, test_status, result) | 98 | self.handle_ptest_result(k, test_status, result) |
| 99 | if k.startswith("ltpresult."): | ||
| 100 | self.handle_ltptest_result(k, test_status, result) | ||
| 66 | return test_count_report | 101 | return test_count_report |
| 67 | 102 | ||
| 68 | def print_test_report(self, template_file_name, test_count_reports): | 103 | def print_test_report(self, template_file_name, test_count_reports): |
| @@ -73,9 +108,10 @@ class ResultsTextReport(object): | |||
| 73 | template = env.get_template(template_file_name) | 108 | template = env.get_template(template_file_name) |
| 74 | havefailed = False | 109 | havefailed = False |
| 75 | haveptest = bool(self.ptests) | 110 | haveptest = bool(self.ptests) |
| 111 | haveltp = bool(self.ltptests) | ||
| 76 | reportvalues = [] | 112 | reportvalues = [] |
| 77 | cols = ['passed', 'failed', 'skipped'] | 113 | cols = ['passed', 'failed', 'skipped'] |
| 78 | maxlen = {'passed' : 0, 'failed' : 0, 'skipped' : 0, 'result_id': 0, 'testseries' : 0, 'ptest' : 0 } | 114 | maxlen = {'passed' : 0, 'failed' : 0, 'skipped' : 0, 'result_id': 0, 'testseries' : 0, 'ptest' : 0 ,'ltptest': 0} |
| 79 | for line in test_count_reports: | 115 | for line in test_count_reports: |
| 80 | total_tested = line['passed'] + line['failed'] + line['skipped'] | 116 | total_tested = line['passed'] + line['failed'] + line['skipped'] |
| 81 | vals = {} | 117 | vals = {} |
| @@ -94,10 +130,15 @@ class ResultsTextReport(object): | |||
| 94 | for ptest in self.ptests: | 130 | for ptest in self.ptests: |
| 95 | if len(ptest) > maxlen['ptest']: | 131 | if len(ptest) > maxlen['ptest']: |
| 96 | maxlen['ptest'] = len(ptest) | 132 | maxlen['ptest'] = len(ptest) |
| 133 | for ltptest in self.ltptests: | ||
| 134 | if len(ltptest) > maxlen['ltptest']: | ||
| 135 | maxlen['ltptest'] = len(ltptest) | ||
| 97 | output = template.render(reportvalues=reportvalues, | 136 | output = template.render(reportvalues=reportvalues, |
| 98 | havefailed=havefailed, | 137 | havefailed=havefailed, |
| 99 | haveptest=haveptest, | 138 | haveptest=haveptest, |
| 100 | ptests=self.ptests, | 139 | ptests=self.ptests, |
| 140 | haveltp=haveltp, | ||
| 141 | ltptests=self.ltptests, | ||
| 101 | maxlen=maxlen) | 142 | maxlen=maxlen) |
| 102 | print(output) | 143 | print(output) |
| 103 | 144 | ||
diff --git a/scripts/lib/resulttool/template/test_report_full_text.txt b/scripts/lib/resulttool/template/test_report_full_text.txt index 590f35c7dd..6ecd5bce59 100644 --- a/scripts/lib/resulttool/template/test_report_full_text.txt +++ b/scripts/lib/resulttool/template/test_report_full_text.txt | |||
| @@ -25,6 +25,23 @@ PTest Result Summary | |||
| 25 | There was no ptest data | 25 | There was no ptest data |
| 26 | {% endif %} | 26 | {% endif %} |
| 27 | 27 | ||
| 28 | {% if haveltp %} | ||
| 29 | ============================================================================================================== | ||
| 30 | Ltp Test Result Summary | ||
| 31 | ============================================================================================================== | ||
| 32 | -------------------------------------------------------------------------------------------------------------- | ||
| 33 | {{ 'Recipe'.ljust(maxlen['ltptest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }} | ||
| 34 | -------------------------------------------------------------------------------------------------------------- | ||
| 35 | {% for ltptest in ltptests |sort %} | ||
| 36 | {{ ltptest.ljust(maxlen['ltptest']) }} | {{ (ltptests[ltptest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ltptests[ltptest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ltptests[ltptest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ltptests[ltptest]['duration']|string) }} | ||
| 37 | {% endfor %} | ||
| 38 | -------------------------------------------------------------------------------------------------------------- | ||
| 39 | |||
| 40 | {% else %} | ||
| 41 | There was no LTP Test data | ||
| 42 | {% endif %} | ||
| 43 | |||
| 44 | |||
| 28 | ============================================================================================================== | 45 | ============================================================================================================== |
| 29 | Failed test cases (sorted by testseries, ID) | 46 | Failed test cases (sorted by testseries, ID) |
| 30 | ============================================================================================================== | 47 | ============================================================================================================== |
