diff options
| -rw-r--r-- | meta/lib/oeqa/core/runner.py | 8 | ||||
| -rw-r--r-- | meta/lib/oeqa/runtime/cases/ptest.py | 21 |
2 files changed, 27 insertions, 2 deletions
diff --git a/meta/lib/oeqa/core/runner.py b/meta/lib/oeqa/core/runner.py index 67756c3867..034f223240 100644 --- a/meta/lib/oeqa/core/runner.py +++ b/meta/lib/oeqa/core/runner.py | |||
| @@ -122,7 +122,11 @@ class OETestResult(_TestResult): | |||
| 122 | 122 | ||
| 123 | def logDetails(self, json_file_dir=None, configuration=None, result_id=None): | 123 | def logDetails(self, json_file_dir=None, configuration=None, result_id=None): |
| 124 | self.tc.logger.info("RESULTS:") | 124 | self.tc.logger.info("RESULTS:") |
| 125 | |||
| 125 | result = {} | 126 | result = {} |
| 127 | if hasattr(self.tc, "extraresults"): | ||
| 128 | result = self.tc.extraresults | ||
| 129 | |||
| 126 | for case_name in self.tc._registry['cases']: | 130 | for case_name in self.tc._registry['cases']: |
| 127 | case = self.tc._registry['cases'][case_name] | 131 | case = self.tc._registry['cases'][case_name] |
| 128 | 132 | ||
| @@ -148,6 +152,10 @@ class OETestResult(_TestResult): | |||
| 148 | tresultjsonhelper = OETestResultJSONHelper() | 152 | tresultjsonhelper = OETestResultJSONHelper() |
| 149 | tresultjsonhelper.dump_testresult_file(json_file_dir, configuration, result_id, result) | 153 | tresultjsonhelper.dump_testresult_file(json_file_dir, configuration, result_id, result) |
| 150 | 154 | ||
| 155 | def wasSuccessful(self): | ||
| 156 | # Override as unexpected successes aren't failures for us | ||
| 157 | return (len(self.failures) == len(self.errors) == 0) | ||
| 158 | |||
| 151 | class OEListTestsResult(object): | 159 | class OEListTestsResult(object): |
| 152 | def wasSuccessful(self): | 160 | def wasSuccessful(self): |
| 153 | return True | 161 | return True |
diff --git a/meta/lib/oeqa/runtime/cases/ptest.py b/meta/lib/oeqa/runtime/cases/ptest.py index f60a433d59..77ae7b6b86 100644 --- a/meta/lib/oeqa/runtime/cases/ptest.py +++ b/meta/lib/oeqa/runtime/cases/ptest.py | |||
| @@ -1,3 +1,6 @@ | |||
| 1 | import unittest | ||
| 2 | import pprint | ||
| 3 | |||
| 1 | from oeqa.runtime.case import OERuntimeTestCase | 4 | from oeqa.runtime.case import OERuntimeTestCase |
| 2 | from oeqa.core.decorator.depends import OETestDepends | 5 | from oeqa.core.decorator.depends import OETestDepends |
| 3 | from oeqa.core.decorator.oeid import OETestID | 6 | from oeqa.core.decorator.oeid import OETestID |
| @@ -49,6 +52,7 @@ class PtestRunnerTest(OERuntimeTestCase): | |||
| 49 | @OETestID(1600) | 52 | @OETestID(1600) |
| 50 | @skipIfNotFeature('ptest', 'Test requires ptest to be in DISTRO_FEATURES') | 53 | @skipIfNotFeature('ptest', 'Test requires ptest to be in DISTRO_FEATURES') |
| 51 | @OETestDepends(['ssh.SSHTest.test_ssh']) | 54 | @OETestDepends(['ssh.SSHTest.test_ssh']) |
| 55 | @unittest.expectedFailure | ||
| 52 | def test_ptestrunner(self): | 56 | def test_ptestrunner(self): |
| 53 | status, output = self.target.run('which ptest-runner', 0) | 57 | status, output = self.target.run('which ptest-runner', 0) |
| 54 | if status != 0: | 58 | if status != 0: |
| @@ -76,6 +80,11 @@ class PtestRunnerTest(OERuntimeTestCase): | |||
| 76 | # status != 0 is OK since some ptest tests may fail | 80 | # status != 0 is OK since some ptest tests may fail |
| 77 | self.assertTrue(status != 127, msg="Cannot execute ptest-runner!") | 81 | self.assertTrue(status != 127, msg="Cannot execute ptest-runner!") |
| 78 | 82 | ||
| 83 | if not hasattr(self.tc, "extraresults"): | ||
| 84 | self.tc.extraresults = {} | ||
| 85 | extras = self.tc.extraresults | ||
| 86 | extras['ptestresult.rawlogs'] = {'log': output} | ||
| 87 | |||
| 79 | # Parse and save results | 88 | # Parse and save results |
| 80 | parse_result = self.parse_ptest(ptest_runner_log) | 89 | parse_result = self.parse_ptest(ptest_runner_log) |
| 81 | parse_result.log_as_files(ptest_log_dir, test_status = ['pass','fail', 'skip']) | 90 | parse_result.log_as_files(ptest_log_dir, test_status = ['pass','fail', 'skip']) |
| @@ -84,10 +93,18 @@ class PtestRunnerTest(OERuntimeTestCase): | |||
| 84 | os.remove(ptest_log_dir_link) | 93 | os.remove(ptest_log_dir_link) |
| 85 | os.symlink(os.path.basename(ptest_log_dir), ptest_log_dir_link) | 94 | os.symlink(os.path.basename(ptest_log_dir), ptest_log_dir_link) |
| 86 | 95 | ||
| 96 | trans = str.maketrans("()", "__") | ||
| 97 | resmap = {'pass': 'PASSED', 'skip': 'SKIPPED', 'fail': 'FAILED'} | ||
| 98 | for section in parse_result.result_dict: | ||
| 99 | for test, result in parse_result.result_dict[section]: | ||
| 100 | testname = "ptestresult." + section + "." + "_".join(test.translate(trans).split()) | ||
| 101 | extras[testname] = {'status': resmap[result]} | ||
| 102 | |||
| 87 | failed_tests = {} | 103 | failed_tests = {} |
| 88 | for section in parse_result.result_dict: | 104 | for section in parse_result.result_dict: |
| 89 | failed_testcases = [ test for test, result in parse_result.result_dict[section] if result == 'fail' ] | 105 | failed_testcases = [ "_".join(test.translate(trans).split()) for test, result in parse_result.result_dict[section] if result == 'fail' ] |
| 90 | if failed_testcases: | 106 | if failed_testcases: |
| 91 | failed_tests[section] = failed_testcases | 107 | failed_tests[section] = failed_testcases |
| 92 | 108 | ||
| 93 | self.assertFalse(failed_tests, msg = "Failed ptests: %s" %(str(failed_tests))) | 109 | if failed_tests: |
| 110 | self.fail("Failed ptests:\n%s" % pprint.pformat(failed_tests)) | ||
