summary | refs | log | tree | commit | diff | stats
path: root/meta/lib/oeqa/runtime/cases/ptest.py
diff options
context:
space:
mode:
Diffstat (limited to 'meta/lib/oeqa/runtime/cases/ptest.py')
-rw-r--r--  meta/lib/oeqa/runtime/cases/ptest.py  21
1 file changed, 19 insertions(+), 2 deletions(-)
diff --git a/meta/lib/oeqa/runtime/cases/ptest.py b/meta/lib/oeqa/runtime/cases/ptest.py
index f60a433d59..77ae7b6b86 100644
--- a/meta/lib/oeqa/runtime/cases/ptest.py
+++ b/meta/lib/oeqa/runtime/cases/ptest.py
@@ -1,3 +1,6 @@
+import unittest
+import pprint
+
 from oeqa.runtime.case import OERuntimeTestCase
 from oeqa.core.decorator.depends import OETestDepends
 from oeqa.core.decorator.oeid import OETestID
@@ -49,6 +52,7 @@ class PtestRunnerTest(OERuntimeTestCase):
     @OETestID(1600)
     @skipIfNotFeature('ptest', 'Test requires ptest to be in DISTRO_FEATURES')
     @OETestDepends(['ssh.SSHTest.test_ssh'])
+    @unittest.expectedFailure
     def test_ptestrunner(self):
         status, output = self.target.run('which ptest-runner', 0)
         if status != 0:
@@ -76,6 +80,11 @@ class PtestRunnerTest(OERuntimeTestCase):
         # status != 0 is OK since some ptest tests may fail
         self.assertTrue(status != 127, msg="Cannot execute ptest-runner!")

+        if not hasattr(self.tc, "extraresults"):
+            self.tc.extraresults = {}
+        extras = self.tc.extraresults
+        extras['ptestresult.rawlogs'] = {'log': output}
+
         # Parse and save results
         parse_result = self.parse_ptest(ptest_runner_log)
         parse_result.log_as_files(ptest_log_dir, test_status = ['pass','fail', 'skip'])
@@ -84,10 +93,18 @@ class PtestRunnerTest(OERuntimeTestCase):
             os.remove(ptest_log_dir_link)
         os.symlink(os.path.basename(ptest_log_dir), ptest_log_dir_link)

+        trans = str.maketrans("()", "__")
+        resmap = {'pass': 'PASSED', 'skip': 'SKIPPED', 'fail': 'FAILED'}
+        for section in parse_result.result_dict:
+            for test, result in parse_result.result_dict[section]:
+                testname = "ptestresult." + section + "." + "_".join(test.translate(trans).split())
+                extras[testname] = {'status': resmap[result]}
+
         failed_tests = {}
         for section in parse_result.result_dict:
-            failed_testcases = [ test for test, result in parse_result.result_dict[section] if result == 'fail' ]
+            failed_testcases = [ "_".join(test.translate(trans).split()) for test, result in parse_result.result_dict[section] if result == 'fail' ]
             if failed_testcases:
                 failed_tests[section] = failed_testcases

-        self.assertFalse(failed_tests, msg = "Failed ptests: %s" %(str(failed_tests)))
+        if failed_tests:
+            self.fail("Failed ptests:\n%s" % pprint.pformat(failed_tests))