author      Mazliana <mazliana.mohamad@intel.com>                 2019-03-11 16:30:33 +0800
committer   Richard Purdie <richard.purdie@linuxfoundation.org>   2019-03-18 22:59:58 +0000
commit      2c4996548535a92da22bf4c6a9ce80139e3417f9 (patch)
tree        955dee5fa8cc91075a7657936aca9d0576bf392d /scripts/lib
parent      aeb87d01397f6769095a784415f6f8b58d2c8eb9 (diff)
download    poky-2c4996548535a92da22bf4c6a9ce80139e3417f9.tar.gz
resulttool/manualexecution: Output the correct test case id
We found that manualexecution does not capture the test suite value
correctly when a test module contains more than one test suite. After
verification we found that we should retrieve the full test case id
<test_module.test_suite.test_case> from the oeqa/manual/ JSON file
rather than splitting it into separate test_suite and test_cases
variables.
(From OE-Core rev: db08ec5f48badd5be2fa24cc0d89a4386ab44a59)
Signed-off-by: Mazliana <mazliana.mohamad@intel.com>
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
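
For illustration only (not part of the commit), here is a minimal sketch of why splitting the alias produces wrong ids when one manual test module contains more than one test suite; the module and alias names below are invented examples in the <test_module.test_suite.test_case> form used by oeqa/manual:

```python
# Hypothetical aliases in <test_module.test_suite.test_case> form; a single
# manual test module ("bsps-hw" is an invented example) can hold several suites.
aliases = [
    'bsps-hw.bsps-hw.test_boot',
    'bsps-hw.bsps-tools.test_gpio',
]

# Old behaviour: the suite name was taken from the first entry only, then each
# alias was split and the id rebuilt -- so every case inherits the first suite.
module = aliases[0].split('.', 2)[0]
suite = aliases[0].split('.', 2)[1]
old_ids = [module + '.' + suite + '.' + a.split('.', 2)[2] for a in aliases]
print(old_ids)  # ['bsps-hw.bsps-hw.test_boot', 'bsps-hw.bsps-hw.test_gpio']  <- wrong suite

# New behaviour: keep the full alias as the test case id.
new_ids = list(aliases)
print(new_ids)  # ['bsps-hw.bsps-hw.test_boot', 'bsps-hw.bsps-tools.test_gpio']
```

Keeping the full alias, as the patch does, preserves each test case's own suite name instead of reusing the first suite found in the file.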
Diffstat (limited to 'scripts/lib')
-rwxr-xr-x  scripts/lib/resulttool/manualexecution.py  15
1 files changed, 6 insertions, 9 deletions
diff --git a/scripts/lib/resulttool/manualexecution.py b/scripts/lib/resulttool/manualexecution.py
index a44cc86190..6487cd9bff 100755
--- a/scripts/lib/resulttool/manualexecution.py
+++ b/scripts/lib/resulttool/manualexecution.py
@@ -29,8 +29,7 @@ class ManualTestRunner(object):
     def __init__(self):
         self.jdata = ''
         self.test_module = ''
-        self.test_suite = ''
-        self.test_cases = ''
+        self.test_cases_id = ''
         self.configuration = ''
         self.starttime = ''
         self.result_id = ''
@@ -38,11 +37,10 @@ class ManualTestRunner(object):
 
     def _get_testcases(self, file):
         self.jdata = load_json_file(file)
-        self.test_cases = []
+        self.test_cases_id = []
         self.test_module = self.jdata[0]['test']['@alias'].split('.', 2)[0]
-        self.test_suite = self.jdata[0]['test']['@alias'].split('.', 2)[1]
         for i in self.jdata:
-            self.test_cases.append(i['test']['@alias'].split('.', 2)[2])
+            self.test_cases_id.append(i['test']['@alias'])
 
     def _get_input(self, config):
         while True:
@@ -81,10 +79,9 @@ class ManualTestRunner(object):
 
     def _execute_test_steps(self, test_id):
         test_result = {}
-        testcase_id = self.test_module + '.' + self.test_suite + '.' + self.test_cases[test_id]
         total_steps = len(self.jdata[test_id]['test']['execution'].keys())
         print('------------------------------------------------------------------------')
-        print('Executing test case:' + '' '' + self.test_cases[test_id])
+        print('Executing test case:' + '' '' + self.test_cases_id[test_id])
         print('------------------------------------------------------------------------')
         print('You have total ' + str(total_steps) + ' test steps to be executed.')
         print('------------------------------------------------------------------------\n')
@@ -105,9 +102,9 @@ class ManualTestRunner(object):
                 res = result_types[r]
                 if res == 'FAILED':
                     log_input = input('\nPlease enter the error and the description of the log: (Ex:log:211 Error Bitbake)\n')
-                    test_result.update({testcase_id: {'status': '%s' % res, 'log': '%s' % log_input}})
+                    test_result.update({self.test_cases_id[test_id]: {'status': '%s' % res, 'log': '%s' % log_input}})
                 else:
-                    test_result.update({testcase_id: {'status': '%s' % res}})
+                    test_result.update({self.test_cases_id[test_id]: {'status': '%s' % res}})
                 break
             print('Invalid input!')
         return test_result
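
As a further illustrative sketch (the record contents are invented; only the field names '@alias' and 'execution' are taken from the diff above), this mirrors how the revised _get_testcases collects full test case ids from an oeqa/manual-style JSON list:

```python
import json

# Hypothetical minimal records in the shape the script reads from oeqa/manual/*.json;
# the alias and step values are made up for illustration.
sample = json.loads('''
[
  {"test": {"@alias": "bsps-hw.bsps-hw.test_boot",
            "execution": {"1": {"action": "boot the target", "expected_results": ""}}}},
  {"test": {"@alias": "bsps-hw.bsps-tools.test_gpio",
            "execution": {"1": {"action": "toggle a gpio", "expected_results": ""}}}}
]
''')

# Mirror of the revised loop: keep the full alias as the test case id.
test_cases_id = [entry['test']['@alias'] for entry in sample]
print(test_cases_id)
# ['bsps-hw.bsps-hw.test_boot', 'bsps-hw.bsps-tools.test_gpio']
```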