path: root/scripts/lib/resulttool/manualexecution.py
author     Yeoh Ee Peng <ee.peng.yeoh@intel.com>                  2019-04-04 16:48:38 +0800
committer  Richard Purdie <richard.purdie@linuxfoundation.org>    2019-04-04 22:57:32 +0100
commit     996d1f4518f6d96406fb1d1849e368a3832610c1 (patch)
tree       22828987f4aae83b690d2519c6c6969acea9e3fd /scripts/lib/resulttool/manualexecution.py
parent     f1e470d8fa09917897b579687433605fc8177be9 (diff)
download   poky-996d1f4518f6d96406fb1d1849e368a3832610c1.tar.gz
resulttool/manualexecution: Refactor and simplify codebase
Simplify the code and remove unnecessary logic. Refactor to use a pythonic loop over the test cases.

(From OE-Core rev: 84c6a992e1114685194f6e8a554bce7753c090cc)

Signed-off-by: Yeoh Ee Peng <ee.peng.yeoh@intel.com>
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
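To illustrate the "pythonic loop" mentioned above, here is a minimal sketch (not part of the patch) of the iteration style the refactor moves to; jdata stands for the list returned by load_json_file(), and handle() is a hypothetical placeholder:

    # Before: test cases addressed by numeric index
    for i in range(0, len(jdata)):
        handle(jdata[i]['test']['@alias'])

    # After: iterate over the test dictionaries directly
    for test in jdata:
        handle(test['test']['@alias'])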
Diffstat (limited to 'scripts/lib/resulttool/manualexecution.py')
-rwxr-xr-x  scripts/lib/resulttool/manualexecution.py | 56
1 file changed, 20 insertions(+), 36 deletions(-)
diff --git a/scripts/lib/resulttool/manualexecution.py b/scripts/lib/resulttool/manualexecution.py
index 9a29b0b273..c94f98126b 100755
--- a/scripts/lib/resulttool/manualexecution.py
+++ b/scripts/lib/resulttool/manualexecution.py
@@ -24,24 +24,12 @@ def load_json_file(file):
     with open(file, "r") as f:
         return json.load(f)
 
-
 class ManualTestRunner(object):
-    def __init__(self):
-        self.jdata = ''
-        self.test_module = ''
-        self.test_cases_id = ''
-        self.configuration = ''
-        self.starttime = ''
-        self.result_id = ''
-        self.write_dir = ''
 
     def _get_testcases(self, file):
         self.jdata = load_json_file(file)
-        self.test_cases_id = []
         self.test_module = self.jdata[0]['test']['@alias'].split('.', 2)[0]
-        for i in self.jdata:
-            self.test_cases_id.append(i['test']['@alias'])
-
+
     def _get_input(self, config):
         while True:
             output = input('{} = '.format(config))
@@ -67,45 +55,42 @@ class ManualTestRunner(object):
         extra_config = set(store_map['manual']) - set(self.configuration)
         for config in sorted(extra_config):
             print('---------------------------------------------')
-            print('This is configuration #%s. Please provide configuration value(use "None" if not applicable).'
-                  % config)
+            print('This is configuration #%s. Please provide configuration value(use "None" if not applicable).' % config)
             print('---------------------------------------------')
             value_conf = self._get_input('Configuration Value')
             print('---------------------------------------------\n')
             self.configuration[config] = value_conf
 
     def _create_result_id(self):
-        self.result_id = 'manual_' + self.test_module + '_' + self.starttime
+        self.result_id = 'manual_%s_%s' % (self.test_module, self.starttime)
 
-    def _execute_test_steps(self, test_id):
+    def _execute_test_steps(self, test):
         test_result = {}
-        total_steps = len(self.jdata[test_id]['test']['execution'].keys())
         print('------------------------------------------------------------------------')
-        print('Executing test case:' + '' '' + self.test_cases_id[test_id])
+        print('Executing test case: %s' % test['test']['@alias'])
         print('------------------------------------------------------------------------')
-        print('You have total ' + str(total_steps) + ' test steps to be executed.')
+        print('You have total %s test steps to be executed.' % len(test['test']['execution']))
         print('------------------------------------------------------------------------\n')
-        for step, _ in sorted(self.jdata[test_id]['test']['execution'].items(), key=lambda x: int(x[0])):
-            print('Step %s: ' % step + self.jdata[test_id]['test']['execution']['%s' % step]['action'])
-            expected_output = self.jdata[test_id]['test']['execution']['%s' % step]['expected_results']
+        for step, _ in sorted(test['test']['execution'].items(), key=lambda x: int(x[0])):
+            print('Step %s: %s' % (step, test['test']['execution'][step]['action']))
+            expected_output = test['test']['execution'][step]['expected_results']
             if expected_output:
-                print('Expected output: ' + expected_output)
+                print('Expected output: %s' % expected_output)
             while True:
-                done = input('\nPlease provide test results: (P)assed/(F)ailed/(B)locked/(S)kipped? \n')
-                done = done.lower()
+                done = input('\nPlease provide test results: (P)assed/(F)ailed/(B)locked/(S)kipped? \n').lower()
                 result_types = {'p':'PASSED',
                                 'f':'FAILED',
                                 'b':'BLOCKED',
                                 's':'SKIPPED'}
                 if done in result_types:
                     for r in result_types:
                         if done == r:
                             res = result_types[r]
                             if res == 'FAILED':
                                 log_input = input('\nPlease enter the error and the description of the log: (Ex:log:211 Error Bitbake)\n')
-                                test_result.update({self.test_cases_id[test_id]: {'status': '%s' % res, 'log': '%s' % log_input}})
+                                test_result.update({test['test']['@alias']: {'status': '%s' % res, 'log': '%s' % log_input}})
                             else:
-                                test_result.update({self.test_cases_id[test_id]: {'status': '%s' % res}})
+                                test_result.update({test['test']['@alias']: {'status': '%s' % res}})
                     break
                 print('Invalid input!')
         return test_result
@@ -120,9 +105,9 @@ class ManualTestRunner(object):
         self._create_result_id()
         self._create_write_dir()
         test_results = {}
-        print('\nTotal number of test cases in this test suite: ' + '%s\n' % len(self.jdata))
-        for i in range(0, len(self.jdata)):
-            test_result = self._execute_test_steps(i)
+        print('\nTotal number of test cases in this test suite: %s\n' % len(self.jdata))
+        for t in self.jdata:
+            test_result = self._execute_test_steps(t)
             test_results.update(test_result)
         return self.configuration, self.result_id, self.write_dir, test_results
 
@@ -130,8 +115,7 @@ def manualexecution(args, logger):
     testrunner = ManualTestRunner()
     get_configuration, get_result_id, get_write_dir, get_test_results = testrunner.run_test(args.file)
     resultjsonhelper = OETestResultJSONHelper()
-    resultjsonhelper.dump_testresult_file(get_write_dir, get_configuration, get_result_id,
-                                          get_test_results)
+    resultjsonhelper.dump_testresult_file(get_write_dir, get_configuration, get_result_id, get_test_results)
     return 0
 
 def register_commands(subparsers):
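For reference, a minimal sketch of the test case structure that the refactored _execute_test_steps() consumes, inferred only from the keys accessed in the diff above; the alias and step texts are hypothetical, and real manual test case exports may carry additional fields:

    test = {
        'test': {
            '@alias': 'manual.example-module.example_case',   # hypothetical alias
            'execution': {
                '1': {'action': 'Boot the image',
                      'expected_results': 'Login prompt is shown'},
                '2': {'action': 'Run the ps command',
                      'expected_results': ''},
            },
        },
    }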