author     Yeoh Ee Peng <ee.peng.yeoh@intel.com>                  2019-04-11 13:20:49 +0800
committer  Richard Purdie <richard.purdie@linuxfoundation.org>    2019-04-12 09:29:06 +0100
commit     1576ce540baff83c0d9eb40f3cdb0166b254e1a8 (patch)
tree       0487cd6446f86128306075c5eabcaab70cd5fd19 /scripts/lib/resulttool
parent     97f37fbda1dd75b101318c8e245d70fdd37d2b68 (diff)
download   poky-1576ce540baff83c0d9eb40f3cdb0166b254e1a8.tar.gz
resulttool/manualexecution: Refactor and remove duplicate code
Remove duplicate code. Replace unnecessary class variables with
local variables. Rename variables and arguments with simple,
standard names.
(From OE-Core rev: 6bef61d36f3328fad003c0dc9c010d3f76ba96d8)
Signed-off-by: Yeoh Ee Peng <ee.peng.yeoh@intel.com>
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
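To illustrate the shape of the refactor, the sketch below shows the same pattern in isolation: a helper that previously stashed results on self is turned into a method that returns them, so the caller holds plain locals and no hidden ordering between calls remains. The Before/After classes are hypothetical and are not part of manualexecution.py.

    # Illustrative sketch only; not the real ManualTestRunner.
    import datetime

    class Before:
        def _create_config(self):
            # state leaks onto the instance and must be read back later
            self.starttime = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
            self.configuration = {'STARTTIME': self.starttime, 'TEST_TYPE': 'manual'}

        def _create_result_id(self):
            # silently depends on _create_config() having run first
            self.result_id = 'manual_example_%s' % self.starttime

    class After:
        def _get_config(self):
            # everything the caller needs is returned directly
            return {'STARTTIME': datetime.datetime.now().strftime('%Y%m%d%H%M%S'),
                    'TEST_TYPE': 'manual'}

        def run(self):
            configurations = self._get_config()
            result_id = 'manual_example_%s' % configurations['STARTTIME']
            return configurations, result_id

    if __name__ == '__main__':
        print(After().run())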
Diffstat (limited to 'scripts/lib/resulttool')
 -rwxr-xr-x  scripts/lib/resulttool/manualexecution.py  87
 1 file changed, 40 insertions(+), 47 deletions(-)
diff --git a/scripts/lib/resulttool/manualexecution.py b/scripts/lib/resulttool/manualexecution.py
index 12ef90d6af..ea44219d4e 100755
--- a/scripts/lib/resulttool/manualexecution.py
+++ b/scripts/lib/resulttool/manualexecution.py
@@ -20,9 +20,9 @@ import re
 from oeqa.core.runner import OETestResultJSONHelper
 
 
-def load_json_file(file):
-    with open(file, "r") as f:
-        return json.load(f)
+def load_json_file(f):
+    with open(f, "r") as filedata:
+        return json.load(filedata)
 
 def write_json_file(f, json_data):
     os.makedirs(os.path.dirname(f), exist_ok=True)
@@ -31,9 +31,8 @@ def write_json_file(f, json_data):
 
 class ManualTestRunner(object):
 
-    def _get_testcases(self, file):
-        self.jdata = load_json_file(file)
-        self.test_module = self.jdata[0]['test']['@alias'].split('.', 2)[0]
+    def _get_test_module(self, case_file):
+        return os.path.basename(case_file).split('.')[0]
 
     def _get_input(self, config):
         while True:
@@ -57,23 +56,21 @@ class ManualTestRunner(object):
             print('Only integer index inputs from above available configuration options are allowed. Please try again.')
         return options[output]
 
-    def _create_config(self, config_options):
+    def _get_config(self, config_options, test_module):
         from oeqa.utils.metadata import get_layers
         from oeqa.utils.commands import get_bb_var
         from resulttool.resultutils import store_map
 
         layers = get_layers(get_bb_var('BBLAYERS'))
-        self.configuration = {}
-        self.configuration['LAYERS'] = layers
-        current_datetime = datetime.datetime.now()
-        self.starttime = current_datetime.strftime('%Y%m%d%H%M%S')
-        self.configuration['STARTTIME'] = self.starttime
-        self.configuration['TEST_TYPE'] = 'manual'
-        self.configuration['TEST_MODULE'] = self.test_module
-
-        extra_config = set(store_map['manual']) - set(self.configuration)
+        configurations = {}
+        configurations['LAYERS'] = layers
+        configurations['STARTTIME'] = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
+        configurations['TEST_TYPE'] = 'manual'
+        configurations['TEST_MODULE'] = test_module
+
+        extra_config = set(store_map['manual']) - set(configurations)
         for config in sorted(extra_config):
-            avail_config_options = self._get_available_config_options(config_options, self.test_module, config)
+            avail_config_options = self._get_available_config_options(config_options, test_module, config)
             if avail_config_options:
                 print('---------------------------------------------')
                 print('These are available configuration #%s options:' % config)
@@ -89,21 +86,19 @@ class ManualTestRunner(object):
                 print('---------------------------------------------')
                 value_conf = self._get_input('Configuration Value')
                 print('---------------------------------------------\n')
-            self.configuration[config] = value_conf
-
-    def _create_result_id(self):
-        self.result_id = 'manual_%s_%s' % (self.test_module, self.starttime)
+            configurations[config] = value_conf
+        return configurations
 
-    def _execute_test_steps(self, test):
+    def _execute_test_steps(self, case):
         test_result = {}
         print('------------------------------------------------------------------------')
-        print('Executing test case: %s' % test['test']['@alias'])
+        print('Executing test case: %s' % case['test']['@alias'])
         print('------------------------------------------------------------------------')
-        print('You have total %s test steps to be executed.' % len(test['test']['execution']))
+        print('You have total %s test steps to be executed.' % len(case['test']['execution']))
         print('------------------------------------------------------------------------\n')
-        for step, _ in sorted(test['test']['execution'].items(), key=lambda x: int(x[0])):
-            print('Step %s: %s' % (step, test['test']['execution'][step]['action']))
-            expected_output = test['test']['execution'][step]['expected_results']
+        for step, _ in sorted(case['test']['execution'].items(), key=lambda x: int(x[0])):
+            print('Step %s: %s' % (step, case['test']['execution'][step]['action']))
+            expected_output = case['test']['execution'][step]['expected_results']
             if expected_output:
                 print('Expected output: %s' % expected_output)
         while True:
@@ -118,31 +113,30 @@ class ManualTestRunner(object):
                         res = result_types[r]
                         if res == 'FAILED':
                             log_input = input('\nPlease enter the error and the description of the log: (Ex:log:211 Error Bitbake)\n')
-                            test_result.update({test['test']['@alias']: {'status': '%s' % res, 'log': '%s' % log_input}})
+                            test_result.update({case['test']['@alias']: {'status': '%s' % res, 'log': '%s' % log_input}})
                         else:
-                            test_result.update({test['test']['@alias']: {'status': '%s' % res}})
+                            test_result.update({case['test']['@alias']: {'status': '%s' % res}})
                 break
             print('Invalid input!')
         return test_result
 
-    def _create_write_dir(self):
-        basepath = os.environ['BUILDDIR']
-        self.write_dir = basepath + '/tmp/log/manual/'
+    def _get_write_dir(self):
+        return os.environ['BUILDDIR'] + '/tmp/log/manual/'
 
-    def run_test(self, file, config_options_file):
-        self._get_testcases(file)
+    def run_test(self, case_file, config_options_file):
+        test_module = self._get_test_module(case_file)
+        cases = load_json_file(case_file)
         config_options = {}
         if config_options_file:
             config_options = load_json_file(config_options_file)
-        self._create_config(config_options)
-        self._create_result_id()
-        self._create_write_dir()
+        configurations = self._get_config(config_options, test_module)
+        result_id = 'manual_%s_%s' % (test_module, configurations['STARTTIME'])
         test_results = {}
-        print('\nTotal number of test cases in this test suite: %s\n' % len(self.jdata))
-        for t in self.jdata:
-            test_result = self._execute_test_steps(t)
+        print('\nTotal number of test cases in this test suite: %s\n' % len(cases))
+        for c in cases:
+            test_result = self._execute_test_steps(c)
             test_results.update(test_result)
-        return self.configuration, self.result_id, self.write_dir, test_results
+        return configurations, result_id, self._get_write_dir(), test_results
 
     def _get_true_false_input(self, input_message):
         yes_list = ['Y', 'YES']
@@ -156,11 +150,11 @@ class ManualTestRunner(object):
             return False
         return True
 
-    def make_config_option_file(self, logger, manual_case_file, config_options_file):
+    def make_config_option_file(self, logger, case_file, config_options_file):
         config_options = {}
         if config_options_file:
             config_options = load_json_file(config_options_file)
-        new_test_module = os.path.basename(manual_case_file).split('.')[0]
+        new_test_module = self._get_test_module(case_file)
         print('Creating configuration options file for test module: %s' % new_test_module)
         new_config_options = {}
 
@@ -181,8 +175,7 @@ class ManualTestRunner(object):
         if new_config_options:
             config_options[new_test_module] = new_config_options
         if not config_options_file:
-            self._create_write_dir()
-            config_options_file = os.path.join(self.write_dir, 'manual_config_options.json')
+            config_options_file = os.path.join(self._get_write_dir(), 'manual_config_options.json')
         write_json_file(config_options_file, config_options)
         logger.info('Configuration option file created at %s' % config_options_file)
 
@@ -191,9 +184,9 @@ def manualexecution(args, logger):
     if args.make_config_options_file:
         testrunner.make_config_option_file(logger, args.file, args.config_options_file)
         return 0
-    get_configuration, get_result_id, get_write_dir, get_test_results = testrunner.run_test(args.file, args.config_options_file)
+    configurations, result_id, write_dir, test_results = testrunner.run_test(args.file, args.config_options_file)
     resultjsonhelper = OETestResultJSONHelper()
-    resultjsonhelper.dump_testresult_file(get_write_dir, get_configuration, get_result_id, get_test_results)
+    resultjsonhelper.dump_testresult_file(write_dir, configurations, result_id, test_results)
     return 0
 
 def register_commands(subparsers):
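
For context, the data that _execute_test_steps() walks over looks roughly like the sketch below. This is a hypothetical minimal record inferred only from the keys the patched code reads ('@alias', 'execution', 'action', 'expected_results'); real manual test case JSON files contain additional fields.

    # Hypothetical minimal manual test case record; field values are invented.
    example_case = {
        'test': {
            '@alias': 'example.example_suite.example_case',
            'execution': {
                '1': {'action': 'Run the first manual step.',
                      'expected_results': 'Step completes without error.'},
                '2': {'action': 'Run the second manual step.',
                      'expected_results': ''},
            },
        },
    }

    # run_test() loads a list of such records with load_json_file(case_file)
    # and passes each one to _execute_test_steps(), which prints the steps:
    for step, _ in sorted(example_case['test']['execution'].items(), key=lambda x: int(x[0])):
        print('Step %s: %s' % (step, example_case['test']['execution'][step]['action']))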