Diffstat (limited to 'scripts/lib/resulttool/manualexecution.py')
 -rwxr-xr-x  scripts/lib/resulttool/manualexecution.py | 235 --------------------
 1 file changed, 0 insertions(+), 235 deletions(-)
diff --git a/scripts/lib/resulttool/manualexecution.py b/scripts/lib/resulttool/manualexecution.py
deleted file mode 100755
index ae0861ac6b..0000000000
--- a/scripts/lib/resulttool/manualexecution.py
+++ /dev/null
@@ -1,235 +0,0 @@
-# test case management tool - manual execution from testopia test cases
-#
-# Copyright (c) 2018, Intel Corporation.
-#
-# SPDX-License-Identifier: GPL-2.0-only
-#
-
-import argparse
-import json
-import os
-import sys
-import datetime
-import re
-import copy
-from oeqa.core.runner import OETestResultJSONHelper
-
-
-def load_json_file(f):
-    with open(f, "r") as filedata:
-        return json.load(filedata)
-
-def write_json_file(f, json_data):
-    os.makedirs(os.path.dirname(f), exist_ok=True)
-    with open(f, 'w') as filedata:
-        filedata.write(json.dumps(json_data, sort_keys=True, indent=1))
-
-class ManualTestRunner(object):
-
-    def _get_test_module(self, case_file):
-        return os.path.basename(case_file).split('.')[0]
-
-    def _get_input(self, config):
-        while True:
-            output = input('{} = '.format(config))
-            if re.match('^[a-z0-9-.]+$', output):
-                break
-            print('Only lowercase alphanumeric, hyphen and dot are allowed. Please try again')
-        return output
-
-    def _get_available_config_options(self, config_options, test_module, target_config):
-        avail_config_options = None
-        if test_module in config_options:
-            avail_config_options = config_options[test_module].get(target_config)
-        return avail_config_options
-
-    def _choose_config_option(self, options):
-        while True:
-            output = input('{} = '.format('Option index number'))
-            if output in options:
-                break
-            print('Only integer index inputs from above available configuration options are allowed. Please try again.')
-        return options[output]
-
-    def _get_config(self, config_options, test_module):
-        from oeqa.utils.metadata import get_layers
-        from oeqa.utils.commands import get_bb_var
-        from resulttool.resultutils import store_map
-
-        layers = get_layers(get_bb_var('BBLAYERS'))
-        configurations = {}
-        configurations['LAYERS'] = layers
-        configurations['STARTTIME'] = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
-        configurations['TEST_TYPE'] = 'manual'
-        configurations['TEST_MODULE'] = test_module
-
-        extra_config = set(store_map['manual']) - set(configurations)
-        for config in sorted(extra_config):
-            avail_config_options = self._get_available_config_options(config_options, test_module, config)
-            if avail_config_options:
-                print('---------------------------------------------')
-                print('These are available configuration #%s options:' % config)
-                print('---------------------------------------------')
-                for option, _ in sorted(avail_config_options.items(), key=lambda x: int(x[0])):
-                    print('%s: %s' % (option, avail_config_options[option]))
-                print('Please select configuration option, enter the integer index number.')
-                value_conf = self._choose_config_option(avail_config_options)
-                print('---------------------------------------------\n')
-            else:
-                print('---------------------------------------------')
-                print('This is configuration #%s. Please provide configuration value(use "None" if not applicable).' % config)
-                print('---------------------------------------------')
-                value_conf = self._get_input('Configuration Value')
-                print('---------------------------------------------\n')
-            configurations[config] = value_conf
-        return configurations
-
-    def _execute_test_steps(self, case):
-        test_result = {}
-        print('------------------------------------------------------------------------')
-        print('Executing test case: %s' % case['test']['@alias'])
-        print('------------------------------------------------------------------------')
-        print('You have total %s test steps to be executed.' % len(case['test']['execution']))
-        print('------------------------------------------------------------------------\n')
-        for step, _ in sorted(case['test']['execution'].items(), key=lambda x: int(x[0])):
-            print('Step %s: %s' % (step, case['test']['execution'][step]['action']))
-            expected_output = case['test']['execution'][step]['expected_results']
-            if expected_output:
-                print('Expected output: %s' % expected_output)
-        while True:
-            done = input('\nPlease provide test results: (P)assed/(F)ailed/(B)locked/(S)kipped? \n').lower()
-            result_types = {'p':'PASSED',
-                            'f':'FAILED',
-                            'b':'BLOCKED',
-                            's':'SKIPPED'}
-            if done in result_types:
-                for r in result_types:
-                    if done == r:
-                        res = result_types[r]
-                        if res == 'FAILED':
-                            log_input = input('\nPlease enter the error and the description of the log: (Ex:log:211 Error Bitbake)\n')
-                            test_result.update({case['test']['@alias']: {'status': '%s' % res, 'log': '%s' % log_input}})
-                        else:
-                            test_result.update({case['test']['@alias']: {'status': '%s' % res}})
-                break
-            print('Invalid input!')
-        return test_result
-
-    def _get_write_dir(self):
-        return os.environ['BUILDDIR'] + '/tmp/log/manual/'
-
-    def run_test(self, case_file, config_options_file, testcase_config_file):
-        test_module = self._get_test_module(case_file)
-        cases = load_json_file(case_file)
-        config_options = {}
-        if config_options_file:
-            config_options = load_json_file(config_options_file)
-        configurations = self._get_config(config_options, test_module)
-        result_id = 'manual_%s_%s' % (test_module, configurations['STARTTIME'])
-        test_results = {}
-        if testcase_config_file:
-            test_case_config = load_json_file(testcase_config_file)
-            test_case_to_execute = test_case_config['testcases']
-            for case in copy.deepcopy(cases) :
-                if case['test']['@alias'] not in test_case_to_execute:
-                    cases.remove(case)
-
-        print('\nTotal number of test cases in this test suite: %s\n' % len(cases))
-        for c in cases:
-            test_result = self._execute_test_steps(c)
-            test_results.update(test_result)
-        return configurations, result_id, self._get_write_dir(), test_results
-
-    def _get_true_false_input(self, input_message):
-        yes_list = ['Y', 'YES']
-        no_list = ['N', 'NO']
-        while True:
-            more_config_option = input(input_message).upper()
-            if more_config_option in yes_list or more_config_option in no_list:
-                break
-            print('Invalid input!')
-        if more_config_option in no_list:
-            return False
-        return True
-
-    def make_config_option_file(self, logger, case_file, config_options_file):
-        config_options = {}
-        if config_options_file:
-            config_options = load_json_file(config_options_file)
-        new_test_module = self._get_test_module(case_file)
-        print('Creating configuration options file for test module: %s' % new_test_module)
-        new_config_options = {}
-
-        while True:
-            config_name = input('\nPlease provide test configuration to create:\n').upper()
-            new_config_options[config_name] = {}
-            while True:
-                config_value = self._get_input('Configuration possible option value')
-                config_option_index = len(new_config_options[config_name]) + 1
-                new_config_options[config_name][config_option_index] = config_value
-                more_config_option = self._get_true_false_input('\nIs there more configuration option input: (Y)es/(N)o\n')
-                if not more_config_option:
-                    break
-            more_config = self._get_true_false_input('\nIs there more configuration to create: (Y)es/(N)o\n')
-            if not more_config:
-                break
-
-        if new_config_options:
-            config_options[new_test_module] = new_config_options
-        if not config_options_file:
-            config_options_file = os.path.join(self._get_write_dir(), 'manual_config_options.json')
-        write_json_file(config_options_file, config_options)
-        logger.info('Configuration option file created at %s' % config_options_file)
-
-    def make_testcase_config_file(self, logger, case_file, testcase_config_file):
-        if testcase_config_file:
-            if os.path.exists(testcase_config_file):
-                print('\nTest configuration file with name %s already exists. Please provide a unique file name' % (testcase_config_file))
-                return 0
-
-        if not testcase_config_file:
-            testcase_config_file = os.path.join(self._get_write_dir(), "testconfig_new.json")
-
-        testcase_config = {}
-        cases = load_json_file(case_file)
-        new_test_module = self._get_test_module(case_file)
-        new_testcase_config = {}
-        new_testcase_config['testcases'] = []
-
-        print('\nAdd testcases for this configuration file:')
-        for case in cases:
-            print('\n' + case['test']['@alias'])
-            add_tc_config = self._get_true_false_input('\nDo you want to add this test case to test configuration : (Y)es/(N)o\n')
-            if add_tc_config:
-                new_testcase_config['testcases'].append(case['test']['@alias'])
-        write_json_file(testcase_config_file, new_testcase_config)
-        logger.info('Testcase Configuration file created at %s' % testcase_config_file)
-
-def manualexecution(args, logger):
-    testrunner = ManualTestRunner()
-    if args.make_config_options_file:
-        testrunner.make_config_option_file(logger, args.file, args.config_options_file)
-        return 0
-    if args.make_testcase_config_file:
-        testrunner.make_testcase_config_file(logger, args.file, args.testcase_config_file)
-        return 0
-    configurations, result_id, write_dir, test_results = testrunner.run_test(args.file, args.config_options_file, args.testcase_config_file)
-    resultjsonhelper = OETestResultJSONHelper()
-    resultjsonhelper.dump_testresult_file(write_dir, configurations, result_id, test_results)
-    return 0
-
-def register_commands(subparsers):
-    """Register subcommands from this plugin"""
-    parser_build = subparsers.add_parser('manualexecution', help='helper script for results populating during manual test execution.',
-                                         description='helper script for results populating during manual test execution. You can find manual test case JSON file in meta/lib/oeqa/manual/',
-                                         group='manualexecution')
-    parser_build.set_defaults(func=manualexecution)
-    parser_build.add_argument('file', help='specify path to manual test case JSON file.Note: Please use \"\" to encapsulate the file path.')
-    parser_build.add_argument('-c', '--config-options-file', default='',
-                              help='the config options file to import and used as available configuration option selection or make config option file')
-    parser_build.add_argument('-m', '--make-config-options-file', action='store_true',
-                              help='make the configuration options file based on provided inputs')
-    parser_build.add_argument('-t', '--testcase-config-file', default='',
-                              help='the testcase configuration file to enable user to run a selected set of test case or make a testcase configuration file')
-    parser_build.add_argument('-d', '--make-testcase-config-file', action='store_true',
-                              help='make the testcase configuration file to run a set of test cases based on user selection')
\ No newline at end of file
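
Note: the deleted plugin registered the 'manualexecution' subcommand of resulttool. Going by the argparse definitions in register_commands() above, a typical invocation (illustrative file names; manual test case JSON files live under meta/lib/oeqa/manual/) would have been:

    resulttool manualexecution "meta/lib/oeqa/manual/<module>.json"
    resulttool manualexecution "<case file>" -t <testcase-config>.json

and, per run_test(), the optional testcase configuration file is a JSON object of the form {"testcases": ["<test case @alias>", ...]}.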
