Diffstat (limited to 'scripts/lib/resulttool/manualexecution.py')

 scripts/lib/resulttool/manualexecution.py (new, -rwxr-xr-x) | 137
 1 file changed, 137 insertions, 0 deletions

diff --git a/scripts/lib/resulttool/manualexecution.py b/scripts/lib/resulttool/manualexecution.py
new file mode 100755
index 0000000000..64ec581a9f
--- /dev/null
+++ b/scripts/lib/resulttool/manualexecution.py
@@ -0,0 +1,137 @@
# test case management tool - manual execution from testopia test cases
#
# Copyright (c) 2018, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
import argparse
import json
import os
import sys
import datetime
import re
from oeqa.core.runner import OETestResultJSONHelper
from resulttool.resultsutils import load_json_file

class ManualTestRunner(object):
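    """Interactively run manual test cases loaded from a JSON file and collect the tester's results."""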
    def __init__(self):
        self.jdata = ''
        self.test_module = ''
        self.test_suite = ''
        self.test_cases = ''
        self.configuration = ''
        self.starttime = ''
        self.result_id = ''
        self.write_dir = ''

    def _get_testcases(self, file):
        self.jdata = load_json_file(file)
        self.test_cases = []
        self.test_module = self.jdata[0]['test']['@alias'].split('.', 2)[0]
        self.test_suite = self.jdata[0]['test']['@alias'].split('.', 2)[1]
        for i in self.jdata:
            self.test_cases.append(i['test']['@alias'].split('.', 2)[2])

    def _get_input(self, config):
        while True:
            output = input('{} = '.format(config))
            if re.match('^[a-zA-Z0-9_]+$', output):
                break
            print('Only alphanumeric characters and underscores are allowed. Please try again.')
        return output

    def _create_config(self):
        self.configuration = {}
        while True:
            try:
                conf_total = int(input('\nPlease provide the number of configurations you want to save\n'))
                break
            except ValueError:
                print('Invalid input. Please provide a number, not other characters.')
        for i in range(conf_total):
            print('---------------------------------------------')
            print('This is configuration #%d. Please provide the configuration name and its value' % (i + 1))
            print('---------------------------------------------')
            name_conf = self._get_input('Configuration Name')
            value_conf = self._get_input('Configuration Value')
            print('---------------------------------------------\n')
            self.configuration[name_conf.upper()] = value_conf
        current_datetime = datetime.datetime.now()
        self.starttime = current_datetime.strftime('%Y%m%d%H%M%S')
        self.configuration['STARTTIME'] = self.starttime
        self.configuration['TEST_TYPE'] = self.test_module

    def _create_result_id(self):
        self.result_id = 'manual_' + self.test_module + '_' + self.starttime

    def _execute_test_steps(self, test_id):
        test_result = {}
        testcase_id = self.test_module + '.' + self.test_suite + '.' + self.test_cases[test_id]
        total_steps = len(self.jdata[test_id]['test']['execution'].keys())
        print('------------------------------------------------------------------------')
        print('Executing test case: ' + self.test_cases[test_id])
        print('------------------------------------------------------------------------')
        print('You have a total of ' + str(total_steps) + ' test steps to be executed.')
        print('------------------------------------------------------------------------\n')
        for step in sorted(self.jdata[test_id]['test']['execution'].keys()):
            print('Step %s: ' % step + self.jdata[test_id]['test']['execution'][step]['action'])
            print('Expected output: ' + self.jdata[test_id]['test']['execution'][step]['expected_results'])
            input('\nPlease press ENTER when you are done to proceed to the next step.\n')
        result_types = {'p': 'PASSED',
                        'f': 'FAILED',
                        'b': 'BLOCKED',
                        's': 'SKIPPED'}
        while True:
            done = input('\nPlease provide test results: (P)assed/(F)ailed/(B)locked/(S)kipped? \n').lower()
            if done in result_types:
                res = result_types[done]
                if res == 'FAILED':
                    log_input = input('\nPlease enter the error and the description of the log: (Ex:log:211 Error Bitbake)\n')
                    test_result.update({testcase_id: {'status': res, 'log': log_input}})
                else:
                    test_result.update({testcase_id: {'status': res}})
                break
            print('Invalid input!')
        return test_result

    def _create_write_dir(self):
        basepath = os.environ['BUILDDIR']
        self.write_dir = basepath + '/tmp/log/manual/'

    def run_test(self, file):
        self._get_testcases(file)
        self._create_config()
        self._create_result_id()
        self._create_write_dir()
        test_results = {}
        print('\nTotal number of test cases in this test suite: %s\n' % len(self.jdata))
        for i in range(len(self.jdata)):
            test_result = self._execute_test_steps(i)
            test_results.update(test_result)
        return self.configuration, self.result_id, self.write_dir, test_results

def manualexecution(args, logger):
    testrunner = ManualTestRunner()
    configuration, result_id, write_dir, test_results = testrunner.run_test(args.file)
    resultjsonhelper = OETestResultJSONHelper()
    resultjsonhelper.dump_testresult_file(write_dir, configuration, result_id,
                                          test_results)
    return 0

def register_commands(subparsers):
    """Register subcommands from this plugin"""
    parser_build = subparsers.add_parser('manualexecution', help='helper script for populating test results during manual test execution.',
                                         description='helper script for populating test results during manual test execution. You can find the manual test case JSON files in meta/lib/oeqa/manual/.',
                                         group='manualexecution')
    parser_build.set_defaults(func=manualexecution)
    parser_build.add_argument('file', help='specify the path to the manual test case JSON file. Note: please use "" to encapsulate the file path.')
\ No newline at end of file
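
For reference, a minimal usage sketch, assuming the standard resulttool wrapper script has this plugin registered (the JSON file name below is only an example; any manual test case file under meta/lib/oeqa/manual/ should work, quoted as the help text above suggests):

    $ resulttool manualexecution "meta/lib/oeqa/manual/build-appliance.json"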