author     Mazliana <mazliana.mohamad@intel.com>  2019-02-14 13:50:38 +0800
committer  Richard Purdie <richard.purdie@linuxfoundation.org>  2019-03-26 15:38:22 +0000
commit     12385bc69b1e4f7e61638e201783f6a56136ba29 (patch)
tree       61b86ed8bb4bc38ccd715d11f8174c238ce98ef2 /scripts
parent     661fa1335b6832a1d13b13e8ca4354cdef213358 (diff)
download   poky-12385bc69b1e4f7e61638e201783f6a56136ba29.tar.gz
scripts/resulttool: enable manual execution and result creation
Integrate a "manualexecution" operation into the resulttool scripts. Manual execution is a helper script that runs all manual test cases from a baseline command, presenting the user guideline steps and the expected results for each case. The final step asks the user to enter the result of the test: passed, failed, blocked or skipped. The result is written to testresults.json, including any error log entered by the user and the configuration, if one was provided. The JSON output file is created using the OEQA library.

The configuration part is keyed in manually by the user: the script asks how many configurations should be recorded, then prompts for each configuration name/value pair. From a QA perspective, a "configuration" describes the test environment and parameters used during QA setup before testing can be carried out, for example the image used to boot, the host machine distro, the poky configuration, and so on. Recording the configuration standardizes the output test result format between automated and manual execution.

To use these scripts, first source the OE build environment, then run the entry point script to see the help:

  $ resulttool

To execute manual test cases, run:

  $ resulttool manualexecution <manualjsonfile>

By default, testresults.json is stored in <build_dir>/tmp/log/manual/.

[YOCTO #12651]

(From OE-Core rev: 07054cc2691fd2822028a3fd55185af457f79ebf)

Signed-off-by: Mazliana <mazliana.mohamad@intel.com>
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
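For orientation, the parser added below expects the manual test case JSON file to be a list of entries whose '@alias' is a dot-separated <module>.<suite>.<case> name and whose 'execution' block maps step numbers to an 'action' and its 'expected_results'. A minimal sketch of that shape, with a made-up alias and steps rather than content from any real file under meta/lib/oeqa/manual/:

  [
      {
          "test": {
              "@alias": "mytests.boot.qemu_boot",
              "execution": {
                  "1": {
                      "action": "Boot the image under QEMU.",
                      "expected_results": "The image boots to a login prompt."
                  },
                  "2": {
                      "action": "Log in as root.",
                      "expected_results": "Login succeeds without a password."
                  }
              }
          }
      }
  ]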
Diffstat (limited to 'scripts')
-rwxr-xr-x  scripts/lib/resulttool/manualexecution.py | 137
-rwxr-xr-x  scripts/resulttool                        |   8
2 files changed, 145 insertions, 0 deletions
diff --git a/scripts/lib/resulttool/manualexecution.py b/scripts/lib/resulttool/manualexecution.py
new file mode 100755
index 0000000000..64ec581a9f
--- /dev/null
+++ b/scripts/lib/resulttool/manualexecution.py
@@ -0,0 +1,137 @@
# test case management tool - manual execution from testopia test cases
#
# Copyright (c) 2018, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
import os
import datetime
import re
from oeqa.core.runner import OETestResultJSONHelper
from resulttool.resultsutils import load_json_file

class ManualTestRunner(object):
    def __init__(self):
        self.jdata = ''
        self.test_module = ''
        self.test_suite = ''
        self.test_cases = ''
        self.configuration = ''
        self.starttime = ''
        self.result_id = ''
        self.write_dir = ''

    def _get_testcases(self, file):
        # Each test case '@alias' is a dot-separated <module>.<suite>.<case> name.
        self.jdata = load_json_file(file)
        self.test_cases = []
        self.test_module = self.jdata[0]['test']['@alias'].split('.', 2)[0]
        self.test_suite = self.jdata[0]['test']['@alias'].split('.', 2)[1]
        for i in self.jdata:
            self.test_cases.append(i['test']['@alias'].split('.', 2)[2])

    def _get_input(self, config):
        # Keep prompting until the value is a single alphanumeric/underscore token.
        while True:
            output = input('{} = '.format(config))
            if re.match('^[a-zA-Z0-9_]+$', output):
                break
            print('Only alphanumeric characters and underscores are allowed. Please try again.')
        return output

    def _create_config(self):
        self.configuration = {}
        while True:
            try:
                conf_total = int(input('\nPlease enter how many configurations you want to save\n'))
                break
            except ValueError:
                print('Invalid input. Please enter a number.')
        for i in range(conf_total):
            print('---------------------------------------------')
            print('This is configuration #%s. Please provide a configuration name and its value' % (i + 1))
            print('---------------------------------------------')
            name_conf = self._get_input('Configuration Name')
            value_conf = self._get_input('Configuration Value')
            print('---------------------------------------------\n')
            self.configuration[name_conf.upper()] = value_conf
        current_datetime = datetime.datetime.now()
        self.starttime = current_datetime.strftime('%Y%m%d%H%M%S')
        self.configuration['STARTTIME'] = self.starttime
        self.configuration['TEST_TYPE'] = self.test_module

    def _create_result_id(self):
        self.result_id = 'manual_' + self.test_module + '_' + self.starttime

    def _execute_test_steps(self, test_id):
        test_result = {}
        testcase_id = self.test_module + '.' + self.test_suite + '.' + self.test_cases[test_id]
        total_steps = len(self.jdata[test_id]['test']['execution'].keys())
        print('------------------------------------------------------------------------')
        print('Executing test case: ' + self.test_cases[test_id])
        print('------------------------------------------------------------------------')
        print('You have a total of %s test steps to be executed.' % total_steps)
        print('------------------------------------------------------------------------\n')
        # Walk the user through each step, then collect a single result for the case.
        for step in sorted((self.jdata[test_id]['test']['execution']).keys()):
            print('Step %s: ' % step + self.jdata[test_id]['test']['execution'][step]['action'])
            print('Expected output: ' + self.jdata[test_id]['test']['execution'][step]['expected_results'])
            input('\nPlease press ENTER when you are done, to proceed to the next step.\n')
        while True:
            done = input('\nPlease provide the test result: (P)assed/(F)ailed/(B)locked/(S)kipped? \n').lower()
            result_types = {'p': 'PASSED',
                            'f': 'FAILED',
                            'b': 'BLOCKED',
                            's': 'SKIPPED'}
            if done in result_types:
                res = result_types[done]
                if res == 'FAILED':
                    log_input = input('\nPlease enter the error and a description of the log: (Ex: log:211 Error Bitbake)\n')
                    test_result.update({testcase_id: {'status': res, 'log': log_input}})
                else:
                    test_result.update({testcase_id: {'status': res}})
                break
            print('Invalid input!')
        return test_result

    def _create_write_dir(self):
        basepath = os.environ['BUILDDIR']
        self.write_dir = basepath + '/tmp/log/manual/'

    def run_test(self, file):
        self._get_testcases(file)
        self._create_config()
        self._create_result_id()
        self._create_write_dir()
        test_results = {}
        print('\nTotal number of test cases in this test suite: %s\n' % len(self.jdata))
        for i in range(len(self.jdata)):
            test_result = self._execute_test_steps(i)
            test_results.update(test_result)
        return self.configuration, self.result_id, self.write_dir, test_results

def manualexecution(args, logger):
    testrunner = ManualTestRunner()
    get_configuration, get_result_id, get_write_dir, get_test_results = testrunner.run_test(args.file)
    resultjsonhelper = OETestResultJSONHelper()
    resultjsonhelper.dump_testresult_file(get_write_dir, get_configuration, get_result_id,
                                          get_test_results)
    return 0

def register_commands(subparsers):
    """Register subcommands from this plugin"""
    parser_build = subparsers.add_parser('manualexecution', help='helper script for populating test results during manual test execution.',
                                         description='Helper script for populating test results during manual test execution. Manual test case JSON files can be found in meta/lib/oeqa/manual/.',
                                         group='manualexecution')
    parser_build.set_defaults(func=manualexecution)
    parser_build.add_argument('file', help='specify the path to the manual test case JSON file. Note: please use "" to encapsulate the file path.')
\ No newline at end of file
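The manualexecution entry point above hands the collected data to OETestResultJSONHelper.dump_testresult_file(write_dir, configuration, result_id, test_results). Assuming that helper keys the output file by result_id and stores the configuration and result maps alongside each other (the shape suggested by the arguments passed to it), a passing run of the sketch case from the commit message would land in <build_dir>/tmp/log/manual/testresults.json looking roughly like:

  {
      "manual_mytests_20190214130500": {
          "configuration": {
              "IMAGE": "core_image_sato",
              "STARTTIME": "20190214130500",
              "TEST_TYPE": "mytests"
          },
          "result": {
              "mytests.boot.qemu_boot": {
                  "status": "PASSED"
              }
          }
      }
  }

A FAILED entry additionally carries the 'log' text entered at the prompt.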
diff --git a/scripts/resulttool b/scripts/resulttool
index ebb5fc81c9..13430e192a 100755
--- a/scripts/resulttool
+++ b/scripts/resulttool
@@ -17,6 +17,11 @@
 # To perform regression file analysis, execute the below
 # $ resulttool regression-file <base_result_file> <target_result_file>
 #
+# To execute manual test cases, execute the below
+# $ resulttool manualexecution <manualjsonfile>
+#
+# By default, testresults.json for manualexecution is stored in <build>/tmp/log/manual/
+#
 # Copyright (c) 2019, Intel Corporation.
 #
 # This program is free software; you can redistribute it and/or modify it
@@ -42,6 +47,7 @@ import resulttool.merge
 import resulttool.store
 import resulttool.regression
 import resulttool.report
+import resulttool.manualexecution
 logger = scriptutils.logger_create('resulttool')
 
 def _validate_user_input_arguments(args):
@@ -58,6 +64,8 @@ def main():
     parser.add_argument('-q', '--quiet', help='print only errors', action='store_true')
     subparsers = parser.add_subparsers(dest="subparser_name", title='subcommands', metavar='<subcommand>')
     subparsers.required = True
+    subparsers.add_subparser_group('manualexecution', 'manual testcases', 300)
+    resulttool.manualexecution.register_commands(subparsers)
     subparsers.add_subparser_group('setup', 'setup', 200)
     resulttool.merge.register_commands(subparsers)
     resulttool.store.register_commands(subparsers)
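Putting it together, an interactive session driven by the sketch file from the commit message might look like this (the prompt text comes from the print/input calls in manualexecution.py; the file name and all typed values are made-up, and note that configuration values may only contain alphanumerics and underscores):

  $ resulttool manualexecution mytests.json

  Please enter how many configurations you want to save
  1
  ---------------------------------------------
  This is configuration #1. Please provide a configuration name and its value
  ---------------------------------------------
  Configuration Name = image
  Configuration Value = core_image_sato
  ---------------------------------------------

  Total number of test cases in this test suite: 1

  ------------------------------------------------------------------------
  Executing test case: qemu_boot
  ------------------------------------------------------------------------
  You have a total of 2 test steps to be executed.
  ------------------------------------------------------------------------

  Step 1: Boot the image under QEMU.
  Expected output: The image boots to a login prompt.

  Please press ENTER when you are done, to proceed to the next step.

  Step 2: Log in as root.
  Expected output: Login succeeds without a password.

  Please press ENTER when you are done, to proceed to the next step.

  Please provide the test result: (P)assed/(F)ailed/(B)locked/(S)kipped?
  p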