author: sangeeta jain <sangeeta.jain@intel.com> 2019-04-19 16:22:26 +0800
committer: Richard Purdie <richard.purdie@linuxfoundation.org> 2019-04-26 10:09:08 +0100
commit: ebeecd60c288dac7f6ad4e964d2e440edbd7f2de (patch)
tree: cd4c8537895526970fb6dc6e61cbec317bc2183c /scripts
parent: 852808d4c1e20004ae5e2319889e2766401173c1 (diff)
download: poky-ebeecd60c288dac7f6ad4e964d2e440edbd7f2de.tar.gz
resulttool/manualexecution: Enable test case configuration option
Currently, manualexecution requires the user to execute all test cases defined inside a "modulename.json" file in oeqa/manual. There are cases where not all test cases of a module need to run on a specific DUT. Enable manualexecution to accept an optional, pre-defined JSON-format test case configuration file, so the user can select test cases from the "modulename.json" instead of running all of them. This helps reduce testing time and avoids reporting unnecessary skips or failures.

Example pre-defined JSON-format test case configuration file (for build-appliance):

{
    "testcases" : [
        "build-appliance.build-appliance.Create_core-image-sato-sdk_using_build_appliance",
        "build-appliance.build-appliance.Build_a_image_without_error_(added_recipe)"
    ]
}

(From OE-Core rev: 3950c28a34f94b5907d37b579bdaee5a59794652)

Signed-off-by: sangeeta jain <sangeeta.jain@intel.com>
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
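As a usage sketch (the manual test file path and configuration file name below are illustrative, not part of this commit), the selection file can be passed to the manualexecution subcommand through the new -t/--testcase-config-file option added in this patch:

    resulttool manualexecution oeqa/manual/build-appliance.json -t build-appliance-testcase-config.json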
Diffstat (limited to 'scripts')
-rwxr-xr-x  scripts/lib/resulttool/manualexecution.py  14
1 file changed, 12 insertions, 2 deletions
diff --git a/scripts/lib/resulttool/manualexecution.py b/scripts/lib/resulttool/manualexecution.py
index ea44219d4e..dc368f36fc 100755
--- a/scripts/lib/resulttool/manualexecution.py
+++ b/scripts/lib/resulttool/manualexecution.py
@@ -17,6 +17,7 @@ import os
 import sys
 import datetime
 import re
+import copy
 from oeqa.core.runner import OETestResultJSONHelper
 
 
@@ -123,7 +124,7 @@ class ManualTestRunner(object):
     def _get_write_dir(self):
         return os.environ['BUILDDIR'] + '/tmp/log/manual/'
 
-    def run_test(self, case_file, config_options_file):
+    def run_test(self, case_file, config_options_file, testcase_config_file):
         test_module = self._get_test_module(case_file)
         cases = load_json_file(case_file)
         config_options = {}
@@ -132,6 +133,13 @@ class ManualTestRunner(object):
         configurations = self._get_config(config_options, test_module)
         result_id = 'manual_%s_%s' % (test_module, configurations['STARTTIME'])
         test_results = {}
+        if testcase_config_file:
+            test_case_config = load_json_file(testcase_config_file)
+            test_case_to_execute = test_case_config['testcases']
+            for case in copy.deepcopy(cases) :
+                if case['test']['@alias'] not in test_case_to_execute:
+                    cases.remove(case)
+
         print('\nTotal number of test cases in this test suite: %s\n' % len(cases))
         for c in cases:
             test_result = self._execute_test_steps(c)
@@ -184,7 +192,7 @@ def manualexecution(args, logger):
     if args.make_config_options_file:
         testrunner.make_config_option_file(logger, args.file, args.config_options_file)
         return 0
-    configurations, result_id, write_dir, test_results = testrunner.run_test(args.file, args.config_options_file)
+    configurations, result_id, write_dir, test_results = testrunner.run_test(args.file, args.config_options_file, args.testcase_config_file)
     resultjsonhelper = OETestResultJSONHelper()
     resultjsonhelper.dump_testresult_file(write_dir, configurations, result_id, test_results)
     return 0
@@ -200,3 +208,5 @@ def register_commands(subparsers):
                               help='the config options file to import and used as available configuration option selection or make config option file')
     parser_build.add_argument('-m', '--make-config-options-file', action='store_true',
                               help='make the configuration options file based on provided inputs')
+    parser_build.add_argument('-t', '--testcase-config-file', default='',
+                              help='the testcase configuration file to enable user to run a selected set of test case')
\ No newline at end of file