diff options
| author | Leonardo Sandoval <leonardo.sandoval.gonzalez@linux.intel.com> | 2017-05-25 15:20:56 -0500 |
|---|---|---|
| committer | Richard Purdie <richard.purdie@linuxfoundation.org> | 2017-06-06 19:02:43 +0100 |
| commit | 10c512b60d1167122b5fe778b93838dca3def717 (patch) | |
| tree | 129c800bd1b0618729f6228430d8654d289d7542 /scripts/oe-selftest | |
| parent | d09938a608fa3a97e1f91a21738b45062ef708c3 (diff) | |
| download | poky-10c512b60d1167122b5fe778b93838dca3def717.tar.gz | |
scripts/oe-selftest: Migrate to new framework into oeqa.selftest.context
The new OEQA framework aims to reuse code across the different Test
components.
The previous oe-selftest implemented its own loading, running and listing
of test cases in a non-standard way (based on unittest), along with other
functionality such as logging that now lives in oeqa core. The result is a
much more compact oe-selftest script.
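In essence, the migrated script only discovers the selftest component and
hands argument parsing and test execution over to it. A simplified sketch
of the new entry point, assembled from the added lines in the diff below
(help text and OEQAPreRun/SystemExit handling trimmed):

```python
# Simplified sketch of the migrated scripts/oe-selftest; the real script
# also handles OEQAPreRun and SystemExit and keeps the full description.
import os
import sys

scripts_path = os.path.dirname(os.path.realpath(__file__))
sys.path = sys.path + [scripts_path + '/lib']
import argparse_oe
import scriptutils
import scriptpath
scriptpath.add_oe_lib_path()
scriptpath.add_bitbake_lib_path()

from oeqa.utils import load_test_components

logger = scriptutils.logger_create('oe-selftest')

def main():
    parser = argparse_oe.ArgumentParser(description="...")
    # The selftest Test component (oeqa.selftest.context) registers its
    # own sub-commands and options on the shared parser ...
    comp_name, comp = load_test_components(logger, 'oe-selftest').popitem()
    comp.register_commands(logger, parser)
    # ... and the selected sub-command's function runs the tests.
    args = parser.parse_args()
    results = args.func(logger, args)
    return 0 if results.wasSuccessful() else 1

if __name__ == '__main__':
    sys.exit(main())
```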
All needed command line options were migrated, but some of them are still
pending implementation and others are deprecated.
Deprecated options:
list-tags: The tag functionality in the old oeqa framework doesn't
work; the selftests don't have tag decorators.
{run, list}-tests-by: Ambiguous options that accept any of module,
class, name, id or tag.
Remaining to implement:
coverage: Enables coverage reports over a test run; it is currently not
in use and has some bugs [1]. A bug was filed to add support in the OEQA
core module so that other Test components can enable it as well.
repository: Pushes XML results to a git repository; it is not in use,
and a bug was filed to implement this in the OEQA core module. [2]
[1] https://bugzilla.yoctoproject.org/show_bug.cgi?id=11582#c0
[2] https://bugzilla.yoctoproject.org/show_bug.cgi?id=11583#c0
(From OE-Core rev: 3b2a20eee4a39f40287bf67545839eaa09fc892d)
Signed-off-by: Leonardo Sandoval <leonardo.sandoval.gonzalez@linux.intel.com>
Signed-off-by: Aníbal Limón <anibal.limon@linux.intel.com>
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
Diffstat (limited to 'scripts/oe-selftest')
| -rwxr-xr-x | scripts/oe-selftest | 737 |
1 files changed, 26 insertions, 711 deletions
diff --git a/scripts/oe-selftest b/scripts/oe-selftest
index 490915759f..b200acee13 100755
--- a/scripts/oe-selftest
+++ b/scripts/oe-selftest
@@ -1,6 +1,6 @@
| 1 | #!/usr/bin/env python3 | 1 | #!/usr/bin/env python3 |
| 2 | 2 | ||
| 3 | # Copyright (c) 2013 Intel Corporation | 3 | # Copyright (c) 2013-2017 Intel Corporation |
| 4 | # | 4 | # |
| 5 | # This program is free software; you can redistribute it and/or modify | 5 | # This program is free software; you can redistribute it and/or modify |
| 6 | # it under the terms of the GNU General Public License version 2 as | 6 | # it under the terms of the GNU General Public License version 2 as |
@@ -25,732 +25,47 @@
| 25 | # E.g: "oe-selftest -r bblayers.BitbakeLayers" will run just the BitbakeLayers class from meta/lib/oeqa/selftest/bblayers.py | 25 | # E.g: "oe-selftest -r bblayers.BitbakeLayers" will run just the BitbakeLayers class from meta/lib/oeqa/selftest/bblayers.py |
| 26 | 26 | ||
| 27 | 27 | ||
| 28 | |||
| 28 | import os | 29 | import os |
| 29 | import sys | 30 | import sys |
| 30 | import unittest | ||
| 31 | import logging | ||
| 32 | import argparse | 31 | import argparse |
| 33 | import subprocess | 32 | import logging |
| 34 | import time as t | ||
| 35 | import re | ||
| 36 | import fnmatch | ||
| 37 | import collections | ||
| 38 | import imp | ||
| 39 | 33 | ||
| 40 | sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)) + '/lib') | 34 | scripts_path = os.path.dirname(os.path.realpath(__file__)) |
| 35 | lib_path = scripts_path + '/lib' | ||
| 36 | sys.path = sys.path + [lib_path] | ||
| 37 | import argparse_oe | ||
| 38 | import scriptutils | ||
| 41 | import scriptpath | 39 | import scriptpath |
| 42 | scriptpath.add_bitbake_lib_path() | ||
| 43 | scriptpath.add_oe_lib_path() | 40 | scriptpath.add_oe_lib_path() |
| 44 | import argparse_oe | 41 | scriptpath.add_bitbake_lib_path() |
| 45 | |||
| 46 | import oeqa.selftest | ||
| 47 | import oeqa.utils.ftools as ftools | ||
| 48 | from oeqa.utils.commands import runCmd, get_bb_var, get_test_layer | ||
| 49 | from oeqa.utils.metadata import metadata_from_bb, write_metadata_file | ||
| 50 | from oeqa.selftest.base import oeSelfTest, get_available_machines | ||
| 51 | |||
| 52 | try: | ||
| 53 | import xmlrunner | ||
| 54 | from xmlrunner.result import _XMLTestResult as TestResult | ||
| 55 | from xmlrunner import XMLTestRunner as _TestRunner | ||
| 56 | except ImportError: | ||
| 57 | # use the base runner instead | ||
| 58 | from unittest import TextTestResult as TestResult | ||
| 59 | from unittest import TextTestRunner as _TestRunner | ||
| 60 | |||
| 61 | log_prefix = "oe-selftest-" + t.strftime("%Y%m%d-%H%M%S") | ||
| 62 | |||
| 63 | def logger_create(): | ||
| 64 | log_file = log_prefix + ".log" | ||
| 65 | if os.path.lexists("oe-selftest.log"): | ||
| 66 | os.remove("oe-selftest.log") | ||
| 67 | os.symlink(log_file, "oe-selftest.log") | ||
| 68 | |||
| 69 | log = logging.getLogger("selftest") | ||
| 70 | log.setLevel(logging.DEBUG) | ||
| 71 | |||
| 72 | fh = logging.FileHandler(filename=log_file, mode='w') | ||
| 73 | fh.setLevel(logging.DEBUG) | ||
| 74 | |||
| 75 | ch = logging.StreamHandler(sys.stdout) | ||
| 76 | ch.setLevel(logging.INFO) | ||
| 77 | |||
| 78 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') | ||
| 79 | fh.setFormatter(formatter) | ||
| 80 | ch.setFormatter(formatter) | ||
| 81 | |||
| 82 | log.addHandler(fh) | ||
| 83 | log.addHandler(ch) | ||
| 84 | 42 | ||
| 85 | return log | 43 | from oeqa.utils import load_test_components |
| 44 | from oeqa.core.exception import OEQAPreRun | ||
| 86 | 45 | ||
| 87 | log = logger_create() | 46 | logger = scriptutils.logger_create('oe-selftest') |
| 88 | 47 | ||
| 89 | def get_args_parser(): | 48 | def main(): |
| 90 | description = "Script that runs unit tests against bitbake and other Yocto related tools. The goal is to validate tools functionality and metadata integrity. Refer to https://wiki.yoctoproject.org/wiki/Oe-selftest for more information." | 49 | description = "Script that runs unit tests against bitbake and other Yocto related tools. The goal is to validate tools functionality and metadata integrity. Refer to https://wiki.yoctoproject.org/wiki/Oe-selftest for more information." |
| 91 | parser = argparse_oe.ArgumentParser(description=description) | 50 | parser = argparse_oe.ArgumentParser(description=description) |
| 92 | group = parser.add_mutually_exclusive_group(required=True) | ||
| 93 | group.add_argument('-r', '--run-tests', required=False, action='store', nargs='*', dest="run_tests", default=None, help='Select what tests to run (modules, classes or test methods). Format should be: <module>.<class>.<test_method>') | ||
| 94 | group.add_argument('-a', '--run-all-tests', required=False, action="store_true", dest="run_all_tests", default=False, help='Run all (unhidden) tests') | ||
| 95 | group.add_argument('-m', '--list-modules', required=False, action="store_true", dest="list_modules", default=False, help='List all available test modules.') | ||
| 96 | group.add_argument('--list-classes', required=False, action="store_true", dest="list_allclasses", default=False, help='List all available test classes.') | ||
| 97 | parser.add_argument('--coverage', action="store_true", help="Run code coverage when testing") | ||
| 98 | parser.add_argument('--coverage-source', dest="coverage_source", nargs="+", help="Specifiy the directories to take coverage from") | ||
| 99 | parser.add_argument('--coverage-include', dest="coverage_include", nargs="+", help="Specify extra patterns to include into the coverage measurement") | ||
| 100 | parser.add_argument('--coverage-omit', dest="coverage_omit", nargs="+", help="Specify with extra patterns to exclude from the coverage measurement") | ||
| 101 | group.add_argument('--run-tests-by', required=False, dest='run_tests_by', default=False, nargs='*', | ||
| 102 | help='run-tests-by <name|class|module|id|tag> <list of tests|classes|modules|ids|tags>') | ||
| 103 | group.add_argument('--list-tests-by', required=False, dest='list_tests_by', default=False, nargs='*', | ||
| 104 | help='list-tests-by <name|class|module|id|tag> <list of tests|classes|modules|ids|tags>') | ||
| 105 | group.add_argument('-l', '--list-tests', required=False, action="store_true", dest="list_tests", default=False, | ||
| 106 | help='List all available tests.') | ||
| 107 | group.add_argument('--list-tags', required=False, dest='list_tags', default=False, action="store_true", | ||
| 108 | help='List all tags that have been set to test cases.') | ||
| 109 | parser.add_argument('--machine', required=False, dest='machine', choices=['random', 'all'], default=None, | ||
| 110 | help='Run tests on different machines (random/all).') | ||
| 111 | parser.add_argument('--repository', required=False, dest='repository', default='', action='store', | ||
| 112 | help='Submit test results to a repository') | ||
| 113 | return parser | ||
| 114 | |||
| 115 | builddir = None | ||
| 116 | |||
| 117 | |||
| 118 | def preflight_check(): | ||
| 119 | |||
| 120 | global builddir | ||
| 121 | |||
| 122 | log.info("Checking that everything is in order before running the tests") | ||
| 123 | |||
| 124 | if not os.environ.get("BUILDDIR"): | ||
| 125 | log.error("BUILDDIR isn't set. Did you forget to source your build environment setup script?") | ||
| 126 | return False | ||
| 127 | |||
| 128 | builddir = os.environ.get("BUILDDIR") | ||
| 129 | if os.getcwd() != builddir: | ||
| 130 | log.info("Changing cwd to %s" % builddir) | ||
| 131 | os.chdir(builddir) | ||
| 132 | |||
| 133 | if not "meta-selftest" in get_bb_var("BBLAYERS"): | ||
| 134 | log.warn("meta-selftest layer not found in BBLAYERS, adding it") | ||
| 135 | meta_selftestdir = os.path.join( | ||
| 136 | get_bb_var("BBLAYERS_FETCH_DIR"), | ||
| 137 | 'meta-selftest') | ||
| 138 | if os.path.isdir(meta_selftestdir): | ||
| 139 | runCmd("bitbake-layers add-layer %s" %meta_selftestdir) | ||
| 140 | else: | ||
| 141 | log.error("could not locate meta-selftest in:\n%s" | ||
| 142 | %meta_selftestdir) | ||
| 143 | return False | ||
| 144 | |||
| 145 | if "buildhistory.bbclass" in get_bb_var("BBINCLUDED"): | ||
| 146 | log.error("You have buildhistory enabled already and this isn't recommended for selftest, please disable it first.") | ||
| 147 | return False | ||
| 148 | |||
| 149 | if get_bb_var("PRSERV_HOST"): | ||
| 150 | log.error("Please unset PRSERV_HOST in order to run oe-selftest") | ||
| 151 | return False | ||
| 152 | |||
| 153 | if get_bb_var("SANITY_TESTED_DISTROS"): | ||
| 154 | log.error("Please unset SANITY_TESTED_DISTROS in order to run oe-selftest") | ||
| 155 | return False | ||
| 156 | |||
| 157 | log.info("Running bitbake -p") | ||
| 158 | runCmd("bitbake -p") | ||
| 159 | |||
| 160 | return True | ||
| 161 | 51 | ||
| 162 | def get_tests_modules(include_hidden=False): | 52 | comp_name, comp = load_test_components(logger, 'oe-selftest').popitem() |
| 163 | modules_list = list() | 53 | comp.register_commands(logger, parser) |
| 164 | for modules_path in oeqa.selftest.__path__: | ||
| 165 | for (p, d, f) in os.walk(modules_path): | ||
| 166 | files = sorted([f for f in os.listdir(p) if f.endswith('.py') and not (f.startswith('_') and not include_hidden) and not f.startswith('__') and f != 'base.py']) | ||
| 167 | for f in files: | ||
| 168 | submodules = p.split("selftest")[-1] | ||
| 169 | module = "" | ||
| 170 | if submodules: | ||
| 171 | module = 'oeqa.selftest' + submodules.replace("/",".") + "." + f.split('.py')[0] | ||
| 172 | else: | ||
| 173 | module = 'oeqa.selftest.' + f.split('.py')[0] | ||
| 174 | if module not in modules_list: | ||
| 175 | modules_list.append(module) | ||
| 176 | return modules_list | ||
| 177 | |||
| 178 | |||
| 179 | def get_tests(exclusive_modules=[], include_hidden=False): | ||
| 180 | test_modules = list() | ||
| 181 | for x in exclusive_modules: | ||
| 182 | test_modules.append('oeqa.selftest.' + x) | ||
| 183 | if not test_modules: | ||
| 184 | inc_hidden = include_hidden | ||
| 185 | test_modules = get_tests_modules(inc_hidden) | ||
| 186 | |||
| 187 | return test_modules | ||
| 188 | |||
| 189 | |||
| 190 | class Tc: | ||
| 191 | def __init__(self, tcname, tcclass, tcmodule, tcid=None, tctag=None): | ||
| 192 | self.tcname = tcname | ||
| 193 | self.tcclass = tcclass | ||
| 194 | self.tcmodule = tcmodule | ||
| 195 | self.tcid = tcid | ||
| 196 | # A test case can have multiple tags (as tuples) otherwise str will suffice | ||
| 197 | self.tctag = tctag | ||
| 198 | self.fullpath = '.'.join(['oeqa', 'selftest', tcmodule, tcclass, tcname]) | ||
| 199 | |||
| 200 | |||
| 201 | def get_tests_from_module(tmod): | ||
| 202 | tlist = [] | ||
| 203 | prefix = 'oeqa.selftest.' | ||
| 204 | 54 | ||
| 205 | try: | 55 | try: |
| 206 | import importlib | 56 | args = parser.parse_args() |
| 207 | modlib = importlib.import_module(tmod) | 57 | results = args.func(logger, args) |
| 208 | for mod in list(vars(modlib).values()): | 58 | ret = 0 if results.wasSuccessful() else 1 |
| 209 | if isinstance(mod, type(oeSelfTest)) and issubclass(mod, oeSelfTest) and mod is not oeSelfTest: | 59 | except SystemExit as err: |
| 210 | for test in dir(mod): | 60 | if err.code != 0: |
| 211 | if test.startswith('test_') and hasattr(vars(mod)[test], '__call__'): | 61 | raise err |
| 212 | # Get test case id and feature tag | 62 | ret = err.code |
| 213 | # NOTE: if testcase decorator or feature tag not set will throw error | 63 | except OEQAPreRun as pr: |
| 214 | try: | 64 | ret = 1 |
| 215 | tid = vars(mod)[test].test_case | ||
| 216 | except: | ||
| 217 | print('DEBUG: tc id missing for ' + str(test)) | ||
| 218 | tid = None | ||
| 219 | try: | ||
| 220 | ttag = vars(mod)[test].tag__feature | ||
| 221 | except: | ||
| 222 | # print('DEBUG: feature tag missing for ' + str(test)) | ||
| 223 | ttag = None | ||
| 224 | |||
| 225 | # NOTE: for some reason lstrip() doesn't work for mod.__module__ | ||
| 226 | tlist.append(Tc(test, mod.__name__, mod.__module__.replace(prefix, ''), tid, ttag)) | ||
| 227 | except: | ||
| 228 | pass | ||
| 229 | |||
| 230 | return tlist | ||
| 231 | |||
| 232 | |||
| 233 | def get_all_tests(): | ||
| 234 | # Get all the test modules (except the hidden ones) | ||
| 235 | testlist = [] | ||
| 236 | tests_modules = get_tests_modules() | ||
| 237 | # Get all the tests from modules | ||
| 238 | for tmod in sorted(tests_modules): | ||
| 239 | testlist += get_tests_from_module(tmod) | ||
| 240 | return testlist | ||
| 241 | |||
| 242 | |||
| 243 | def get_testsuite_by(criteria, keyword): | ||
| 244 | # Get a testsuite based on 'keyword' | ||
| 245 | # criteria: name, class, module, id, tag | ||
| 246 | # keyword: a list of tests, classes, modules, ids, tags | ||
| 247 | |||
| 248 | ts = [] | ||
| 249 | all_tests = get_all_tests() | ||
| 250 | |||
| 251 | def get_matches(values): | ||
| 252 | # Get an item and return the ones that match with keyword(s) | ||
| 253 | # values: the list of items (names, modules, classes...) | ||
| 254 | result = [] | ||
| 255 | remaining = values[:] | ||
| 256 | for key in keyword: | ||
| 257 | found = False | ||
| 258 | if key in remaining: | ||
| 259 | # Regular matching of exact item | ||
| 260 | result.append(key) | ||
| 261 | remaining.remove(key) | ||
| 262 | found = True | ||
| 263 | else: | ||
| 264 | # Wildcard matching | ||
| 265 | pattern = re.compile(fnmatch.translate(r"%s" % key)) | ||
| 266 | added = [x for x in remaining if pattern.match(x)] | ||
| 267 | if added: | ||
| 268 | result.extend(added) | ||
| 269 | remaining = [x for x in remaining if x not in added] | ||
| 270 | found = True | ||
| 271 | if not found: | ||
| 272 | log.error("Failed to find test: %s" % key) | ||
| 273 | |||
| 274 | return result | ||
| 275 | |||
| 276 | if criteria == 'name': | ||
| 277 | names = get_matches([ tc.tcname for tc in all_tests ]) | ||
| 278 | ts = [ tc for tc in all_tests if tc.tcname in names ] | ||
| 279 | |||
| 280 | elif criteria == 'class': | ||
| 281 | classes = get_matches([ tc.tcclass for tc in all_tests ]) | ||
| 282 | ts = [ tc for tc in all_tests if tc.tcclass in classes ] | ||
| 283 | |||
| 284 | elif criteria == 'module': | ||
| 285 | modules = get_matches([ tc.tcmodule for tc in all_tests ]) | ||
| 286 | ts = [ tc for tc in all_tests if tc.tcmodule in modules ] | ||
| 287 | |||
| 288 | elif criteria == 'id': | ||
| 289 | ids = get_matches([ str(tc.tcid) for tc in all_tests ]) | ||
| 290 | ts = [ tc for tc in all_tests if str(tc.tcid) in ids ] | ||
| 291 | |||
| 292 | elif criteria == 'tag': | ||
| 293 | values = set() | ||
| 294 | for tc in all_tests: | ||
| 295 | # tc can have multiple tags (as tuple) otherwise str will suffice | ||
| 296 | if isinstance(tc.tctag, tuple): | ||
| 297 | values |= { str(tag) for tag in tc.tctag } | ||
| 298 | else: | ||
| 299 | values.add(str(tc.tctag)) | ||
| 300 | |||
| 301 | tags = get_matches(list(values)) | ||
| 302 | |||
| 303 | for tc in all_tests: | ||
| 304 | for tag in tags: | ||
| 305 | if isinstance(tc.tctag, tuple) and tag in tc.tctag: | ||
| 306 | ts.append(tc) | ||
| 307 | elif tag == tc.tctag: | ||
| 308 | ts.append(tc) | ||
| 309 | |||
| 310 | # Remove duplicates from the list | ||
| 311 | ts = list(set(ts)) | ||
| 312 | |||
| 313 | return ts | ||
| 314 | |||
| 315 | |||
| 316 | def list_testsuite_by(criteria, keyword): | ||
| 317 | # Get a testsuite based on 'keyword' | ||
| 318 | # criteria: name, class, module, id, tag | ||
| 319 | # keyword: a list of tests, classes, modules, ids, tags | ||
| 320 | def tc_key(t): | ||
| 321 | if t[0] is None: | ||
| 322 | return (0,) + t[1:] | ||
| 323 | return t | ||
| 324 | # tcid may be None if no ID was assigned, in which case sorted() will throw | ||
| 325 | # a TypeError as Python 3 does not allow comparison (<,<=,>=,>) of | ||
| 326 | # heterogeneous types, handle this by using a custom key generator | ||
| 327 | ts = sorted([ (tc.tcid, tc.tctag, tc.tcname, tc.tcclass, tc.tcmodule) \ | ||
| 328 | for tc in get_testsuite_by(criteria, keyword) ], key=tc_key) | ||
| 329 | print('_' * 150) | ||
| 330 | for t in ts: | ||
| 331 | if isinstance(t[1], (tuple, list)): | ||
| 332 | print('%-4s\t%-20s\t%-60s\t%-25s\t%-20s' % (t[0], ', '.join(t[1]), t[2], t[3], t[4])) | ||
| 333 | else: | ||
| 334 | print('%-4s\t%-20s\t%-60s\t%-25s\t%-20s' % t) | ||
| 335 | print('_' * 150) | ||
| 336 | print('Filtering by:\t %s' % criteria) | ||
| 337 | print('Looking for:\t %s' % ', '.join(str(x) for x in keyword)) | ||
| 338 | print('Total found:\t %s' % len(ts)) | ||
| 339 | |||
| 340 | |||
| 341 | def list_tests(): | ||
| 342 | # List all available oe-selftest tests | ||
| 343 | |||
| 344 | ts = get_all_tests() | ||
| 345 | |||
| 346 | print('%-4s\t%-10s\t%-50s' % ('id', 'tag', 'test')) | ||
| 347 | print('_' * 80) | ||
| 348 | for t in ts: | ||
| 349 | if isinstance(t.tctag, (tuple, list)): | ||
| 350 | print('%-4s\t%-10s\t%-50s' % (t.tcid, ', '.join(t.tctag), '.'.join([t.tcmodule, t.tcclass, t.tcname]))) | ||
| 351 | else: | ||
| 352 | print('%-4s\t%-10s\t%-50s' % (t.tcid, t.tctag, '.'.join([t.tcmodule, t.tcclass, t.tcname]))) | ||
| 353 | print('_' * 80) | ||
| 354 | print('Total found:\t %s' % len(ts)) | ||
| 355 | |||
| 356 | def list_tags(): | ||
| 357 | # Get all tags set to test cases | ||
| 358 | # This is useful when setting tags to test cases | ||
| 359 | # The list of tags should be kept as minimal as possible | ||
| 360 | tags = set() | ||
| 361 | all_tests = get_all_tests() | ||
| 362 | |||
| 363 | for tc in all_tests: | ||
| 364 | if isinstance(tc.tctag, (tuple, list)): | ||
| 365 | tags.update(set(tc.tctag)) | ||
| 366 | else: | ||
| 367 | tags.add(tc.tctag) | ||
| 368 | |||
| 369 | print('Tags:\t%s' % ', '.join(str(x) for x in tags)) | ||
| 370 | |||
| 371 | def coverage_setup(coverage_source, coverage_include, coverage_omit): | ||
| 372 | """ Set up the coverage measurement for the testcases to be run """ | ||
| 373 | import datetime | ||
| 374 | import subprocess | ||
| 375 | global builddir | ||
| 376 | pokydir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) | ||
| 377 | curcommit= subprocess.check_output(["git", "--git-dir", os.path.join(pokydir, ".git"), "rev-parse", "HEAD"]).decode('utf-8') | ||
| 378 | coveragerc = "%s/.coveragerc" % builddir | ||
| 379 | data_file = "%s/.coverage." % builddir | ||
| 380 | data_file += datetime.datetime.now().strftime('%Y%m%dT%H%M%S') | ||
| 381 | if os.path.isfile(data_file): | ||
| 382 | os.remove(data_file) | ||
| 383 | with open(coveragerc, 'w') as cps: | ||
| 384 | cps.write("# Generated with command '%s'\n" % " ".join(sys.argv)) | ||
| 385 | cps.write("# HEAD commit %s\n" % curcommit.strip()) | ||
| 386 | cps.write("[run]\n") | ||
| 387 | cps.write("data_file = %s\n" % data_file) | ||
| 388 | cps.write("branch = True\n") | ||
| 389 | # Measure just BBLAYERS, scripts and bitbake folders | ||
| 390 | cps.write("source = \n") | ||
| 391 | if coverage_source: | ||
| 392 | for directory in coverage_source: | ||
| 393 | if not os.path.isdir(directory): | ||
| 394 | log.warn("Directory %s is not valid.", directory) | ||
| 395 | cps.write(" %s\n" % directory) | ||
| 396 | else: | ||
| 397 | for layer in get_bb_var('BBLAYERS').split(): | ||
| 398 | cps.write(" %s\n" % layer) | ||
| 399 | cps.write(" %s\n" % os.path.dirname(os.path.realpath(__file__))) | ||
| 400 | cps.write(" %s\n" % os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))),'bitbake')) | ||
| 401 | |||
| 402 | if coverage_include: | ||
| 403 | cps.write("include = \n") | ||
| 404 | for pattern in coverage_include: | ||
| 405 | cps.write(" %s\n" % pattern) | ||
| 406 | if coverage_omit: | ||
| 407 | cps.write("omit = \n") | ||
| 408 | for pattern in coverage_omit: | ||
| 409 | cps.write(" %s\n" % pattern) | ||
| 410 | |||
| 411 | return coveragerc | ||
| 412 | |||
| 413 | def coverage_report(): | ||
| 414 | """ Loads the coverage data gathered and reports it back """ | ||
| 415 | try: | ||
| 416 | # Coverage4 uses coverage.Coverage | ||
| 417 | from coverage import Coverage | ||
| 418 | except: | ||
| 419 | # Coverage under version 4 uses coverage.coverage | ||
| 420 | from coverage import coverage as Coverage | ||
| 421 | |||
| 422 | import io as StringIO | ||
| 423 | from coverage.misc import CoverageException | ||
| 424 | |||
| 425 | cov_output = StringIO.StringIO() | ||
| 426 | # Creating the coverage data with the setting from the configuration file | ||
| 427 | cov = Coverage(config_file = os.environ.get('COVERAGE_PROCESS_START')) | ||
| 428 | try: | ||
| 429 | # Load data from the data file specified in the configuration | ||
| 430 | cov.load() | ||
| 431 | # Store report data in a StringIO variable | ||
| 432 | cov.report(file = cov_output, show_missing=False) | ||
| 433 | log.info("\n%s" % cov_output.getvalue()) | ||
| 434 | except CoverageException as e: | ||
| 435 | # Show problems with the reporting. Since Coverage4 not finding any data to report raises an exception | ||
| 436 | log.warn("%s" % str(e)) | ||
| 437 | finally: | ||
| 438 | cov_output.close() | ||
| 439 | |||
| 440 | |||
| 441 | def main(): | ||
| 442 | parser = get_args_parser() | ||
| 443 | args = parser.parse_args() | ||
| 444 | |||
| 445 | # Add <layer>/lib to sys.path, so layers can add selftests | ||
| 446 | log.info("Running bitbake -e to get BBPATH") | ||
| 447 | bbpath = get_bb_var('BBPATH').split(':') | ||
| 448 | layer_libdirs = [p for p in (os.path.join(l, 'lib') for l in bbpath) if os.path.exists(p)] | ||
| 449 | sys.path.extend(layer_libdirs) | ||
| 450 | imp.reload(oeqa.selftest) | ||
| 451 | |||
| 452 | # act like bitbake and enforce en_US.UTF-8 locale | ||
| 453 | os.environ["LC_ALL"] = "en_US.UTF-8" | ||
| 454 | |||
| 455 | if args.run_tests_by and len(args.run_tests_by) >= 2: | ||
| 456 | valid_options = ['name', 'class', 'module', 'id', 'tag'] | ||
| 457 | if args.run_tests_by[0] not in valid_options: | ||
| 458 | print('--run-tests-by %s not a valid option. Choose one of <name|class|module|id|tag>.' % args.run_tests_by[0]) | ||
| 459 | return 1 | ||
| 460 | else: | ||
| 461 | criteria = args.run_tests_by[0] | ||
| 462 | keyword = args.run_tests_by[1:] | ||
| 463 | ts = sorted([ tc.fullpath for tc in get_testsuite_by(criteria, keyword) ]) | ||
| 464 | if not ts: | ||
| 465 | return 1 | ||
| 466 | |||
| 467 | if args.list_tests_by and len(args.list_tests_by) >= 2: | ||
| 468 | valid_options = ['name', 'class', 'module', 'id', 'tag'] | ||
| 469 | if args.list_tests_by[0] not in valid_options: | ||
| 470 | print('--list-tests-by %s not a valid option. Choose one of <name|class|module|id|tag>.' % args.list_tests_by[0]) | ||
| 471 | return 1 | ||
| 472 | else: | ||
| 473 | criteria = args.list_tests_by[0] | ||
| 474 | keyword = args.list_tests_by[1:] | ||
| 475 | list_testsuite_by(criteria, keyword) | ||
| 476 | |||
| 477 | if args.list_tests: | ||
| 478 | list_tests() | ||
| 479 | |||
| 480 | if args.list_tags: | ||
| 481 | list_tags() | ||
| 482 | |||
| 483 | if args.list_allclasses: | ||
| 484 | args.list_modules = True | ||
| 485 | |||
| 486 | if args.list_modules: | ||
| 487 | log.info('Listing all available test modules:') | ||
| 488 | testslist = get_tests(include_hidden=True) | ||
| 489 | for test in testslist: | ||
| 490 | module = test.split('oeqa.selftest.')[-1] | ||
| 491 | info = '' | ||
| 492 | if module.startswith('_'): | ||
| 493 | info = ' (hidden)' | ||
| 494 | print(module + info) | ||
| 495 | if args.list_allclasses: | ||
| 496 | try: | ||
| 497 | import importlib | ||
| 498 | modlib = importlib.import_module(test) | ||
| 499 | for v in vars(modlib): | ||
| 500 | t = vars(modlib)[v] | ||
| 501 | if isinstance(t, type(oeSelfTest)) and issubclass(t, oeSelfTest) and t!=oeSelfTest: | ||
| 502 | print(" --", v) | ||
| 503 | for method in dir(t): | ||
| 504 | if method.startswith("test_") and isinstance(vars(t)[method], collections.Callable): | ||
| 505 | print(" -- --", method) | ||
| 506 | |||
| 507 | except (AttributeError, ImportError) as e: | ||
| 508 | print(e) | ||
| 509 | pass | ||
| 510 | |||
| 511 | if args.run_tests or args.run_all_tests or args.run_tests_by: | ||
| 512 | if not preflight_check(): | ||
| 513 | return 1 | ||
| 514 | |||
| 515 | if args.run_tests_by: | ||
| 516 | testslist = ts | ||
| 517 | else: | ||
| 518 | testslist = get_tests(exclusive_modules=(args.run_tests or []), include_hidden=False) | ||
| 519 | |||
| 520 | suite = unittest.TestSuite() | ||
| 521 | loader = unittest.TestLoader() | ||
| 522 | loader.sortTestMethodsUsing = None | ||
| 523 | runner = TestRunner(verbosity=2, | ||
| 524 | resultclass=buildResultClass(args)) | ||
| 525 | # we need to do this here, otherwise just loading the tests | ||
| 526 | # will take 2 minutes (bitbake -e calls) | ||
| 527 | oeSelfTest.testlayer_path = get_test_layer() | ||
| 528 | for test in testslist: | ||
| 529 | log.info("Loading tests from: %s" % test) | ||
| 530 | try: | ||
| 531 | suite.addTests(loader.loadTestsFromName(test)) | ||
| 532 | except AttributeError as e: | ||
| 533 | log.error("Failed to import %s" % test) | ||
| 534 | log.error(e) | ||
| 535 | return 1 | ||
| 536 | |||
| 537 | if args.machine: | ||
| 538 | # Custom machine sets only weak default values (??=) for MACHINE in machine.inc | ||
| 539 | # This let test cases that require a specific MACHINE to be able to override it, using (?= or =) | ||
| 540 | log.info('Custom machine mode enabled. MACHINE set to %s' % args.machine) | ||
| 541 | if args.machine == 'random': | ||
| 542 | os.environ['CUSTOMMACHINE'] = 'random' | ||
| 543 | result = runner.run(suite) | ||
| 544 | else: # all | ||
| 545 | machines = get_available_machines() | ||
| 546 | for m in machines: | ||
| 547 | log.info('Run tests with custom MACHINE set to: %s' % m) | ||
| 548 | os.environ['CUSTOMMACHINE'] = m | ||
| 549 | result = runner.run(suite) | ||
| 550 | else: | ||
| 551 | result = runner.run(suite) | ||
| 552 | |||
| 553 | log.info("Finished") | ||
| 554 | |||
| 555 | if args.repository: | ||
| 556 | import git | ||
| 557 | # Commit tests results to repository | ||
| 558 | metadata = metadata_from_bb() | ||
| 559 | git_dir = os.path.join(os.getcwd(), 'selftest') | ||
| 560 | if not os.path.isdir(git_dir): | ||
| 561 | os.mkdir(git_dir) | ||
| 562 | |||
| 563 | log.debug('Checking for git repository in %s' % git_dir) | ||
| 564 | try: | ||
| 565 | repo = git.Repo(git_dir) | ||
| 566 | except git.exc.InvalidGitRepositoryError: | ||
| 567 | log.debug("Couldn't find git repository %s; " | ||
| 568 | "cloning from %s" % (git_dir, args.repository)) | ||
| 569 | repo = git.Repo.clone_from(args.repository, git_dir) | ||
| 570 | |||
| 571 | r_branches = repo.git.branch(r=True) | ||
| 572 | r_branches = set(r_branches.replace('origin/', '').split()) | ||
| 573 | l_branches = {str(branch) for branch in repo.branches} | ||
| 574 | branch = '%s/%s/%s' % (metadata['hostname'], | ||
| 575 | metadata['layers']['meta'].get('branch', '(nogit)'), | ||
| 576 | metadata['config']['MACHINE']) | ||
| 577 | |||
| 578 | if branch in l_branches: | ||
| 579 | log.debug('Found branch in local repository, checking out') | ||
| 580 | repo.git.checkout(branch) | ||
| 581 | elif branch in r_branches: | ||
| 582 | log.debug('Found branch in remote repository, checking' | ||
| 583 | ' out and pulling') | ||
| 584 | repo.git.checkout(branch) | ||
| 585 | repo.git.pull() | ||
| 586 | else: | ||
| 587 | log.debug('New branch %s' % branch) | ||
| 588 | repo.git.checkout('master') | ||
| 589 | repo.git.checkout(b=branch) | ||
| 590 | |||
| 591 | cleanResultsDir(repo) | ||
| 592 | xml_dir = os.path.join(os.getcwd(), log_prefix) | ||
| 593 | copyResultFiles(xml_dir, git_dir, repo) | ||
| 594 | metadata_file = os.path.join(git_dir, 'metadata.xml') | ||
| 595 | write_metadata_file(metadata_file, metadata) | ||
| 596 | repo.index.add([metadata_file]) | ||
| 597 | repo.index.write() | ||
| 598 | |||
| 599 | # Get information for commit message | ||
| 600 | layer_info = '' | ||
| 601 | for layer, values in metadata['layers'].items(): | ||
| 602 | layer_info = '%s%-17s = %s:%s\n' % (layer_info, layer, | ||
| 603 | values.get('branch', '(nogit)'), values.get('commit', '0'*40)) | ||
| 604 | msg = 'Selftest for build %s of %s for machine %s on %s\n\n%s' % ( | ||
| 605 | log_prefix[12:], metadata['distro']['pretty_name'], | ||
| 606 | metadata['config']['MACHINE'], metadata['hostname'], layer_info) | ||
| 607 | |||
| 608 | log.debug('Commiting results to local repository') | ||
| 609 | repo.index.commit(msg) | ||
| 610 | if not repo.is_dirty(): | ||
| 611 | try: | ||
| 612 | if branch in r_branches: | ||
| 613 | log.debug('Pushing changes to remote repository') | ||
| 614 | repo.git.push() | ||
| 615 | else: | ||
| 616 | log.debug('Pushing changes to remote repository ' | ||
| 617 | 'creating new branch') | ||
| 618 | repo.git.push('-u', 'origin', branch) | ||
| 619 | except GitCommandError: | ||
| 620 | log.error('Falied to push to remote repository') | ||
| 621 | return 1 | ||
| 622 | else: | ||
| 623 | log.error('Local repository is dirty, not pushing commits') | ||
| 624 | |||
| 625 | if result.wasSuccessful(): | ||
| 626 | return 0 | ||
| 627 | else: | ||
| 628 | return 1 | ||
| 629 | |||
| 630 | def buildResultClass(args): | ||
| 631 | """Build a Result Class to use in the testcase execution""" | ||
| 632 | import site | ||
| 633 | |||
| 634 | class StampedResult(TestResult): | ||
| 635 | """ | ||
| 636 | Custom TestResult that prints the time when a test starts. As oe-selftest | ||
| 637 | can take a long time (ie a few hours) to run, timestamps help us understand | ||
| 638 | what tests are taking a long time to execute. | ||
| 639 | If coverage is required, this class executes the coverage setup and reporting. | ||
| 640 | """ | ||
| 641 | def startTest(self, test): | ||
| 642 | import time | ||
| 643 | self.stream.write(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + " - ") | ||
| 644 | super(StampedResult, self).startTest(test) | ||
| 645 | |||
| 646 | def startTestRun(self): | ||
| 647 | """ Setup coverage before running any testcase """ | ||
| 648 | |||
| 649 | # variable holding the coverage configuration file allowing subprocess to be measured | ||
| 650 | self.coveragepth = None | ||
| 651 | |||
| 652 | # indicates the system if coverage is currently installed | ||
| 653 | self.coverage_installed = True | ||
| 654 | |||
| 655 | if args.coverage or args.coverage_source or args.coverage_include or args.coverage_omit: | ||
| 656 | try: | ||
| 657 | # check if user can do coverage | ||
| 658 | import coverage | ||
| 659 | except: | ||
| 660 | log.warn("python coverage is not installed. More info on https://pypi.python.org/pypi/coverage") | ||
| 661 | self.coverage_installed = False | ||
| 662 | |||
| 663 | if self.coverage_installed: | ||
| 664 | log.info("Coverage is enabled") | ||
| 665 | |||
| 666 | major_version = int(coverage.version.__version__[0]) | ||
| 667 | if major_version < 4: | ||
| 668 | log.error("python coverage %s installed. Require version 4 or greater." % coverage.version.__version__) | ||
| 669 | self.stop() | ||
| 670 | # In case the user has not set the variable COVERAGE_PROCESS_START, | ||
| 671 | # create a default one and export it. The COVERAGE_PROCESS_START | ||
| 672 | # value indicates where the coverage configuration file resides | ||
| 673 | # More info on https://pypi.python.org/pypi/coverage | ||
| 674 | if not os.environ.get('COVERAGE_PROCESS_START'): | ||
| 675 | os.environ['COVERAGE_PROCESS_START'] = coverage_setup(args.coverage_source, args.coverage_include, args.coverage_omit) | ||
| 676 | |||
| 677 | # Use default site.USER_SITE and write corresponding config file | ||
| 678 | site.ENABLE_USER_SITE = True | ||
| 679 | if not os.path.exists(site.USER_SITE): | ||
| 680 | os.makedirs(site.USER_SITE) | ||
| 681 | self.coveragepth = os.path.join(site.USER_SITE, "coverage.pth") | ||
| 682 | with open(self.coveragepth, 'w') as cps: | ||
| 683 | cps.write('import sys,site; sys.path.extend(site.getsitepackages()); import coverage; coverage.process_startup();') | ||
| 684 | |||
| 685 | def stopTestRun(self): | ||
| 686 | """ Report coverage data after the testcases are run """ | ||
| 687 | |||
| 688 | if args.coverage or args.coverage_source or args.coverage_include or args.coverage_omit: | ||
| 689 | if self.coverage_installed: | ||
| 690 | with open(os.environ['COVERAGE_PROCESS_START']) as ccf: | ||
| 691 | log.info("Coverage configuration file (%s)" % os.environ.get('COVERAGE_PROCESS_START')) | ||
| 692 | log.info("===========================") | ||
| 693 | log.info("\n%s" % "".join(ccf.readlines())) | ||
| 694 | |||
| 695 | log.info("Coverage Report") | ||
| 696 | log.info("===============") | ||
| 697 | try: | ||
| 698 | coverage_report() | ||
| 699 | finally: | ||
| 700 | # remove the pth file | ||
| 701 | try: | ||
| 702 | os.remove(self.coveragepth) | ||
| 703 | except OSError: | ||
| 704 | log.warn("Expected temporal file from coverage is missing, ignoring removal.") | ||
| 705 | |||
| 706 | return StampedResult | ||
| 707 | |||
| 708 | def cleanResultsDir(repo): | ||
| 709 | """ Remove result files from directory """ | ||
| 710 | |||
| 711 | xml_files = [] | ||
| 712 | directory = repo.working_tree_dir | ||
| 713 | for f in os.listdir(directory): | ||
| 714 | path = os.path.join(directory, f) | ||
| 715 | if os.path.isfile(path) and path.endswith('.xml'): | ||
| 716 | xml_files.append(f) | ||
| 717 | repo.index.remove(xml_files, working_tree=True) | ||
| 718 | |||
| 719 | def copyResultFiles(src, dst, repo): | ||
| 720 | """ Copy result files from src to dst removing the time stamp. """ | ||
| 721 | |||
| 722 | import shutil | ||
| 723 | |||
| 724 | re_time = re.compile("-[0-9]+") | ||
| 725 | file_list = [] | ||
| 726 | |||
| 727 | for root, subdirs, files in os.walk(src): | ||
| 728 | tmp_dir = root.replace(src, '').lstrip('/') | ||
| 729 | for s in subdirs: | ||
| 730 | os.mkdir(os.path.join(dst, tmp_dir, s)) | ||
| 731 | for f in files: | ||
| 732 | file_name = os.path.join(dst, tmp_dir, re_time.sub("", f)) | ||
| 733 | shutil.copy2(os.path.join(root, f), file_name) | ||
| 734 | file_list.append(file_name) | ||
| 735 | repo.index.add(file_list) | ||
| 736 | 65 | ||
| 737 | class TestRunner(_TestRunner): | 66 | return ret |
| 738 | """Test runner class aware of exporting tests.""" | ||
| 739 | def __init__(self, *args, **kwargs): | ||
| 740 | try: | ||
| 741 | exportdir = os.path.join(os.getcwd(), log_prefix) | ||
| 742 | kwargsx = dict(**kwargs) | ||
| 743 | # argument specific to XMLTestRunner, if adding a new runner then | ||
| 744 | # also add logic to use other runner's args. | ||
| 745 | kwargsx['output'] = exportdir | ||
| 746 | kwargsx['descriptions'] = False | ||
| 747 | # done for the case where telling the runner where to export | ||
| 748 | super(TestRunner, self).__init__(*args, **kwargsx) | ||
| 749 | except TypeError: | ||
| 750 | log.info("test runner init'ed like unittest") | ||
| 751 | super(TestRunner, self).__init__(*args, **kwargs) | ||
| 752 | 67 | ||
| 753 | if __name__ == "__main__": | 68 | if __name__ == '__main__': |
| 754 | try: | 69 | try: |
| 755 | ret = main() | 70 | ret = main() |
| 756 | except Exception: | 71 | except Exception: |
