Diffstat (limited to 'scripts/oe-build-perf-test')
-rwxr-xr-x  scripts/oe-build-perf-test  216
1 file changed, 0 insertions(+), 216 deletions(-)
diff --git a/scripts/oe-build-perf-test b/scripts/oe-build-perf-test
deleted file mode 100755
index 00e00b4ce9..0000000000
--- a/scripts/oe-build-perf-test
+++ /dev/null
@@ -1,216 +0,0 @@
#!/usr/bin/env python3
#
# Build performance test script
#
# Copyright (c) 2016, Intel Corporation.
#
# SPDX-License-Identifier: GPL-2.0-only
#

import argparse
import errno
import fcntl
import json
import logging
import os
import re
import shutil
import sys
from datetime import datetime

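# Make the helper modules under scripts/lib (e.g. scriptpath) importable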
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)) + '/lib')
import scriptpath
scriptpath.add_oe_lib_path()
scriptpath.add_bitbake_lib_path()
import oeqa.buildperf
from oeqa.buildperf import (BuildPerfTestLoader, BuildPerfTestResult,
                            BuildPerfTestRunner, KernelDropCaches)
from oeqa.utils.commands import runCmd
from oeqa.utils.metadata import metadata_from_bb, write_metadata_file


# Set up logging
LOG_FORMAT = '[%(asctime)s] %(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT,
                    datefmt='%Y-%m-%d %H:%M:%S')
log = logging.getLogger()


def acquire_lock(lock_f):
    """Acquire flock on file"""
    log.debug("Acquiring lock %s", os.path.abspath(lock_f.name))
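    # A non-blocking exclusive lock fails with EAGAIN if another process
    # already holds the lock, instead of waiting for it to be released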
    try:
        fcntl.flock(lock_f, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError as err:
        if err.errno == errno.EAGAIN:
            return False
        raise
    log.debug("Lock acquired")
    return True


def pre_run_sanity_check():
    """Sanity check of build environment"""
    build_dir = os.environ.get("BUILDDIR")
    if not build_dir:
        log.error("BUILDDIR not set. Please run the build environment setup "
                  "script.")
        return False
    if os.getcwd() != build_dir:
        log.error("Please run this script under BUILDDIR (%s)", build_dir)
        return False

    ret = runCmd('which bitbake', ignore_status=True)
    if ret.status:
        log.error("bitbake command not found")
        return False
    return True

def setup_file_logging(log_file):
    """Set up logging to a file"""
    log_dir = os.path.dirname(log_file)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    formatter = logging.Formatter(LOG_FORMAT)
    handler = logging.FileHandler(log_file)
    handler.setFormatter(formatter)
    log.addHandler(handler)


def archive_build_conf(out_dir):
    """Archive build/conf to test results"""
    src_dir = os.path.join(os.environ['BUILDDIR'], 'conf')
    tgt_dir = os.path.join(out_dir, 'build', 'conf')
    os.makedirs(os.path.dirname(tgt_dir))
    shutil.copytree(src_dir, tgt_dir)


def update_globalres_file(result_obj, filename, metadata):
    """Write results to globalres csv file"""
    # Map test names to time and size columns in globalres
    # The tuples represent index and length of times and sizes
    # respectively
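    # e.g. 'test3' fills three consecutive time columns starting at index 4
    # and reports no size columns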
    gr_map = {'test1': ((0, 1), (8, 1)),
              'test12': ((1, 1), (None, None)),
              'test13': ((2, 1), (9, 1)),
              'test2': ((3, 1), (None, None)),
              'test3': ((4, 3), (None, None)),
              'test4': ((7, 1), (10, 2))}

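    # Twelve value columns per row; columns with no measurement stay '0'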
    values = ['0'] * 12
    for status, test, _ in result_obj.all_results():
        if status in ['ERROR', 'SKIPPED']:
            continue
        (t_ind, t_len), (s_ind, s_len) = gr_map[test.name]
        if t_ind is not None:
            values[t_ind:t_ind + t_len] = test.times
        if s_ind is not None:
            values[s_ind:s_ind + s_len] = test.sizes

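    # Each appended row: hostname,branch:commit,commit,<12 value columns>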
    log.debug("Writing globalres log to %s", filename)
    rev_info = metadata['layers']['meta']
    with open(filename, 'a') as fobj:
        fobj.write('{},{}:{},{},'.format(metadata['hostname'],
                                         rev_info['branch'],
                                         rev_info['commit'],
                                         rev_info['commit']))
        fobj.write(','.join(values) + '\n')


def parse_args(argv):
    """Parse command line arguments"""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('-D', '--debug', action='store_true',
                        help='Enable debug level logging')
    parser.add_argument('--globalres-file',
                        type=os.path.abspath,
                        help="Append results to 'globalres' csv file")
    parser.add_argument('--lock-file', default='./oe-build-perf.lock',
                        metavar='FILENAME', type=os.path.abspath,
                        help="Lock file to use")
    parser.add_argument('-o', '--out-dir', default='results-{date}',
                        type=os.path.abspath,
                        help="Output directory for test results")
    parser.add_argument('-x', '--xml', action='store_true',
                        help='Enable JUnit xml output')
    parser.add_argument('--log-file',
                        default='{out_dir}/oe-build-perf-test.log',
                        help="Log file of this script")
    parser.add_argument('--run-tests', nargs='+', metavar='TEST',
                        help="List of tests to run")

    return parser.parse_args(argv)


def main(argv=None):
    """Script entry point"""
    args = parse_args(argv)

    # Set up the log file
    out_dir = args.out_dir.format(date=datetime.now().strftime('%Y%m%d%H%M%S'))
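    # setup_file_logging() creates the log directory if it does not exist yet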
    setup_file_logging(args.log_file.format(out_dir=out_dir))

    if args.debug:
        log.setLevel(logging.DEBUG)

    lock_f = open(args.lock_file, 'w')
    if not acquire_lock(lock_f):
        log.error("Another instance of this script is running, exiting...")
        return 1

    if not pre_run_sanity_check():
        return 1

    # Check our capability to drop caches and ask for a password if needed
    KernelDropCaches.check()
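    # (dropping kernel caches needs root privileges, hence the up-front check)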

    # Load build perf tests
    loader = BuildPerfTestLoader()
    if args.run_tests:
        suite = loader.loadTestsFromNames(args.run_tests, oeqa.buildperf)
    else:
        suite = loader.loadTestsFromModule(oeqa.buildperf)

    # Save test metadata
    metadata = metadata_from_bb()
    log.info("Testing Git revision branch:commit %s:%s (%s)",
             metadata['layers']['meta']['branch'],
             metadata['layers']['meta']['commit'],
             metadata['layers']['meta']['commit_count'])
    if args.xml:
        write_metadata_file(os.path.join(out_dir, 'metadata.xml'), metadata)
    else:
        with open(os.path.join(out_dir, 'metadata.json'), 'w') as fobj:
            json.dump(metadata, fobj, indent=2)
    archive_build_conf(out_dir)

    runner = BuildPerfTestRunner(out_dir, verbosity=2)

    # Suppress logger output to stderr so that the output from unittest
    # is not mixed with occasional logger output
    log.handlers[0].setLevel(logging.CRITICAL)

    # Run actual tests
    result = runner.run(suite)

    # Restore logger output to stderr
    log.handlers[0].setLevel(log.level)

    if args.xml:
        result.write_results_xml()
    else:
        result.write_results_json()
        result.write_buildstats_json()
    if args.globalres_file:
        update_globalres_file(result, args.globalres_file, metadata)
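    # Exit status: 0 on success, 1 for setup errors above, 2 for test failures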
    if result.wasSuccessful():
        return 0

    return 2


if __name__ == '__main__':
    sys.exit(main())
