author     Markus Lehtonen <markus.lehtonen@linux.intel.com>    2016-06-29 19:28:31 +0300
committer  Richard Purdie <richard.purdie@linuxfoundation.org>  2016-08-17 10:35:44 +0100
commit     09b9a4aeee454148a97ebe2a872b8c3f52adcb9a (patch)
tree       bdb35ec6cad33a1b05ca1405dd424c178b5ea511
parent     3acf648f58b892ddee95c50cf57b7c4b4d10d74c (diff)
download   poky-09b9a4aeee454148a97ebe2a872b8c3f52adcb9a.tar.gz
oeqa.buildperf: add BuildPerfTestResult class
The new class is derived from the unittest.TextTestResult class. It is
implemented by modifying the old BuildPerfTestRunner class which, in
turn, is replaced by a new, much simpler implementation derived from
unittest.TextTestRunner.
(From OE-Core rev: 89eb37ef1ef8d5deb87fd55c9ea7b2cfa2681b07)
Signed-off-by: Markus Lehtonen <markus.lehtonen@linux.intel.com>
Signed-off-by: Ross Burton <ross.burton@intel.com>
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
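
For orientation, the refactoring follows the stock unittest extension pattern: subclass unittest.TextTestResult to record extra data, then override the TextTestRunner._makeResult() hook so the runner instantiates that result class. A minimal, self-contained sketch of the pattern (SketchResult and SketchRunner are illustrative names, not code from this commit):

    # Illustrative sketch only; class names here are hypothetical.
    import unittest

    class SketchResult(unittest.TextTestResult):
        """Track successes, which the base result class does not record."""

        def __init__(self, *args, **kwargs):
            super(SketchResult, self).__init__(*args, **kwargs)
            self.successes = []

        def addSuccess(self, test):
            # Let the base class do its bookkeeping, then keep our own record
            super(SketchResult, self).addSuccess(test)
            self.successes.append((test, None))

    class SketchRunner(unittest.TextTestRunner):
        """Hand out SketchResult instances via the _makeResult() hook."""

        def _makeResult(self):
            return SketchResult(self.stream, self.descriptions, self.verbosity)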
-rw-r--r--  meta/lib/oeqa/buildperf/__init__.py |   4
-rw-r--r--  meta/lib/oeqa/buildperf/base.py     | 150
-rwxr-xr-x  scripts/oe-build-perf-test          |  10
3 files changed, 90 insertions(+), 74 deletions(-)
diff --git a/meta/lib/oeqa/buildperf/__init__.py b/meta/lib/oeqa/buildperf/__init__.py
index 7e51726afb..85abf3a25e 100644
--- a/meta/lib/oeqa/buildperf/__init__.py
+++ b/meta/lib/oeqa/buildperf/__init__.py
@@ -10,9 +10,9 @@
 # more details.
 #
 """Build performance tests"""
-from .base import (perf_test_case,
-                   BuildPerfTestCase,
+from .base import (BuildPerfTestCase,
                    BuildPerfTestLoader,
+                   BuildPerfTestResult,
                    BuildPerfTestRunner,
                    KernelDropCaches)
 from .test_basic import *
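
With the export list updated, all four public classes can be imported from the package root, e.g. (usage sketch, not part of this commit):

    from oeqa.buildperf import (BuildPerfTestCase, BuildPerfTestLoader,
                                BuildPerfTestResult, BuildPerfTestRunner)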
diff --git a/meta/lib/oeqa/buildperf/base.py b/meta/lib/oeqa/buildperf/base.py
index 1ee546dd85..d592bd26b9 100644
--- a/meta/lib/oeqa/buildperf/base.py
+++ b/meta/lib/oeqa/buildperf/base.py
@@ -76,25 +76,26 @@ def time_cmd(cmd, **kwargs):
     return ret, timedata
 
 
-class BuildPerfTestRunner(object):
+class BuildPerfTestResult(unittest.TextTestResult):
     """Runner class for executing the individual tests"""
     # List of test cases to run
     test_run_queue = []
 
-    def __init__(self, out_dir):
-        self.results = {}
-        self.out_dir = os.path.abspath(out_dir)
-        if not os.path.exists(self.out_dir):
-            os.makedirs(self.out_dir)
+    def __init__(self, out_dir, *args, **kwargs):
+        super(BuildPerfTestResult, self).__init__(*args, **kwargs)
 
+        self.out_dir = out_dir
         # Get Git parameters
         try:
             self.repo = GitRepo('.')
         except GitError:
             self.repo = None
-        self.git_rev, self.git_branch = self.get_git_revision()
+        self.git_revision, self.git_branch = self.get_git_revision()
+        self.hostname = socket.gethostname()
+        self.start_time = self.elapsed_time = None
+        self.successes = []
         log.info("Using Git branch:revision %s:%s", self.git_branch,
-                 self.git_rev)
+                 self.git_revision)
 
     def get_git_revision(self):
         """Get git branch and revision under testing"""
@@ -117,79 +118,71 @@ class BuildPerfTestRunner(object):
             branch = None
         return str(rev), str(branch)
 
-    def run_tests(self):
-        """Method that actually runs the tests"""
-        self.results['schema_version'] = 1
-        self.results['git_revision'] = self.git_rev
-        self.results['git_branch'] = self.git_branch
-        self.results['tester_host'] = socket.gethostname()
-        start_time = datetime.utcnow()
-        self.results['start_time'] = start_time
-        self.results['tests'] = {}
-
-        self.archive_build_conf()
-        for test_class in self.test_run_queue:
-            log.info("Executing test %s: %s", test_class.name,
-                     test_class.description)
-
-            test = test_class(self.out_dir)
-            try:
-                test.run()
-            except Exception:
-                # Catch all exceptions. This way e.g buggy tests won't scrap
-                # the whole test run
-                sep = '-' * 5 + ' TRACEBACK ' + '-' * 60 + '\n'
-                tb_msg = sep + traceback.format_exc() + sep
-                log.error("Test execution failed with:\n" + tb_msg)
-            self.results['tests'][test.name] = test.results
-
-        self.results['elapsed_time'] = datetime.utcnow() - start_time
-        return 0
-
-    def archive_build_conf(self):
-        """Archive build/conf to test results"""
-        src_dir = os.path.join(os.environ['BUILDDIR'], 'conf')
-        tgt_dir = os.path.join(self.out_dir, 'build', 'conf')
-        os.makedirs(os.path.dirname(tgt_dir))
-        shutil.copytree(src_dir, tgt_dir)
+    def addSuccess(self, test):
+        """Record results from successful tests"""
+        super(BuildPerfTestResult, self).addSuccess(test)
+        self.successes.append((test, None))
+
+    def startTest(self, test):
+        """Pre-test hook"""
+        test.out_dir = self.out_dir
+        log.info("Executing test %s: %s", test.name, test.shortDescription())
+        self.stream.write(datetime.now().strftime("[%Y-%m-%d %H:%M:%S] "))
+        super(BuildPerfTestResult, self).startTest(test)
+
+    def startTestRun(self):
+        """Pre-run hook"""
+        self.start_time = datetime.utcnow()
+
+    def stopTestRun(self):
+        """Pre-run hook"""
+        self.elapsed_time = datetime.utcnow() - self.start_time
+
+    def all_results(self):
+        result_map = {'SUCCESS': self.successes,
+                      'FAIL': self.failures,
+                      'ERROR': self.errors,
+                      'EXP_FAIL': self.expectedFailures,
+                      'UNEXP_SUCCESS': self.unexpectedSuccesses}
+        for status, tests in result_map.items():
+            for test in tests:
+                yield (status, test)
+
 
     def update_globalres_file(self, filename):
         """Write results to globalres csv file"""
+        # Map test names to time and size columns in globalres
+        # The tuples represent index and length of times and sizes
+        # respectively
+        gr_map = {'test1': ((0, 1), (8, 1)),
+                  'test12': ((1, 1), (None, None)),
+                  'test13': ((2, 1), (9, 1)),
+                  'test2': ((3, 1), (None, None)),
+                  'test3': ((4, 3), (None, None)),
+                  'test4': ((7, 1), (10, 2))}
+
         if self.repo:
-            git_tag_rev = self.repo.run_cmd(['describe', self.git_rev])
+            git_tag_rev = self.repo.run_cmd(['describe', self.git_revision])
         else:
-            git_tag_rev = self.git_rev
-        times = []
-        sizes = []
-        for test in self.results['tests'].values():
-            for measurement in test['measurements']:
-                res_type = measurement['type']
-                values = measurement['values']
-                if res_type == BuildPerfTest.SYSRES:
-                    e_sec = values['elapsed_time'].total_seconds()
-                    times.append('{:d}:{:02d}:{:.2f}'.format(
-                        int(e_sec / 3600),
-                        int((e_sec % 3600) / 60),
-                        e_sec % 60))
-                elif res_type == BuildPerfTest.DISKUSAGE:
-                    sizes.append(str(values['size']))
-                else:
-                    log.warning("Unable to handle '%s' values in "
-                                "globalres.log", res_type)
+            git_tag_rev = self.git_revision
+
+        values = ['0'] * 12
+        for status, test in self.all_results():
+            if status not in ['SUCCESS', 'FAILURE', 'EXP_SUCCESS']:
+                continue
+            (t_ind, t_len), (s_ind, s_len) = gr_map[test.name]
+            if t_ind is not None:
+                values[t_ind:t_ind + t_len] = test.times
+            if s_ind is not None:
+                values[s_ind:s_ind + s_len] = test.sizes
 
         log.debug("Writing globalres log to %s", filename)
         with open(filename, 'a') as fobj:
-            fobj.write('{},{}:{},{},'.format(self.results['tester_host'],
-                                             self.results['git_branch'],
-                                             self.results['git_revision'],
+            fobj.write('{},{}:{},{},'.format(self.hostname,
+                                             self.git_branch,
+                                             self.git_revision,
                                              git_tag_rev))
-            fobj.write(','.join(times + sizes) + '\n')
-
-
-def perf_test_case(obj):
-    """Decorator for adding test classes"""
-    BuildPerfTestRunner.test_run_queue.append(obj)
-    return obj
+            fobj.write(','.join(values) + '\n')
 
 
 class BuildPerfTestCase(unittest.TestCase):
@@ -330,3 +323,16 @@ class BuildPerfTestCase(unittest.TestCase):
 class BuildPerfTestLoader(unittest.TestLoader):
     """Test loader for build performance tests"""
     sortTestMethodsUsing = None
+
+
+class BuildPerfTestRunner(unittest.TextTestRunner):
+    """Test loader for build performance tests"""
+    sortTestMethodsUsing = None
+
+    def __init__(self, out_dir, *args, **kwargs):
+        super(BuildPerfTestRunner, self).__init__(*args, **kwargs)
+        self.out_dir = out_dir
+
+    def _makeResult(self):
+        return BuildPerfTestResult(self.out_dir, self.stream, self.descriptions,
+                                   self.verbosity)
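
The gr_map table introduced above packs each test's measurements into fixed positions of a 12-column CSV row; each pair of tuples gives (start index, length) for the time and size columns respectively. A small worked illustration of the slicing, using made-up measurement values:

    # Made-up values; the slice positions come from gr_map in base.py
    values = ['0'] * 12
    t_ind, t_len = 4, 3    # 'test3' fills three time columns (4-6)
    values[t_ind:t_ind + t_len] = ['0:05:12.00', '0:04:58.10', '0:05:03.77']
    s_ind, s_len = 10, 2   # 'test4' fills two size columns (10-11)
    values[s_ind:s_ind + s_len] = ['170512', '1652']
    print(','.join(values))
    # 0,0,0,0,0:05:12.00,0:04:58.10,0:05:03.77,0,0,0,170512,1652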
diff --git a/scripts/oe-build-perf-test b/scripts/oe-build-perf-test
index 996996bc62..8142b0332b 100755
--- a/scripts/oe-build-perf-test
+++ b/scripts/oe-build-perf-test
@@ -19,6 +19,7 @@ import errno
 import fcntl
 import logging
 import os
+import shutil
 import sys
 from datetime import datetime
 
@@ -78,6 +79,14 @@ def setup_file_logging(log_file):
     log.addHandler(handler)
 
 
+def archive_build_conf(out_dir):
+    """Archive build/conf to test results"""
+    src_dir = os.path.join(os.environ['BUILDDIR'], 'conf')
+    tgt_dir = os.path.join(out_dir, 'build', 'conf')
+    os.makedirs(os.path.dirname(tgt_dir))
+    shutil.copytree(src_dir, tgt_dir)
+
+
 def parse_args(argv):
     """Parse command line arguments"""
     parser = argparse.ArgumentParser(
@@ -120,6 +129,7 @@
 
     # Run actual tests
     runner = BuildPerfTestRunner(out_dir)
+    archive_build_conf(out_dir)
     ret = runner.run_tests()
     if not ret:
         if args.globalres_file:
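
Taken together, the classes are driven through the standard unittest flow; a hypothetical driver sketch (the output directory and the loadTestsFromName target are assumptions for illustration, not code from this commit):

    # Hypothetical driver; paths and module names are assumptions
    from oeqa.buildperf import BuildPerfTestLoader, BuildPerfTestRunner

    loader = BuildPerfTestLoader()  # sortTestMethodsUsing = None keeps definition order
    suite = loader.loadTestsFromName('oeqa.buildperf.test_basic')
    runner = BuildPerfTestRunner('/tmp/buildperf-results')
    result = runner.run(suite)      # run() hands back the BuildPerfTestResult
    for status, test in result.all_results():
        print(status, test)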