author     Markus Lehtonen <markus.lehtonen@linux.intel.com>    2016-06-27 15:05:10 +0300
committer  Richard Purdie <richard.purdie@linuxfoundation.org>  2016-08-17 10:35:43 +0100
commit     3f519df38e8e533886ad63879db16df27e21369b (patch)
tree       f3d30ad125cde73dcc87f8d6b292acd77de43013 /meta
parent     daee7558a563fad5034cb7aeb7d67e7c1cbb17e8 (diff)
download   poky-3f519df38e8e533886ad63879db16df27e21369b.tar.gz
oeqa.buildperf: derive BuildPerfTestCase class from unittest.TestCase
Rename BuildPerfTest to BuildPerfTestCase and derive it from the TestCase
class of the unittest framework in the Python standard library. This change
is incompatible with our existing test cases and test runner class, which
need to be modified as well.
(From OE-Core rev: b0b434210a3dbd576f68344e29b8c20d18561099)
Signed-off-by: Markus Lehtonen <markus.lehtonen@linux.intel.com>
Signed-off-by: Ross Burton <ross.burton@intel.com>
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
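
[Editor's note] For illustration, a minimal sketch of what a test case could
look like once it derives from BuildPerfTestCase. The class and method names
and the bitbake target are hypothetical; the real test cases live in
meta/lib/oeqa/buildperf/test_basic.py and are adapted in a follow-up change:

    from oeqa.buildperf import BuildPerfTestCase

    class TestBuildSketch(BuildPerfTestCase):
        # Hypothetical test case, not part of this commit. Test methods
        # follow the standard unittest naming convention; self.name is now
        # taken from _testMethodName ('test_build_sketch' here).
        def test_build_sketch(self):
            # self.out_dir must be set by the test runner before run()
            self.log_cmd_output(['bitbake', '--version'])
            # Time a command and record it as a measurement
            self.measure_cmd_resources(['bitbake', 'core-image-minimal'],
                                       'build', 'bitbake core-image-minimal')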
Diffstat (limited to 'meta')
-rw-r--r--  meta/lib/oeqa/buildperf/__init__.py |  4
-rw-r--r--  meta/lib/oeqa/buildperf/base.py     | 67
2 files changed, 33 insertions(+), 38 deletions(-)
diff --git a/meta/lib/oeqa/buildperf/__init__.py b/meta/lib/oeqa/buildperf/__init__.py
index c816bd23c9..add3be2944 100644
--- a/meta/lib/oeqa/buildperf/__init__.py
+++ b/meta/lib/oeqa/buildperf/__init__.py
@@ -10,6 +10,8 @@
 # more details.
 #
 """Build performance tests"""
-from .base import (perf_test_case, BuildPerfTest, BuildPerfTestRunner,
+from .base import (perf_test_case,
+                   BuildPerfTestCase,
+                   BuildPerfTestRunner,
                    KernelDropCaches)
 from .test_basic import *
diff --git a/meta/lib/oeqa/buildperf/base.py b/meta/lib/oeqa/buildperf/base.py
index 1eb21f6813..53ac9764a2 100644
--- a/meta/lib/oeqa/buildperf/base.py
+++ b/meta/lib/oeqa/buildperf/base.py
@@ -19,6 +19,7 @@ import socket
 import tempfile
 import time
 import traceback
+import unittest
 from datetime import datetime, timedelta
 
 from oeqa.utils.commands import runCmd, get_bb_vars
@@ -191,50 +192,34 @@ def perf_test_case(obj):
     return obj
 
 
-class BuildPerfTest(object):
+class BuildPerfTestCase(unittest.TestCase):
     """Base class for build performance tests"""
     SYSRES = 'sysres'
     DISKUSAGE = 'diskusage'
 
-    name = None
-    description = None
-
-    def __init__(self, out_dir):
-        self.out_dir = out_dir
-        self.results = {'name':self.name,
-                        'description': self.description,
-                        'status': 'NOTRUN',
-                        'start_time': None,
-                        'elapsed_time': None,
-                        'measurements': []}
-        if not os.path.exists(self.out_dir):
-            os.makedirs(self.out_dir)
-        if not self.name:
-            self.name = self.__class__.__name__
+    def __init__(self, *args, **kwargs):
+        super(BuildPerfTestCase, self).__init__(*args, **kwargs)
+        self.name = self._testMethodName
+        self.out_dir = None
+        self.start_time = None
+        self.elapsed_time = None
+        self.measurements = []
         self.bb_vars = get_bb_vars()
-        # TODO: remove the _failed flag when globalres.log is ditched as all
-        #       failures should raise an exception
-        self._failed = False
-        self.cmd_log = os.path.join(self.out_dir, 'commands.log')
+        # TODO: remove 'times' and 'sizes' arrays when globalres support is
+        #       removed
+        self.times = []
+        self.sizes = []
 
-    def run(self):
+    def run(self, *args, **kwargs):
         """Run test"""
-        self.results['status'] = 'FAILED'
-        self.results['start_time'] = datetime.now()
-        self._run()
-        self.results['elapsed_time'] = (datetime.now() -
-                                        self.results['start_time'])
-        # Test is regarded as completed if it doesn't raise an exception
-        if not self._failed:
-            self.results['status'] = 'COMPLETED'
-
-    def _run(self):
-        """Actual test payload"""
-        raise NotImplementedError
+        self.start_time = datetime.now()
+        super(BuildPerfTestCase, self).run(*args, **kwargs)
+        self.elapsed_time = datetime.now() - self.start_time
 
     def log_cmd_output(self, cmd):
         """Run a command and log it's output"""
-        with open(self.cmd_log, 'a') as fobj:
+        cmd_log = os.path.join(self.out_dir, 'commands.log')
+        with open(cmd_log, 'a') as fobj:
             runCmd(cmd, stdout=fobj)
 
     def measure_cmd_resources(self, cmd, name, legend):
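
[Editor's note] The run() override in the hunk above is a generic unittest
timing pattern: record wall-clock time around the inherited TestCase.run().
A self-contained sketch of the same pattern, independent of the oeqa code:

    import unittest
    from datetime import datetime

    class TimedTestCase(unittest.TestCase):
        # Wrap the inherited run() to measure elapsed wall-clock time
        def run(self, *args, **kwargs):
            self.start_time = datetime.now()
            super(TimedTestCase, self).run(*args, **kwargs)
            self.elapsed_time = datetime.now() - self.start_time

    class Example(TimedTestCase):
        def test_noop(self):
            self.assertTrue(True)

    tc = Example('test_noop')
    tc.run(unittest.TestResult())
    print(tc.elapsed_time)  # a datetime.timedelta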
@@ -255,7 +240,8 @@ class BuildPerfTest(object):
 
         cmd_str = cmd if isinstance(cmd, str) else ' '.join(cmd)
         log.info("Timing command: %s", cmd_str)
-        with open(self.cmd_log, 'a') as fobj:
+        cmd_log = os.path.join(self.out_dir, 'commands.log')
+        with open(cmd_log, 'a') as fobj:
             ret, timedata = time_cmd(cmd, stdout=fobj)
         if ret.status:
             log.error("Time will be reported as 0. Command failed: %s",
@@ -270,12 +256,17 @@ class BuildPerfTest(object):
                        'name': name,
                        'legend': legend}
         measurement['values'] = {'elapsed_time': etime}
-        self.results['measurements'].append(measurement)
+        self.measurements.append(measurement)
+        e_sec = etime.total_seconds()
         nlogs = len(glob.glob(self.out_dir + '/results.log*'))
         results_log = os.path.join(self.out_dir,
                                    'results.log.{}'.format(nlogs + 1))
         with open(results_log, 'w') as fobj:
             fobj.write(timedata)
+        # Append to 'times' array for globalres log
+        self.times.append('{:d}:{:02d}:{:.2f}'.format(int(e_sec / 3600),
+                                                      int((e_sec % 3600) / 60),
+                                                      e_sec % 60))
 
     def measure_disk_usage(self, path, name, legend):
         """Estimate disk usage of a file or directory"""
@@ -293,7 +284,9 @@ class BuildPerfTest(object):
                        'name': name,
                        'legend': legend}
         measurement['values'] = {'size': size}
-        self.results['measurements'].append(measurement)
+        self.measurements.append(measurement)
+        # Append to 'sizes' array for globalres log
+        self.sizes.append(str(size))
 
     def save_buildstats(self):
         """Save buildstats"""