summaryrefslogtreecommitdiffstats
path: root/scripts/lib/build_perf
diff options
context:
space:
mode:
authorMarkus Lehtonen <markus.lehtonen@linux.intel.com>2017-03-31 17:07:29 +0300
committerRichard Purdie <richard.purdie@linuxfoundation.org>2017-04-01 23:28:20 +0100
commit9f299876f716f253b0a3d70eb4473a023c593fc5 (patch)
tree057d934e96df36ac3e28113b11f5b1ce70c7b614 /scripts/lib/build_perf
parent5a85d39c9d5502aabc2dde20f2a16bf7ac9f2d22 (diff)
downloadpoky-9f299876f716f253b0a3d70eb4473a023c593fc5.tar.gz
scripts: add oe-build-perf-report script
A new tool for pretty-printing build perf test results stored in a Git repository. The script is able to produce either a simple plaintext report showing the difference between two commits, or an html report that also displays trend charts of the test results. The script uses Jinja2 templates for generating HTML reports so it requires python3-jinja2 to be installed on the system. [YOCTO #10931] (From OE-Core rev: 3b25404f0f99b72f222bdca815929be1cf1cee35) Signed-off-by: Markus Lehtonen <markus.lehtonen@linux.intel.com> Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
Diffstat (limited to 'scripts/lib/build_perf')
-rw-r--r--scripts/lib/build_perf/__init__.py31
-rw-r--r--scripts/lib/build_perf/html.py19
-rw-r--r--scripts/lib/build_perf/html/measurement_chart.html50
-rw-r--r--scripts/lib/build_perf/html/report.html209
-rw-r--r--scripts/lib/build_perf/report.py342
5 files changed, 651 insertions, 0 deletions
diff --git a/scripts/lib/build_perf/__init__.py b/scripts/lib/build_perf/__init__.py
new file mode 100644
index 0000000000..1f8b729078
--- /dev/null
+++ b/scripts/lib/build_perf/__init__.py
@@ -0,0 +1,31 @@
1#
2# Copyright (c) 2017, Intel Corporation.
3#
4# This program is free software; you can redistribute it and/or modify it
5# under the terms and conditions of the GNU General Public License,
6# version 2, as published by the Free Software Foundation.
7#
8# This program is distributed in the hope it will be useful, but WITHOUT
9# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11# more details.
12#
13"""Build performance test library functions"""
14
def print_table(rows, row_fmt=None):
    """Print a left-aligned data table to stdout.

    Args:
        rows: sequence of rows, each row a sequence of cells
        row_fmt: optional per-column format strings; each receives the cell
                 value plus a 'wid' keyword holding the column width.
                 Defaults to plain left-aligned padding for every column.
    """
    if not rows:
        return
    if not row_fmt:
        row_fmt = ['{:{wid}} '] * len(rows[0])

    # Each column is as wide as its widest cell (string representation)
    widths = [0] * len(row_fmt)
    for row in rows:
        for col, cell in enumerate(row):
            cell_len = len(str(cell))
            if cell_len > widths[col]:
                widths[col] = cell_len

    for row in rows:
        cells = [row_fmt[col].format(cell, wid=widths[col])
                 for col, cell in enumerate(row)]
        print(*cells)
diff --git a/scripts/lib/build_perf/html.py b/scripts/lib/build_perf/html.py
new file mode 100644
index 0000000000..578bb162ee
--- /dev/null
+++ b/scripts/lib/build_perf/html.py
@@ -0,0 +1,19 @@
1#
2# Copyright (c) 2017, Intel Corporation.
3#
4# This program is free software; you can redistribute it and/or modify it
5# under the terms and conditions of the GNU General Public License,
6# version 2, as published by the Free Software Foundation.
7#
8# This program is distributed in the hope it will be useful, but WITHOUT
9# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11# more details.
12#
13"""Helper module for HTML reporting"""
14from jinja2 import Environment, PackageLoader
15
16
# Jinja2 environment loading templates from the 'html' subdirectory of
# the build_perf package (see PackageLoader arguments)
env = Environment(loader=PackageLoader('build_perf', 'html'))

# Main report template, loaded eagerly at import time so a missing or
# broken template fails fast
template = env.get_template('report.html')
diff --git a/scripts/lib/build_perf/html/measurement_chart.html b/scripts/lib/build_perf/html/measurement_chart.html
new file mode 100644
index 0000000000..26fe1453c0
--- /dev/null
+++ b/scripts/lib/build_perf/html/measurement_chart.html
@@ -0,0 +1,50 @@
{# Jinja2 fragment emitting one Google Charts line-chart script for a single
   measurement. Included (once per measurement) by report.html, which sets
   chart_elem_id, chart_opts, measurement and, for the final chart only,
   last_chart in the template context. #}
<script type="text/javascript">
  google.charts.setOnLoadCallback(drawChart_{{ chart_elem_id }});
  function drawChart_{{ chart_elem_id }}() {
    var data = new google.visualization.DataTable();

    // Chart options
    var options = {
      theme : 'material',
      legend: 'none',
      hAxis: { format: '', title: 'Commit number',
               minValue: {{ chart_opts.haxis.min }},
               maxValue: {{ chart_opts.haxis.max }} },
      {% if measurement.type == 'time' %}
      vAxis: { format: 'h:mm:ss' },
      {% else %}
      vAxis: { format: '' },
      {% endif %}
      pointSize: 5,
      chartArea: { left: 80, right: 15 },
    };

    // Define data columns
    data.addColumn('number', 'Commit');
    data.addColumn('{{ measurement.value_type.gv_data_type }}',
                   '{{ measurement.value_type.quantity }}');
    // Add data rows
    data.addRows([
      {% for sample in measurement.samples %}
      [{{ sample.commit_num }}, {{ sample.mean.gv_value() }}],
      {% endfor %}
    ]);

    // Finally, draw the chart
    chart_div = document.getElementById('{{ chart_elem_id }}');
    var chart = new google.visualization.LineChart(chart_div);
    google.visualization.events.addListener(chart, 'ready', function () {
      // Once the chart has rendered, replace the placeholder div with a
      // link to a PNG snapshot of the chart
      //chart_div = document.getElementById('{{ chart_elem_id }}');
      //chart_div.innerHTML = '<img src="' + chart.getImageURI() + '">';
      png_div = document.getElementById('{{ chart_elem_id }}_png');
      png_div.outerHTML = '<a id="{{ chart_elem_id }}_png" href="' + chart.getImageURI() + '">PNG</a>';
      console.log("CHART READY: {{ chart_elem_id }}");
      {% if last_chart == true %}
      console.log("ALL CHARTS READY");
      {% endif %}
      //console.log(chart_div.innerHTML);
    });
    chart.draw(data, options);
}
</script>
diff --git a/scripts/lib/build_perf/html/report.html b/scripts/lib/build_perf/html/report.html
new file mode 100644
index 0000000000..e42871177d
--- /dev/null
+++ b/scripts/lib/build_perf/html/report.html
@@ -0,0 +1,209 @@
<!DOCTYPE html>
<html lang="en">
<head>
{# Scripts, for visualization#}
<!--START-OF-SCRIPTS-->
<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
<script type="text/javascript">
google.charts.load('current', {'packages':['corechart']});
</script>

{# Render measurement result charts #}
{# One chart script fragment is emitted per measurement of each successful
   test; last_chart is set only when emitting the very last one so the
   included fragment can log overall completion #}
{% for test in test_data %}
  {% set test_loop = loop %}
  {% if test.status == 'SUCCESS' %}
    {% for measurement in test.measurements %}
      {% set chart_elem_id = test.name + '_' + measurement.name + '_chart' %}
      {% if test_loop.last and loop.last %}
        {% set last_chart = true %}
      {% endif %}
      {% include 'measurement_chart.html' %}
    {% endfor %}
  {% endif %}
{% endfor %}

<!--END-OF-SCRIPTS-->
26
27{# Styles #}
<style>
.meta-table {
  font-size: 14px;
  text-align: left;
  border-collapse: collapse;
}
.meta-table tr:nth-child(even){background-color: #f2f2f2}
/* Fix: selector was 'meta-table th' (no leading dot), which targets a
   non-existent <meta-table> element instead of the .meta-table class */
.meta-table th, .meta-table td {
  padding: 4px;
}
.summary {
  margin: 0;
  font-size: 14px;
  text-align: left;
  border-collapse: collapse;
}
/* Fix: selector was 'summary th, .meta-table td' — 'summary' (no dot)
   targets the HTML <summary> element, and '.meta-table td' looks like a
   copy-paste slip; pad the .summary table's own cells instead */
.summary th, .summary td {
  padding: 4px;
}
.measurement {
  padding: 8px 0px 8px 8px;
  border: 2px solid #f0f0f0;
  margin-bottom: 10px;
}
.details {
  margin: 0;
  font-size: 12px;
  text-align: left;
  border-collapse: collapse;
}
.details th {
  font-weight: normal;
  padding-right: 8px;
}
.preformatted {
  font-family: monospace;
  white-space: pre-wrap;
  background-color: #f0f0f0;
  margin-left: 10px;
}
hr {
  color: #f0f0f0;
}
h2 {
  font-size: 20px;
  margin-bottom: 0px;
  color: #707070;
}
h3 {
  font-size: 16px;
  margin: 0px;
  color: #707070;
}
</style>
82
<title>{{ title }}</title>
</head>

{# Macro rendering a commit hash as a link to the poky cgit log for that
   commit, abbreviated to its first 11 characters #}
{% macro poky_link(commit) -%}
  <a href="http://git.yoctoproject.org/cgit/cgit.cgi/poky/log/?id={{ commit }}">{{ commit[0:11] }}</a>
{%- endmacro %}
89
<body><div style="width: 700px">
  {# Test metadata #}
  {# Two-column comparison of run metadata; 'commit' values are rendered
     as cgit links via the poky_link macro, everything else verbatim #}
  <h2>General</h2>
  <hr>
  <table class="meta-table" style="width: 100%">
    <tr>
      <th></th>
      <th>Current commit</th>
      <th>Comparing with</th>
    </tr>
    {% for key, item in metadata.items() %}
    <tr>
      <th>{{ item.title }}</th>
      {%if key == 'commit' %}
      <td>{{ poky_link(item.value) }}</td>
      <td>{{ poky_link(item.value_old) }}</td>
      {% else %}
      <td>{{ item.value }}</td>
      <td>{{ item.value_old }}</td>
      {% endif %}
    </tr>
    {% endfor %}
  </table>
113
  {# Test result summary #}
  {# One row group per test, alternating background colour by test index #}
  <h2>Test result summary</h2>
  <hr>
  <table class="summary" style="width: 100%">
    {% for test in test_data %}
    {% if loop.index is even %}
    {% set row_style = 'style="background-color: #f2f2f2"' %}
    {% else %}
    {% set row_style = 'style="background-color: #ffffff"' %}
    {% endif %}
    <tr {{ row_style }}><td>{{ test.name }}: {{ test.description }}</td>
    {% if test.status == 'SUCCESS' %}
    {% for measurement in test.measurements %}
    {# add empty cell in place of the test name#}
    {% if loop.index > 1 %}<td></td>{% endif %}
    {# 'absdiff == absdiff' is false only for NaN: red for a positive
       (worse) diff, green for a valid non-positive diff, orange when the
       diff could not be computed #}
    {% if measurement.absdiff > 0 %}
    {% set result_style = "color: red" %}
    {% elif measurement.absdiff == measurement.absdiff %}
    {% set result_style = "color: green" %}
    {% else %}
    {% set result_style = "color: orange" %}
    {%endif %}
    <td>{{ measurement.description }}</td>
    <td style="font-weight: bold">{{ measurement.value.mean }}</td>
    <td style="{{ result_style }}">{{ measurement.absdiff_str }}</td>
    <td style="{{ result_style }}">{{ measurement.reldiff }}</td>
    </tr><tr {{ row_style }}>
    {% endfor %}
    {% else %}
    <td style="font-weight: bold; color: red;">{{test.status }}</td>
    <td></td> <td></td> <td></td> <td></td>
    {% endif %}
    </tr>
    {% endfor %}
  </table>
149
  {# Detailed test results #}
  {# Per-test section: for successful tests, one .measurement box per
     measurement with the headline value, a trend chart placeholder
     (filled in by the generated chart scripts) and a statistics table;
     for failed tests, the status and captured output #}
  {% for test in test_data %}
  <h2>{{ test.name }}: {{ test.description }}</h2>
  <hr>
  {% if test.status == 'SUCCESS' %}
  {% for measurement in test.measurements %}
  <div class="measurement">
    <h3>{{ measurement.description }}</h3>
    <div style="font-weight:bold;">
      <span style="font-size: 23px;">{{ measurement.value.mean }}</span>
      <span style="font-size: 20px; margin-left: 12px">
      {# absdiff != absdiff only when the diff is NaN #}
      {% if measurement.absdiff > 0 %}
      <span style="color: red">
      {% elif measurement.absdiff == measurement.absdiff %}
      <span style="color: green">
      {% else %}
      <span style="color: orange">
      {% endif %}
        {{ measurement.absdiff_str }} ({{measurement.reldiff}})
      </span></span>
    </div>
    <table style="width: 100%">
      <tr>
        <td style="width: 75%">
          {# Linechart #}
          <div id="{{ test.name }}_{{ measurement.name }}_chart"></div>
        </td>
        <td>
          {# Measurement statistics #}
          <table class="details">
            <tr>
              <th>Test runs</th><td>{{ measurement.value.sample_cnt }}</td>
            </tr><tr>
              <th>-/+</th><td>-{{ measurement.value.minus }} / +{{ measurement.value.plus }}</td>
            </tr><tr>
              <th>Min</th><td>{{ measurement.value.min }}</td>
            </tr><tr>
              <th>Max</th><td>{{ measurement.value.max }}</td>
            </tr><tr>
              <th>Stdev</th><td>{{ measurement.value.stdev }}</td>
            </tr><tr>
              <th><div id="{{ test.name }}_{{ measurement.name }}_chart_png"></div></th>
            </tr>
          </table>
        </td>
      </tr>
    </table>
  </div>
  {% endfor %}
  {# Unsuccessful test #}
  {% else %}
  <span style="font-size: 150%; font-weight: bold; color: red;">{{ test.status }}
  {% if test.err_type %}<span style="font-size: 75%; font-weight: normal">({{ test.err_type }})</span>{% endif %}
  </span>
  <div class="preformatted">{{ test.message }}</div>
  {% endif %}
  {% endfor %}
</div></body>
</html>
209
diff --git a/scripts/lib/build_perf/report.py b/scripts/lib/build_perf/report.py
new file mode 100644
index 0000000000..eb00ccca2d
--- /dev/null
+++ b/scripts/lib/build_perf/report.py
@@ -0,0 +1,342 @@
1#
2# Copyright (c) 2017, Intel Corporation.
3#
4# This program is free software; you can redistribute it and/or modify it
5# under the terms and conditions of the GNU General Public License,
6# version 2, as published by the Free Software Foundation.
7#
8# This program is distributed in the hope it will be useful, but WITHOUT
9# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11# more details.
12#
13"""Handling of build perf test reports"""
from collections import OrderedDict
# Mapping lives in collections.abc; the plain 'collections' alias was
# deprecated since Python 3.3 and removed in 3.10
from collections.abc import Mapping
from datetime import datetime, timezone
from numbers import Number
from statistics import mean, stdev, variance
18
19
def isofmt_to_timestamp(string):
    """Convert timestamp string in ISO 8601 format into unix timestamp.

    The input is interpreted as UTC; fractional seconds are optional.
    """
    # Choose the parse format depending on whether fractional seconds
    # are present
    fmt = '%Y-%m-%dT%H:%M:%S.%f' if '.' in string else '%Y-%m-%dT%H:%M:%S'
    parsed = datetime.strptime(string, fmt)
    return parsed.replace(tzinfo=timezone.utc).timestamp()
27
28
def metadata_xml_to_json(elem):
    """Convert metadata xml into JSON format.

    Args:
        elem: xml.etree.ElementTree.Element whose tag must be 'metadata'

    Returns:
        OrderedDict mirroring the element tree: elements with children map
        to nested dicts, leaf elements map to their text. Keys are taken
        from the 'name' attribute, falling back to the tag name.
    """
    assert elem.tag == 'metadata', "Invalid metadata file format"

    def _xml_to_json(elem):
        """Convert xml element to JSON object"""
        out = OrderedDict()
        # Iterate the element directly: Element.getchildren() was
        # deprecated since Python 3.2 and removed in Python 3.9
        for child in elem:
            key = child.attrib.get('name', child.tag)
            if len(child):
                out[key] = _xml_to_json(child)
            else:
                out[key] = child.text
        return out
    return _xml_to_json(elem)
44
45
def results_xml_to_json(elem):
    """Convert results xml into JSON format.

    Args:
        elem: xml.etree.ElementTree.Element with tag 'testsuites',
              containing exactly one 'testsuite' child (JUnit-style report
              extended with sysres/diskusage measurement elements)

    Returns:
        OrderedDict with suite attributes plus a 'tests' dict keyed by
        test case name.

    Note: all Element.getchildren() calls of the original were replaced
    with direct iteration/indexing; getchildren() was deprecated since
    Python 3.2 and removed in Python 3.9.
    """
    rusage_fields = ('ru_utime', 'ru_stime', 'ru_maxrss', 'ru_minflt',
                     'ru_majflt', 'ru_inblock', 'ru_oublock', 'ru_nvcsw',
                     'ru_nivcsw')
    iostat_fields = ('rchar', 'wchar', 'syscr', 'syscw', 'read_bytes',
                     'write_bytes', 'cancelled_write_bytes')

    def _read_measurement(elem):
        """Convert measurement to JSON"""
        data = OrderedDict()
        data['type'] = elem.tag
        data['name'] = elem.attrib['name']
        data['legend'] = elem.attrib['legend']
        values = OrderedDict()

        # SYSRES measurement
        if elem.tag == 'sysres':
            for subel in elem:
                if subel.tag == 'time':
                    values['start_time'] = isofmt_to_timestamp(subel.attrib['timestamp'])
                    values['elapsed_time'] = float(subel.text)
                elif subel.tag == 'rusage':
                    rusage = OrderedDict()
                    for field in rusage_fields:
                        # Time fields are fractional seconds, the rest are counts
                        if 'time' in field:
                            rusage[field] = float(subel.attrib[field])
                        else:
                            rusage[field] = int(subel.attrib[field])
                    values['rusage'] = rusage
                elif subel.tag == 'iostat':
                    values['iostat'] = OrderedDict([(f, int(subel.attrib[f]))
                                                    for f in iostat_fields])
                elif subel.tag == 'buildstats_file':
                    values['buildstats_file'] = subel.text
                else:
                    raise TypeError("Unknown sysres value element '{}'".format(subel.tag))
        # DISKUSAGE measurement
        elif elem.tag == 'diskusage':
            values['size'] = int(elem.find('size').text)
        else:
            raise Exception("Unknown measurement tag '{}'".format(elem.tag))
        data['values'] = values
        return data

    def _read_testcase(elem):
        """Convert testcase into JSON"""
        assert elem.tag == 'testcase', "Expecting 'testcase' element instead of {}".format(elem.tag)

        data = OrderedDict()
        data['name'] = elem.attrib['name']
        data['description'] = elem.attrib['description']
        data['status'] = 'SUCCESS'
        data['start_time'] = isofmt_to_timestamp(elem.attrib['timestamp'])
        data['elapsed_time'] = float(elem.attrib['time'])
        measurements = OrderedDict()

        for subel in elem:
            if subel.tag == 'error' or subel.tag == 'failure':
                data['status'] = subel.tag.upper()
                data['message'] = subel.attrib['message']
                data['err_type'] = subel.attrib['type']
                data['err_output'] = subel.text
            elif subel.tag == 'skipped':
                data['status'] = 'SKIPPED'
                data['message'] = subel.text
            else:
                # Anything else is treated as a measurement element
                measurements[subel.attrib['name']] = _read_measurement(subel)
        data['measurements'] = measurements
        return data

    def _read_testsuite(elem):
        """Convert suite to JSON"""
        assert elem.tag == 'testsuite', \
            "Expecting 'testsuite' element instead of {}".format(elem.tag)

        data = OrderedDict()
        if 'hostname' in elem.attrib:
            data['tester_host'] = elem.attrib['hostname']
        data['start_time'] = isofmt_to_timestamp(elem.attrib['timestamp'])
        data['elapsed_time'] = float(elem.attrib['time'])
        tests = OrderedDict()

        for case in elem:
            tests[case.attrib['name']] = _read_testcase(case)
        data['tests'] = tests
        return data

    # Main function
    assert elem.tag == 'testsuites', "Invalid test report format"
    assert len(elem) == 1, "Too many testsuites"

    return _read_testsuite(elem[0])
139
140
def aggregate_metadata(metadata):
    """Aggregate metadata into one, basically a sanity check.

    Verifies that all test runs share the same metadata (except for the
    keys listed in mutable_keys, which may differ between runs) and
    returns the first run's metadata annotated with 'testrun_count'.
    Raises AssertionError on any mismatch.
    """
    mutable_keys = ('pretty_name', 'version_id')

    def _merge(agg, node, check_str=True):
        """Recursively compare one run's metadata against the aggregate"""
        assert type(agg) is type(node), \
            "Type mismatch: {} != {}".format(type(agg), type(node))
        if isinstance(node, Mapping):
            assert set(agg.keys()) == set(node.keys())
            for name, child in node.items():
                _merge(agg[name], child, name not in mutable_keys)
        elif isinstance(node, list):
            assert len(agg) == len(node)
            for idx, child in enumerate(node):
                _merge(agg[idx], child)
        elif not isinstance(node, str) or check_str:
            # Scalars (and strings outside mutable_keys) must be identical
            assert agg == node, "Data mismatch {} != {}".format(agg, node)

    if not metadata:
        return {}

    # Sanity-check every subsequent run against the first one
    merged = metadata[0].copy()
    for run in metadata[1:]:
        _merge(merged, run)
    merged['testrun_count'] = len(metadata)
    return merged
169
170
def aggregate_data(data):
    """Aggregate multiple test results JSON structures into one.

    Numbers from the individual runs are collected into SampleList
    containers; strings are sanity-checked to be identical across runs,
    except for the keys in mutable_keys (test status / error info) which
    may legitimately differ between runs.

    Args:
        data: list of per-run JSON-like structures (dicts/lists/scalars)

    Returns:
        A single aggregated structure of the same shape, with every
        numeric leaf replaced by a SampleList of the per-run values.
    """
    mutable_keys = ('status', 'message', 'err_type', 'err_output')

    class SampleList(list):
        """Container for numerical samples"""
        pass

    def new_aggregate_obj(obj):
        """Create new object for aggregate"""
        if isinstance(obj, Number):
            new_obj = SampleList()
            new_obj.append(obj)
        elif isinstance(obj, str):
            new_obj = obj
        else:
            # Lists and dicts are created empty and filled recursively
            new_obj = obj.__class__()
            aggregate_obj(new_obj, obj)
        return new_obj

    def aggregate_obj(aggregate, obj, assert_str=True):
        """Recursive "aggregation" of JSON objects"""
        if isinstance(obj, Number):
            assert isinstance(aggregate, SampleList)
            aggregate.append(obj)
            return

        assert type(aggregate) == type(obj), \
            "Type mismatch: {} != {}".format(type(aggregate), type(obj))
        if isinstance(obj, Mapping):
            for key, val in obj.items():
                if key not in aggregate:
                    aggregate[key] = new_aggregate_obj(val)
                else:
                    aggregate_obj(aggregate[key], val, key not in mutable_keys)
        elif isinstance(obj, list):
            for i, val in enumerate(obj):
                if i >= len(aggregate):
                    # BUG FIX: the original did 'aggregate[key] = ...' here,
                    # but 'key' is unbound in this branch (UnboundLocalError);
                    # new list elements must be appended
                    aggregate.append(new_aggregate_obj(val))
                else:
                    aggregate_obj(aggregate[i], val)
        elif isinstance(obj, str):
            # Sanity check for data
            if assert_str:
                assert aggregate == obj, "Data mismatch {} != {}".format(aggregate, obj)
        else:
            raise Exception("BUG: unable to aggregate '{}' ({})".format(type(obj), str(obj)))

    if not data:
        return {}

    # Do the aggregation
    aggregate = data[0].__class__()
    for testrun in data:
        aggregate_obj(aggregate, testrun)
    return aggregate
229
230
class MeasurementVal(float):
    """Base class representing measurement values; NaN marks a missing value"""
    gv_data_type = 'number'

    def gv_value(self):
        """Value formatting for visualization"""
        # NaN is the only float that compares unequal to itself
        return "null" if self != self else self
241
242
class TimeVal(MeasurementVal):
    """Measurement value representing a duration in seconds"""
    quantity = 'time'
    gv_title = 'elapsed time'
    gv_data_type = 'timeofday'

    def hms(self):
        """Split the absolute time into hours, minutes and seconds"""
        secs = abs(self)
        hours = int(secs / 3600)
        minutes = int((secs % 3600) / 60)
        seconds = secs % 60
        return hours, minutes, seconds

    def __str__(self):
        # NaN compares unequal to itself
        if self != self:
            return "nan"
        hours, minutes, seconds = self.hms()
        sign = '-' if self < 0 else ''
        # Precision shrinks as the magnitude grows
        if hours > 0:
            return '{}{:d}:{:02d}:{:02.0f}'.format(sign, hours, minutes, seconds)
        if minutes > 0:
            return '{}{:d}:{:04.1f}'.format(sign, minutes, seconds)
        if seconds > 1:
            return '{}{:.1f} s'.format(sign, seconds)
        return '{}{:.2f} s'.format(sign, seconds)

    def gv_value(self):
        """Value formatting for visualization: [h, m, s, ms], "null" for NaN"""
        if self != self:
            return "null"
        hours, minutes, seconds = self.hms()
        return [hours, minutes, int(seconds), int(seconds * 1000) % 1000]
276
277
class SizeVal(MeasurementVal):
    """Measurement value representing a size, stored in kiB"""
    quantity = 'size'
    gv_title = 'size in MiB'
    gv_data_type = 'number'

    def __str__(self):
        if self != self:
            return "nan"
        magnitude = abs(self)
        # Raw value is in kiB; pick a unit that keeps the number readable
        if magnitude < 1024:
            return '{:.1f} kiB'.format(self)
        if magnitude < 1048576:
            return '{:.2f} MiB'.format(self / 1024)
        return '{:.2f} GiB'.format(self / 1048576)

    def gv_value(self):
        """Value formatting for visualization: size in MiB, "null" for NaN"""
        return "null" if self != self else self / 1024
299
def measurement_stats(meas, prefix=''):
    """Get statistics of a measurement.

    Args:
        meas: measurement dict (with 'type', 'name' and 'values' keys, as
              produced by results_xml_to_json / aggregate_data), or a falsy
              value for a missing measurement
        prefix: string prepended to the statistic keys in the result

    Returns:
        Dict of statistics wrapped in the measurement's value class
        (TimeVal for 'sysres', SizeVal for 'diskusage'); all values are
        NaN when the measurement is missing.

    Raises:
        Exception: on an unknown measurement type.
    """
    if not meas:
        # Empty result for missing measurements; NaN marks "no data"
        nan = MeasurementVal('nan')
        return {prefix + 'sample_cnt': 0,
                prefix + 'mean': nan,
                prefix + 'stdev': nan,
                prefix + 'variance': nan,
                prefix + 'min': nan,
                prefix + 'max': nan,
                prefix + 'minus': nan,
                prefix + 'plus': nan}

    stats = {'name': meas['name']}
    if meas['type'] == 'sysres':
        val_cls = TimeVal
        values = meas['values']['elapsed_time']
    elif meas['type'] == 'diskusage':
        val_cls = SizeVal
        values = meas['values']['size']
    else:
        raise Exception("Unknown measurement type '{}'".format(meas['type']))
    stats['val_cls'] = val_cls
    stats['quantity'] = val_cls.quantity
    stats[prefix + 'sample_cnt'] = len(values)

    mean_val = val_cls(mean(values))
    min_val = val_cls(min(values))
    max_val = val_cls(max(values))

    stats[prefix + 'mean'] = mean_val
    if len(values) > 1:
        stats[prefix + 'stdev'] = val_cls(stdev(values))
        stats[prefix + 'variance'] = val_cls(variance(values))
    else:
        # Consistency fix: wrap NaN in the measurement value class like
        # every other statistic (the original stored a bare float('nan'))
        stats[prefix + 'stdev'] = val_cls('nan')
        stats[prefix + 'variance'] = val_cls('nan')
    stats[prefix + 'min'] = min_val
    stats[prefix + 'max'] = max_val
    # Spread of the samples around the mean
    stats[prefix + 'minus'] = val_cls(mean_val - min_val)
    stats[prefix + 'plus'] = val_cls(max_val - mean_val)

    return stats
342