Diffstat (limited to 'scripts/lib/build_perf')
-rw-r--r--   scripts/lib/build_perf/__init__.py                     24
-rw-r--r--   scripts/lib/build_perf/html.py                         12
-rw-r--r--   scripts/lib/build_perf/html/measurement_chart.html    100
-rw-r--r--   scripts/lib/build_perf/html/report.html               345
-rw-r--r--   scripts/lib/build_perf/report.py                      342
-rw-r--r--   scripts/lib/build_perf/scrape-html-report.js           56
6 files changed, 0 insertions, 879 deletions
diff --git a/scripts/lib/build_perf/__init__.py b/scripts/lib/build_perf/__init__.py
deleted file mode 100644
index dcbb78042d..0000000000
--- a/scripts/lib/build_perf/__init__.py
+++ /dev/null
@@ -1,24 +0,0 @@
1#
2# Copyright (c) 2017, Intel Corporation.
3#
4# SPDX-License-Identifier: GPL-2.0-only
5#
6"""Build performance test library functions"""
7
8def print_table(rows, row_fmt=None):
9 """Print data table"""
10 if not rows:
11 return
12 if not row_fmt:
13 row_fmt = ['{:{wid}} '] * len(rows[0])
14
15 # Go through the data to get maximum cell widths
16 num_cols = len(row_fmt)
17 col_widths = [0] * num_cols
18 for row in rows:
19 for i, val in enumerate(row):
20 col_widths[i] = max(col_widths[i], len(str(val)))
21
22 for row in rows:
23 print(*[row_fmt[i].format(col, wid=col_widths[i]) for i, col in enumerate(row)])
24
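For reference, the deleted print_table() helper padded every column to the width of its widest cell and wrote the rows with print(). A minimal usage sketch with made-up sample rows, assuming scripts/lib is on sys.path so build_perf is importable:

    # Minimal sketch of how callers used the deleted print_table() helper.
    # The sample rows are hypothetical; each column is padded to the width
    # of its longest value.
    from build_perf import print_table

    rows = [('Test', 'Mean', 'Stdev'),
            ('test1_build', '42.1 s', '0.8 s'),
            ('test3_rm_tmp', '3.2 s', '0.1 s')]
    print_table(rows)
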
diff --git a/scripts/lib/build_perf/html.py b/scripts/lib/build_perf/html.py
deleted file mode 100644
index d1273c9c50..0000000000
--- a/scripts/lib/build_perf/html.py
+++ /dev/null
@@ -1,12 +0,0 @@
1#
2# Copyright (c) 2017, Intel Corporation.
3#
4# SPDX-License-Identifier: GPL-2.0-only
5#
6"""Helper module for HTML reporting"""
7from jinja2 import Environment, PackageLoader
8
9
10env = Environment(loader=PackageLoader('build_perf', 'html'))
11
12template = env.get_template('report.html')
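The deleted html.py did nothing more than build a Jinja2 Environment with PackageLoader('build_perf', 'html') and expose the compiled report.html template. A hedged sketch of rendering a report with it; the context keys are the variables report.html references, the values are placeholders, and the real report generator may have passed additional keys:

    # Sketch only: render the deleted report.html template to a file.
    # 'title', 'metadata' and 'test_data' are the variables the template
    # uses; the empty values here are placeholders.
    from build_perf import html

    context = {'title': 'Build perf test report',
               'metadata': {},    # aggregated run metadata (see report.py)
               'test_data': []}   # per-test results with measurements

    with open('report.html', 'w') as outfile:
        outfile.write(html.template.render(context))
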
diff --git a/scripts/lib/build_perf/html/measurement_chart.html b/scripts/lib/build_perf/html/measurement_chart.html
deleted file mode 100644
index ad4a93ed02..0000000000
--- a/scripts/lib/build_perf/html/measurement_chart.html
+++ /dev/null
@@ -1,100 +0,0 @@
1<script type="module">
2 // Get raw data
3 const rawData = [
4 {% for sample in measurement.samples %}
5 [{{ sample.commit_num }}, {{ sample.mean.gv_value() }}, {{ sample.start_time }}],
6 {% endfor %}
7 ];
8
9 const convertToMinute = (time) => {
10 return time[0]*60 + time[1] + time[2]/60 + time[3]/3600;
11 }
12
13 // Update value format to either minutes or leave as size value
14 const updateValue = (value) => {
15 // Assuming the array values are duration in the format [hours, minutes, seconds, milliseconds]
16 return Array.isArray(value) ? convertToMinute(value) : value
17 }
18
19 // Convert raw data to the format: [time, value]
20 const data = rawData.map(([commit, value, time]) => {
21 return [
22 // The Date object takes values in milliseconds rather than seconds. So to use a Unix timestamp we have to multiply it by 1000.
23 new Date(time * 1000).getTime(),
24 // Assuming the array values are duration in the format [hours, minutes, seconds, milliseconds]
25 updateValue(value)
26 ]
27 });
28
29 // Set chart options
30 const option = {
31 tooltip: {
32 trigger: 'axis',
33 valueFormatter: (value) => {
34 const commitNumber = rawData.filter(([commit, dataValue, time]) => updateValue(dataValue) === value)
35 if ('{{ measurement.value_type.quantity }}' == 'time') {
36 const hours = Math.floor(value/60)
37 const minutes = Math.floor(value % 60)
38 const seconds = Math.floor((value * 60) % 60)
39 return [
40 hours + ':' + minutes + ':' + seconds + ', ' +
41 'commit number: ' + commitNumber[0][0]
42 ]
43 }
44 return [
45 value.toFixed(2) + ' MB' + ', ' +
46 'commit number: ' + commitNumber[0][0]
47 ]
48 },
49
50 },
51 xAxis: {
52 type: 'time',
53 },
54 yAxis: {
55 name: '{{ measurement.value_type.quantity }}' == 'time' ? 'Duration in minutes' : 'Disk size in MB',
56 type: 'value',
57 min: function(value) {
58 return Math.round(value.min - 0.5);
59 },
60 max: function(value) {
61 return Math.round(value.max + 0.5);
62 }
63 },
64 dataZoom: [
65 {
66 type: 'slider',
67 xAxisIndex: 0,
68 filterMode: 'none'
69 },
70 ],
71 series: [
72 {
73 name: '{{ measurement.value_type.quantity }}',
74 type: 'line',
75 step: 'start',
76 symbol: 'none',
77 data: data
78 }
79 ]
80 };
81
82 // Draw chart
83 const chart_div = document.getElementById('{{ chart_elem_id }}');
84 // Set dark mode
85 let measurement_chart
86 if (window.matchMedia('(prefers-color-scheme: dark)').matches) {
87 measurement_chart= echarts.init(chart_div, 'dark', {
88 height: 320
89 });
90 } else {
91 measurement_chart= echarts.init(chart_div, null, {
92 height: 320
93 });
94 }
95 // Change chart size with browser resize
96 window.addEventListener('resize', function() {
97 measurement_chart.resize();
98 });
99 measurement_chart.setOption(option);
100</script>
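The chart script above receives sysres samples as [hours, minutes, seconds, milliseconds] lists (the format produced by TimeVal.gv_value() in report.py) and converts them to fractional minutes before plotting; disk-size samples are plain numbers and pass through unchanged. A small Python sketch mirroring the template's convertToMinute() and updateValue() helpers:

    # Mirrors the template's convertToMinute()/updateValue() helpers:
    # time samples arrive as [hours, minutes, seconds, milliseconds],
    # size samples as plain numbers.
    def update_value(value):
        if isinstance(value, list):
            h, m, s, ms = value
            return h * 60 + m + s / 60 + ms / 3600   # same formula as the JS
        return value

    print(update_value([1, 30, 15, 0]))   # 90.25 (minutes)
    print(update_value(173.4))            # 173.4 (size, unchanged)
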
diff --git a/scripts/lib/build_perf/html/report.html b/scripts/lib/build_perf/html/report.html
deleted file mode 100644
index 537ed3ee52..0000000000
--- a/scripts/lib/build_perf/html/report.html
+++ /dev/null
@@ -1,345 +0,0 @@
1<!DOCTYPE html>
2<html lang="en">
3<head>
4{# Scripts, for visualization#}
5<!--START-OF-SCRIPTS-->
6<script src=" https://cdn.jsdelivr.net/npm/echarts@5.5.0/dist/echarts.min.js "></script>
7
8{# Render measurement result charts #}
9{% for test in test_data %}
10 {% if test.status == 'SUCCESS' %}
11 {% for measurement in test.measurements %}
12 {% set chart_elem_id = test.name + '_' + measurement.name + '_chart' %}
13 {% include 'measurement_chart.html' %}
14 {% endfor %}
15 {% endif %}
16{% endfor %}
17
18<!--END-OF-SCRIPTS-->
19
20{# Styles #}
21<style>
22:root {
23 --text: #000;
24 --bg: #fff;
25 --h2heading: #707070;
26 --link: #0000EE;
27 --trtopborder: #9ca3af;
28 --trborder: #e5e7eb;
29 --chartborder: #f0f0f0;
30 }
31.meta-table {
32 font-size: 14px;
33 text-align: left;
34 border-collapse: collapse;
35}
36.summary {
37 font-size: 14px;
38 text-align: left;
39 border-collapse: collapse;
40}
41.measurement {
42 padding: 8px 0px 8px 8px;
43 border: 2px solid var(--chartborder);
44 margin: 1.5rem 0;
45}
46.details {
47 margin: 0;
48 font-size: 12px;
49 text-align: left;
50 border-collapse: collapse;
51}
52.details th {
53 padding-right: 8px;
54}
55.details.plain th {
56 font-weight: normal;
57}
58.preformatted {
59 font-family: monospace;
60 white-space: pre-wrap;
61 background-color: #f0f0f0;
62 margin-left: 10px;
63}
64.card-container {
65 border-bottom-width: 1px;
66 padding: 1.25rem 3rem;
67 box-shadow: 0 1px 3px 0 rgb(0 0 0 / 0.1), 0 1px 2px -1px rgb(0 0 0 / 0.1);
68 border-radius: 0.25rem;
69}
70body {
71 font-family: 'Helvetica', sans-serif;
72 margin: 3rem 8rem;
73 background-color: var(--bg);
74 color: var(--text);
75}
76h1 {
77 text-align: center;
78}
79h2 {
80 font-size: 1.5rem;
81 margin-bottom: 0px;
82 color: var(--h2heading);
83 padding-top: 1.5rem;
84}
85h3 {
86 font-size: 1.3rem;
87 margin: 0px;
88 color: var(--h2heading);
89 padding: 1.5rem 0;
90}
91h4 {
92 font-size: 14px;
93 font-weight: lighter;
94 line-height: 1.2rem;
95 margin: auto;
96 padding-top: 1rem;
97}
98table {
99 margin-top: 1.5rem;
100 line-height: 2rem;
101}
102tr {
103 border-bottom: 1px solid var(--trborder);
104}
105tr:first-child {
106 border-bottom: 1px solid var(--trtopborder);
107}
108tr:last-child {
109 border-bottom: none;
110}
111a {
112 text-decoration: none;
113 font-weight: bold;
114 color: var(--link);
115}
116a:hover {
117 color: #8080ff;
118}
119@media (prefers-color-scheme: dark) {
120 :root {
121 --text: #e9e8fa;
122 --bg: #0F0C28;
123 --h2heading: #B8B7CB;
124 --link: #87cefa;
125 --trtopborder: #394150;
126 --trborder: #212936;
127 --chartborder: #b1b0bf;
128 }
129}
130</style>
131
132<title>{{ title }}</title>
133</head>
134
135{% macro poky_link(commit) -%}
136 <a href="http://git.yoctoproject.org/cgit/cgit.cgi/poky/log/?id={{ commit }}">{{ commit[0:11] }}</a>
137{%- endmacro %}
138
139<body><div>
140 <h1 style="text-align: center;">Performance Test Report</h1>
141 {# Test metadata #}
142 <h2>General</h2>
143 <h4>The table provides an overview of the comparison between two selected commits from the same branch.</h4>
144 <table class="meta-table" style="width: 100%">
145 <tr>
146 <th></th>
147 <th>Current commit</th>
148 <th>Comparing with</th>
149 </tr>
150 {% for key, item in metadata.items() %}
151 <tr>
152 <th>{{ item.title }}</th>
153 {%if key == 'commit' %}
154 <td>{{ poky_link(item.value) }}</td>
155 <td>{{ poky_link(item.value_old) }}</td>
156 {% else %}
157 <td>{{ item.value }}</td>
158 <td>{{ item.value_old }}</td>
159 {% endif %}
160 </tr>
161 {% endfor %}
162 </table>
163
164 {# Test result summary #}
165 <h2>Test result summary</h2>
166 <h4>The test summary presents a thorough breakdown of each test conducted on the branch, including details such as build time and disk space consumption. Additionally, it gives insights into the average time taken for test execution, along with absolute and relative values for a better understanding.</h4>
167 <table class="summary" style="width: 100%">
168 <tr>
169 <th>Test name</th>
170 <th>Measurement description</th>
171 <th>Mean value</th>
172 <th>Absolute difference</th>
173 <th>Relative difference</th>
174 </tr>
175 {% for test in test_data %}
176 {% if test.status == 'SUCCESS' %}
177 {% for measurement in test.measurements %}
178 <tr {{ row_style }}>
179 {% if loop.index == 1 %}
180 <td><a href=#{{test.name}}>{{ test.name }}: {{ test.description }}</a></td>
181 {% else %}
182 {# add empty cell in place of the test name#}
183 <td></td>
184 {% endif %}
185 {% if measurement.absdiff > 0 %}
186 {% set result_style = "color: red" %}
187 {% elif measurement.absdiff == measurement.absdiff %}
188 {% set result_style = "color: green" %}
189 {% else %}
190 {% set result_style = "color: orange" %}
191 {%endif %}
192 {% if measurement.reldiff|abs > 2 %}
193 {% set result_style = result_style + "; font-weight: bold" %}
194 {% endif %}
195 <td>{{ measurement.description }}</td>
196 <td style="font-weight: bold">{{ measurement.value.mean }}</td>
197 <td style="{{ result_style }}">{{ measurement.absdiff_str }}</td>
198 <td style="{{ result_style }}">{{ measurement.reldiff_str }}</td>
199 </tr>
200 {% endfor %}
201 {% else %}
202 <td style="font-weight: bold; color: red;">{{test.status }}</td>
203 <td></td> <td></td> <td></td> <td></td>
204 {% endif %}
205 {% endfor %}
206 </table>
207
208 {# Detailed test results #}
209 <h2>Test details</h2>
210 <h4>The following section provides details of each test, accompanied by charts representing build time and disk usage over time or by commit number.</h4>
211 {% for test in test_data %}
212 <h3 style="color: #000;" id={{test.name}}>{{ test.name }}: {{ test.description }}</h3>
213 {% if test.status == 'SUCCESS' %}
214 <div class="card-container">
215 {% for measurement in test.measurements %}
216 <div class="measurement">
217 <h3>{{ measurement.description }}</h3>
218 <div style="font-weight:bold;">
219 <span style="font-size: 23px;">{{ measurement.value.mean }}</span>
220 <span style="font-size: 20px; margin-left: 12px">
221 {% if measurement.absdiff > 0 %}
222 <span style="color: red">
223 {% elif measurement.absdiff == measurement.absdiff %}
224 <span style="color: green">
225 {% else %}
226 <span style="color: orange">
227 {% endif %}
228 {{ measurement.absdiff_str }} ({{measurement.reldiff_str}})
229 </span></span>
230 </div>
231 {# Table for trendchart and the statistics #}
232 <table style="width: 100%">
233 <tr>
234 <td style="width: 75%">
235 {# Linechart #}
236 <div id="{{ test.name }}_{{ measurement.name }}_chart"></div>
237 </td>
238 <td>
239 {# Measurement statistics #}
240 <table class="details plain">
241 <tr>
242 <th>Test runs</th><td>{{ measurement.value.sample_cnt }}</td>
243 </tr><tr>
244 <th>-/+</th><td>-{{ measurement.value.minus }} / +{{ measurement.value.plus }}</td>
245 </tr><tr>
246 <th>Min</th><td>{{ measurement.value.min }}</td>
247 </tr><tr>
248 <th>Max</th><td>{{ measurement.value.max }}</td>
249 </tr><tr>
250 <th>Stdev</th><td>{{ measurement.value.stdev }}</td>
251 </tr><tr>
252 <th><div id="{{ test.name }}_{{ measurement.name }}_chart_png"></div></th>
253 <td></td>
254 </tr>
255 </table>
256 </td>
257 </tr>
258 </table>
259
260 {# Task and recipe summary from buildstats #}
261 {% if 'buildstats' in measurement %}
262 Task resource usage
263 <table class="details" style="width:100%">
264 <tr>
265 <th>Number of tasks</th>
266 <th>Top consumers of cputime</th>
267 </tr>
268 <tr>
269 <td style="vertical-align: top">{{ measurement.buildstats.tasks.count }} ({{ measurement.buildstats.tasks.change }})</td>
270 {# Table of most resource-hungry tasks #}
271 <td>
272 <table class="details plain">
273 {% for diff in measurement.buildstats.top_consumer|reverse %}
274 <tr>
275 <th>{{ diff.pkg }}.{{ diff.task }}</th>
276 <td>{{ '%0.0f' % diff.value2 }} s</td>
277 </tr>
278 {% endfor %}
279 </table>
280 </td>
281 </tr>
282 <tr>
283 <th>Biggest increase in cputime</th>
284 <th>Biggest decrease in cputime</th>
285 </tr>
286 <tr>
287 {# Table biggest increase in resource usage #}
288 <td>
289 <table class="details plain">
290 {% for diff in measurement.buildstats.top_increase|reverse %}
291 <tr>
292 <th>{{ diff.pkg }}.{{ diff.task }}</th>
293 <td>{{ '%+0.0f' % diff.absdiff }} s</td>
294 </tr>
295 {% endfor %}
296 </table>
297 </td>
298 {# Table biggest decrease in resource usage #}
299 <td>
300 <table class="details plain">
301 {% for diff in measurement.buildstats.top_decrease %}
302 <tr>
303 <th>{{ diff.pkg }}.{{ diff.task }}</th>
304 <td>{{ '%+0.0f' % diff.absdiff }} s</td>
305 </tr>
306 {% endfor %}
307 </table>
308 </td>
309 </tr>
310 </table>
311
312 {# Recipe version differences #}
313 {% if measurement.buildstats.ver_diff %}
314 <div style="margin-top: 16px">Recipe version changes</div>
315 <table class="details">
316 {% for head, recipes in measurement.buildstats.ver_diff.items() %}
317 <tr>
318 <th colspan="2">{{ head }}</th>
319 </tr>
320 {% for name, info in recipes|sort %}
321 <tr>
322 <td>{{ name }}</td>
323 <td>{{ info }}</td>
324 </tr>
325 {% endfor %}
326 {% endfor %}
327 </table>
328 {% else %}
329 <div style="margin-top: 16px">No recipe version changes detected</div>
330 {% endif %}
331 {% endif %}
332 </div>
333 {% endfor %}
334 </div>
335 {# Unsuccessful test #}
336 {% else %}
337 <span style="font-size: 150%; font-weight: bold; color: red;">{{ test.status }}
338 {% if test.err_type %}<span style="font-size: 75%; font-weight: normal">({{ test.err_type }})</span>{% endif %}
339 </span>
340 <div class="preformatted">{{ test.message }}</div>
341 {% endif %}
342 {% endfor %}
343</div></body>
344</html>
345
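Both the summary table and the details section colour-code a measurement the same way: red for a positive absolute difference (a regression), green for zero or a negative difference, orange when there is no comparison value, and bold when the relative difference exceeds 2%. The odd-looking 'measurement.absdiff == measurement.absdiff' comparison in the Jinja code is a NaN check. A Python sketch of the equivalent rule:

    import math

    # Equivalent of the template's result_style logic; reldiff is assumed
    # to be a percentage, matching the template's 2% bold threshold.
    def result_style(absdiff, reldiff):
        if math.isnan(absdiff):
            style = 'color: orange'   # no data to compare against
        elif absdiff > 0:
            style = 'color: red'      # slower build or bigger image
        else:
            style = 'color: green'    # improvement or no change
        if abs(reldiff) > 2:
            style += '; font-weight: bold'
        return style
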
diff --git a/scripts/lib/build_perf/report.py b/scripts/lib/build_perf/report.py
deleted file mode 100644
index f4e6a92e09..0000000000
--- a/scripts/lib/build_perf/report.py
+++ /dev/null
@@ -1,342 +0,0 @@
1#
2# Copyright (c) 2017, Intel Corporation.
3#
4# SPDX-License-Identifier: GPL-2.0-only
5#
6"""Handling of build perf test reports"""
7from collections import OrderedDict, namedtuple
8from collections.abc import Mapping
9from datetime import datetime, timezone
10from numbers import Number
11from statistics import mean, stdev, variance
12
13
14AggregateTestData = namedtuple('AggregateTestData', ['metadata', 'results'])
15
16
17def isofmt_to_timestamp(string):
18 """Convert timestamp string in ISO 8601 format into unix timestamp"""
19 if '.' in string:
20 dt = datetime.strptime(string, '%Y-%m-%dT%H:%M:%S.%f')
21 else:
22 dt = datetime.strptime(string, '%Y-%m-%dT%H:%M:%S')
23 return dt.replace(tzinfo=timezone.utc).timestamp()
24
25
26def metadata_xml_to_json(elem):
27 """Convert metadata xml into JSON format"""
28 assert elem.tag == 'metadata', "Invalid metadata file format"
29
30 def _xml_to_json(elem):
31 """Convert xml element to JSON object"""
32 out = OrderedDict()
33 for child in elem.getchildren():
34 key = child.attrib.get('name', child.tag)
35 if len(child):
36 out[key] = _xml_to_json(child)
37 else:
38 out[key] = child.text
39 return out
40 return _xml_to_json(elem)
41
42
43def results_xml_to_json(elem):
44 """Convert results xml into JSON format"""
45 rusage_fields = ('ru_utime', 'ru_stime', 'ru_maxrss', 'ru_minflt',
46 'ru_majflt', 'ru_inblock', 'ru_oublock', 'ru_nvcsw',
47 'ru_nivcsw')
48 iostat_fields = ('rchar', 'wchar', 'syscr', 'syscw', 'read_bytes',
49 'write_bytes', 'cancelled_write_bytes')
50
51 def _read_measurement(elem):
52 """Convert measurement to JSON"""
53 data = OrderedDict()
54 data['type'] = elem.tag
55 data['name'] = elem.attrib['name']
56 data['legend'] = elem.attrib['legend']
57 values = OrderedDict()
58
59 # SYSRES measurement
60 if elem.tag == 'sysres':
61 for subel in elem:
62 if subel.tag == 'time':
63 values['start_time'] = isofmt_to_timestamp(subel.attrib['timestamp'])
64 values['elapsed_time'] = float(subel.text)
65 elif subel.tag == 'rusage':
66 rusage = OrderedDict()
67 for field in rusage_fields:
68 if 'time' in field:
69 rusage[field] = float(subel.attrib[field])
70 else:
71 rusage[field] = int(subel.attrib[field])
72 values['rusage'] = rusage
73 elif subel.tag == 'iostat':
74 values['iostat'] = OrderedDict([(f, int(subel.attrib[f]))
75 for f in iostat_fields])
76 elif subel.tag == 'buildstats_file':
77 values['buildstats_file'] = subel.text
78 else:
79 raise TypeError("Unknown sysres value element '{}'".format(subel.tag))
80 # DISKUSAGE measurement
81 elif elem.tag == 'diskusage':
82 values['size'] = int(elem.find('size').text)
83 else:
84 raise Exception("Unknown measurement tag '{}'".format(elem.tag))
85 data['values'] = values
86 return data
87
88 def _read_testcase(elem):
89 """Convert testcase into JSON"""
90 assert elem.tag == 'testcase', "Expecting 'testcase' element instead of {}".format(elem.tag)
91
92 data = OrderedDict()
93 data['name'] = elem.attrib['name']
94 data['description'] = elem.attrib['description']
95 data['status'] = 'SUCCESS'
96 data['start_time'] = isofmt_to_timestamp(elem.attrib['timestamp'])
97 data['elapsed_time'] = float(elem.attrib['time'])
98 measurements = OrderedDict()
99
100 for subel in elem.getchildren():
101 if subel.tag == 'error' or subel.tag == 'failure':
102 data['status'] = subel.tag.upper()
103 data['message'] = subel.attrib['message']
104 data['err_type'] = subel.attrib['type']
105 data['err_output'] = subel.text
106 elif subel.tag == 'skipped':
107 data['status'] = 'SKIPPED'
108 data['message'] = subel.text
109 else:
110 measurements[subel.attrib['name']] = _read_measurement(subel)
111 data['measurements'] = measurements
112 return data
113
114 def _read_testsuite(elem):
115 """Convert suite to JSON"""
116 assert elem.tag == 'testsuite', \
117 "Expecting 'testsuite' element instead of {}".format(elem.tag)
118
119 data = OrderedDict()
120 if 'hostname' in elem.attrib:
121 data['tester_host'] = elem.attrib['hostname']
122 data['start_time'] = isofmt_to_timestamp(elem.attrib['timestamp'])
123 data['elapsed_time'] = float(elem.attrib['time'])
124 tests = OrderedDict()
125
126 for case in elem.getchildren():
127 tests[case.attrib['name']] = _read_testcase(case)
128 data['tests'] = tests
129 return data
130
131 # Main function
132 assert elem.tag == 'testsuites', "Invalid test report format"
133 assert len(elem) == 1, "Too many testsuites"
134
135 return _read_testsuite(elem.getchildren()[0])
136
137
138def aggregate_metadata(metadata):
139 """Aggregate metadata into one, basically a sanity check"""
140 mutable_keys = ('pretty_name', 'version_id')
141
142 def aggregate_obj(aggregate, obj, assert_str=True):
143 """Aggregate objects together"""
144 assert type(aggregate) is type(obj), \
145 "Type mismatch: {} != {}".format(type(aggregate), type(obj))
146 if isinstance(obj, Mapping):
147 assert set(aggregate.keys()) == set(obj.keys())
148 for key, val in obj.items():
149 aggregate_obj(aggregate[key], val, key not in mutable_keys)
150 elif isinstance(obj, list):
151 assert len(aggregate) == len(obj)
152 for i, val in enumerate(obj):
153 aggregate_obj(aggregate[i], val)
154 elif not isinstance(obj, str) or (isinstance(obj, str) and assert_str):
155 assert aggregate == obj, "Data mismatch {} != {}".format(aggregate, obj)
156
157 if not metadata:
158 return {}
159
160 # Do the aggregation
161 aggregate = metadata[0].copy()
162 for testrun in metadata[1:]:
163 aggregate_obj(aggregate, testrun)
164 aggregate['testrun_count'] = len(metadata)
165 return aggregate
166
167
168def aggregate_data(data):
169 """Aggregate multiple test results JSON structures into one"""
170
171 mutable_keys = ('status', 'message', 'err_type', 'err_output')
172
173 class SampleList(list):
174 """Container for numerical samples"""
175 pass
176
177 def new_aggregate_obj(obj):
178 """Create new object for aggregate"""
179 if isinstance(obj, Number):
180 new_obj = SampleList()
181 new_obj.append(obj)
182 elif isinstance(obj, str):
183 new_obj = obj
184 else:
185 # Lists and dicts are kept as is
186 new_obj = obj.__class__()
187 aggregate_obj(new_obj, obj)
188 return new_obj
189
190 def aggregate_obj(aggregate, obj, assert_str=True):
191 """Recursive "aggregation" of JSON objects"""
192 if isinstance(obj, Number):
193 assert isinstance(aggregate, SampleList)
194 aggregate.append(obj)
195 return
196
197 assert type(aggregate) == type(obj), \
198 "Type mismatch: {} != {}".format(type(aggregate), type(obj))
199 if isinstance(obj, Mapping):
200 for key, val in obj.items():
201 if not key in aggregate:
202 aggregate[key] = new_aggregate_obj(val)
203 else:
204 aggregate_obj(aggregate[key], val, key not in mutable_keys)
205 elif isinstance(obj, list):
206 for i, val in enumerate(obj):
207 if i >= len(aggregate):
208 aggregate.append(new_aggregate_obj(val))
209 else:
210 aggregate_obj(aggregate[i], val)
211 elif isinstance(obj, str):
212 # Sanity check for data
213 if assert_str:
214 assert aggregate == obj, "Data mismatch {} != {}".format(aggregate, obj)
215 else:
216 raise Exception("BUG: unable to aggregate '{}' ({})".format(type(obj), str(obj)))
217
218 if not data:
219 return {}
220
221 # Do the aggregation
222 aggregate = data[0].__class__()
223 for testrun in data:
224 aggregate_obj(aggregate, testrun)
225 return aggregate
226
227
228class MeasurementVal(float):
229 """Base class representing measurement values"""
230 gv_data_type = 'number'
231
232 def gv_value(self):
233 """Value formatting for visualization"""
234 if self != self:
235 return "null"
236 else:
237 return self
238
239
240class TimeVal(MeasurementVal):
241 """Class representing time values"""
242 quantity = 'time'
243 gv_title = 'elapsed time'
244 gv_data_type = 'timeofday'
245
246 def hms(self):
247 """Split time into hours, minutes and seconeds"""
248 hhh = int(abs(self) / 3600)
249 mmm = int((abs(self) % 3600) / 60)
250 sss = abs(self) % 60
251 return hhh, mmm, sss
252
253 def __str__(self):
254 if self != self:
255 return "nan"
256 hh, mm, ss = self.hms()
257 sign = '-' if self < 0 else ''
258 if hh > 0:
259 return '{}{:d}:{:02d}:{:02.0f}'.format(sign, hh, mm, ss)
260 elif mm > 0:
261 return '{}{:d}:{:04.1f}'.format(sign, mm, ss)
262 elif ss > 1:
263 return '{}{:.1f} s'.format(sign, ss)
264 else:
265 return '{}{:.2f} s'.format(sign, ss)
266
267 def gv_value(self):
268 """Value formatting for visualization"""
269 if self != self:
270 return "null"
271 hh, mm, ss = self.hms()
272 return [hh, mm, int(ss), int(ss*1000) % 1000]
273
274
275class SizeVal(MeasurementVal):
276 """Class representing time values"""
277 quantity = 'size'
278 gv_title = 'size in MiB'
279 gv_data_type = 'number'
280
281 def __str__(self):
282 if self != self:
283 return "nan"
284 if abs(self) < 1024:
285 return '{:.1f} kiB'.format(self)
286 elif abs(self) < 1048576:
287 return '{:.2f} MiB'.format(self / 1024)
288 else:
289 return '{:.2f} GiB'.format(self / 1048576)
290
291 def gv_value(self):
292 """Value formatting for visualization"""
293 if self != self:
294 return "null"
295 return self / 1024
296
297def measurement_stats(meas, prefix='', time=0):
298 """Get statistics of a measurement"""
299 if not meas:
300 return {prefix + 'sample_cnt': 0,
301 prefix + 'mean': MeasurementVal('nan'),
302 prefix + 'stdev': MeasurementVal('nan'),
303 prefix + 'variance': MeasurementVal('nan'),
304 prefix + 'min': MeasurementVal('nan'),
305 prefix + 'max': MeasurementVal('nan'),
306 prefix + 'minus': MeasurementVal('nan'),
307 prefix + 'plus': MeasurementVal('nan')}
308
309 stats = {'name': meas['name']}
310 if meas['type'] == 'sysres':
311 val_cls = TimeVal
312 values = meas['values']['elapsed_time']
313 elif meas['type'] == 'diskusage':
314 val_cls = SizeVal
315 values = meas['values']['size']
316 else:
317 raise Exception("Unknown measurement type '{}'".format(meas['type']))
318 stats['val_cls'] = val_cls
319 stats['quantity'] = val_cls.quantity
320 stats[prefix + 'sample_cnt'] = len(values)
321
322 # Add start time for both type sysres and disk usage
323 start_time = time
324 mean_val = val_cls(mean(values))
325 min_val = val_cls(min(values))
326 max_val = val_cls(max(values))
327
328 stats[prefix + 'mean'] = mean_val
329 if len(values) > 1:
330 stats[prefix + 'stdev'] = val_cls(stdev(values))
331 stats[prefix + 'variance'] = val_cls(variance(values))
332 else:
333 stats[prefix + 'stdev'] = float('nan')
334 stats[prefix + 'variance'] = float('nan')
335 stats[prefix + 'min'] = min_val
336 stats[prefix + 'max'] = max_val
337 stats[prefix + 'minus'] = val_cls(mean_val - min_val)
338 stats[prefix + 'plus'] = val_cls(max_val - mean_val)
339 stats[prefix + 'start_time'] = start_time
340
341 return stats
342
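report.py turned the raw results XML into nested dicts, merged several test runs so that numeric leaves became lists of samples, and computed per-measurement statistics for the report. A hedged usage sketch with two made-up sysres runs; only the fields measurement_stats() actually reads are shown:

    # Sketch of the deleted report.py flow: aggregate two runs, then
    # summarise one measurement of the aggregate.
    from build_perf.report import aggregate_data, measurement_stats

    runs = [
        {'tests': {'test1': {'measurements': {'build': {
            'type': 'sysres', 'name': 'build',
            'values': {'elapsed_time': 120.0}}}}}},
        {'tests': {'test1': {'measurements': {'build': {
            'type': 'sysres', 'name': 'build',
            'values': {'elapsed_time': 126.0}}}}}},
    ]

    agg = aggregate_data(runs)          # elapsed_time becomes [120.0, 126.0]
    meas = agg['tests']['test1']['measurements']['build']
    stats = measurement_stats(meas)     # mean, stdev, min, max, minus, plus
    print(stats['mean'])                # a TimeVal, prints "2:03.0"
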
diff --git a/scripts/lib/build_perf/scrape-html-report.js b/scripts/lib/build_perf/scrape-html-report.js
deleted file mode 100644
index 05a1f57001..0000000000
--- a/scripts/lib/build_perf/scrape-html-report.js
+++ /dev/null
@@ -1,56 +0,0 @@
1var fs = require('fs');
2var system = require('system');
3var page = require('webpage').create();
4
5// Examine console log for message from chart drawing
6page.onConsoleMessage = function(msg) {
7 console.log(msg);
8 if (msg === "ALL CHARTS READY") {
9 window.charts_ready = true;
10 }
11 else if (msg.slice(0, 11) === "CHART READY") {
12 var chart_id = msg.split(" ")[2];
13 console.log('grabbing ' + chart_id);
14 var png_data = page.evaluate(function (chart_id) {
15 var chart_div = document.getElementById(chart_id + '_png');
16 return chart_div.outerHTML;
17 }, chart_id);
18 fs.write(args[2] + '/' + chart_id + '.png', png_data, 'w');
19 }
20};
21
22// Check command line arguments
23var args = system.args;
24if (args.length != 3) {
25 console.log("USAGE: " + args[0] + " REPORT_HTML OUT_DIR\n");
26 phantom.exit(1);
27}
28
29// Open the web page
30page.open(args[1], function(status) {
31 if (status == 'fail') {
32 console.log("Failed to open file '" + args[1] + "'");
33 phantom.exit(1);
34 }
35});
36
37// Check status every 100 ms
38interval = window.setInterval(function () {
39 //console.log('waiting');
40 if (window.charts_ready) {
41 clearTimeout(timer);
42 clearInterval(interval);
43
44 var fname = args[1].replace(/\/+$/, "").split("/").pop()
45 console.log("saving " + fname);
46 fs.write(args[2] + '/' + fname, page.content, 'w');
47 phantom.exit(0);
48 }
49}, 100);
50
51// Time-out after 10 seconds
52timer = window.setTimeout(function () {
53 clearInterval(interval);
54 console.log("ERROR: timeout");
55 phantom.exit(1);
56}, 10000);
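scrape-html-report.js was a PhantomJS script: as its usage string shows, it took the report HTML and an output directory, waited for the charts to log 'CHART READY <id>' and 'ALL CHARTS READY' on the console, and then wrote the per-chart PNG markup and the fully rendered page into the output directory. A hedged sketch of driving it from Python, assuming a phantomjs binary on PATH:

    # Hypothetical invocation of the deleted scraper; the two positional
    # arguments match its own usage string (REPORT_HTML OUT_DIR).
    import subprocess

    subprocess.run(['phantomjs',
                    'scripts/lib/build_perf/scrape-html-report.js',
                    'report.html', 'out_dir'],
                   check=True)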