diff options
-rw-r--r-- | scripts/lib/build_perf/__init__.py | 31 | ||||
-rw-r--r-- | scripts/lib/build_perf/html.py | 19 | ||||
-rw-r--r-- | scripts/lib/build_perf/html/measurement_chart.html | 50 | ||||
-rw-r--r-- | scripts/lib/build_perf/html/report.html | 209 | ||||
-rw-r--r-- | scripts/lib/build_perf/report.py | 342 | ||||
-rwxr-xr-x | scripts/oe-build-perf-report | 531 |
6 files changed, 1182 insertions, 0 deletions
diff --git a/scripts/lib/build_perf/__init__.py b/scripts/lib/build_perf/__init__.py new file mode 100644 index 0000000000..1f8b729078 --- /dev/null +++ b/scripts/lib/build_perf/__init__.py | |||
@@ -0,0 +1,31 @@ | |||
1 | # | ||
2 | # Copyright (c) 2017, Intel Corporation. | ||
3 | # | ||
4 | # This program is free software; you can redistribute it and/or modify it | ||
5 | # under the terms and conditions of the GNU General Public License, | ||
6 | # version 2, as published by the Free Software Foundation. | ||
7 | # | ||
8 | # This program is distributed in the hope it will be useful, but WITHOUT | ||
9 | # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
10 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
11 | # more details. | ||
12 | # | ||
13 | """Build performance test library functions""" | ||
14 | |||
def print_table(rows, row_fmt=None):
    """Print data table

    Prints 'rows' (a list of equal-length rows) as an aligned text table on
    stdout. 'row_fmt' optionally gives one format string per column; each is
    formatted with the cell value and a 'wid' keyword holding the column
    width. Defaults to left-justified columns. Empty input prints nothing.
    """
    if not rows:
        return
    if not row_fmt:
        row_fmt = ['{:{wid}} '] * len(rows[0])

    # First pass over the data: the width of each column is the width of
    # its widest cell (values are measured via their str() representation)
    widths = [0] * len(row_fmt)
    for row in rows:
        for col, cell in enumerate(row):
            cell_wid = len(str(cell))
            if cell_wid > widths[col]:
                widths[col] = cell_wid

    # Second pass: format and print every row with the computed widths
    for row in rows:
        cells = [row_fmt[col].format(cell, wid=widths[col])
                 for col, cell in enumerate(row)]
        print(*cells)
31 | |||
diff --git a/scripts/lib/build_perf/html.py b/scripts/lib/build_perf/html.py new file mode 100644 index 0000000000..578bb162ee --- /dev/null +++ b/scripts/lib/build_perf/html.py | |||
@@ -0,0 +1,19 @@ | |||
1 | # | ||
2 | # Copyright (c) 2017, Intel Corporation. | ||
3 | # | ||
4 | # This program is free software; you can redistribute it and/or modify it | ||
5 | # under the terms and conditions of the GNU General Public License, | ||
6 | # version 2, as published by the Free Software Foundation. | ||
7 | # | ||
8 | # This program is distributed in the hope it will be useful, but WITHOUT | ||
9 | # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
10 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
11 | # more details. | ||
12 | # | ||
13 | """Helper module for HTML reporting""" | ||
14 | from jinja2 import Environment, PackageLoader | ||
15 | |||
16 | |||
# Jinja2 environment loading templates from the 'html' directory of the
# 'build_perf' package (i.e. scripts/lib/build_perf/html/)
env = Environment(loader=PackageLoader('build_perf', 'html'))

# The top-level report template, pre-loaded for importers of this module
template = env.get_template('report.html')
diff --git a/scripts/lib/build_perf/html/measurement_chart.html b/scripts/lib/build_perf/html/measurement_chart.html new file mode 100644 index 0000000000..26fe1453c0 --- /dev/null +++ b/scripts/lib/build_perf/html/measurement_chart.html | |||
@@ -0,0 +1,50 @@ | |||
{# Renders the javascript that draws one Google LineChart for a single
   measurement. Included from report.html once per measurement; expects
   'chart_elem_id', 'chart_opts', 'measurement' and (for the final chart)
   'last_chart' in the template context. #}
<script type="text/javascript">
  google.charts.setOnLoadCallback(drawChart_{{ chart_elem_id }});
  function drawChart_{{ chart_elem_id }}() {
    var data = new google.visualization.DataTable();

    // Chart options
    var options = {
      theme : 'material',
      legend: 'none',
      hAxis: { format: '', title: 'Commit number',
               minValue: {{ chart_opts.haxis.min }},
               maxValue: {{ chart_opts.haxis.max }} },
      {% if measurement.type == 'time' %}
      vAxis: { format: 'h:mm:ss' },
      {% else %}
      vAxis: { format: '' },
      {% endif %}
      pointSize: 5,
      chartArea: { left: 80, right: 15 },
    };

    // Define data columns
    data.addColumn('number', 'Commit');
    data.addColumn('{{ measurement.value_type.gv_data_type }}',
                   '{{ measurement.value_type.quantity }}');
    // Add data rows: one [commit number, mean value] pair per sample
    data.addRows([
      {% for sample in measurement.samples %}
      [{{ sample.commit_num }}, {{ sample.mean.gv_value() }}],
      {% endfor %}
    ]);

    // Finally, draw the chart
    chart_div = document.getElementById('{{ chart_elem_id }}');
    var chart = new google.visualization.LineChart(chart_div);
    google.visualization.events.addListener(chart, 'ready', function () {
      //chart_div = document.getElementById('{{ chart_elem_id }}');
      //chart_div.innerHTML = '<img src="' + chart.getImageURI() + '">';
      // Once rendered, turn the placeholder div into a PNG download link
      png_div = document.getElementById('{{ chart_elem_id }}_png');
      png_div.outerHTML = '<a id="{{ chart_elem_id }}_png" href="' + chart.getImageURI() + '">PNG</a>';
      // Console markers, presumably consumed by an external scraper to
      // detect render completion -- TODO confirm before removing
      console.log("CHART READY: {{ chart_elem_id }}");
      {% if last_chart == true %}
      console.log("ALL CHARTS READY");
      {% endif %}
      //console.log(chart_div.innerHTML);
    });
    chart.draw(data, options);
  }
</script>
50 | |||
diff --git a/scripts/lib/build_perf/html/report.html b/scripts/lib/build_perf/html/report.html new file mode 100644 index 0000000000..e42871177d --- /dev/null +++ b/scripts/lib/build_perf/html/report.html | |||
@@ -0,0 +1,209 @@ | |||
<!DOCTYPE html>
<html lang="en">
<head>
{# Scripts, for visualization #}
<!--START-OF-SCRIPTS-->
<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
<script type="text/javascript">
google.charts.load('current', {'packages':['corechart']});
</script>

{# Render measurement result charts #}
{% for test in test_data %}
{% set test_loop = loop %}
{% if test.status == 'SUCCESS' %}
{% for measurement in test.measurements %}
{% set chart_elem_id = test.name + '_' + measurement.name + '_chart' %}
{% if test_loop.last and loop.last %}
{% set last_chart = true %}
{% endif %}
{% include 'measurement_chart.html' %}
{% endfor %}
{% endif %}
{% endfor %}

<!--END-OF-SCRIPTS-->

{# Styles #}
<style>
.meta-table {
  font-size: 14px;
  text-align: left;
  border-collapse: collapse;
}
.meta-table tr:nth-child(even){background-color: #f2f2f2}
{# FIX: selector was missing the leading class dot ('meta-table th'), so
   the padding rule never matched the metadata table cells #}
.meta-table th, .meta-table td {
  padding: 4px;
}
.summary {
  margin: 0;
  font-size: 14px;
  text-align: left;
  border-collapse: collapse;
}
{# FIX: was 'summary th, .meta-table td' -- missing dot plus a copy-pasted
   class name; pad the summary table's own cells instead #}
.summary th, .summary td {
  padding: 4px;
}
.measurement {
  padding: 8px 0px 8px 8px;
  border: 2px solid #f0f0f0;
  margin-bottom: 10px;
}
.details {
  margin: 0;
  font-size: 12px;
  text-align: left;
  border-collapse: collapse;
}
.details th {
  font-weight: normal;
  padding-right: 8px;
}
.preformatted {
  font-family: monospace;
  white-space: pre-wrap;
  background-color: #f0f0f0;
  margin-left: 10px;
}
hr {
  color: #f0f0f0;
}
h2 {
  font-size: 20px;
  margin-bottom: 0px;
  color: #707070;
}
h3 {
  font-size: 16px;
  margin: 0px;
  color: #707070;
}
</style>

<title>{{ title }}</title>
</head>

{% macro poky_link(commit) -%}
<a href="http://git.yoctoproject.org/cgit/cgit.cgi/poky/log/?id={{ commit }}">{{ commit[0:11] }}</a>
{%- endmacro %}

<body><div style="width: 700px">
{# Test metadata #}
<h2>General</h2>
<hr>
<table class="meta-table" style="width: 100%">
<tr>
<th></th>
<th>Current commit</th>
<th>Comparing with</th>
</tr>
{% for key, item in metadata.items() %}
<tr>
<th>{{ item.title }}</th>
{% if key == 'commit' %}
<td>{{ poky_link(item.value) }}</td>
<td>{{ poky_link(item.value_old) }}</td>
{% else %}
<td>{{ item.value }}</td>
<td>{{ item.value_old }}</td>
{% endif %}
</tr>
{% endfor %}
</table>

{# Test result summary #}
<h2>Test result summary</h2>
<hr>
<table class="summary" style="width: 100%">
{% for test in test_data %}
{% if loop.index is even %}
{% set row_style = 'style="background-color: #f2f2f2"' %}
{% else %}
{% set row_style = 'style="background-color: #ffffff"' %}
{% endif %}
<tr {{ row_style }}><td>{{ test.name }}: {{ test.description }}</td>
{% if test.status == 'SUCCESS' %}
{% for measurement in test.measurements %}
{# add empty cell in place of the test name #}
{% if loop.index > 1 %}<td></td>{% endif %}
{# NaN compares unequal to itself: red = regression, green = valid diff,
   orange = no previous value to compare against #}
{% if measurement.absdiff > 0 %}
{% set result_style = "color: red" %}
{% elif measurement.absdiff == measurement.absdiff %}
{% set result_style = "color: green" %}
{% else %}
{% set result_style = "color: orange" %}
{% endif %}
<td>{{ measurement.description }}</td>
<td style="font-weight: bold">{{ measurement.value.mean }}</td>
<td style="{{ result_style }}">{{ measurement.absdiff_str }}</td>
<td style="{{ result_style }}">{{ measurement.reldiff }}</td>
</tr><tr {{ row_style }}>
{% endfor %}
{% else %}
<td style="font-weight: bold; color: red;">{{ test.status }}</td>
<td></td> <td></td> <td></td> <td></td>
{% endif %}
</tr>
{% endfor %}
</table>

{# Detailed test results #}
{% for test in test_data %}
<h2>{{ test.name }}: {{ test.description }}</h2>
<hr>
{% if test.status == 'SUCCESS' %}
{% for measurement in test.measurements %}
<div class="measurement">
<h3>{{ measurement.description }}</h3>
<div style="font-weight:bold;">
<span style="font-size: 23px;">{{ measurement.value.mean }}</span>
<span style="font-size: 20px; margin-left: 12px">
{% if measurement.absdiff > 0 %}
<span style="color: red">
{% elif measurement.absdiff == measurement.absdiff %}
<span style="color: green">
{% else %}
<span style="color: orange">
{% endif %}
{{ measurement.absdiff_str }} ({{ measurement.reldiff }})
</span></span>
</div>
<table style="width: 100%">
<tr>
<td style="width: 75%">
{# Linechart #}
<div id="{{ test.name }}_{{ measurement.name }}_chart"></div>
</td>
<td>
{# Measurement statistics #}
<table class="details">
<tr>
<th>Test runs</th><td>{{ measurement.value.sample_cnt }}</td>
</tr><tr>
<th>-/+</th><td>-{{ measurement.value.minus }} / +{{ measurement.value.plus }}</td>
</tr><tr>
<th>Min</th><td>{{ measurement.value.min }}</td>
</tr><tr>
<th>Max</th><td>{{ measurement.value.max }}</td>
</tr><tr>
<th>Stdev</th><td>{{ measurement.value.stdev }}</td>
</tr><tr>
{# Placeholder replaced by a PNG link by measurement_chart.html js #}
<th><div id="{{ test.name }}_{{ measurement.name }}_chart_png"></div></th>
</tr>
</table>
</td>
</tr>
</table>
</div>
{% endfor %}
{# Unsuccessful test #}
{% else %}
<span style="font-size: 150%; font-weight: bold; color: red;">{{ test.status }}
{% if test.err_type %}<span style="font-size: 75%; font-weight: normal">({{ test.err_type }})</span>{% endif %}
</span>
<div class="preformatted">{{ test.message }}</div>
{% endif %}
{% endfor %}
</div></body>
</html>
209 | |||
diff --git a/scripts/lib/build_perf/report.py b/scripts/lib/build_perf/report.py new file mode 100644 index 0000000000..eb00ccca2d --- /dev/null +++ b/scripts/lib/build_perf/report.py | |||
@@ -0,0 +1,342 @@ | |||
1 | # | ||
2 | # Copyright (c) 2017, Intel Corporation. | ||
3 | # | ||
4 | # This program is free software; you can redistribute it and/or modify it | ||
5 | # under the terms and conditions of the GNU General Public License, | ||
6 | # version 2, as published by the Free Software Foundation. | ||
7 | # | ||
8 | # This program is distributed in the hope it will be useful, but WITHOUT | ||
9 | # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
10 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
11 | # more details. | ||
12 | # | ||
13 | """Handling of build perf test reports""" | ||
14 | from collections import OrderedDict, Mapping | ||
15 | from datetime import datetime, timezone | ||
16 | from numbers import Number | ||
17 | from statistics import mean, stdev, variance | ||
18 | |||
19 | |||
def isofmt_to_timestamp(string):
    """Convert timestamp string in ISO 8601 format into unix timestamp

    Accepts timestamps with or without a fractional-seconds part; the
    value is interpreted as UTC.
    """
    # Pick the strptime pattern based on whether fractional seconds exist
    fmt = '%Y-%m-%dT%H:%M:%S.%f' if '.' in string else '%Y-%m-%dT%H:%M:%S'
    parsed = datetime.strptime(string, fmt)
    return parsed.replace(tzinfo=timezone.utc).timestamp()
27 | |||
28 | |||
def metadata_xml_to_json(elem):
    """Convert metadata xml into JSON format

    'elem' must be the root <metadata> Element. Each child element becomes
    a key (its 'name' attribute when present, otherwise its tag); elements
    with children recurse into nested objects, leaf elements map to their
    text. Key order follows document order.
    """
    assert elem.tag == 'metadata', "Invalid metadata file format"

    def _xml_to_json(elem):
        """Convert xml element to JSON object"""
        out = OrderedDict()
        # Iterate the element directly: Element.getchildren() was
        # deprecated since Python 3.2 and removed in 3.9
        for child in elem:
            key = child.attrib.get('name', child.tag)
            if len(child):
                out[key] = _xml_to_json(child)
            else:
                out[key] = child.text
        return out
    return _xml_to_json(elem)
44 | |||
45 | |||
def results_xml_to_json(elem):
    """Convert results xml into JSON format

    'elem' must be the root <testsuites> Element of a JUnit-style build
    perf results file containing exactly one <testsuite>. Returns a nested
    OrderedDict with suite attributes, per-testcase status/timing data and
    parsed sysres/diskusage measurements.
    """
    rusage_fields = ('ru_utime', 'ru_stime', 'ru_maxrss', 'ru_minflt',
                     'ru_majflt', 'ru_inblock', 'ru_oublock', 'ru_nvcsw',
                     'ru_nivcsw')
    iostat_fields = ('rchar', 'wchar', 'syscr', 'syscw', 'read_bytes',
                     'write_bytes', 'cancelled_write_bytes')

    def _read_measurement(elem):
        """Convert measurement to JSON"""
        data = OrderedDict()
        data['type'] = elem.tag
        data['name'] = elem.attrib['name']
        data['legend'] = elem.attrib['legend']
        values = OrderedDict()

        # SYSRES measurement
        if elem.tag == 'sysres':
            for subel in elem:
                if subel.tag == 'time':
                    values['start_time'] = isofmt_to_timestamp(subel.attrib['timestamp'])
                    values['elapsed_time'] = float(subel.text)
                elif subel.tag == 'rusage':
                    rusage = OrderedDict()
                    for field in rusage_fields:
                        # ru_utime/ru_stime are floats, the rest are ints
                        if 'time' in field:
                            rusage[field] = float(subel.attrib[field])
                        else:
                            rusage[field] = int(subel.attrib[field])
                    values['rusage'] = rusage
                elif subel.tag == 'iostat':
                    values['iostat'] = OrderedDict([(f, int(subel.attrib[f]))
                                                    for f in iostat_fields])
                elif subel.tag == 'buildstats_file':
                    values['buildstats_file'] = subel.text
                else:
                    raise TypeError("Unknown sysres value element '{}'".format(subel.tag))
        # DISKUSAGE measurement
        elif elem.tag == 'diskusage':
            values['size'] = int(elem.find('size').text)
        else:
            raise Exception("Unknown measurement tag '{}'".format(elem.tag))
        data['values'] = values
        return data

    def _read_testcase(elem):
        """Convert testcase into JSON"""
        assert elem.tag == 'testcase', "Expecting 'testcase' element instead of {}".format(elem.tag)

        data = OrderedDict()
        data['name'] = elem.attrib['name']
        data['description'] = elem.attrib['description']
        data['status'] = 'SUCCESS'
        data['start_time'] = isofmt_to_timestamp(elem.attrib['timestamp'])
        data['elapsed_time'] = float(elem.attrib['time'])
        measurements = OrderedDict()

        # Iterate directly instead of the deprecated Element.getchildren()
        # (removed in Python 3.9)
        for subel in elem:
            if subel.tag == 'error' or subel.tag == 'failure':
                data['status'] = subel.tag.upper()
                data['message'] = subel.attrib['message']
                data['err_type'] = subel.attrib['type']
                data['err_output'] = subel.text
            elif subel.tag == 'skipped':
                data['status'] = 'SKIPPED'
                data['message'] = subel.text
            else:
                measurements[subel.attrib['name']] = _read_measurement(subel)
        data['measurements'] = measurements
        return data

    def _read_testsuite(elem):
        """Convert suite to JSON"""
        assert elem.tag == 'testsuite', \
            "Expecting 'testsuite' element instead of {}".format(elem.tag)

        data = OrderedDict()
        if 'hostname' in elem.attrib:
            data['tester_host'] = elem.attrib['hostname']
        data['start_time'] = isofmt_to_timestamp(elem.attrib['timestamp'])
        data['elapsed_time'] = float(elem.attrib['time'])
        tests = OrderedDict()

        # Direct iteration instead of the removed Element.getchildren()
        for case in elem:
            tests[case.attrib['name']] = _read_testcase(case)
        data['tests'] = tests
        return data

    # Main function
    assert elem.tag == 'testsuites', "Invalid test report format"
    assert len(elem) == 1, "Too many testsuites"

    # Indexing replaces the removed Element.getchildren()[0]
    return _read_testsuite(elem[0])
139 | |||
140 | |||
def aggregate_metadata(metadata):
    """Aggregate metadata into one, basically a sanity check

    Takes a list of per-testrun metadata dicts and returns a single dict
    based on the first one, after verifying that every other run carries
    identical values (strings listed in 'mutable_keys' are allowed to
    differ). A 'testrun_count' key is added to the result.
    """
    mutable_keys = ('pretty_name', 'version_id')

    def check_merge(acc, obj, assert_str=True):
        """Recursively verify that obj matches the accumulated metadata"""
        assert type(acc) is type(obj), \
            "Type mismatch: {} != {}".format(type(acc), type(obj))
        if isinstance(obj, Mapping):
            assert set(acc.keys()) == set(obj.keys())
            for key, val in obj.items():
                check_merge(acc[key], val, key not in mutable_keys)
        elif isinstance(obj, list):
            assert len(acc) == len(obj)
            for i, val in enumerate(obj):
                check_merge(acc[i], val)
        elif not isinstance(obj, str) or assert_str:
            # Scalars always compared; strings only outside mutable_keys
            assert acc == obj, "Data mismatch {} != {}".format(acc, obj)

    if not metadata:
        return {}

    # Seed with the first run, then sanity-check the rest against it
    aggregate = metadata[0].copy()
    for testrun in metadata[1:]:
        check_merge(aggregate, testrun)
    aggregate['testrun_count'] = len(metadata)
    return aggregate
169 | |||
170 | |||
def aggregate_data(data):
    """Aggregate multiple test results JSON structures into one

    Takes a list of per-testrun result dicts and merges them: every
    numeric leaf becomes a SampleList holding the value from each run,
    while the dict/list structure around it is preserved. String values
    are sanity-checked to be identical across runs, except for the keys
    in 'mutable_keys' which may legitimately differ between runs.
    """
    mutable_keys = ('status', 'message', 'err_type', 'err_output')

    class SampleList(list):
        """Container for numerical samples"""
        pass

    def new_aggregate_obj(obj):
        """Create new object for aggregate"""
        if isinstance(obj, Number):
            new_obj = SampleList()
            new_obj.append(obj)
        elif isinstance(obj, str):
            new_obj = obj
        else:
            # Lists and dicts are kept as is (same class, filled recursively)
            new_obj = obj.__class__()
            aggregate_obj(new_obj, obj)
        return new_obj

    def aggregate_obj(aggregate, obj, assert_str=True):
        """Recursive "aggregation" of JSON objects"""
        if isinstance(obj, Number):
            assert isinstance(aggregate, SampleList)
            aggregate.append(obj)
            return

        assert type(aggregate) == type(obj), \
            "Type mismatch: {} != {}".format(type(aggregate), type(obj))
        if isinstance(obj, Mapping):
            for key, val in obj.items():
                if key not in aggregate:
                    aggregate[key] = new_aggregate_obj(val)
                else:
                    aggregate_obj(aggregate[key], val, key not in mutable_keys)
        elif isinstance(obj, list):
            for i, val in enumerate(obj):
                if i >= len(aggregate):
                    # BUG FIX: was 'aggregate[key] = new_aggregate_obj(val)'
                    # which referenced 'key' from the Mapping branch and
                    # raised NameError whenever a later run had a longer
                    # list; grow the aggregate list instead
                    aggregate.append(new_aggregate_obj(val))
                else:
                    aggregate_obj(aggregate[i], val)
        elif isinstance(obj, str):
            # Sanity check for data
            if assert_str:
                assert aggregate == obj, "Data mismatch {} != {}".format(aggregate, obj)
        else:
            raise Exception("BUG: unable to aggregate '{}' ({})".format(type(obj), str(obj)))

    if not data:
        return {}

    # Do the aggregation
    aggregate = data[0].__class__()
    for testrun in data:
        aggregate_obj(aggregate, testrun)
    return aggregate
229 | |||
230 | |||
class MeasurementVal(float):
    """Base class representing measurement values

    A float subclass carrying presentation helpers for the Google
    Visualization charts used in the HTML report.
    """
    # Google Visualization column data type for this value
    gv_data_type = 'number'

    def gv_value(self):
        """Value formatting for visualization"""
        # NaN is the only value unequal to itself; charts want "null"
        return "null" if self != self else self
241 | |||
242 | |||
class TimeVal(MeasurementVal):
    """Class representing time values (in seconds)"""
    quantity = 'time'
    gv_title = 'elapsed time'
    gv_data_type = 'timeofday'

    def hms(self):
        """Split time into hours, minutes and seconds"""
        total = abs(self)
        hours = int(total / 3600)
        minutes = int(total % 3600 / 60)
        seconds = total % 60
        return hours, minutes, seconds

    def __str__(self):
        if self != self:
            return "nan"
        hh, mm, ss = self.hms()
        sign = '-' if self < 0 else ''
        # Pick the most compact human-readable form for the magnitude
        if hh > 0:
            return '{}{:d}:{:02d}:{:02.0f}'.format(sign, hh, mm, ss)
        if mm > 0:
            return '{}{:d}:{:04.1f}'.format(sign, mm, ss)
        if ss > 1:
            return '{}{:.1f} s'.format(sign, ss)
        return '{}{:.2f} s'.format(sign, ss)

    def gv_value(self):
        """Value formatting for visualization"""
        if self != self:
            return "null"
        hh, mm, ss = self.hms()
        # 'timeofday' columns take [hours, minutes, seconds, milliseconds]
        return [hh, mm, int(ss), int(ss * 1000) % 1000]
276 | |||
277 | |||
class SizeVal(MeasurementVal):
    """Class representing size values (stored in kiB)"""
    quantity = 'size'
    gv_title = 'size in MiB'
    gv_data_type = 'number'

    def __str__(self):
        if self != self:
            return "nan"
        # Scale the kiB value to the most readable binary unit
        magnitude = abs(self)
        if magnitude < 1024:
            return '{:.1f} kiB'.format(self)
        if magnitude < 1048576:
            return '{:.2f} MiB'.format(self / 1024)
        return '{:.2f} GiB'.format(self / 1048576)

    def gv_value(self):
        """Value formatting for visualization"""
        # Charts are plotted in MiB (see gv_title)
        return "null" if self != self else self / 1024
299 | |||
def measurement_stats(meas, prefix=''):
    """Get statistics of a measurement

    Takes an aggregated measurement dict (with numeric sample lists under
    'values') and returns a dict of statistics, each key prefixed with
    'prefix'. An empty/missing measurement yields all-NaN placeholders.
    """
    if not meas:
        # Placeholder: no samples at all
        stats = {prefix + name: MeasurementVal('nan') for name in
                 ('mean', 'stdev', 'variance', 'min', 'max', 'minus', 'plus')}
        stats[prefix + 'sample_cnt'] = 0
        return stats

    stats = {'name': meas['name']}
    # Pick the value class and the sample series by measurement type
    if meas['type'] == 'sysres':
        val_cls, values = TimeVal, meas['values']['elapsed_time']
    elif meas['type'] == 'diskusage':
        val_cls, values = SizeVal, meas['values']['size']
    else:
        raise Exception("Unknown measurement type '{}'".format(meas['type']))
    stats['val_cls'] = val_cls
    stats['quantity'] = val_cls.quantity
    stats[prefix + 'sample_cnt'] = len(values)

    mean_val = val_cls(mean(values))
    min_val = val_cls(min(values))
    max_val = val_cls(max(values))

    stats[prefix + 'mean'] = mean_val
    # stdev/variance need at least two samples
    if len(values) > 1:
        stats[prefix + 'stdev'] = val_cls(stdev(values))
        stats[prefix + 'variance'] = val_cls(variance(values))
    else:
        stats[prefix + 'stdev'] = float('nan')
        stats[prefix + 'variance'] = float('nan')
    stats[prefix + 'min'] = min_val
    stats[prefix + 'max'] = max_val
    # Spread of the samples around the mean, for the "-/+" report column
    stats[prefix + 'minus'] = val_cls(mean_val - min_val)
    stats[prefix + 'plus'] = val_cls(max_val - mean_val)

    return stats
342 | |||
diff --git a/scripts/oe-build-perf-report b/scripts/oe-build-perf-report new file mode 100755 index 0000000000..39766135c6 --- /dev/null +++ b/scripts/oe-build-perf-report | |||
@@ -0,0 +1,531 @@ | |||
1 | #!/usr/bin/python3 | ||
2 | # | ||
3 | # Examine build performance test results | ||
4 | # | ||
5 | # Copyright (c) 2017, Intel Corporation. | ||
6 | # | ||
7 | # This program is free software; you can redistribute it and/or modify it | ||
8 | # under the terms and conditions of the GNU General Public License, | ||
9 | # version 2, as published by the Free Software Foundation. | ||
10 | # | ||
11 | # This program is distributed in the hope it will be useful, but WITHOUT | ||
12 | # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
13 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
14 | # more details. | ||
15 | # | ||
16 | import argparse | ||
17 | import json | ||
18 | import logging | ||
19 | import os | ||
20 | import re | ||
21 | import sys | ||
22 | from collections import namedtuple, OrderedDict | ||
23 | from operator import attrgetter | ||
24 | from xml.etree import ElementTree as ET | ||
25 | |||
# Import oe libs: make scripts/lib (next to this script) importable first
scripts_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(scripts_path, 'lib'))
import scriptpath
from build_perf import print_table
from build_perf.report import (metadata_xml_to_json, results_xml_to_json,
                               aggregate_data, aggregate_metadata, measurement_stats)
from build_perf import html

# scriptpath helper extends sys.path with further OE library dirs
# (presumably meta/lib, which provides oeqa below — see scriptpath module)
scriptpath.add_oe_lib_path()

from oeqa.utils.git import GitRepo


# Setup logging
logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
log = logging.getLogger('oe-build-perf-report')


# Container class for tested revisions: the commit hash, its sequential
# commit number (parsed from the result tag) and the list of result tags
# stored for that commit
TestedRev = namedtuple('TestedRev', 'commit commit_number tags')
47 | |||
48 | |||
def get_test_runs(repo, tag_name, **kwargs):
    """Get a sorted list of test runs, matching given pattern.

    Args:
        repo: GitRepo of the results repository
        tag_name: tag name pattern containing '{field}' placeholders
        kwargs: fixed values for (some of) the placeholder fields

    Returns:
        (field_names, runs) where field_names lists the placeholder fields
        that were not fixed by kwargs, and runs is a sorted list of
        [field_value, ..., tag] rows, one per matching tag.
    """
    # First, get field names from the tag name pattern
    field_names = [m.group(1) for m in re.finditer(r'{(\w+)}', tag_name)]
    undef_fields = [f for f in field_names if f not in kwargs.keys()]

    # Fields for formatting tag name pattern into a git glob: any field not
    # fixed by kwargs becomes a wildcard
    str_fields = {f: '*' for f in field_names}
    str_fields.update(kwargs)

    # Get a list of all matching tags
    tag_pattern = tag_name.format(**str_fields)
    tags = repo.run_cmd(['tag', '-l', tag_pattern]).splitlines()
    log.debug("Found %d tags matching pattern '%s'", len(tags), tag_pattern)

    # Parse undefined fields from tag names
    str_fields = {f: r'(?P<{}>[\w\-.]+)'.format(f) for f in field_names}
    # Well-known fields get stricter patterns
    str_fields['commit'] = '(?P<commit>[0-9a-f]{7,40})'
    str_fields['commit_number'] = '(?P<commit_number>[0-9]{1,7})'
    str_fields['tag_number'] = '(?P<tag_number>[0-9]{1,5})'
    str_fields.update(kwargs)
    tag_re = re.compile(tag_name.format(**str_fields))

    # Parse fields from tags
    revs = []
    for tag in tags:
        m = tag_re.match(tag)
        if not m:
            # Fix: the git glob above is looser than the regex (e.g. allows
            # characters outside [\w\-.]), so skip tags that do not parse
            # instead of crashing on m.groupdict()
            log.debug("Tag '%s' does not match rev pattern, ignoring", tag)
            continue
        groups = m.groupdict()
        revs.append([groups[f] for f in undef_fields] + [tag])

    # Return field names and a sorted list of revs
    return undef_fields, sorted(revs)
81 | |||
def list_test_revs(repo, tag_name, **kwargs):
    """Print a table listing all tested revisions and their test run counts.

    Args:
        repo: GitRepo of the results repository
        tag_name: tag name pattern, passed to get_test_runs()
        kwargs: fixed tag field values, passed to get_test_runs()
    """
    fields, revs = get_test_runs(repo, tag_name, **kwargs)
    # The per-run tag number is not interesting in this listing
    ignore_fields = ['tag_number']
    print_fields = [i for i, f in enumerate(fields) if f not in ignore_fields]

    # Build table rows, collapsing consecutive runs of the same revision
    # into one row with a run count
    rows = [[fields[i].upper() for i in print_fields] + ['TEST RUNS']]
    # Fix: size 'prev' by the number of printed fields, not by the number of
    # revisions (the first row always differs so the wrong size never
    # crashed, but it was misleading)
    prev = [''] * len(print_fields)
    for rev in revs:
        # Only use fields that we want to print
        rev = [rev[i] for i in print_fields]

        if rev != prev:
            new_row = [''] * len(print_fields) + [1]
            # NOTE: indexing the filtered 'rev' with print_fields assumes
            # ignored fields are at the end of 'fields' (true for
            # tag_number with the default tag pattern)
            for i in print_fields:
                if rev[i] != prev[i]:
                    break
            # Blank out columns identical to the previous row for grouping
            new_row[i:-1] = rev[i:]
            rows.append(new_row)
        else:
            rows[-1][-1] += 1
        prev = rev

    print_table(rows)
107 | |||
def get_test_revs(repo, tag_name, **kwargs):
    """Get list of all tested revisions.

    Args:
        repo: GitRepo of the results repository
        tag_name: tag name pattern, passed to get_test_runs()
        kwargs: fixed tag field values, passed to get_test_runs()

    Returns:
        List of TestedRev tuples sorted by ascending commit number, with
        all result tags of each commit collected into the 'tags' list.
    """
    fields, runs = get_test_runs(repo, tag_name, **kwargs)

    revs = {}
    commit_i = fields.index('commit')
    commit_num_i = fields.index('commit_number')
    for run in runs:
        commit = run[commit_i]
        commit_num = run[commit_num_i]
        tag = run[-1]
        if commit not in revs:
            revs[commit] = TestedRev(commit, commit_num, [tag])
        else:
            assert commit_num == revs[commit].commit_number, "Commit numbers do not match"
            revs[commit].tags.append(tag)

    # Return in sorted table. Fix: commit numbers are strings parsed from
    # tag names, so sort them numerically — a plain string sort would order
    # e.g. '100' before '99'
    revs = sorted(revs.values(), key=lambda rev: int(rev.commit_number))
    log.debug("Found %d tested revisions:\n %s", len(revs),
              "\n ".join(['{} ({})'.format(rev.commit_number, rev.commit) for rev in revs]))
    return revs
130 | |||
def rev_find(revs, attr, val):
    """Return the index of the first rev whose *attr* equals *val*.

    Args:
        revs: sequence of TestedRev (or any objects with attribute *attr*)
        attr: attribute name to compare
        val: value to look for

    Raises:
        ValueError: if no matching revision is found.
    """
    matches = (idx for idx, rev in enumerate(revs) if getattr(rev, attr) == val)
    try:
        return next(matches)
    except StopIteration:
        raise ValueError("Unable to find '{}' value '{}'".format(attr, val))
137 | |||
def is_xml_format(repo, commit):
    """Check if the commit contains xml (or json) data.

    Probes for a 'results.xml' object in the given commit; its absence is
    taken to mean json-formatted results.
    """
    has_xml = bool(repo.rev_parse(commit + ':results.xml'))
    if has_xml:
        log.debug("Detected report in xml format in %s", commit)
    else:
        log.debug("No xml report in %s, assuming json formatted results", commit)
    return has_xml
146 | |||
def read_results(repo, tags, xml=True):
    """Read result files from repo.

    Reads the metadata and results objects of every given tag with a single
    'git show' invocation and splits the concatenated output back into
    per-tag documents.

    Args:
        repo: GitRepo of the results repository
        tags: list of result tags to read
        xml: True for xml-formatted reports, False for json

    Returns:
        (metadata_list, results_list), one entry per tag, in tag order.
    """

    def parse_xml_stream(data):
        """Parse multiple concatenated XML objects"""
        objs = []
        xml_d = ""
        for line in data.splitlines():
            # A new '<?xml version=' prolog marks the start of the next
            # document; flush the one accumulated so far
            if xml_d and line.startswith('<?xml version='):
                objs.append(ET.fromstring(xml_d))
                xml_d = line
            else:
                # NOTE(review): lines are joined without '\n', which assumes
                # no significant line breaks inside tags/text — appears to
                # hold for these generated reports, but confirm
                xml_d += line
        objs.append(ET.fromstring(xml_d))
        return objs

    def parse_json_stream(data):
        """Parse multiple concatenated JSON objects"""
        objs = []
        json_d = ""
        for line in data.splitlines():
            # A lone '}{' line is the boundary between two pretty-printed
            # JSON documents: close the current one, start the next
            if line == '}{':
                json_d += '}'
                objs.append(json.loads(json_d, object_pairs_hook=OrderedDict))
                json_d = '{'
            else:
                json_d += line
        objs.append(json.loads(json_d, object_pairs_hook=OrderedDict))
        return objs

    num_revs = len(tags)

    # Optimize by reading all data with one git command
    log.debug("Loading raw result data from %d tags, %s...", num_revs, tags[0])
    if xml:
        # Metadata objects come first in the stream, results objects after
        git_objs = [tag + ':metadata.xml' for tag in tags] + [tag + ':results.xml' for tag in tags]
        data = parse_xml_stream(repo.run_cmd(['show'] + git_objs + ['--']))
        return ([metadata_xml_to_json(e) for e in data[0:num_revs]],
                [results_xml_to_json(e) for e in data[num_revs:]])
    else:
        git_objs = [tag + ':metadata.json' for tag in tags] + [tag + ':results.json' for tag in tags]
        data = parse_json_stream(repo.run_cmd(['show'] + git_objs + ['--']))
        return data[0:num_revs], data[num_revs:]
190 | |||
191 | |||
def get_data_item(data, key):
    """Nested getitem lookup with a dotted key: 'a.b.c' -> data['a']['b']['c']"""
    keys = key.split('.')
    item = data
    while keys:
        item = item[keys.pop(0)]
    return item
197 | |||
198 | |||
def metadata_diff(metadata_l, metadata_r):
    """Prepare a metadata diff for printing.

    Args:
        metadata_l: metadata of the 'left' (older / baseline) report
        metadata_r: metadata of the 'right' (newer) report

    Returns:
        OrderedDict mapping short keys to {'title', 'value_old', 'value'}
        entries, where 'value_old' comes from the left metadata and 'value'
        from the right; missing items are shown as '(N/A)'.
    """
    keys = [('Hostname', 'hostname', 'hostname'),
            ('Branch', 'branch', 'layers.meta.branch'),
            ('Commit number', 'commit_num', 'layers.meta.commit_count'),
            ('Commit', 'commit', 'layers.meta.commit'),
            ('Number of test runs', 'testrun_count', 'testrun_count')
           ]

    def _lookup(meta, key):
        """Fetch one metadata value, tolerating missing keys"""
        try:
            return get_data_item(meta, key)
        except KeyError:
            return '(N/A)'

    metadata = OrderedDict()
    for title, key, key_json in keys:
        metadata[key] = {'title': title,
                         'value_old': _lookup(metadata_l, key_json),
                         'value': _lookup(metadata_r, key_json)}
    return metadata
227 | |||
228 | |||
def print_diff_report(metadata_l, data_l, metadata_r, data_r):
    """Print differences between two data sets.

    Args:
        metadata_l, data_l: metadata and aggregated test data of the 'left'
                            (older / baseline) revision
        metadata_r, data_r: metadata and aggregated test data of the 'right'
                            (newer) revision
    """

    # First, print general metadata
    print("\nTEST METADATA:\n==============")
    meta_diff = metadata_diff(metadata_l, metadata_r)
    row_fmt = ['{:{wid}} ', '{:<{wid}} ', '{:<{wid}}']
    # Fix: header typo "OOMPARING" -> "COMPARING"
    rows = [['', 'CURRENT COMMIT', 'COMPARING WITH']]
    for key, val in meta_diff.items():
        # Shorten commit hashes
        if key == 'commit':
            rows.append([val['title'] + ':', val['value'][:20], val['value_old'][:20]])
        else:
            rows.append([val['title'] + ':', val['value'], val['value_old']])
    print_table(rows, row_fmt)


    # Print test results
    print("\nTEST RESULTS:\n=============")

    tests = list(data_l['tests'].keys())
    # Append tests that are only present in 'right' set
    tests += [t for t in list(data_r['tests'].keys()) if t not in tests]

    # Prepare data to be printed
    rows = []
    row_fmt = ['{:8}', '{:{wid}}', '{:{wid}}', ' {:>{wid}}', ' {:{wid}} ', '{:{wid}}',
               ' {:>{wid}}', ' {:>{wid}}']
    num_cols = len(row_fmt)
    for test in tests:
        test_l = data_l['tests'][test] if test in data_l['tests'] else None
        test_r = data_r['tests'][test] if test in data_r['tests'] else None
        # '+' marks a test added in the right set, '-' one removed from it
        pref = ' '
        if test_l is None:
            pref = '+'
        elif test_r is None:
            pref = '-'
        descr = test_l['description'] if test_l else test_r['description']
        heading = "{} {}: {}".format(pref, test, descr)

        rows.append([heading])

        # Generate the list of measurements
        meas_l = test_l['measurements'] if test_l else {}
        meas_r = test_r['measurements'] if test_r else {}
        measurements = list(meas_l.keys())
        measurements += [m for m in list(meas_r.keys()) if m not in measurements]

        for meas in measurements:
            m_pref = ' '
            if meas in meas_l:
                stats_l = measurement_stats(meas_l[meas], 'l.')
            else:
                stats_l = measurement_stats(None, 'l.')
                m_pref = '+'
            if meas in meas_r:
                stats_r = measurement_stats(meas_r[meas], 'r.')
            else:
                stats_r = measurement_stats(None, 'r.')
                m_pref = '-'
            # Merge 'l.*' and 'r.*' statistics into one dict
            stats = stats_l.copy()
            stats.update(stats_r)

            absdiff = stats['val_cls'](stats['r.mean'] - stats['l.mean'])
            reldiff = "{:+.1f} %".format(absdiff * 100 / stats['l.mean'])
            # Prefix increases with an explicit '+' sign
            if stats['r.mean'] > stats['l.mean']:
                absdiff = '+' + str(absdiff)
            else:
                absdiff = str(absdiff)
            rows.append(['', m_pref, stats['name'] + ' ' + stats['quantity'],
                         str(stats['l.mean']), '->', str(stats['r.mean']),
                         absdiff, reldiff])
        rows.append([''] * num_cols)

    print_table(rows, row_fmt)

    print()
307 | |||
308 | |||
def print_html_report(data, id_comp):
    """Print report in html format.

    Args:
        data: list of (metadata, test_data) tuples, one per tested revision,
              oldest first; the last entry is the revision being reported on
        id_comp: index (into data) of the revision to compare against
    """
    # Handle metadata
    # Fix: removed a dead hard-coded placeholder dict that was assigned to
    # 'metadata' and immediately overwritten by this call
    metadata = metadata_diff(data[id_comp][0], data[-1][0])


    # Generate list of tests
    tests = []
    for test in data[-1][1]['tests'].keys():
        test_r = data[-1][1]['tests'][test]
        new_test = {'name': test_r['name'],
                    'description': test_r['description'],
                    'status': test_r['status'],
                    'measurements': [],
                    'err_type': test_r.get('err_type'),
                   }
        # Limit length of err output shown
        if 'message' in test_r:
            lines = test_r['message'].splitlines()
            if len(lines) > 20:
                new_test['message'] = '...\n' + '\n'.join(lines[-20:])
            else:
                new_test['message'] = test_r['message']


        # Generate the list of measurements
        for meas in test_r['measurements'].keys():
            meas_r = test_r['measurements'][meas]
            meas_type = 'time' if meas_r['type'] == 'sysres' else 'size'
            new_meas = {'name': meas_r['name'],
                        'legend': meas_r['legend'],
                        'description': meas_r['name'] + ' ' + meas_type,
                       }
            samples = []

            # Run through all revisions in our data
            for meta, test_data in data:
                if (not test in test_data['tests'] or
                        not meas in test_data['tests'][test]['measurements']):
                    # Measurement absent in this revision: NaN placeholder
                    samples.append(measurement_stats(None))
                    continue
                test_i = test_data['tests'][test]
                meas_i = test_i['measurements'][meas]
                commit_num = get_data_item(meta, 'layers.meta.commit_count')
                samples.append(measurement_stats(meas_i))
                samples[-1]['commit_num'] = commit_num

            # Difference between the newest revision and the comparison one
            absdiff = samples[-1]['val_cls'](samples[-1]['mean'] - samples[id_comp]['mean'])
            new_meas['absdiff'] = absdiff
            new_meas['absdiff_str'] = str(absdiff) if absdiff < 0 else '+' + str(absdiff)
            new_meas['reldiff'] = "{:+.1f} %".format(absdiff * 100 / samples[id_comp]['mean'])
            new_meas['samples'] = samples
            new_meas['value'] = samples[-1]
            new_meas['value_type'] = samples[-1]['val_cls']

            new_test['measurements'].append(new_meas)
        tests.append(new_test)

    # Chart options: the x-axis spans from the oldest to the newest revision.
    # Fix: 'max' used data[0][0] (the oldest revision) just like 'min',
    # which collapsed the chart range to a single point
    chart_opts = {'haxis': {'min': get_data_item(data[0][0], 'layers.meta.commit_count'),
                            'max': get_data_item(data[-1][0], 'layers.meta.commit_count')}
                 }

    print(html.template.render(metadata=metadata, test_data=tests, chart_opts=chart_opts))
377 | |||
378 | |||
def auto_args(repo, args):
    """Guess arguments, if not defined by the user.

    Scans 'key: value' lines in the body of the latest commit of the results
    repository and fills hostname/branch into *args* in place.
    """
    # Get the latest commit in the repo
    log.debug("Guessing arguments from the latest commit")
    msg = repo.run_cmd(['log', '-1', '--all', '--format=%b'])
    for line in msg.splitlines():
        key, sep, val = line.partition(':')
        if not sep:
            # Not a 'key: value' line
            continue

        val = val.strip()
        if key == 'hostname':
            log.debug("Using hostname %s", val)
            args.hostname = val
        elif key == 'branch':
            log.debug("Using branch %s", val)
            args.branch = val
397 | |||
398 | |||
def parse_args(argv):
    """Parse command line arguments.

    Args:
        argv: list of argument strings, or None to read sys.argv

    Returns:
        argparse.Namespace holding the parsed options.
    """
    description = """
Examine build performance test results from a Git repository"""
    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description=description)

    arg_parser.add_argument('--debug', '-d', action='store_true',
                            help="Verbose logging")
    arg_parser.add_argument('--repo', '-r', required=True,
                            help="Results repository (local git clone)")
    arg_parser.add_argument('--list', '-l', action='store_true',
                            help="List available test runs")
    arg_parser.add_argument('--html', action='store_true',
                            help="Generate report in html format")

    # Options selecting which revisions to examine
    rev_group = arg_parser.add_argument_group('Tag and revision')
    rev_group.add_argument('--tag-name', '-t',
                           default='{hostname}/{branch}/{machine}/{commit_number}-g{commit}/{tag_number}',
                           help="Tag name (pattern) for finding results")
    rev_group.add_argument('--hostname', '-H')
    rev_group.add_argument('--branch', '-B', default='master')
    rev_group.add_argument('--machine', default='qemux86')
    rev_group.add_argument('--history-length', default=25, type=int,
                           help="Number of tested revisions to plot in html report")
    rev_group.add_argument('--commit',
                           help="Revision to search for")
    rev_group.add_argument('--commit-number',
                           help="Revision number to search for, redundant if "
                                "--commit is specified")
    rev_group.add_argument('--commit2',
                           help="Revision to compare with")
    rev_group.add_argument('--commit-number2',
                           help="Revision number to compare with, redundant if "
                                "--commit2 is specified")

    return arg_parser.parse_args(argv)
436 | |||
437 | |||
def main(argv=None):
    """Script entry point.

    Returns 0 on success, 1 on error.
    """
    args = parse_args(argv)
    if args.debug:
        log.setLevel(logging.DEBUG)

    repo = GitRepo(args.repo)

    if args.list:
        list_test_revs(repo, args.tag_name)
        return 0

    # Determine hostname which to use
    if not args.hostname:
        auto_args(repo, args)

    revs = get_test_revs(repo, args.tag_name, hostname=args.hostname,
                         branch=args.branch, machine=args.machine)
    if len(revs) < 2:
        log.error("%d tester revisions found, unable to generate report",
                  len(revs))
        return 1

    # Pick revisions: index1 is the revision under inspection, index2 the
    # baseline to compare it against (defaults: newest vs its predecessor)
    if args.commit:
        if args.commit_number:
            log.warning("Ignoring --commit-number as --commit was specified")
        index1 = rev_find(revs, 'commit', args.commit)
    elif args.commit_number:
        index1 = rev_find(revs, 'commit_number', args.commit_number)
    else:
        index1 = len(revs) - 1

    if args.commit2:
        if args.commit_number2:
            log.warning("Ignoring --commit-number2 as --commit2 was specified")
        index2 = rev_find(revs, 'commit', args.commit2)
    elif args.commit_number2:
        index2 = rev_find(revs, 'commit_number', args.commit_number2)
    else:
        if index1 > 0:
            index2 = index1 - 1
        else:
            log.error("Unable to determine the other commit, use "
                      "--commit2 or --commit-number2 to specify it")
            return 1

    index_l = min(index1, index2)
    index_r = max(index1, index2)

    rev_l = revs[index_l]
    rev_r = revs[index_r]
    log.debug("Using 'left' revision %s (%s), %s test runs:\n %s",
              rev_l.commit_number, rev_l.commit, len(rev_l.tags),
              '\n '.join(rev_l.tags))
    log.debug("Using 'right' revision %s (%s), %s test runs:\n %s",
              rev_r.commit_number, rev_r.commit, len(rev_r.tags),
              '\n '.join(rev_r.tags))

    # Check report format used in the repo (assume all reports in the same fmt)
    xml = is_xml_format(repo, revs[index_r].tags[-1])

    if args.html:
        # Charts need a contiguous range of revisions up to index_r
        index_0 = max(0, index_r - args.history_length)
        rev_range = range(index_0, index_r + 1)
    else:
        # We do not need range of commits for text report (no graphs):
        # read only the two revisions being compared
        index_0 = index_l
        rev_range = (index_l, index_r)

    # Read raw data
    log.debug("Reading %d revisions, starting from %s (%s)",
              len(rev_range), revs[index_0].commit_number, revs[index_0].commit)
    raw_data = [read_results(repo, revs[i].tags, xml) for i in rev_range]

    data = []
    for raw_m, raw_d in raw_data:
        data.append((aggregate_metadata(raw_m), aggregate_data(raw_d)))

    # Re-map revision indexes into the aggregated data table
    if args.html:
        # Contiguous range: shift indexes by the start of the range
        index_r = index_r - index_0
        index_l = index_l - index_0
    else:
        # Fix: only the two compared revisions were read above, and they
        # need not be adjacent, so the old 'index - index_0' arithmetic
        # could point past the end of 'data' (IndexError). The two entries
        # are always at positions 0 and 1.
        index_l = 0
        index_r = 1

    # Print report
    if not args.html:
        print_diff_report(data[index_l][0], data[index_l][1],
                          data[index_r][0], data[index_r][1])
    else:
        print_html_report(data, index_l)

    return 0

if __name__ == "__main__":
    sys.exit(main())