diff options
author | Adrian Dudau <adrian.dudau@enea.com> | 2014-06-26 14:36:22 +0200 |
---|---|---|
committer | Adrian Dudau <adrian.dudau@enea.com> | 2014-06-26 15:32:53 +0200 |
commit | f4cf9fe05bb3f32fabea4e54dd92d368967a80da (patch) | |
tree | 487180fa9866985ea7b28e625651765d86f515c3 /meta/classes/toaster.bbclass | |
download | poky-f4cf9fe05bb3f32fabea4e54dd92d368967a80da.tar.gz |
initial commit for Enea Linux 4.0
Migrated from the internal git server on the daisy-enea branch
Signed-off-by: Adrian Dudau <adrian.dudau@enea.com>
Diffstat (limited to 'meta/classes/toaster.bbclass')
-rw-r--r-- | meta/classes/toaster.bbclass | 331 |
1 files changed, 331 insertions, 0 deletions
diff --git a/meta/classes/toaster.bbclass b/meta/classes/toaster.bbclass new file mode 100644 index 0000000000..4244b2ca7f --- /dev/null +++ b/meta/classes/toaster.bbclass | |||
@@ -0,0 +1,331 @@ | |||
1 | # | ||
2 | # Toaster helper class | ||
3 | # | ||
4 | # Copyright (C) 2013 Intel Corporation | ||
5 | # | ||
6 | # Released under the MIT license (see COPYING.MIT) | ||
7 | # | ||
8 | # This bbclass is designed to extract data used by OE-Core during the build process, | ||
9 | # for recording in the Toaster system. | ||
10 | # The data access is synchronous, preserving the build data integrity across | ||
11 | # different builds. | ||
12 | # | ||
13 | # The data is transferred through the event system, using the MetadataEvent objects. | ||
14 | # | ||
15 | # The model is to enable the datadump functions as postfuncs, and have the dump | ||
16 | # executed after the real taskfunc has been executed. This prevents the task signature changing | ||
17 | # based on whether toaster is enabled or not. Build performance is not affected if Toaster is not enabled. | ||
18 | # | ||
19 | # To enable, use INHERIT in local.conf: | ||
20 | # | ||
21 | # INHERIT += "toaster" | ||
22 | # | ||
23 | # | ||
24 | # | ||
25 | # | ||
26 | |||
27 | # Find and dump layer info when we got the layers parsed | ||
28 | |||
29 | |||
30 | |||
python toaster_layerinfo_dumpdata() {
    """
    Event handler fired on bb.event.TreeDataPreparationCompleted.
    Collects name, local path, layer-index URL and git version information
    for every layer listed in BBLAYERS, then fires a "LayerInfo"
    MetadataEvent carrying the collected dictionary for Toaster to record.
    """
    import subprocess

    def _get_git_branch(layer_path):
        # Current branch of the layer checkout; empty on a detached HEAD
        # or outside a git repo (stderr is discarded by the shell).
        branch = subprocess.Popen("git symbolic-ref HEAD 2>/dev/null ", cwd=layer_path, shell=True, stdout=subprocess.PIPE).communicate()[0]
        branch = branch.replace('refs/heads/', '').rstrip()
        return branch

    def _get_git_revision(layer_path):
        # Full SHA1 of the layer checkout; empty if not a git repo.
        revision = subprocess.Popen("git rev-parse HEAD 2>/dev/null ", cwd=layer_path, shell=True, stdout=subprocess.PIPE).communicate()[0].rstrip()
        return revision

    def _get_url_map_name(layer_name):
        """ Some layers have a different name on openembedded.org site,
            this method returns the correct name to use in the URL
        """
        # dict.get replaces the original manual scan over url_mapping.keys():
        # same result (layer_name falls through unchanged when unmapped).
        url_mapping = {'meta': 'openembedded-core'}
        return url_mapping.get(layer_name, layer_name)

    def _get_layer_version_information(layer_path):
        # Layer priority is not known at this point in the build, so a
        # fixed 0 is reported.
        layer_version_info = {}
        layer_version_info['branch'] = _get_git_branch(layer_path)
        layer_version_info['commit'] = _get_git_revision(layer_path)
        layer_version_info['priority'] = 0

        return layer_version_info

    def _get_layer_dict(layer_path):
        # Build the per-layer info dict; the layer name is taken from the
        # last path component of the layer directory.
        layer_info = {}
        layer_name = layer_path.split('/')[-1]
        layer_url = 'http://layers.openembedded.org/layerindex/layer/{layer}/'
        layer_url_name = _get_url_map_name(layer_name)

        layer_info['name'] = layer_name
        layer_info['local_path'] = layer_path
        layer_info['layer_index_url'] = layer_url.format(layer=layer_url_name)
        layer_info['version'] = _get_layer_version_information(layer_path)

        return layer_info

    bblayers = e.data.getVar("BBLAYERS", True)

    llayerinfo = {}

    # BBLAYERS is a whitespace-separated list; drop empty entries.
    for layer in { l for l in bblayers.strip().split(" ") if len(l) }:
        llayerinfo[layer] = _get_layer_dict(layer)

    bb.event.fire(bb.event.MetadataEvent("LayerInfo", llayerinfo), e.data)
}
92 | |||
93 | # Dump package file info data | ||
94 | |||
95 | def _toaster_load_pkgdatafile(dirpath, filepath): | ||
96 | import json | ||
97 | import re | ||
98 | pkgdata = {} | ||
99 | with open(os.path.join(dirpath, filepath), "r") as fin: | ||
100 | for line in fin: | ||
101 | try: | ||
102 | kn, kv = line.strip().split(": ", 1) | ||
103 | m = re.match(r"^PKG_([^A-Z:]*)", kn) | ||
104 | if m: | ||
105 | pkgdata['OPKGN'] = m.group(1) | ||
106 | kn = "_".join([x for x in kn.split("_") if x.isupper()]) | ||
107 | pkgdata[kn] = kv.strip() | ||
108 | if kn == 'FILES_INFO': | ||
109 | pkgdata[kn] = json.loads(kv) | ||
110 | |||
111 | except ValueError: | ||
112 | pass # ignore lines without valid key: value pairs | ||
113 | return pkgdata | ||
114 | |||
115 | |||
python toaster_package_dumpdata() {
    """
    Dumps the data created by emit_pkgdata
    Runs as a do_package postfunc; for every package in PACKAGES it parses
    the corresponding PKGDESTWORK/runtime/ pkgdata file and fires one
    "SinglePackageInfo" MetadataEvent carrying the parsed dictionary.
    """
    # replicate variables from the package.bbclass
    packages = d.getVar('PACKAGES', True)
    # NOTE: the original also read PKGDEST here but never used it; that
    # unused lookup has been dropped.
    pkgdatadir = d.getVar('PKGDESTWORK', True)

    # scan and send data for each package
    for pkg in packages.split():
        lpkgdata = _toaster_load_pkgdatafile(pkgdatadir + "/runtime/", pkg)

        # Fire an event containing the pkg data
        bb.event.fire(bb.event.MetadataEvent("SinglePackageInfo", lpkgdata), d)
}
137 | |||
138 | # 2. Dump output image files information | ||
139 | |||
python toaster_image_dumpdata() {
    """
    Image filename for output images is not standardized.
    image_types.bbclass will spell out IMAGE_CMD_xxx variables that actually
    have hardcoded ways to create image file names in them.
    So we look for files starting with the set name.
    """
    deploy_dir_image = d.getVar('DEPLOY_DIR_IMAGE', True)
    image_name = d.getVar('IMAGE_NAME', True)

    image_info_data = {}

    # Walk the deploy directory and record the on-disk size of every
    # artifact whose filename begins with IMAGE_NAME.
    for dirpath, dirnames, filenames in os.walk(deploy_dir_image):
        for fname in filenames:
            if not fname.startswith(image_name):
                continue
            fullpath = os.path.join(dirpath, fname)
            image_info_data[fullpath] = os.stat(fullpath).st_size

    bb.event.fire(bb.event.MetadataEvent("ImageFileSize", image_info_data), d)
}
161 | |||
162 | |||
163 | |||
164 | # collect list of buildstats files based on fired events; when the build completes, collect all stats and fire an event with collected data | ||
165 | |||
python toaster_collect_task_stats() {
    # Event handler for two event classes:
    #  - bb.build.TaskSucceeded / TaskFailed: append the finished task's
    #    buildstats file path to a "toasterstatlist" index file;
    #  - bb.event.BuildCompleted: parse every recorded stats file and fire
    #    a single "BuildStatsList" MetadataEvent, then delete the index.
    import bb.build
    import bb.event
    import bb.data
    import bb.utils
    import os

    if not e.data.getVar('BUILDSTATS_BASE', True):
        return # if we don't have buildstats, we cannot collect stats

    def _append_read_list(v):
        # Tasks finish concurrently; serialize appends to the shared index
        # file with a TOPDIR-level lockfile.
        lock = bb.utils.lockfile(e.data.expand("${TOPDIR}/toaster.lock"), False, True)

        with open(os.path.join(e.data.getVar('BUILDSTATS_BASE', True), "toasterstatlist"), "a") as fout:
            # NOTE(review): get_bn is not defined in this class — presumably
            # provided by buildstats.bbclass; confirm it is in scope.
            bn = get_bn(e)
            bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
            taskdir = os.path.join(bsdir, e.data.expand("${PF}"))
            # Record format: taskfile:taskname:statsfile:recipename
            fout.write("%s:%s:%s:%s\n" % (e.taskfile, e.taskname, os.path.join(taskdir, e.task), e.data.expand("${PN}")))

        bb.utils.unlockfile(lock)

    def _read_stats(filename):
        # Parse one buildstats task file ("Key: value" lines) into
        # {'cpu_usage': float (0 if absent), 'disk_io': int (0 if absent)}.
        cpu_usage = 0
        disk_io = 0
        startio = ''
        endio = ''
        pn = ''
        taskname = ''
        statinfo = {}

        with open(filename, 'r') as task_bs:
            for line in task_bs.readlines():
                k,v = line.strip().split(": ", 1)
                statinfo[k] = v

        try:
            cpu_usage = statinfo["CPU usage"]
            endio = statinfo["EndTimeIO"]
            startio = statinfo["StartTimeIO"]
        except KeyError:
            pass # we may have incomplete data here

        # disk_io is the delta of the I/O counters over the task's lifetime
        if startio and endio:
            disk_io = int(endio.strip('\n ')) - int(startio.strip('\n '))

        # "CPU usage" is recorded as e.g. "42.0% " — strip the unit
        if cpu_usage:
            cpu_usage = float(cpu_usage.strip('% \n'))

        return {'cpu_usage': cpu_usage, 'disk_io': disk_io}

    if isinstance(e, (bb.build.TaskSucceeded, bb.build.TaskFailed)):
        _append_read_list(e)
        pass  # NOTE(review): dead statement, kept as-is

    if isinstance(e, bb.event.BuildCompleted) and os.path.exists(os.path.join(e.data.getVar('BUILDSTATS_BASE', True), "toasterstatlist")):
        events = []
        with open(os.path.join(e.data.getVar('BUILDSTATS_BASE', True), "toasterstatlist"), "r") as fin:
            for line in fin:
                (taskfile, taskname, filename, recipename) = line.strip().split(":")
                events.append((taskfile, taskname, _read_stats(filename), recipename))
        bb.event.fire(bb.event.MetadataEvent("BuildStatsList", events), e.data)
        # The index is consumed once per build; remove it so stats do not
        # leak into the next build.
        os.unlink(os.path.join(e.data.getVar('BUILDSTATS_BASE', True), "toasterstatlist"))
}
231 | |||
232 | # dump relevant build history data as an event when the build is completed | ||
233 | |||
python toaster_buildhistory_dump() {
    # Event handler fired on bb.event.BuildCompleted.  Reads the
    # buildhistory output for each built image target and fires a single
    # "ImagePkgList" MetadataEvent with per-image package sizes,
    # dependency edges and file listings, plus the pkgdata for every
    # installed package.
    import re
    BUILDHISTORY_DIR = e.data.expand("${TOPDIR}/buildhistory")
    BUILDHISTORY_DIR_IMAGE_BASE = e.data.expand("%s/images/${MACHINE_ARCH}/${TCLIBC}/"% BUILDHISTORY_DIR)
    pkgdata_dir = e.data.getVar("PKGDATA_DIR", True)


    # scan the build targets for this build
    images = {}
    allpkgs = {}
    files = {}
    # NOTE(review): e._pkgs is a private attribute of the event object —
    # presumably the list of build targets; confirm against the
    # BuildCompleted event definition.
    for target in e._pkgs:
        installed_img_path = e.data.expand(os.path.join(BUILDHISTORY_DIR_IMAGE_BASE, target))
        # Non-image targets have no buildhistory image directory; skip them.
        if os.path.exists(installed_img_path):
            images[target] = {}
            files[target] = {}
            files[target]['dirs'] = []
            files[target]['syms'] = []
            files[target]['files'] = []
            # Lines look like "<size>\t<unit> <pkgname>"
            with open("%s/installed-package-sizes.txt" % installed_img_path, "r") as fin:
                for line in fin:
                    line = line.rstrip(";")
                    psize, px = line.split("\t")
                    punit, pname = px.split(" ")
                    # this size is "installed-size" as it measures how much space it takes on disk
                    images[target][pname.strip()] = {'size':int(psize)*1024, 'depends' : []}

            # depends.dot is graphviz output: '"a" -> "b" [...];' per edge;
            # style=dotted marks a recommends edge rather than a hard depend.
            with open("%s/depends.dot" % installed_img_path, "r") as fin:
                p = re.compile(r' -> ')
                dot = re.compile(r'.*style=dotted')
                for line in fin:
                    line = line.rstrip(';')
                    linesplit = p.split(line)
                    if len(linesplit) == 2:
                        pname = linesplit[0].rstrip('"').strip('"')
                        dependsname = linesplit[1].split(" ")[0].strip().strip(";").strip('"').rstrip('"')
                        deptype = "depends"
                        if dot.match(line):
                            deptype = "recommends"
                        # dependencies may reference packages absent from the
                        # sizes file; register them with size 0
                        if not pname in images[target]:
                            images[target][pname] = {'size': 0, 'depends' : []}
                        if not dependsname in images[target]:
                            images[target][dependsname] = {'size': 0, 'depends' : []}
                        images[target][pname]['depends'].append((dependsname, deptype))

            # ls-style listing; the first field's mode string classifies the
            # entry: "l" symlink, "d" directory, anything else a plain file.
            with open("%s/files-in-image.txt" % installed_img_path, "r") as fin:
                for line in fin:
                    lc = [ x for x in line.strip().split(" ") if len(x) > 0 ]
                    if lc[0].startswith("l"):
                        files[target]['syms'].append(lc)
                    elif lc[0].startswith("d"):
                        files[target]['dirs'].append(lc)
                    else:
                        files[target]['files'].append(lc)

            # Collect pkgdata (via runtime-reverse name mapping) once per
            # distinct package across all targets.
            for pname in images[target]:
                if not pname in allpkgs:
                    try:
                        pkgdata = _toaster_load_pkgdatafile("%s/runtime-reverse/" % pkgdata_dir, pname)
                    except IOError as err:
                        if err.errno == 2:
                            # We expect this e.g. for RRECOMMENDS that are unsatisfied at runtime
                            continue
                        else:
                            raise
                    allpkgs[pname] = pkgdata


    data = { 'pkgdata' : allpkgs, 'imgdata' : images, 'filedata' : files }

    bb.event.fire(bb.event.MetadataEvent("ImagePkgList", data), e.data)

}
307 | |||
308 | # dump information related to license manifest path | ||
309 | |||
python toaster_licensemanifest_dump() {
    """
    Runs as a do_rootfs postfunc; fires a "LicenseManifestPath"
    MetadataEvent carrying DEPLOY_DIR and IMAGE_NAME so Toaster can locate
    the license manifest for the built image.
    """
    data = { 'deploy_dir' : d.getVar('DEPLOY_DIR', True),
             'image_name' : d.getVar('IMAGE_NAME', True) }

    bb.event.fire(bb.event.MetadataEvent("LicenseManifestPath", data), d)
}
318 | |||
# set event handlers
addhandler toaster_layerinfo_dumpdata
toaster_layerinfo_dumpdata[eventmask] = "bb.event.TreeDataPreparationCompleted"

addhandler toaster_collect_task_stats
toaster_collect_task_stats[eventmask] = "bb.event.BuildCompleted bb.build.TaskSucceeded bb.build.TaskFailed"

addhandler toaster_buildhistory_dump
toaster_buildhistory_dump[eventmask] = "bb.event.BuildCompleted"
# The dump functions run as task postfuncs (not main task code) so that
# task signatures are identical whether or not toaster.bbclass is inherited.
do_package[postfuncs] += "toaster_package_dumpdata "

do_rootfs[postfuncs] += "toaster_image_dumpdata "
do_rootfs[postfuncs] += "toaster_licensemanifest_dump "