diff options
author | Tudor Florea <tudor.florea@enea.com> | 2015-10-09 22:59:03 +0200 |
---|---|---|
committer | Tudor Florea <tudor.florea@enea.com> | 2015-10-09 22:59:03 +0200 |
commit | 972dcfcdbfe75dcfeb777150c136576cf1a71e99 (patch) | |
tree | 97a61cd7e293d7ae9d56ef7ed0f81253365bb026 /meta/lib/oe | |
download | poky-972dcfcdbfe75dcfeb777150c136576cf1a71e99.tar.gz |
initial commit for Enea Linux 5.0 arm
Signed-off-by: Tudor Florea <tudor.florea@enea.com>
Diffstat (limited to 'meta/lib/oe')
31 files changed, 7406 insertions, 0 deletions
diff --git a/meta/lib/oe/__init__.py b/meta/lib/oe/__init__.py new file mode 100644 index 0000000000..3ad9513f40 --- /dev/null +++ b/meta/lib/oe/__init__.py | |||
@@ -0,0 +1,2 @@ | |||
# Make 'oe' a namespace package: extend_path() scans every directory on
# sys.path for an 'oe' subdirectory, so multiple metadata layers can each
# contribute modules to the same 'oe' package.
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
diff --git a/meta/lib/oe/buildhistory_analysis.py b/meta/lib/oe/buildhistory_analysis.py new file mode 100644 index 0000000000..5395c768a3 --- /dev/null +++ b/meta/lib/oe/buildhistory_analysis.py | |||
@@ -0,0 +1,456 @@ | |||
1 | # Report significant differences in the buildhistory repository since a specific revision | ||
2 | # | ||
3 | # Copyright (C) 2012 Intel Corporation | ||
4 | # Author: Paul Eggleton <paul.eggleton@linux.intel.com> | ||
5 | # | ||
6 | # Note: requires GitPython 0.3.1+ | ||
7 | # | ||
8 | # You can use this from the command line by running scripts/buildhistory-diff | ||
9 | # | ||
10 | |||
11 | import sys | ||
12 | import os.path | ||
13 | import difflib | ||
14 | import git | ||
15 | import re | ||
16 | import bb.utils | ||
17 | |||
18 | |||
# How to display fields
list_fields = ['DEPENDS', 'RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS',
               'RREPLACES', 'RCONFLICTS', 'FILES', 'FILELIST', 'USER_CLASSES',
               'IMAGE_CLASSES', 'IMAGE_FEATURES', 'IMAGE_LINGUAS',
               'IMAGE_INSTALL', 'BAD_RECOMMENDATIONS', 'PACKAGE_EXCLUDE']
list_order_fields = ['PACKAGES']
defaultval_map = {'PKG': 'PKG', 'PKGE': 'PE', 'PKGV': 'PV', 'PKGR': 'PR'}
numeric_fields = ['PKGSIZE', 'IMAGESIZE']
# Fields to monitor
monitor_fields = ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RREPLACES',
                  'RCONFLICTS', 'PACKAGES', 'FILELIST', 'PKGSIZE',
                  'IMAGESIZE', 'PKG']
ver_monitor_fields = ['PKGE', 'PKGV', 'PKGR']
# Percentage change to alert for numeric fields
monitor_numeric_threshold = 10
# Image files to monitor (note that image-info.txt is handled separately)
img_monitor_files = ['installed-package-names.txt', 'files-in-image.txt']
# Related context fields for reporting (note: PE, PV & PR are always reported
# for monitored package fields)
related_fields = {
    'RDEPENDS': ['DEPENDS'],
    'RRECOMMENDS': ['DEPENDS'],
    'FILELIST': ['FILES'],
    'PKGSIZE': ['FILELIST'],
    'files-in-image.txt': ['installed-package-names.txt', 'USER_CLASSES',
                           'IMAGE_CLASSES', 'ROOTFS_POSTPROCESS_COMMAND',
                           'IMAGE_POSTPROCESS_COMMAND'],
    'installed-package-names.txt': ['IMAGE_FEATURES', 'IMAGE_LINGUAS',
                                    'IMAGE_INSTALL', 'BAD_RECOMMENDATIONS',
                                    'NO_RECOMMENDATIONS', 'PACKAGE_EXCLUDE'],
}
39 | |||
40 | |||
class ChangeRecord:
    """A single significant difference between two buildhistory revisions.

    path is the repository-relative directory the change was found in,
    fieldname the variable or file that changed, and monitored whether the
    field is one of the fields the caller wants highlighted.  Related
    changes (e.g. DEPENDS context for an RDEPENDS change) are attached to
    the 'related' list by process_changes(); file-level changes (lists of
    FileChange objects) go in 'filechanges'.
    """
    def __init__(self, path, fieldname, oldvalue, newvalue, monitored):
        self.path = path
        self.fieldname = fieldname
        self.oldvalue = oldvalue
        self.newvalue = newvalue
        self.monitored = monitored
        self.related = []
        self.filechanges = None

    def __str__(self):
        return self._str_internal(True)

    def _str_internal(self, outer):
        """Render the change as human-readable text.

        outer=False is used when rendering nested 'related' changes: it
        drops the path prefix and skips PE/PV/PR entries (they are already
        shown with the parent change).
        """
        if outer:
            if '/image-files/' in self.path:
                prefix = '%s: ' % self.path.split('/image-files/')[0]
            else:
                prefix = '%s: ' % self.path
        else:
            prefix = ''

        def pkglist_combine(depver):
            # Turn a {package: version-constraint} dict into a list of
            # "pkg (constraint)" / "pkg" strings for display.
            pkglist = []
            # Fix: use items() rather than the Python 2-only iteritems();
            # behavior is identical and this also works on Python 3.
            for k,v in depver.items():
                if v:
                    pkglist.append("%s (%s)" % (k,v))
                else:
                    pkglist.append(k)
            return pkglist

        if self.fieldname in list_fields or self.fieldname in list_order_fields:
            if self.fieldname in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
                # Dependency-style fields may carry version constraints;
                # compare_pkg_lists() also filters out plain version bumps
                (depvera, depverb) = compare_pkg_lists(self.oldvalue, self.newvalue)
                aitems = pkglist_combine(depvera)
                bitems = pkglist_combine(depverb)
            else:
                aitems = self.oldvalue.split()
                bitems = self.newvalue.split()
            removed = list(set(aitems) - set(bitems))
            added = list(set(bitems) - set(aitems))

            if removed or added:
                if removed and not bitems:
                    out = '%s: removed all items "%s"' % (self.fieldname, ' '.join(removed))
                else:
                    out = '%s:%s%s' % (self.fieldname, ' removed "%s"' % ' '.join(removed) if removed else '', ' added "%s"' % ' '.join(added) if added else '')
            else:
                # Same items, different order (only reachable for
                # list_order_fields such as PACKAGES)
                out = '%s changed order' % self.fieldname
        elif self.fieldname in numeric_fields:
            aval = int(self.oldvalue or 0)
            bval = int(self.newvalue or 0)
            if aval != 0:
                percentchg = ((bval - aval) / float(aval)) * 100
            else:
                # Avoid division by zero when the old value was 0/empty
                percentchg = 100
            out = '%s changed from %s to %s (%s%d%%)' % (self.fieldname, self.oldvalue or "''", self.newvalue or "''", '+' if percentchg > 0 else '', percentchg)
        elif self.fieldname in defaultval_map:
            out = '%s changed from %s to %s' % (self.fieldname, self.oldvalue, self.newvalue)
            if self.fieldname == 'PKG' and '[default]' in self.newvalue:
                out += ' - may indicate debian renaming failure'
        elif self.fieldname in ['pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm']:
            # Script fields: show a unified diff of the script body.
            # At least one of old/new is non-empty here since a record is
            # only created when the values differ.
            if self.oldvalue and self.newvalue:
                out = '%s changed:\n ' % self.fieldname
            elif self.newvalue:
                out = '%s added:\n ' % self.fieldname
            elif self.oldvalue:
                out = '%s cleared:\n ' % self.fieldname
            alines = self.oldvalue.splitlines()
            blines = self.newvalue.splitlines()
            diff = difflib.unified_diff(alines, blines, self.fieldname, self.fieldname, lineterm='')
            # [2:] drops the ---/+++ header lines from the diff output
            out += '\n '.join(list(diff)[2:])
            out += '\n --'
        elif self.fieldname in img_monitor_files or '/image-files/' in self.path:
            fieldname = self.fieldname
            if '/image-files/' in self.path:
                # Reconstruct the in-image file path for display
                fieldname = os.path.join('/' + self.path.split('/image-files/')[1], self.fieldname)
                out = 'Changes to %s:\n ' % fieldname
            else:
                if outer:
                    prefix = 'Changes to %s ' % self.path
                out = '(%s):\n ' % self.fieldname
            if self.filechanges:
                out += '\n '.join(['%s' % i for i in self.filechanges])
            else:
                alines = self.oldvalue.splitlines()
                blines = self.newvalue.splitlines()
                diff = difflib.unified_diff(alines, blines, fieldname, fieldname, lineterm='')
                out += '\n '.join(list(diff))
                out += '\n --'
        else:
            out = '%s changed from "%s" to "%s"' % (self.fieldname, self.oldvalue, self.newvalue)

        if self.related:
            for chg in self.related:
                # PE/PV/PR context is only useful at the top level
                if not outer and chg.fieldname in ['PE', 'PV', 'PR']:
                    continue
                for line in chg._str_internal(False).splitlines():
                    out += '\n * %s' % line

        return '%s%s' % (prefix, out)
142 | |||
class FileChange:
    """One file-level change inside an image, described by a path, a
    change-type code and (for modifications) the old and new values."""

    changetype_add = 'A'
    changetype_remove = 'R'
    changetype_type = 'T'
    changetype_perms = 'P'
    changetype_ownergroup = 'O'
    changetype_link = 'L'

    def __init__(self, path, changetype, oldvalue = None, newvalue = None):
        self.path = path
        self.changetype = changetype
        self.oldvalue = oldvalue
        self.newvalue = newvalue

    def _ftype_str(self, ftype):
        """Map an ls-style file type character to a readable name."""
        names = {
            '-': 'file',
            'd': 'directory',
            'l': 'symlink',
            'c': 'char device',
            'b': 'block device',
            'p': 'fifo',
            's': 'socket',
        }
        if ftype in names:
            return names[ftype]
        return 'unknown (%s)' % ftype

    def __str__(self):
        # Add/remove carry no old/new values, just the path
        simple = {
            self.changetype_add: '%s was added',
            self.changetype_remove: '%s was removed',
        }
        if self.changetype in simple:
            return simple[self.changetype] % self.path
        # Type changes render the values through _ftype_str()
        if self.changetype == self.changetype_type:
            return '%s changed type from %s to %s' % (self.path, self._ftype_str(self.oldvalue), self._ftype_str(self.newvalue))
        # Remaining modification types share a common template shape
        templates = {
            self.changetype_perms: '%s changed permissions from %s to %s',
            self.changetype_ownergroup: '%s changed owner/group from %s to %s',
            self.changetype_link: '%s changed symlink target from %s to %s',
        }
        if self.changetype in templates:
            return templates[self.changetype] % (self.path, self.oldvalue, self.newvalue)
        return '%s changed (unknown)' % self.path
190 | |||
191 | |||
def blob_to_dict(blob):
    """Parse a git blob containing 'NAME = value' lines into a dict.

    Lines without an '=' are ignored; only the first '=' splits the line,
    and both key and value are stripped of surrounding whitespace.
    """
    adict = {}
    for line in blob.data_stream.read().splitlines():
        key_value = [part.strip() for part in line.split('=', 1)]
        if len(key_value) > 1:
            adict[key_value[0]] = key_value[1]
    return adict
200 | |||
201 | |||
def file_list_to_dict(lines):
    """Parse 'ls -l'-style listing lines into {path: [mode, owner, group, ...]}.

    Each value holds the first three fields (permissions string, owner,
    group); symlink entries get the link target appended as a fourth
    element.  The leading '.' of each path is stripped.
    """
    adict = {}
    for line in lines:
        # Split into at most 5 fields so file names containing spaces
        # survive intact in the final field
        fields = line.split(None, 4)
        # The path is the last field, minus the leading .
        path = fields[4][1:].strip()
        if ' -> ' in path:
            # Symlink: separate the path from its target
            linkparts = path.split(' -> ')
            path = linkparts[0]
            adict[path] = fields[0:3] + [linkparts[1]]
        else:
            adict[path] = fields[0:3]
    return adict
217 | |||
218 | |||
def compare_file_lists(alines, blines):
    """Compare two 'ls -l'-style file listings and return a list of
    FileChange objects describing removals, additions and modifications
    (type, permissions, owner/group, symlink target).

    alines/blines are raw listing lines as parsed by file_list_to_dict().
    """
    adict = file_list_to_dict(alines)
    bdict = file_list_to_dict(blines)
    filechanges = []
    # Fix: use items() rather than the Python 2-only iteritems(); behavior
    # is identical and this also works on Python 3.
    for path, splitv in adict.items():
        # pop() so that whatever remains in bdict afterwards is an addition
        newsplitv = bdict.pop(path, None)
        if newsplitv:
            # Check type (first character of the permissions string)
            oldvalue = splitv[0][0]
            newvalue = newsplitv[0][0]
            if oldvalue != newvalue:
                filechanges.append(FileChange(path, FileChange.changetype_type, oldvalue, newvalue))
            # Check permissions (rest of the permissions string)
            oldvalue = splitv[0][1:]
            newvalue = newsplitv[0][1:]
            if oldvalue != newvalue:
                filechanges.append(FileChange(path, FileChange.changetype_perms, oldvalue, newvalue))
            # Check owner/group
            oldvalue = '%s/%s' % (splitv[1], splitv[2])
            newvalue = '%s/%s' % (newsplitv[1], newsplitv[2])
            if oldvalue != newvalue:
                filechanges.append(FileChange(path, FileChange.changetype_ownergroup, oldvalue, newvalue))
            # Check symlink target (the old entry may not have been a
            # symlink, in which case it has no recorded target)
            if newsplitv[0][0] == 'l':
                if len(splitv) > 3:
                    oldvalue = splitv[3]
                else:
                    oldvalue = None
                newvalue = newsplitv[3]
                if oldvalue != newvalue:
                    filechanges.append(FileChange(path, FileChange.changetype_link, oldvalue, newvalue))
        else:
            filechanges.append(FileChange(path, FileChange.changetype_remove))

    # Whatever is left over has been added
    for path in bdict:
        filechanges.append(FileChange(path, FileChange.changetype_add))

    return filechanges
258 | |||
259 | |||
def compare_lists(alines, blines):
    """Compare two flat lists of items (e.g. installed package names) and
    return FileChange records for removed and added entries."""
    aset = set(alines)
    bset = set(blines)

    # Removals first, then additions, matching the original ordering
    filechanges = [FileChange(item, FileChange.changetype_remove)
                   for item in aset - bset]
    filechanges.extend(FileChange(item, FileChange.changetype_add)
                       for item in bset - aset)
    return filechanges
271 | |||
272 | |||
def compare_pkg_lists(astr, bstr):
    """Compare two dependency strings (as used in RDEPENDS etc.).

    Returns a tuple (depvera, depverb) of the exploded dependency dicts
    with entries removed where the only difference is a version increase
    on a '>=' or '=' constraint - those are considered uninteresting.
    Uses bb.utils.explode_dep_versions2(), which maps each package name to
    a list of version constraint strings (possibly empty).
    """
    depvera = bb.utils.explode_dep_versions2(astr)
    depverb = bb.utils.explode_dep_versions2(bstr)

    # Strip out changes where the version has increased
    remove = []
    for k in depvera:
        if k in depverb:
            dva = depvera[k]
            dvb = depverb[k]
            if dva and dvb and len(dva) == len(dvb):
                # Since length is the same, sort so that prefixes (e.g. >=) will line up
                dva.sort()
                dvb.sort()
                removeit = True
                for dvai, dvbi in zip(dva, dvb):
                    if dvai != dvbi:
                        # Constraints look like ">= 1.2"; compare the
                        # operator prefix first
                        aiprefix = dvai.split(' ')[0]
                        biprefix = dvbi.split(' ')[0]
                        if aiprefix == biprefix and aiprefix in ['>=', '=']:
                            # vercmp > 0 means the old version is newer,
                            # i.e. the version went DOWN - keep reporting it
                            if bb.utils.vercmp(bb.utils.split_version(dvai), bb.utils.split_version(dvbi)) > 0:
                                removeit = False
                                break
                        else:
                            # Different operators (or non-comparable
                            # constraints) - keep reporting the change
                            removeit = False
                            break
                if removeit:
                    remove.append(k)

    # Drop the suppressed entries from both sides so they compare equal
    for k in remove:
        depvera.pop(k)
        depverb.pop(k)

    return (depvera, depverb)
307 | |||
308 | |||
def compare_dict_blobs(path, ablob, bblob, report_all, report_ver):
    """Compare two 'latest' / 'image-info.txt' style blobs field by field.

    path is the repository directory the blobs came from (its basename is
    the package name).  Returns a list of ChangeRecord objects.  When
    report_all is False, insignificant changes (small numeric deltas,
    pure version bumps in dependency lists, reordering) are filtered out.
    report_ver forces PKGE/PKGV/PKGR changes to be marked as monitored.
    """
    adict = blob_to_dict(ablob)
    bdict = blob_to_dict(bblob)

    pkgname = os.path.basename(path)

    # Values assumed when a defaultval_map field is unset on one side
    defaultvals = {}
    defaultvals['PKG'] = pkgname
    defaultvals['PKGE'] = '0'

    changes = []
    # Always consider the defaultval_map keys even if absent from both
    # blobs, so defaulted values can be compared
    keys = list(set(adict.keys()) | set(bdict.keys()) | set(defaultval_map.keys()))
    for key in keys:
        astr = adict.get(key, '')
        bstr = bdict.get(key, '')
        if key in ver_monitor_fields:
            # Version fields are monitored if requested, or whenever a
            # value is explicitly set on either side
            monitored = report_ver or astr or bstr
        else:
            monitored = key in monitor_fields
        mapped_key = defaultval_map.get(key, '')
        if mapped_key:
            # Substitute the default (e.g. PKGV defaults to PV), tagging
            # it so the report can show the value was not explicitly set
            if not astr:
                astr = '%s [default]' % adict.get(mapped_key, defaultvals.get(key, ''))
            if not bstr:
                bstr = '%s [default]' % bdict.get(mapped_key, defaultvals.get(key, ''))

        if astr != bstr:
            if (not report_all) and key in numeric_fields:
                # Suppress numeric changes below the percentage threshold
                aval = int(astr or 0)
                bval = int(bstr or 0)
                if aval != 0:
                    percentchg = ((bval - aval) / float(aval)) * 100
                else:
                    percentchg = 100
                if abs(percentchg) < monitor_numeric_threshold:
                    continue
            elif (not report_all) and key in list_fields:
                # -dbg packages gaining debug files is routine; only a
                # FILELIST that became empty is worth reporting
                if key == "FILELIST" and path.endswith("-dbg") and bstr.strip() != '':
                    continue
                if key in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
                    # Ignore pure version increases in dependency lists
                    (depvera, depverb) = compare_pkg_lists(astr, bstr)
                    if depvera == depverb:
                        continue
                alist = astr.split()
                alist.sort()
                blist = bstr.split()
                blist.sort()
                # We don't care about the removal of self-dependencies
                if pkgname in alist and not pkgname in blist:
                    alist.remove(pkgname)
                if ' '.join(alist) == ' '.join(blist):
                    continue

            chg = ChangeRecord(path, key, astr, bstr, monitored)
            changes.append(chg)
    return changes
365 | |||
366 | |||
def process_changes(repopath, revision1, revision2='HEAD', report_all=False, report_ver=False):
    """Report significant changes in a buildhistory git repository between
    revision1 and revision2.

    Returns a list of ChangeRecord objects.  When report_all is False only
    monitored changes are returned; report_ver additionally reports
    PKGE/PKGV/PKGR changes.  Requires GitPython (the 'git' module).
    """
    repo = git.Repo(repopath)
    assert repo.bare == False
    commit = repo.commit(revision1)
    diff = commit.diff(revision2)

    changes = []
    # Modified files: compare old and new blob contents
    for d in diff.iter_change_type('M'):
        path = os.path.dirname(d.a_blob.path)
        if path.startswith('packages/'):
            filename = os.path.basename(d.a_blob.path)
            if filename == 'latest':
                # Per-package variable dump: field-by-field comparison
                changes.extend(compare_dict_blobs(path, d.a_blob, d.b_blob, report_all, report_ver))
            elif filename.startswith('latest.'):
                # Package scripts (latest.pkg_postinst etc.): whole-content diff
                chg = ChangeRecord(path, filename, d.a_blob.data_stream.read(), d.b_blob.data_stream.read(), True)
                changes.append(chg)
        elif path.startswith('images/'):
            filename = os.path.basename(d.a_blob.path)
            if filename in img_monitor_files:
                if filename == 'files-in-image.txt':
                    # Full image file listing: compare per-file attributes
                    alines = d.a_blob.data_stream.read().splitlines()
                    blines = d.b_blob.data_stream.read().splitlines()
                    filechanges = compare_file_lists(alines,blines)
                    if filechanges:
                        chg = ChangeRecord(path, filename, None, None, True)
                        chg.filechanges = filechanges
                        changes.append(chg)
                elif filename == 'installed-package-names.txt':
                    # Flat package-name list: simple add/remove comparison
                    alines = d.a_blob.data_stream.read().splitlines()
                    blines = d.b_blob.data_stream.read().splitlines()
                    filechanges = compare_lists(alines,blines)
                    if filechanges:
                        chg = ChangeRecord(path, filename, None, None, True)
                        chg.filechanges = filechanges
                        changes.append(chg)
                else:
                    chg = ChangeRecord(path, filename, d.a_blob.data_stream.read(), d.b_blob.data_stream.read(), True)
                    changes.append(chg)
            elif filename == 'image-info.txt':
                changes.extend(compare_dict_blobs(path, d.a_blob, d.b_blob, report_all, report_ver))
            elif '/image-files/' in path:
                # Files extracted from the image for monitoring: content diff
                chg = ChangeRecord(path, filename, d.a_blob.data_stream.read(), d.b_blob.data_stream.read(), True)
                changes.append(chg)

    # Look for added preinst/postinst/prerm/postrm
    # (without reporting newly added recipes)
    addedpkgs = []
    addedchanges = []
    for d in diff.iter_change_type('A'):
        path = os.path.dirname(d.b_blob.path)
        if path.startswith('packages/'):
            filename = os.path.basename(d.b_blob.path)
            if filename == 'latest':
                # A new 'latest' file means the whole package is new
                addedpkgs.append(path)
            elif filename.startswith('latest.'):
                # filename[7:] strips 'latest.' leaving the script name
                chg = ChangeRecord(path, filename[7:], '', d.b_blob.data_stream.read(), True)
                addedchanges.append(chg)
    for chg in addedchanges:
        # Only report added scripts for packages that already existed
        found = False
        for pkg in addedpkgs:
            if chg.path.startswith(pkg):
                found = True
                break
        if not found:
            changes.append(chg)

    # Look for cleared preinst/postinst/prerm/postrm
    for d in diff.iter_change_type('D'):
        path = os.path.dirname(d.a_blob.path)
        if path.startswith('packages/'):
            filename = os.path.basename(d.a_blob.path)
            if filename != 'latest' and filename.startswith('latest.'):
                chg = ChangeRecord(path, filename[7:], d.a_blob.data_stream.read(), '', True)
                changes.append(chg)

    # Link related changes
    for chg in changes:
        if chg.monitored:
            for chg2 in changes:
                # (Check dirname in the case of fields from recipe info files)
                if chg.path == chg2.path or os.path.dirname(chg.path) == chg2.path:
                    if chg2.fieldname in related_fields.get(chg.fieldname, []):
                        chg.related.append(chg2)
                    elif chg.path == chg2.path and chg.path.startswith('packages/') and chg2.fieldname in ['PE', 'PV', 'PR']:
                        chg.related.append(chg2)

    if report_all:
        return changes
    else:
        return [chg for chg in changes if chg.monitored]
diff --git a/meta/lib/oe/cachedpath.py b/meta/lib/oe/cachedpath.py new file mode 100644 index 0000000000..0840cc4c3f --- /dev/null +++ b/meta/lib/oe/cachedpath.py | |||
@@ -0,0 +1,233 @@ | |||
1 | # | ||
2 | # Based on standard python library functions but avoid | ||
3 | # repeated stat calls. Its assumed the files will not change from under us | ||
4 | # so we can cache stat calls. | ||
5 | # | ||
6 | |||
7 | import os | ||
8 | import errno | ||
9 | import stat as statmod | ||
10 | |||
class CachedPath(object):
    """Caching wrappers around os.path/os.stat operations.

    Assumes the filesystem does not change underneath us, so results of
    stat(), lstat() and normpath() can be reused.  Call updatecache() to
    invalidate entries for a path known to have changed.  Failed stats are
    cached as False, so repeated checks of missing paths are also cheap.
    """
    def __init__(self):
        # normalized path -> os.stat result, or False if the stat failed
        self.statcache = {}
        # normalized path -> os.lstat result, or False if the lstat failed
        self.lstatcache = {}
        # raw path -> normalized path
        self.normpathcache = {}
        return

    def updatecache(self, x):
        """Drop cached stat/lstat data for path x (e.g. after modifying it)."""
        x = self.normpath(x)
        if x in self.statcache:
            del self.statcache[x]
        if x in self.lstatcache:
            del self.lstatcache[x]

    def normpath(self, path):
        """Cached os.path.normpath()."""
        if path in self.normpathcache:
            return self.normpathcache[path]
        newpath = os.path.normpath(path)
        self.normpathcache[path] = newpath
        return newpath

    def _callstat(self, path):
        # Internal: stat() with failure cached as False; path must already
        # be normalized by the caller
        if path in self.statcache:
            return self.statcache[path]
        try:
            st = os.stat(path)
            self.statcache[path] = st
            return st
        except os.error:
            self.statcache[path] = False
            return False

    # We might as well call lstat and then only
    # call stat as well in the symbolic link case
    # since this turns out to be much more optimal
    # in real world usage of this cache
    def callstat(self, path):
        """Cached os.stat(); follows symlinks. Returns False on failure."""
        path = self.normpath(path)
        # calllstat() populates statcache for this path as a side effect
        self.calllstat(path)
        return self.statcache[path]

    def calllstat(self, path):
        """Cached os.lstat(); does not follow symlinks. Returns False on failure."""
        path = self.normpath(path)
        if path in self.lstatcache:
            return self.lstatcache[path]
        #bb.error("LStatpath:" + path)
        try:
            lst = os.lstat(path)
            self.lstatcache[path] = lst
            if not statmod.S_ISLNK(lst.st_mode):
                # Not a symlink, so stat() would return the same result
                self.statcache[path] = lst
            else:
                # Symlink: resolve the target for the stat cache too
                self._callstat(path)
            return lst
        except (os.error, AttributeError):
            self.lstatcache[path] = False
            self.statcache[path] = False
            return False

    # This follows symbolic links, so both islink() and isfile() can be true
    # for the same path on systems that support symlinks
    def isfile(self, path):
        """Test whether a path is a regular file"""
        st = self.callstat(path)
        if not st:
            return False
        return statmod.S_ISREG(st.st_mode)

    # Is a path a directory?
    # This follows symbolic links, so both islink() and isdir()
    # can be true for the same path on systems that support symlinks
    def isdir(self, s):
        """Return true if the pathname refers to an existing directory."""
        st = self.callstat(s)
        if not st:
            return False
        return statmod.S_ISDIR(st.st_mode)

    def islink(self, path):
        """Test whether a path is a symbolic link"""
        st = self.calllstat(path)
        if not st:
            return False
        return statmod.S_ISLNK(st.st_mode)

    # Does a path exist?
    # This is false for dangling symbolic links on systems that support them.
    def exists(self, path):
        """Test whether a path exists.  Returns False for broken symbolic links"""
        if self.callstat(path):
            return True
        return False

    def lexists(self, path):
        """Test whether a path exists.  Returns True for broken symbolic links"""
        if self.calllstat(path):
            return True
        return False

    def stat(self, path):
        """Cached os.stat() equivalent (returns False instead of raising)."""
        return self.callstat(path)

    def lstat(self, path):
        """Cached os.lstat() equivalent (returns False instead of raising)."""
        return self.calllstat(path)

    def walk(self, top, topdown=True, onerror=None, followlinks=False):
        """Directory tree generator using the cached isdir()/islink().

        Matches os.walk, not os.path.walk().
        """
        # We may not have read permission for top, in which case we can't
        # get a list of the files the directory contains.  os.path.walk
        # always suppressed the exception then, rather than blow up for a
        # minor reason when (say) a thousand readable directories are still
        # left to visit.  That logic is copied here.
        try:
            names = os.listdir(top)
        except os.error as err:
            if onerror is not None:
                onerror(err)
            return

        dirs, nondirs = [], []
        for name in names:
            if self.isdir(os.path.join(top, name)):
                dirs.append(name)
            else:
                nondirs.append(name)

        if topdown:
            yield top, dirs, nondirs
        for name in dirs:
            new_path = os.path.join(top, name)
            if followlinks or not self.islink(new_path):
                for x in self.walk(new_path, topdown, onerror, followlinks):
                    yield x
        if not topdown:
            yield top, dirs, nondirs

    ## realpath() related functions
    def __is_path_below(self, file, root):
        # root is expected to end with os.path.sep (realpath() ensures this)
        return (file + os.path.sep).startswith(root)

    def __realpath_rel(self, start, rel_path, root, loop_cnt, assume_dir):
        """Calculates real path of symlink 'start' + 'rel_path' below
        'root'; no part of 'start' below 'root' must contain symlinks. """
        have_dir = True

        # Resolve each component in turn, following symlinks via __realpath()
        for d in rel_path.split(os.path.sep):
            if not have_dir and not assume_dir:
                raise OSError(errno.ENOENT, "no such directory %s" % start)

            if d == os.path.pardir: # '..'
                if len(start) >= len(root):
                    # do not follow '..' before root
                    start = os.path.dirname(start)
                else:
                    # emit warning?
                    pass
            else:
                (start, have_dir) = self.__realpath(os.path.join(start, d),
                                                    root, loop_cnt, assume_dir)

            assert(self.__is_path_below(start, root))

        return start

    def __realpath(self, file, root, loop_cnt, assume_dir):
        # Follow the symlink chain at 'file' (staying below 'root'),
        # decrementing loop_cnt to detect symlink loops
        while self.islink(file) and len(file) >= len(root):
            if loop_cnt == 0:
                raise OSError(errno.ELOOP, file)

            loop_cnt -= 1
            target = os.path.normpath(os.readlink(file))

            if not os.path.isabs(target):
                tdir = os.path.dirname(file)
                assert(self.__is_path_below(tdir, root))
            else:
                # Absolute link targets are interpreted relative to root
                tdir = root

            file = self.__realpath_rel(tdir, target, root, loop_cnt, assume_dir)

        try:
            is_dir = self.isdir(file)
        except:
            is_dir = False

        return (file, is_dir)

    def realpath(self, file, root, use_physdir = True, loop_cnt = 100, assume_dir = False):
        """ Returns the canonical path of 'file' with assuming a
        toplevel 'root' directory. When 'use_physdir' is set, all
        preceding path components of 'file' will be resolved first;
        this flag should be set unless it is guaranteed that there is
        no symlink in the path. When 'assume_dir' is not set, missing
        path components will raise an ENOENT error"""

        root = os.path.normpath(root)
        file = os.path.normpath(file)

        if not root.endswith(os.path.sep):
            # letting root end with '/' makes some things easier
            root = root + os.path.sep

        if not self.__is_path_below(file, root):
            raise OSError(errno.EINVAL, "file '%s' is not below root" % file)

        try:
            if use_physdir:
                file = self.__realpath_rel(root, file[(len(root) - 1):], root, loop_cnt, assume_dir)
            else:
                file = self.__realpath(file, root, loop_cnt, assume_dir)[0]
        except OSError as e:
            if e.errno == errno.ELOOP:
                # make ELOOP more readable; without catching it, there will
                # be printed a backtrace with 100s of OSError exceptions
                # else
                raise OSError(errno.ELOOP,
                              "too much recursions while resolving '%s'; loop in '%s'" %
                              (file, e.strerror))

            raise

        return file
diff --git a/meta/lib/oe/classextend.py b/meta/lib/oe/classextend.py new file mode 100644 index 0000000000..8da87b771a --- /dev/null +++ b/meta/lib/oe/classextend.py | |||
@@ -0,0 +1,118 @@ | |||
class ClassExtender(object):
    """Renames recipes/packages and remaps their dependency variables when
    a class extension is applied (e.g. multilib).

    extname is the extension prefix (e.g. 'lib32'); d is the BitBake
    datastore the variables are read from and written back to.
    """
    def __init__(self, extname, d):
        self.extname = extname
        self.d = d
        # Filled by rename_packages(): list of [original, extended] pairs
        self.pkgs_mapping = []

    def extend_name(self, name):
        """Return name with the extension prefix applied.

        Kernel items, rtld* and -crosssdk names are left untouched, and
        names already carrying the prefix are not prefixed twice.
        """
        if name.startswith("kernel-") or name == "virtual/kernel":
            return name
        if name.startswith("rtld"):
            return name
        if name.endswith("-crosssdk"):
            return name
        if name.endswith("-" + self.extname):
            # Move a trailing extension suffix out of the way first
            name = name.replace("-" + self.extname, "")
        if name.startswith("virtual/"):
            # Prefix goes after the 'virtual/' namespace component
            subs = name.split("/", 1)[1]
            if not subs.startswith(self.extname):
                return "virtual/" + self.extname + "-" + subs
            return name
        if not name.startswith(self.extname):
            return self.extname + "-" + name
        return name

    def map_variable(self, varname, setvar = True):
        """Apply extend_name() to every space-separated item of a variable.

        Returns the new value; also writes it back unless setvar is False.
        """
        var = self.d.getVar(varname, True)
        if not var:
            return ""
        var = var.split()
        newvar = []
        for v in var:
            newvar.append(self.extend_name(v))
        newdata = " ".join(newvar)
        if setvar:
            self.d.setVar(varname, newdata)
        return newdata

    def map_regexp_variable(self, varname, setvar = True):
        """Like map_variable(), but items may be regexps anchored with '^';
        the prefix is inserted after the anchor in that case."""
        var = self.d.getVar(varname, True)
        if not var:
            return ""
        var = var.split()
        newvar = []
        for v in var:
            if v.startswith("^" + self.extname):
                # Already anchored and prefixed
                newvar.append(v)
            elif v.startswith("^"):
                newvar.append("^" + self.extname + "-" + v[1:])
            else:
                newvar.append(self.extend_name(v))
        newdata = " ".join(newvar)
        if setvar:
            self.d.setVar(varname, newdata)
        return newdata

    def map_depends(self, dep):
        """Map a single dependency name, leaving native/cross/crosssdk
        dependencies and names already carrying a multilib prefix alone."""
        if dep.endswith(("-native", "-native-runtime")) or ('nativesdk-' in dep) or ('cross-canadian' in dep) or ('-crosssdk-' in dep):
            return dep
        else:
            # Do not extend names that already have a multilib prefix
            var = self.d.getVar("MULTILIB_VARIANTS", True)
            if var:
                var = var.split()
                for v in var:
                    if dep.startswith(v):
                        return dep
            return self.extend_name(dep)

    def map_depends_variable(self, varname, suffix = ""):
        """Apply map_depends() to a dependency variable (optionally the
        per-package variant varname_suffix), preserving version constraints."""
        # We need to preserve EXTENDPKGV so it can be expanded correctly later
        if suffix:
            varname = varname + "_" + suffix
        orig = self.d.getVar("EXTENDPKGV", False)
        # Temporarily make ${EXTENDPKGV} expand to a literal placeholder so
        # it survives the explode/join round trip below
        self.d.setVar("EXTENDPKGV", "EXTENDPKGV")
        deps = self.d.getVar(varname, True)
        if not deps:
            self.d.setVar("EXTENDPKGV", orig)
            return
        deps = bb.utils.explode_dep_versions2(deps)
        newdeps = {}
        for dep in deps:
            newdeps[self.map_depends(dep)] = deps[dep]

        self.d.setVar(varname, bb.utils.join_deps(newdeps, False).replace("EXTENDPKGV", "${EXTENDPKGV}"))
        self.d.setVar("EXTENDPKGV", orig)

    def map_packagevars(self):
        """Remap all per-package dependency variables (the trailing ""
        entry covers the un-suffixed variables)."""
        for pkg in (self.d.getVar("PACKAGES", True).split() + [""]):
            self.map_depends_variable("RDEPENDS", pkg)
            self.map_depends_variable("RRECOMMENDS", pkg)
            self.map_depends_variable("RSUGGESTS", pkg)
            self.map_depends_variable("RPROVIDES", pkg)
            self.map_depends_variable("RREPLACES", pkg)
            self.map_depends_variable("RCONFLICTS", pkg)
            self.map_depends_variable("PKG", pkg)

    def rename_packages(self):
        """Rewrite PACKAGES with extended names, recording the mapping in
        self.pkgs_mapping for rename_package_variables()."""
        for pkg in (self.d.getVar("PACKAGES", True) or "").split():
            if pkg.startswith(self.extname):
                # Already prefixed: map the unprefixed name to it
                self.pkgs_mapping.append([pkg.split(self.extname + "-")[1], pkg])
                continue
            self.pkgs_mapping.append([pkg, self.extend_name(pkg)])

        self.d.setVar("PACKAGES", " ".join([row[1] for row in self.pkgs_mapping]))

    def rename_package_variables(self, variables):
        """Rename each per-package variable (VAR_pkg) to match the renamed
        packages recorded by rename_packages()."""
        for pkg_mapping in self.pkgs_mapping:
            for subs in variables:
                self.d.renameVar("%s_%s" % (subs, pkg_mapping[0]), "%s_%s" % (subs, pkg_mapping[1]))
110 | |||
class NativesdkClassExtender(ClassExtender):
    """ClassExtender variant for the nativesdk class extension, with
    special handling for cross toolchain dependencies."""
    def map_depends(self, dep):
        # Toolchain components become their -crosssdk counterparts
        if dep.endswith(("-gcc-initial", "-gcc", "-g++")):
            return dep + "-crosssdk"
        # Native/cross/crosssdk dependencies pass through unchanged
        if dep.endswith(("-native", "-native-runtime")) or ('nativesdk-' in dep) or ('-cross-' in dep) or ('-crosssdk-' in dep):
            return dep
        # Everything else gets the extension prefix
        return self.extend_name(dep)
diff --git a/meta/lib/oe/classutils.py b/meta/lib/oe/classutils.py new file mode 100644 index 0000000000..58188fdd6e --- /dev/null +++ b/meta/lib/oe/classutils.py | |||
@@ -0,0 +1,43 @@ | |||
class ClassRegistry(type):
    """Maintain a registry of classes, indexed by name.

    Note that this implementation requires that the names be unique, as it
    uses a dictionary to hold the classes by name.

    The name in the registry can be overridden via the 'name' attribute of
    the class, and the 'priority' attribute controls priority. The
    prioritized() method returns the registered classes in priority order.

    Subclasses of ClassRegistry may define an 'implemented' property to exert
    control over whether the class will be added to the registry (e.g. to
    keep abstract base classes out of the registry)."""
    # Default priority for classes that do not set their own.
    priority = 0

    # Python 2 metaclass-of-the-metaclass: each class that *uses*
    # ClassRegistry as its metaclass gets its own independent 'registry'
    # dict, instead of all sharing one on ClassRegistry itself.
    class __metaclass__(type):
        """Give each ClassRegistry their own registry"""
        def __init__(cls, name, bases, attrs):
            cls.registry = {}
            type.__init__(cls, name, bases, attrs)

    def __init__(cls, name, bases, attrs):
        # Runs once per registered (sub)class definition.
        super(ClassRegistry, cls).__init__(name, bases, attrs)
        try:
            # An 'implemented' attribute evaluating false keeps the class
            # out of the registry (used for abstract bases).
            if not cls.implemented:
                return
        except AttributeError:
            pass

        try:
            # Use an explicit 'name' attribute if the class provides one...
            cls.name
        except AttributeError:
            # ...otherwise fall back to the class's own name.
            cls.name = name
        cls.registry[cls.name] = cls

    @classmethod
    def prioritized(tcls):
        # Highest priority first.
        return sorted(tcls.registry.values(),
                      key=lambda v: v.priority, reverse=True)

    def unregister(cls):
        # Remove every registry entry pointing at this class.
        # (Python 2: keys() returns a list, so deleting while iterating
        # is safe here.)
        for key in cls.registry.keys():
            if cls.registry[key] is cls:
                del cls.registry[key]
diff --git a/meta/lib/oe/data.py b/meta/lib/oe/data.py new file mode 100644 index 0000000000..4cc0e02968 --- /dev/null +++ b/meta/lib/oe/data.py | |||
@@ -0,0 +1,17 @@ | |||
1 | import oe.maketype | ||
2 | |||
def typed_value(key, d):
    """Construct a value for the specified metadata variable, using its flags
    to determine the type and parameters for construction."""
    # The 'type' flag selects the oe.maketype constructor; None lets
    # maketype apply its default.
    var_type = d.getVarFlag(key, 'type')
    flags = d.getVarFlags(key)
    if flags is not None:
        # Expand every flag value so the type constructor sees fully
        # resolved strings. (Python 2: dict.iteritems())
        flags = dict((flag, d.expand(value))
                     for flag, value in flags.iteritems())
    else:
        flags = {}

    try:
        # Unset variables are constructed from the empty string.
        return oe.maketype.create(d.getVar(key, True) or '', var_type, **flags)
    except (TypeError, ValueError), exc:
        # Abort the build, naming the offending variable.
        bb.msg.fatal("Data", "%s: %s" % (key, str(exc)))
diff --git a/meta/lib/oe/distro_check.py b/meta/lib/oe/distro_check.py new file mode 100644 index 0000000000..8ed5b0ec80 --- /dev/null +++ b/meta/lib/oe/distro_check.py | |||
@@ -0,0 +1,383 @@ | |||
def get_links_from_url(url):
    "Return all the href links found on the web location"

    # sgmllib is Python 2 only (removed in Python 3).
    import urllib, sgmllib

    class LinksParser(sgmllib.SGMLParser):
        def parse(self, s):
            "Parse the given string 's'."
            self.feed(s)
            self.close()

        def __init__(self, verbose=0):
            "Initialise an object passing 'verbose' to the superclass."
            sgmllib.SGMLParser.__init__(self, verbose)
            self.hyperlinks = []

        # NOTE: the name 'start_a' is significant - SGMLParser dispatches
        # <a ...> tags to it by naming convention.
        def start_a(self, attributes):
            "Process a hyperlink and its 'attributes'."
            for name, value in attributes:
                if name == "href":
                    # Trailing/leading '/' stripped so directory links
                    # compare cleanly against release names.
                    self.hyperlinks.append(value.strip('/'))

        def get_hyperlinks(self):
            "Return the list of hyperlinks."
            return self.hyperlinks

    # Fetch the page body, then run it through the parser.
    sock = urllib.urlopen(url)
    webpage = sock.read()
    sock.close()

    linksparser = LinksParser()
    linksparser.parse(webpage)
    return linksparser.get_hyperlinks()
34 | |||
def find_latest_numeric_release(url):
    """Find the latest listed numeric release on the given url.

    Returns the link text of the highest float-valued link, or "" if no
    numeric link exists. (Fixes: shadowed builtin 'max'; bare 'except'
    narrowed to the ValueError that float() actually raises.)"""
    best_release = 0
    best_link = ""
    for link in get_links_from_url(url):
        try:
            release = float(link)
        except ValueError:
            # Non-numeric entries (README, subdirectories, ...) are skipped.
            continue
        if release > best_release:
            best_release = release
            best_link = link
    return best_link
48 | |||
def is_src_rpm(name):
    "Check if the link is pointing to a src.rpm file"
    return name.endswith(".src.rpm")
55 | |||
def package_name_from_srpm(srpm):
    "Strip out the package name from the src.rpm filename"
    # A srpm is "<name>-<version>-<release>.src.rpm"; the name itself may
    # contain dashes. Keep the first field plus every middle field that
    # does not start with a digit (digit-leading fields are version-ish).
    fields = srpm.split('-')
    name_fields = [fields[0]]
    for field in fields[1:-1]:
        if not field[0].isdigit():
            name_fields.append(field)
    return '-'.join(name_fields)
65 | |||
def clean_package_list(package_list):
    """Remove duplicate entries from *package_list* and return them sorted.

    Fixes: the original shadowed the builtin 'set', abused map() for
    side effects, and - despite its docstring - never actually sorted
    (dict key order was arbitrary). Callers only iterate/write the
    result, so returning a sorted unique list is backward compatible."""
    return sorted(set(package_list))
71 | |||
72 | |||
def get_latest_released_meego_source_package_list():
    """Returns list of all the name os packages in the latest meego distro.

    Reads a pre-fetched list from /tmp/Meego-1.1; a missing file yields an
    empty list. (Fixes: the file handle was never closed.)
    NOTE(review): the returned release is hard-coded to "1.0" even though
    the file is named Meego-1.1 - preserved as-is, confirm intent."""
    package_names = []
    try:
        with open("/tmp/Meego-1.1", "r") as f:
            for line in f:
                # Strip the trailing '\n' and tag with the section name.
                package_names.append(line[:-1] + ":" + "main")
    except IOError:
        # Best-effort: no list file means no meego packages.
        pass
    package_list = clean_package_list(package_names)
    return "1.0", package_list
84 | |||
def get_source_package_list_from_url(url, section):
    "Return a sectioned list of package names from a URL list"
    bb.note("Reading %s: %s" % (url, section))
    links = get_links_from_url(url)
    srpms = filter(is_src_rpm, links)
    names_list = map(package_name_from_srpm, srpms)
    # Tag every package name with the repository section it came from.
    return [name + ":" + section for name in names_list]
98 | |||
def get_latest_released_fedora_source_package_list():
    "Returns list of all the name os packages in the latest fedora distro"
    base = "http://archive.fedoraproject.org/pub/fedora/linux/"
    latest = find_latest_numeric_release(base + "releases/")

    # Release SRPMS plus the matching updates repository.
    package_names = get_source_package_list_from_url(base + "releases/%s/Fedora/source/SRPMS/" % latest, "main")
    package_names += get_source_package_list_from_url(base + "updates/%s/SRPMS/" % latest, "updates")

    return latest, clean_package_list(package_names)
111 | |||
def get_latest_released_opensuse_source_package_list():
    "Returns list of all the name os packages in the latest opensuse distro"
    latest = find_latest_numeric_release("http://download.opensuse.org/source/distribution/")

    # Main oss repository plus the matching updates repository.
    package_names = get_source_package_list_from_url(
        "http://download.opensuse.org/source/distribution/%s/repo/oss/suse/src/" % latest, "main")
    package_names += get_source_package_list_from_url(
        "http://download.opensuse.org/update/%s/rpm/src/" % latest, "updates")

    return latest, clean_package_list(package_names)
121 | |||
def get_latest_released_mandriva_source_package_list():
    "Returns list of all the name os packages in the latest mandriva distro"
    base = "http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/"
    latest = find_latest_numeric_release(base)

    # Main release SRPMS plus the matching updates repository.
    package_names = get_source_package_list_from_url(base + "%s/SRPMS/main/release/" % latest, "main")
    package_names += get_source_package_list_from_url(base + "%s/SRPMS/main/updates/" % latest, "updates")

    return latest, clean_package_list(package_names)
131 | |||
def find_latest_debian_release(url):
    """Find the latest listed debian release on the given url.

    Returns the text after the "Debian" prefix of the lexically greatest
    matching link, or "_NotFound_". (Fixes: bare 'except' narrowed to the
    IndexError that pop() on an empty list actually raises, so real
    errors are no longer swallowed.)"""
    releases = [link for link in get_links_from_url(url)
                if link.startswith("Debian") and ';' not in link]
    releases.sort()
    try:
        # Greatest sorted entry is the newest release; strip "Debian".
        return releases.pop()[6:]
    except IndexError:
        return "_NotFound_"
145 | |||
def get_debian_style_source_package_list(url, section):
    """Return the list of package-names stored in the debian style Sources.gz file.

    Downloads the gzipped Sources index to a temp file, extracts every
    "Package:" field and tags it with *section*. (Fixes: the gzip handle
    was never closed, and a failure while downloading or parsing leaked
    the socket and the temporary file.)"""
    import urllib
    import tempfile
    import gzip

    sock = urllib.urlopen(url)
    tmpfile = tempfile.NamedTemporaryFile(mode='wb', prefix='oecore.', suffix='.tmp', delete=False)
    tmpfilename = tmpfile.name
    try:
        tmpfile.write(sock.read())
    finally:
        sock.close()
        tmpfile.close()

    bb.note("Reading %s: %s" % (url, section))
    package_names = []
    f = gzip.open(tmpfilename)
    try:
        for line in f:
            if line.startswith("Package: "):
                # Strip the trailing '\n' and tag with the section.
                package_names.append(line[9:-1] + ":" + section)
    finally:
        f.close()
        os.unlink(tmpfilename)

    return package_names
167 | |||
def get_latest_released_debian_source_package_list():
    "Returns list of all the name os packages in the latest debian distro"
    latest = find_latest_debian_release("http://ftp.debian.org/debian/dists/")
    # NOTE(review): the package lists are always fetched from the "stable"
    # alias rather than the release name determined above - confirm intent.
    package_names = get_debian_style_source_package_list(
        "http://ftp.debian.org/debian/dists/stable/main/source/Sources.gz", "main")
    package_names += get_debian_style_source_package_list(
        "http://ftp.debian.org/debian/dists/stable-proposed-updates/main/source/Sources.gz", "updates")
    return latest, clean_package_list(package_names)
179 | |||
def find_latest_ubuntu_release(url):
    "Find the latest listed ubuntu release on the given url"
    # Ask the server to sort descending by modification time, then take
    # the first "<release>-updates" entry.
    url += "?C=M;O=D" # Descending Sort by Last Modified
    for link in get_links_from_url(url):
        if link.endswith("-updates"):
            return link[:-8]
    return "_NotFound_"
187 | |||
def get_latest_released_ubuntu_source_package_list():
    "Returns list of all the name os packages in the latest ubuntu distro"
    latest = find_latest_ubuntu_release("http://archive.ubuntu.com/ubuntu/dists/")
    # Main repository plus the matching -updates repository.
    package_names = get_debian_style_source_package_list(
        "http://archive.ubuntu.com/ubuntu/dists/%s/main/source/Sources.gz" % latest, "main")
    package_names += get_debian_style_source_package_list(
        "http://archive.ubuntu.com/ubuntu/dists/%s-updates/main/source/Sources.gz" % latest, "updates")
    return latest, clean_package_list(package_names)
201 | |||
def create_distro_packages_list(distro_check_dir):
    """Regenerate the per-distro package list files.

    Writes one "<Distro>-<release>" file per distro under
    <distro_check_dir>/package_lists, replacing any previous lists.
    (Fixes: "generatiosn" typo in the log message, builtin 'file'
    shadowed as a loop variable, and output files not closed if a
    write failed.)"""
    pkglst_dir = os.path.join(distro_check_dir, "package_lists")
    if not os.path.isdir(pkglst_dir):
        os.makedirs(pkglst_dir)
    # First clear out stale lists from any previous run.
    for fn in os.listdir(pkglst_dir):
        os.unlink(os.path.join(pkglst_dir, fn))

    per_distro_functions = [
        ["Debian", get_latest_released_debian_source_package_list],
        ["Ubuntu", get_latest_released_ubuntu_source_package_list],
        ["Fedora", get_latest_released_fedora_source_package_list],
        ["OpenSuSE", get_latest_released_opensuse_source_package_list],
        ["Mandriva", get_latest_released_mandriva_source_package_list],
        ["Meego", get_latest_released_meego_source_package_list]
    ]

    from datetime import datetime
    begin = datetime.now()
    for name, fetcher in per_distro_functions:
        release, package_list = fetcher()
        bb.note("Distro: %s, Latest Release: %s, # src packages: %d" % (name, release, len(package_list)))
        package_list_file = os.path.join(pkglst_dir, name + "-" + release)
        # 'with' guarantees the file is closed even if a write fails.
        with open(package_list_file, "w+b") as f:
            for pkg in package_list:
                f.write(pkg + "\n")
    end = datetime.now()
    delta = end - begin
    bb.note("package_list generation took this much time: %d seconds" % delta.seconds)
233 | |||
def update_distro_data(distro_check_dir, datetime):
    """
    If distro packages list data is old then rebuild it.
    The operations has to be protected by a lock so that
    only one thread performes it at a time.

    Fixes over the original: if open() failed, the finally block raised a
    NameError on the unbound 'f' (masking the real error), and Python 2
    file errors are IOError, which the 'except OSError' never caught.
    """
    if not os.path.isdir(distro_check_dir):
        try:
            bb.note("Making new directory: %s" % distro_check_dir)
            os.makedirs(distro_check_dir)
        except OSError:
            raise Exception('Unable to create directory %s' % (distro_check_dir))

    datetime_file = os.path.join(distro_check_dir, "build_datetime")
    saved_datetime = "_invalid_"
    import fcntl
    try:
        if not os.path.exists(datetime_file):
            # Touch the file so that the next open won't fail.
            open(datetime_file, 'w+b').close()
        f = open(datetime_file, "r+b")
    except (IOError, OSError):
        raise Exception('Unable to read/write this file: %s' % (datetime_file))

    try:
        fcntl.lockf(f, fcntl.LOCK_EX)
        try:
            saved_datetime = f.read()
            # Only the date part (YYYYMMDD) is compared, so the lists are
            # rebuilt at most once per day.
            if saved_datetime[0:8] != datetime[0:8]:
                bb.note("The build datetime did not match: saved:%s current:%s" % (saved_datetime, datetime))
                bb.note("Regenerating distro package lists")
                create_distro_packages_list(distro_check_dir)
                f.seek(0)
                f.write(datetime)
        finally:
            fcntl.lockf(f, fcntl.LOCK_UN)
    finally:
        f.close()
270 | |||
def compare_in_distro_packages_list(distro_check_dir, d):
    """Return the list of "<Distro>-<section>" entries (plus raw alias
    tokens) for which this recipe's name appears in the cached per-distro
    package lists under distro_check_dir/package_lists."""
    if not os.path.isdir(distro_check_dir):
        raise Exception("compare_in_distro_packages_list: invalid distro_check_dir passed")
        
    localdata = bb.data.createCopy(d)
    pkglst_dir = os.path.join(distro_check_dir, "package_lists")
    matching_distros = []
    pn = d.getVar('PN', True)
    recipe_name = d.getVar('PN', True)
    bb.note("Checking: %s" % pn)

    # NOTE(review): trim_dict is never used below - dead code?
    trim_dict = dict({"-native":"-native", "-cross":"-cross", "-initial":"-initial"})

    # Strip recipe-class suffixes/prefixes so the bare upstream name is
    # compared, and narrow OVERRIDES to the stripped recipe so that
    # DISTRO_PN_ALIAS overrides for it take effect in localdata.
    if pn.find("-native") != -1:
        pnstripped = pn.split("-native")
        localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
        bb.data.update_data(localdata)
        recipe_name = pnstripped[0]

    if pn.startswith("nativesdk-"):
        pnstripped = pn.split("nativesdk-")
        localdata.setVar('OVERRIDES', "pn-" + pnstripped[1] + ":" + d.getVar('OVERRIDES', True))
        bb.data.update_data(localdata)
        recipe_name = pnstripped[1]

    if pn.find("-cross") != -1:
        pnstripped = pn.split("-cross")
        localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
        bb.data.update_data(localdata)
        recipe_name = pnstripped[0]

    if pn.find("-initial") != -1:
        pnstripped = pn.split("-initial")
        localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
        bb.data.update_data(localdata)
        recipe_name = pnstripped[0]

    bb.note("Recipe: %s" % recipe_name)
    # DISTRO_PN_ALIAS holds space-separated tokens: either bare exception
    # names or "distro=alias" pairs.
    tmp = localdata.getVar('DISTRO_PN_ALIAS', True)

    distro_exceptions = dict({"OE-Core":'OE-Core', "OpenedHand":'OpenedHand', "Intel":'Intel', "Upstream":'Upstream', "Windriver":'Windriver', "OSPDT":'OSPDT Approved', "Poky":'poky'})

    # Bare tokens that are known exceptions count as matches directly.
    # NOTE(review): shadows builtins 'list' and 'str'; a bare token not in
    # distro_exceptions would raise KeyError here - confirm intended.
    if tmp:
        list = tmp.split(' ')
        for str in list:
            if str and str.find("=") == -1 and distro_exceptions[str]:
                matching_distros.append(str)

    # Collect "distro=alias" pairs: per-distro alternate package names.
    distro_pn_aliases = {}
    if tmp:
        list = tmp.split(' ')
        for str in list:
            if str.find("=") != -1:
                (dist, pn_alias) = str.split('=')
                distro_pn_aliases[dist.strip().lower()] = pn_alias.strip()

    # Scan every cached "<Distro>-<release>" list for the recipe name
    # (or its per-distro alias); each line is "<pkg>:<section>".
    for file in os.listdir(pkglst_dir):
        (distro, distro_release) = file.split("-")
        f = open(os.path.join(pkglst_dir, file), "rb")
        for line in f:
            (pkg, section) = line.split(":")
            if distro.lower() in distro_pn_aliases:
                pn = distro_pn_aliases[distro.lower()]
            else:
                pn = recipe_name
            if pn == pkg:
                matching_distros.append(distro + "-" + section[:-1]) # strip the \n at the end
                f.close()
                break
        f.close()


    # NOTE(review): every alias token (including "distro=alias" pairs and
    # exception names already added above) is appended again here.
    if tmp != None:
        list = tmp.split(' ')
        for item in list:
            matching_distros.append(item)
    bb.note("Matching: %s" % matching_distros)
    return matching_distros
349 | |||
def create_log_file(d, logname):
    """Create a datestamped, empty log file under LOG_DIR and point the
    plain <logname> symlink at it; sets LOG_FILE and returns the path.

    (Fixes: replaced 'subprocess.call("touch %s", shell=True)' with a
    direct open/close - no shell subprocess, no quoting issues with
    unusual paths.)"""
    logpath = d.getVar('LOG_DIR', True)
    bb.utils.mkdirhier(logpath)
    logfn, logsuffix = os.path.splitext(logname)
    logfile = os.path.join(logpath, "%s.%s%s" % (logfn, d.getVar('DATETIME', True), logsuffix))
    if not os.path.exists(logfile):
        slogfile = os.path.join(logpath, logname)
        if os.path.exists(slogfile):
            os.remove(slogfile)
        # Create the (empty) log file, then link the stable name to it.
        open(logfile, 'a').close()
        os.symlink(logfile, slogfile)
    d.setVar('LOG_FILE', logfile)
    return logfile
364 | |||
365 | |||
def save_distro_check_result(result, datetime, result_file, d):
    """Append one CSV line "PN,<result items...>" to result_file under an
    exclusive advisory lock.

    (Fixes: the file was not closed and the lock not released if a
    write failed; string built with repeated '+' replaced by join.)"""
    pn = d.getVar('PN', True)
    logdir = d.getVar('LOG_DIR', True)
    if not logdir:
        bb.error("LOG_DIR variable is not defined, can't write the distro_check results")
        return
    if not os.path.isdir(logdir):
        os.makedirs(logdir)
    line = ",".join([pn] + [i for i in result])
    import fcntl
    f = open(result_file, "a")
    try:
        fcntl.lockf(f, fcntl.LOCK_EX)
        try:
            f.seek(0, os.SEEK_END) # seek to the end of file
            f.write(line + "\n")
        finally:
            fcntl.lockf(f, fcntl.LOCK_UN)
    finally:
        f.close()
diff --git a/meta/lib/oe/image.py b/meta/lib/oe/image.py new file mode 100644 index 0000000000..7e080b00dd --- /dev/null +++ b/meta/lib/oe/image.py | |||
@@ -0,0 +1,345 @@ | |||
1 | from oe.utils import execute_pre_post_process | ||
2 | import os | ||
3 | import subprocess | ||
4 | import multiprocessing | ||
5 | |||
6 | |||
def generate_image(arg):
    """Worker: run one image creation script.

    *arg* is a (type, subimages, create_img_cmd) tuple; returns an error
    string on failure, None on success (pool-friendly: no exceptions
    escape)."""
    (type, subimages, create_img_cmd) = arg

    bb.note("Running image creation script for %s: %s ..." %
            (type, create_img_cmd))

    try:
        subprocess.check_output(create_img_cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        return("Error: The image creation script '%s' returned %d:\n%s" %
               (e.cmd, e.returncode, e.output))
    else:
        return None
20 | |||
21 | |||
22 | """ | ||
23 | This class will help compute IMAGE_FSTYPE dependencies and group them in batches | ||
24 | that can be executed in parallel. | ||
25 | |||
26 | The next example is for illustration purposes, highly unlikely to happen in real life. | ||
27 | It's just one of the test cases I used to test the algorithm: | ||
28 | |||
29 | For: | ||
30 | IMAGE_FSTYPES = "i1 i2 i3 i4 i5" | ||
31 | IMAGE_TYPEDEP_i4 = "i2" | ||
32 | IMAGE_TYPEDEP_i5 = "i6 i4" | ||
33 | IMAGE_TYPEDEP_i6 = "i7" | ||
34 | IMAGE_TYPEDEP_i7 = "i2" | ||
35 | |||
36 | We get the following list of batches that can be executed in parallel, having the | ||
37 | dependencies satisfied: | ||
38 | |||
39 | [['i1', 'i3', 'i2'], ['i4', 'i7'], ['i6'], ['i5']] | ||
40 | """ | ||
class ImageDepGraph(object):
    """Builds the IMAGE_FSTYPE dependency graph (via IMAGE_TYPEDEP_*) and
    groups the types into batches whose members can be created in
    parallel, each batch depending only on earlier batches."""

    def __init__(self, d):
        self.d = d
        self.graph = dict()
        self.deps_array = dict()

    def _construct_dep_graph(self, image_fstypes):
        """Map each requested fstype (and, recursively, everything it
        depends on) to its space-separated IMAGE_TYPEDEP_* string."""
        graph = dict()

        def add_node(node):
            deps = self.d.getVar('IMAGE_TYPEDEP_' + node, True) or ""
            graph[node] = deps
            for dep in deps.split():
                if dep not in graph:
                    add_node(dep)

        for fstype in image_fstypes:
            add_node(fstype)

        return graph

    def _clean_graph(self):
        """Drop masked types (live/VMDK are produced by inheriting
        bbclasses, not here); their dependencies stay recorded."""
        masked = (self.d.getVar('IMAGE_TYPES_MASKED', True) or "").split()
        for item in masked:
            self.graph.pop(item, None)

    def _compute_dependencies(self):
        """
        returns dict object of nodes with [no_of_depends_on, no_of_depended_by]
        for each node
        """
        counts = dict((node, [0, 0]) for node in self.graph)

        for node, depstr in self.graph.items():
            deps = depstr.split()
            counts[node][0] += len(deps)
            for dep in deps:
                counts[dep][1] += 1

        return counts

    def _sort_graph(self):
        """Topologically sort the graph into dependency-satisfied batches
        (recursive; consumes self.deps_array)."""
        # Nodes with no outstanding dependencies form the next batch.
        ready = [node for node in self.graph
                 if node in self.deps_array and self.deps_array[node][0] == 0]

        if len(ready) == 0 and len(self.deps_array) != 0:
            bb.fatal("possible fstype circular dependency...")

        batches = [ready]

        # Settle the batch: decrement dependants, drop finished nodes.
        for done in ready:
            for node in self.graph:
                if done in self.graph[node].split():
                    self.deps_array[node][0] -= 1
            self.deps_array.pop(done, None)

        if len(self.deps_array):
            batches += self._sort_graph()

        return batches

    def group_fstypes(self, image_fstypes):
        """Return (all_types, batches) for the requested fstypes."""
        self.graph = self._construct_dep_graph(image_fstypes)
        self._clean_graph()
        self.deps_array = self._compute_dependencies()
        return (list(self.graph), self._sort_graph())
133 | |||
134 | |||
class Image(ImageDepGraph):
    """Drives image generation: sizes the rootfs, renders one shell script
    per fstype, runs the scripts (in parallel within each dependency
    batch) and maintains the IMAGE_LINK_NAME symlinks in the deploy dir."""

    def __init__(self, d):
        self.d = d

        super(Image, self).__init__(d)

    def _get_rootfs_size(self):
        """compute the rootfs size (in KiB, aligned and clamped)"""
        rootfs_alignment = int(self.d.getVar('IMAGE_ROOTFS_ALIGNMENT', True))
        overhead_factor = float(self.d.getVar('IMAGE_OVERHEAD_FACTOR', True))
        rootfs_req_size = int(self.d.getVar('IMAGE_ROOTFS_SIZE', True))
        # NOTE(review): IMAGE_ROOTFS_EXTRA_SPACE is eval()ed so it may be
        # an arithmetic expression; it comes from build metadata, not
        # untrusted input.
        rootfs_extra_space = eval(self.d.getVar('IMAGE_ROOTFS_EXTRA_SPACE', True))
        rootfs_maxsize = self.d.getVar('IMAGE_ROOTFS_MAXSIZE', True)

        # Actual on-disk usage of the rootfs, in KiB.
        output = subprocess.check_output(['du', '-ks',
                                          self.d.getVar('IMAGE_ROOTFS', True)])
        size_kb = int(output.split()[0])
        base_size = size_kb * overhead_factor
        # Tuple-index idiom == max(base_size, rootfs_req_size), then add
        # the configured extra space.
        base_size = (base_size, rootfs_req_size)[base_size < rootfs_req_size] + \
            rootfs_extra_space

        # Round any fractional size up to a whole KiB.
        if base_size != int(base_size):
            base_size = int(base_size + 1)

        # Round up to the next IMAGE_ROOTFS_ALIGNMENT boundary.
        base_size += rootfs_alignment - 1
        base_size -= base_size % rootfs_alignment

        # Check the rootfs size against IMAGE_ROOTFS_MAXSIZE (if set)
        if rootfs_maxsize:
            rootfs_maxsize_int = int(rootfs_maxsize)
            if base_size > rootfs_maxsize_int:
                bb.fatal("The rootfs size %d(K) overrides the max size %d(K)" % \
                    (base_size, rootfs_maxsize_int))

        return base_size

    def _create_symlinks(self, subimages):
        """create symlinks to the newly created image"""
        deploy_dir = self.d.getVar('DEPLOY_DIR_IMAGE', True)
        img_name = self.d.getVar('IMAGE_NAME', True)
        link_name = self.d.getVar('IMAGE_LINK_NAME', True)
        manifest_name = self.d.getVar('IMAGE_MANIFEST', True)

        # Relative symlinks: create them from inside the deploy dir.
        os.chdir(deploy_dir)

        if link_name is not None:
            for type in subimages:
                if os.path.exists(img_name + ".rootfs." + type):
                    dst = link_name + "." + type
                    src = img_name + ".rootfs." + type
                    bb.note("Creating symlink: %s -> %s" % (dst, src))
                    os.symlink(src, dst)

            # Link the manifest too, once, if it exists.
            if manifest_name is not None and \
                    os.path.exists(manifest_name) and \
                    not os.path.exists(link_name + ".manifest"):
                os.symlink(os.path.basename(manifest_name),
                           link_name + ".manifest")

    def _remove_old_symlinks(self):
        """remove the symlinks to old binaries"""

        if self.d.getVar('IMAGE_LINK_NAME', True):
            deploy_dir = self.d.getVar('DEPLOY_DIR_IMAGE', True)
            for img in os.listdir(deploy_dir):
                if img.find(self.d.getVar('IMAGE_LINK_NAME', True)) == 0:
                    img = os.path.join(deploy_dir, img)
                    if os.path.islink(img):
                        # With RM_OLD_IMAGE, also delete the image the
                        # link points at, not just the link.
                        if self.d.getVar('RM_OLD_IMAGE', True) == "1" and \
                                os.path.exists(os.path.realpath(img)):
                            os.remove(os.path.realpath(img))

                        os.remove(img)

    """
    This function will just filter out the compressed image types from the
    fstype groups returning a (filtered_fstype_groups, cimages) tuple.
    """
    def _filter_out_commpressed(self, fstype_groups):
        ctypes = self.d.getVar('COMPRESSIONTYPES', True).split()
        cimages = {}

        filtered_groups = []
        for group in fstype_groups:
            filtered_group = []
            for type in group:
                basetype = None
                for ctype in ctypes:
                    # "ext4.gz" -> base "ext4" with compression "gz";
                    # record the pairing in cimages.
                    if type.endswith("." + ctype):
                        basetype = type[:-len("." + ctype)]
                        if basetype not in filtered_group:
                            filtered_group.append(basetype)
                        if basetype not in cimages:
                            cimages[basetype] = []
                        if ctype not in cimages[basetype]:
                            cimages[basetype].append(ctype)
                        break
                # Plain (uncompressed) types pass through unchanged.
                if not basetype and type not in filtered_group:
                    filtered_group.append(type)

            filtered_groups.append(filtered_group)

        return (filtered_groups, cimages)

    def _get_image_types(self):
        """returns a (types, cimages) tuple"""

        alltypes, fstype_groups = self.group_fstypes(self.d.getVar('IMAGE_FSTYPES', True).split())

        filtered_groups, cimages = self._filter_out_commpressed(fstype_groups)

        return (alltypes, filtered_groups, cimages)

    def _write_script(self, type, cmds):
        """Emit ${T}/create_image.<type> containing *cmds* wrapped as a
        fakeroot shell function; returns the script path."""
        tempdir = self.d.getVar('T', True)
        script_name = os.path.join(tempdir, "create_image." + type)

        self.d.setVar('img_creation_func', '\n'.join(cmds))
        self.d.setVarFlag('img_creation_func', 'func', 1)
        self.d.setVarFlag('img_creation_func', 'fakeroot', 1)

        with open(script_name, "w+") as script:
            script.write("%s" % bb.build.shell_trap_code())
            script.write("export ROOTFS_SIZE=%d\n" % self._get_rootfs_size())
            bb.data.emit_func('img_creation_func', script, self.d)
            script.write("img_creation_func\n")

        os.chmod(script_name, 0775)

        return script_name

    def _get_imagecmds(self):
        """Build the batched list of (type, subimages, script) commands,
        one sub-list per parallelizable fstype group."""
        old_overrides = self.d.getVar('OVERRIDES', 0)

        alltypes, fstype_groups, cimages = self._get_image_types()

        image_cmd_groups = []

        bb.note("The image creation groups are: %s" % str(fstype_groups))
        for fstype_group in fstype_groups:
            image_cmds = []
            for type in fstype_group:
                cmds = []
                subimages = []

                # Per-type datastore copy so <type> OVERRIDES apply.
                localdata = bb.data.createCopy(self.d)
                localdata.setVar('OVERRIDES', '%s:%s' % (type, old_overrides))
                bb.data.update_data(localdata)
                localdata.setVar('type', type)

                cmds.append("\t" + localdata.getVar("IMAGE_CMD", True))
                cmds.append(localdata.expand("\tcd ${DEPLOY_DIR_IMAGE}"))

                # Append the compression commands for this base type.
                if type in cimages:
                    for ctype in cimages[type]:
                        cmds.append("\t" + localdata.getVar("COMPRESS_CMD_" + ctype, True))
                        subimages.append(type + "." + ctype)

                # Intermediate-only types are removed after compression;
                # requested types are kept as subimages themselves.
                if type not in alltypes:
                    cmds.append(localdata.expand("\trm ${IMAGE_NAME}.rootfs.${type}"))
                else:
                    subimages.append(type)

                script_name = self._write_script(type, cmds)

                image_cmds.append((type, subimages, script_name))

            image_cmd_groups.append(image_cmds)

        return image_cmd_groups

    def create(self):
        """Run the whole image creation sequence: pre-process commands,
        old-symlink cleanup, per-batch parallel script execution, symlink
        creation, post-process commands."""
        bb.note("###### Generate images #######")
        pre_process_cmds = self.d.getVar("IMAGE_PREPROCESS_COMMAND", True)
        post_process_cmds = self.d.getVar("IMAGE_POSTPROCESS_COMMAND", True)

        execute_pre_post_process(self.d, pre_process_cmds)

        self._remove_old_symlinks()

        image_cmd_groups = self._get_imagecmds()

        for image_cmds in image_cmd_groups:
            # create the images in parallel
            nproc = multiprocessing.cpu_count()
            pool = bb.utils.multiprocessingpool(nproc)
            results = list(pool.imap(generate_image, image_cmds))
            pool.close()
            pool.join()

            # Any worker returning a string reported a failure.
            for result in results:
                if result is not None:
                    bb.fatal(result)

            for image_type, subimages, script in image_cmds:
                bb.note("Creating symlinks for %s image ..." % image_type)
                self._create_symlinks(subimages)

        execute_pre_post_process(self.d, post_process_cmds)
334 | |||
335 | |||
def create_image(d):
    """Module entry point: build every configured image type for the
    given datastore."""
    image = Image(d)
    image.create()
338 | |||
# Placeholder: running this module directly executes nothing yet;
# standalone (non-bitbake) image creation is still to be implemented.
if __name__ == "__main__":
    """
    Image creation can be called independent from bitbake environment.
    """
    """
    TBD
    """
diff --git a/meta/lib/oe/license.py b/meta/lib/oe/license.py new file mode 100644 index 0000000000..340da61102 --- /dev/null +++ b/meta/lib/oe/license.py | |||
@@ -0,0 +1,116 @@ | |||
1 | # vi:sts=4:sw=4:et | ||
2 | """Code for parsing OpenEmbedded license strings""" | ||
3 | |||
4 | import ast | ||
5 | import re | ||
6 | from fnmatch import fnmatchcase as fnmatch | ||
7 | |||
class LicenseError(Exception):
    """Base class for errors raised while handling OE license strings."""
    pass
10 | |||
class LicenseSyntaxError(LicenseError):
    """Raised when a license string cannot be parsed as an expression."""
    def __init__(self, licensestr, exc):
        LicenseError.__init__(self)
        self.licensestr = licensestr
        self.exc = exc

    def __str__(self):
        return "error in '%s': %s" % (self.licensestr, self.exc)
19 | |||
class InvalidLicense(LicenseError):
    """Raised when a license token contains characters outside the
    permitted set."""
    def __init__(self, license):
        LicenseError.__init__(self)
        self.license = license

    def __str__(self):
        return "invalid characters in license '%s'" % self.license
27 | |||
# Tokenizer patterns for license strings: operators/separators, and the
# characters allowed in a license name.  Raw strings so the '\-' in the
# character class is an explicit regex escape rather than relying on
# Python passing an unrecognized string escape through unchanged.
license_operator = re.compile(r'([&|() ])')
license_pattern = re.compile(r'[a-zA-Z0-9.+_\-]+$')
30 | |||
class LicenseVisitor(ast.NodeVisitor):
    """Syntax tree visitor which can accept OpenEmbedded license strings"""
    def visit_string(self, licensestr):
        """Tokenize an OE license string, quote the license names, insert an
        implicit '&' between adjacent names, and visit the resulting AST."""
        tokens = [piece for piece in license_operator.split(licensestr)
                  if piece.strip()]
        rewritten = []
        previous = None
        for token in tokens:
            if license_pattern.match(token):
                # Two bare license names in a row imply an AND.
                if previous is not None and license_pattern.match(previous):
                    rewritten.append('&')
                rewritten.append('"' + token + '"')
            elif license_operator.match(token):
                rewritten.append(token)
            else:
                raise InvalidLicense(token)
            previous = token

        self.visit(ast.parse(' '.join(rewritten)))
46 | |||
class FlattenVisitor(LicenseVisitor):
    """Flatten a license tree (parsed from a string) by selecting one of each
    set of OR options, in the way the user specifies"""
    def __init__(self, choose_licenses):
        LicenseVisitor.__init__(self)
        self.choose_licenses = choose_licenses
        self.licenses = []

    def visit_Str(self, node):
        # Leaf node: a quoted license name.
        self.licenses.append(node.s)

    def visit_BinOp(self, node):
        if not isinstance(node.op, ast.BitOr):
            # '&' (and anything else): just descend into both operands.
            self.generic_visit(node)
            return

        # '|': flatten each alternative independently, then let the
        # user-supplied chooser pick which side to keep.
        left = FlattenVisitor(self.choose_licenses)
        left.visit(node.left)

        right = FlattenVisitor(self.choose_licenses)
        right.visit(node.right)

        self.licenses.extend(self.choose_licenses(left.licenses, right.licenses))
70 | |||
def flattened_licenses(licensestr, choose_licenses):
    """Given a license string and choose_licenses function, return a flat list of licenses"""
    visitor = FlattenVisitor(choose_licenses)
    try:
        visitor.visit_string(licensestr)
    except SyntaxError as exc:
        # Re-raise with the offending license string attached.
        raise LicenseSyntaxError(licensestr, exc)
    return visitor.licenses
79 | |||
def is_included(licensestr, whitelist=None, blacklist=None):
    """Given a license string and whitelist and blacklist, determine if the
    license string matches the whitelist and does not match the blacklist.

    Returns a tuple holding the boolean state and a list of the applicable
    licenses which were excluded if state is False, or the included licenses
    if state is True.
    """

    def include_license(license):
        return any(fnmatch(license, pattern) for pattern in whitelist)

    def exclude_license(license):
        return any(fnmatch(license, pattern) for pattern in blacklist)

    def choose_licenses(alpha, beta):
        """Select the option in an OR which is the 'best' (has the most
        included licenses)."""
        # Count via generator rather than len(filter(...)), which only
        # works on Python 2 (filter returns an iterator on Python 3).
        alpha_weight = sum(1 for lic in alpha if include_license(lic))
        beta_weight = sum(1 for lic in beta if include_license(lic))
        if alpha_weight > beta_weight:
            return alpha
        else:
            return beta

    # Empty whitelist includes everything; empty blacklist excludes nothing.
    if not whitelist:
        whitelist = ['*']

    if not blacklist:
        blacklist = []

    licenses = flattened_licenses(licensestr, choose_licenses)
    excluded = [lic for lic in licenses if exclude_license(lic)]
    included = [lic for lic in licenses if include_license(lic)]
    if excluded:
        return False, excluded
    else:
        return True, included
diff --git a/meta/lib/oe/lsb.py b/meta/lib/oe/lsb.py new file mode 100644 index 0000000000..b53f361035 --- /dev/null +++ b/meta/lib/oe/lsb.py | |||
@@ -0,0 +1,81 @@ | |||
def release_dict():
    """Return the output of lsb_release -ir as a dictionary"""
    from subprocess import PIPE

    try:
        output, err = bb.process.run(['lsb_release', '-ir'], stderr=PIPE)
    except bb.process.CmdError:
        # lsb_release missing or failed; caller falls back to the files.
        return None

    data = {}
    for line in output.splitlines():
        # lsb_release separates key and value with ":\t"; skip anything else.
        fields = line.split(":\t", 1)
        if len(fields) == 2:
            data[fields[0]] = fields[1]
    return data
19 | |||
def release_dict_file():
    """ Try to gather LSB release information manually when lsb_release tool is unavailable """
    data = None
    try:
        if os.path.exists('/etc/lsb-release'):
            data = {}
            with open('/etc/lsb-release') as f:
                for line in f:
                    # Skip blank or malformed lines: unpacking them would
                    # raise ValueError, which the except below does not catch.
                    if '=' not in line:
                        continue
                    key, value = line.split("=", 1)
                    data[key] = value.strip()
        elif os.path.exists('/etc/redhat-release'):
            data = {}
            with open('/etc/redhat-release') as f:
                distro = f.readline().strip()
            import re
            # e.g. "Fedora release 20 (Heisenbug)"
            match = re.match(r'(.*) release (.*) \((.*)\)', distro)
            if match:
                data['DISTRIB_ID'] = match.group(1)
                data['DISTRIB_RELEASE'] = match.group(2)
        elif os.path.exists('/etc/SuSE-release'):
            data = {}
            data['DISTRIB_ID'] = 'SUSE LINUX'
            with open('/etc/SuSE-release') as f:
                for line in f:
                    if line.startswith('VERSION = '):
                        data['DISTRIB_RELEASE'] = line[10:].rstrip()
                        break
        elif os.path.exists('/etc/os-release'):
            data = {}
            with open('/etc/os-release') as f:
                for line in f:
                    if line.startswith('NAME='):
                        data['DISTRIB_ID'] = line[5:].rstrip().strip('"')
                    if line.startswith('VERSION_ID='):
                        data['DISTRIB_RELEASE'] = line[11:].rstrip().strip('"')
    except IOError:
        return None
    return data
58 | |||
def distro_identifier(adjust_hook=None):
    """Return a distro identifier string based upon lsb_release -ri,
    with optional adjustment via a hook"""

    distro_id = None
    release = None

    # Prefer the lsb_release tool; fall back to parsing release files.
    lsb_data = release_dict()
    if lsb_data:
        distro_id = lsb_data['Distributor ID']
        release = lsb_data['Release']
    else:
        file_data = release_dict_file()
        if file_data:
            distro_id = file_data['DISTRIB_ID']
            release = file_data.get('DISTRIB_RELEASE', None)

    if adjust_hook:
        distro_id, release = adjust_hook(distro_id, release)
    if not distro_id:
        return "Unknown"

    if release:
        id_str = '{0}-{1}'.format(distro_id, release)
    else:
        id_str = distro_id
    # Normalise characters that would be awkward in paths.
    return id_str.replace(' ', '-').replace('/', '-')
diff --git a/meta/lib/oe/maketype.py b/meta/lib/oe/maketype.py new file mode 100644 index 0000000000..139f333691 --- /dev/null +++ b/meta/lib/oe/maketype.py | |||
@@ -0,0 +1,99 @@ | |||
1 | """OpenEmbedded variable typing support | ||
2 | |||
3 | Types are defined in the metadata by name, using the 'type' flag on a | ||
4 | variable. Other flags may be utilized in the construction of the types. See | ||
5 | the arguments of the type's factory for details. | ||
6 | """ | ||
7 | |||
8 | import inspect | ||
9 | import types | ||
10 | |||
# Registry of type factories, keyed by type name; populated via register().
available_types = {}
12 | |||
class MissingFlag(TypeError):
    """A particular flag is required to construct the type, but has not been
    provided."""
    def __init__(self, flag, type):
        TypeError.__init__(self)
        self.flag = flag
        self.type = type

    def __str__(self):
        return "Type '%s' requires flag '%s'" % (self.type, self.flag)
23 | |||
def factory(var_type):
    """Return the factory for a specified type."""
    if var_type is None:
        raise TypeError("No type specified. Valid types: %s" %
                        ', '.join(available_types))
    if var_type in available_types:
        return available_types[var_type]
    raise TypeError("Invalid type '%s':\n Valid types: %s" %
                    (var_type, ', '.join(available_types)))
34 | |||
def create(value, var_type, **flags):
    """Create an object of the specified type, given the specified flags and
    string value."""
    obj = factory(var_type)
    objflags = {}
    for flag in obj.flags:
        if flag in flags:
            # Flag supplied by the caller: pass it through.
            objflags[flag] = flags[flag]
        elif flag not in obj.optflags:
            # Mandatory flag missing.
            raise MissingFlag(flag, var_type)

    return obj(value, **objflags)
48 | |||
def get_callable_args(obj):
    """Grab all but the first argument of the specified callable, returning
    the list, as well as a list of which of the arguments have default
    values."""
    if type(obj) is type:
        obj = obj.__init__

    # inspect.getargspec() was removed in Python 3.11; prefer
    # getfullargspec() where it exists (its first four fields match
    # getargspec's), keeping Python 2 compatibility via the fallback.
    try:
        argspec = inspect.getfullargspec(obj)
    except AttributeError:
        argspec = inspect.getargspec(obj)
    args, defaults = argspec[0], argspec[3]

    flaglist = []
    if args:
        # Drop a leading 'self' for bound/unbound methods.
        if len(args) > 1 and args[0] == 'self':
            args = args[1:]
        flaglist.extend(args)

    optional = set()
    if defaults:
        # Arguments with defaults are the trailing len(defaults) entries.
        optional |= set(flaglist[-len(defaults):])
    return flaglist, optional
67 | |||
def factory_setup(name, obj):
    """Prepare a factory for use: attach flags, optflags and name."""
    args, optional = get_callable_args(obj)
    # The first argument is the value itself; the rest are the flags.
    flags = args[1:]
    if flags:
        obj.flags = flags
        obj.optflags = set(optional)
    else:
        obj.flags = obj.optflags = ()

    if not hasattr(obj, 'name'):
        obj.name = name
80 | |||
def register(name, factory):
    """Register a type, given its name and a factory callable.

    Determines the required and optional flags from the factory's
    arguments."""
    # factory_setup() attaches .flags/.optflags/.name to the callable; the
    # registry key is the factory's (possibly pre-existing) .name attribute,
    # not necessarily the name argument.
    factory_setup(name, factory)
    available_types[factory.name] = factory
88 | |||
89 | |||
# Register all our included types: every public callable exposed by the
# stdlib 'types' module becomes an available type factory.
for name in dir(types):
    obj = getattr(types, name)
    if not name.startswith('_') and callable(obj):
        register(name, obj)
diff --git a/meta/lib/oe/manifest.py b/meta/lib/oe/manifest.py new file mode 100644 index 0000000000..42832f15d2 --- /dev/null +++ b/meta/lib/oe/manifest.py | |||
@@ -0,0 +1,345 @@ | |||
1 | from abc import ABCMeta, abstractmethod | ||
2 | import os | ||
3 | import re | ||
4 | import bb | ||
5 | |||
6 | |||
class Manifest(object):
    """
    This is an abstract class. Do not instantiate this directly.
    """
    __metaclass__ = ABCMeta

    # Package-type codes written into the manifest files (see the header
    # text below for their meanings).
    PKG_TYPE_MUST_INSTALL = "mip"
    PKG_TYPE_MULTILIB = "mlp"
    PKG_TYPE_LANGUAGE = "lgp"
    PKG_TYPE_ATTEMPT_ONLY = "aop"

    # Manifest kinds; these select the var_maps entry and the manifest
    # file name prefix.
    MANIFEST_TYPE_IMAGE = "image"
    MANIFEST_TYPE_SDK_HOST = "sdk_host"
    MANIFEST_TYPE_SDK_TARGET = "sdk_target"

    # For each manifest type: which datastore variables feed the manifest,
    # and the package type assigned to the packages each variable lists.
    var_maps = {
        MANIFEST_TYPE_IMAGE: {
            "PACKAGE_INSTALL": PKG_TYPE_MUST_INSTALL,
            "PACKAGE_INSTALL_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY,
            "LINGUAS_INSTALL": PKG_TYPE_LANGUAGE
        },
        MANIFEST_TYPE_SDK_HOST: {
            "TOOLCHAIN_HOST_TASK": PKG_TYPE_MUST_INSTALL,
            "TOOLCHAIN_HOST_TASK_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY
        },
        MANIFEST_TYPE_SDK_TARGET: {
            "TOOLCHAIN_TARGET_TASK": PKG_TYPE_MUST_INSTALL,
            "TOOLCHAIN_TARGET_TASK_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY
        }
    }

    # Order in which the package types should be handed to the package
    # manager when installing.
    INSTALL_ORDER = [
        PKG_TYPE_LANGUAGE,
        PKG_TYPE_MUST_INSTALL,
        PKG_TYPE_ATTEMPT_ONLY,
        PKG_TYPE_MULTILIB
    ]

    initial_manifest_file_header = \
        "# This file was generated automatically and contains the packages\n" \
        "# passed on to the package manager in order to create the rootfs.\n\n" \
        "# Format:\n" \
        "# <package_type>,<package_name>\n" \
        "# where:\n" \
        "# <package_type> can be:\n" \
        "# 'mip' = must install package\n" \
        "# 'aop' = attempt only package\n" \
        "# 'mlp' = multilib package\n" \
        "# 'lgp' = language package\n\n"

    def __init__(self, d, manifest_dir=None, manifest_type=MANIFEST_TYPE_IMAGE):
        self.d = d
        self.manifest_type = manifest_type

        # Default location: SDK_DIR for SDK manifests, WORKDIR for image
        # manifests.
        if manifest_dir is None:
            if manifest_type != self.MANIFEST_TYPE_IMAGE:
                self.manifest_dir = self.d.getVar('SDK_DIR', True)
            else:
                self.manifest_dir = self.d.getVar('WORKDIR', True)
        else:
            self.manifest_dir = manifest_dir

        bb.utils.mkdirhier(self.manifest_dir)

        self.initial_manifest = os.path.join(self.manifest_dir, "%s_initial_manifest" % manifest_type)
        self.final_manifest = os.path.join(self.manifest_dir, "%s_final_manifest" % manifest_type)
        self.full_manifest = os.path.join(self.manifest_dir, "%s_full_manifest" % manifest_type)

        # packages in the following vars will be split in 'must install' and
        # 'multilib'
        self.vars_to_split = ["PACKAGE_INSTALL",
                              "TOOLCHAIN_HOST_TASK",
                              "TOOLCHAIN_TARGET_TASK"]

    """
    This creates a standard initial manifest for core-image-(minimal|sato|sato-sdk).
    This will be used for testing until the class is implemented properly!
    """
    def _create_dummy_initial(self):
        image_rootfs = self.d.getVar('IMAGE_ROOTFS', True)
        pkg_list = dict()
        if image_rootfs.find("core-image-sato-sdk") > 0:
            pkg_list[self.PKG_TYPE_MUST_INSTALL] = \
                "packagegroup-core-x11-sato-games packagegroup-base-extended " \
                "packagegroup-core-x11-sato packagegroup-core-x11-base " \
                "packagegroup-core-sdk packagegroup-core-tools-debug " \
                "packagegroup-core-boot packagegroup-core-tools-testapps " \
                "packagegroup-core-eclipse-debug packagegroup-core-qt-demoapps " \
                "apt packagegroup-core-tools-profile psplash " \
                "packagegroup-core-standalone-sdk-target " \
                "packagegroup-core-ssh-openssh dpkg kernel-dev"
            pkg_list[self.PKG_TYPE_LANGUAGE] = \
                "locale-base-en-us locale-base-en-gb"
        elif image_rootfs.find("core-image-sato") > 0:
            pkg_list[self.PKG_TYPE_MUST_INSTALL] = \
                "packagegroup-core-ssh-dropbear packagegroup-core-x11-sato-games " \
                "packagegroup-core-x11-base psplash apt dpkg packagegroup-base-extended " \
                "packagegroup-core-x11-sato packagegroup-core-boot"
            pkg_list['lgp'] = \
                "locale-base-en-us locale-base-en-gb"
        elif image_rootfs.find("core-image-minimal") > 0:
            pkg_list[self.PKG_TYPE_MUST_INSTALL] = "run-postinsts packagegroup-core-boot"

        with open(self.initial_manifest, "w+") as manifest:
            manifest.write(self.initial_manifest_file_header)

            for pkg_type in pkg_list:
                for pkg in pkg_list[pkg_type].split():
                    manifest.write("%s,%s\n" % (pkg_type, pkg))

    """
    This will create the initial manifest which will be used by Rootfs class to
    generate the rootfs
    """
    @abstractmethod
    def create_initial(self):
        pass

    """
    This creates the manifest after everything has been installed.
    """
    @abstractmethod
    def create_final(self):
        pass

    """
    This creates the manifest after the package in initial manifest has been
    dummy installed. It lists all *to be installed* packages. There is no real
    installation, just a test.
    """
    @abstractmethod
    def create_full(self, pm):
        pass

    """
    The following function parses an initial manifest and returns a dictionary
    object with the must install, attempt only, multilib and language packages.
    """
    def parse_initial_manifest(self):
        pkgs = dict()

        with open(self.initial_manifest) as manifest:
            for line in manifest.read().split('\n'):
                comment = re.match("^#.*", line)
                # Lines look like "<pkg_type>,<pkg_name>"; only the four
                # known package type codes are accepted.
                pattern = "^(%s|%s|%s|%s),(.*)$" % \
                    (self.PKG_TYPE_MUST_INSTALL,
                     self.PKG_TYPE_ATTEMPT_ONLY,
                     self.PKG_TYPE_MULTILIB,
                     self.PKG_TYPE_LANGUAGE)
                pkg = re.match(pattern, line)

                if comment is not None:
                    continue

                if pkg is not None:
                    pkg_type = pkg.group(1)
                    pkg_name = pkg.group(2)

                    if not pkg_type in pkgs:
                        pkgs[pkg_type] = [pkg_name]
                    else:
                        pkgs[pkg_type].append(pkg_name)

        return pkgs

    '''
    This following function parses a full manifest and return a list
    object with packages.
    '''
    def parse_full_manifest(self):
        installed_pkgs = list()
        if not os.path.exists(self.full_manifest):
            bb.note('full manifest not exist')
            return installed_pkgs

        with open(self.full_manifest, 'r') as manifest:
            for pkg in manifest.read().split('\n'):
                installed_pkgs.append(pkg.strip())

        return installed_pkgs
187 | |||
188 | |||
class RpmManifest(Manifest):
    """
    Returns a dictionary object with mip and mlp packages.
    """
    def _split_multilib(self, pkg_list):
        """Split a space-separated package list into package-type buckets:
        packages whose name starts with a MULTILIB_VARIANTS prefix become
        'mlp', everything else 'mip'.  Returns {pkg_type: "pkg pkg ..."}."""
        pkgs = dict()

        # MULTILIB_VARIANTS is loop-invariant: fetch it once instead of
        # once per package.
        ml_variants = self.d.getVar('MULTILIB_VARIANTS', True).split()

        for pkg in pkg_list.split():
            pkg_type = self.PKG_TYPE_MUST_INSTALL

            for ml_variant in ml_variants:
                if pkg.startswith(ml_variant + '-'):
                    pkg_type = self.PKG_TYPE_MULTILIB

            if not pkg_type in pkgs:
                pkgs[pkg_type] = pkg
            else:
                pkgs[pkg_type] += " " + pkg

        return pkgs

    def create_initial(self):
        """Write the initial manifest from the datastore variables mapped
        for this manifest type (see Manifest.var_maps)."""
        pkgs = dict()

        with open(self.initial_manifest, "w+") as manifest:
            manifest.write(self.initial_manifest_file_header)

            for var in self.var_maps[self.manifest_type]:
                if var in self.vars_to_split:
                    split_pkgs = self._split_multilib(self.d.getVar(var, True))
                    if split_pkgs is not None:
                        # dict(a.items() + b.items()) is Python 2 only;
                        # update() is equivalent and portable.
                        pkgs.update(split_pkgs)
                else:
                    pkg_list = self.d.getVar(var, True)
                    if pkg_list is not None:
                        # Reuse the value already fetched above.
                        pkgs[self.var_maps[self.manifest_type][var]] = pkg_list

            for pkg_type in pkgs:
                for pkg in pkgs[pkg_type].split():
                    manifest.write("%s,%s\n" % (pkg_type, pkg))

    def create_final(self):
        pass

    def create_full(self, pm):
        pass
237 | |||
238 | |||
class OpkgManifest(Manifest):
    """
    Returns a dictionary object with mip and mlp packages.
    """
    def _split_multilib(self, pkg_list):
        """Split a space-separated package list into package-type buckets:
        packages whose name starts with a MULTILIB_VARIANTS prefix become
        'mlp', everything else 'mip'.  Returns {pkg_type: "pkg pkg ..."}."""
        pkgs = dict()

        # MULTILIB_VARIANTS is loop-invariant: fetch it once instead of
        # once per package.
        ml_variants = self.d.getVar('MULTILIB_VARIANTS', True).split()

        for pkg in pkg_list.split():
            pkg_type = self.PKG_TYPE_MUST_INSTALL

            for ml_variant in ml_variants:
                if pkg.startswith(ml_variant + '-'):
                    pkg_type = self.PKG_TYPE_MULTILIB

            if not pkg_type in pkgs:
                pkgs[pkg_type] = pkg
            else:
                pkgs[pkg_type] += " " + pkg

        return pkgs

    def create_initial(self):
        """Write the initial manifest from the datastore variables mapped
        for this manifest type (see Manifest.var_maps)."""
        pkgs = dict()

        with open(self.initial_manifest, "w+") as manifest:
            manifest.write(self.initial_manifest_file_header)

            for var in self.var_maps[self.manifest_type]:
                if var in self.vars_to_split:
                    split_pkgs = self._split_multilib(self.d.getVar(var, True))
                    if split_pkgs is not None:
                        # dict(a.items() + b.items()) is Python 2 only;
                        # update() is equivalent and portable.
                        pkgs.update(split_pkgs)
                else:
                    pkg_list = self.d.getVar(var, True)
                    if pkg_list is not None:
                        # Reuse the value already fetched above.
                        pkgs[self.var_maps[self.manifest_type][var]] = pkg_list

            for pkg_type in pkgs:
                for pkg in pkgs[pkg_type].split():
                    manifest.write("%s,%s\n" % (pkg_type, pkg))

    def create_final(self):
        pass

    def create_full(self, pm):
        """Dummy-install the initial manifest's packages with the package
        manager and record every package it reports installing."""
        if not os.path.exists(self.initial_manifest):
            self.create_initial()

        initial_manifest = self.parse_initial_manifest()
        pkgs_to_install = list()
        for pkg_type in initial_manifest:
            pkgs_to_install += initial_manifest[pkg_type]
        if len(pkgs_to_install) == 0:
            return

        output = pm.dummy_install(pkgs_to_install)

        with open(self.full_manifest, 'w+') as manifest:
            pkg_re = re.compile('^Installing ([^ ]+) [^ ].*')
            for line in set(output.split('\n')):
                m = pkg_re.match(line)
                if m:
                    manifest.write(m.group(1) + '\n')

        return
306 | |||
307 | |||
class DpkgManifest(Manifest):
    """Manifest writer for the dpkg/apt backend."""
    def create_initial(self):
        """Write the initial manifest straight from the mapped variables,
        one "<pkg_type>,<pkg>" line per package."""
        with open(self.initial_manifest, "w+") as manifest:
            manifest.write(self.initial_manifest_file_header)

            for var in self.var_maps[self.manifest_type]:
                pkg_list = self.d.getVar(var, True)
                if pkg_list is None:
                    continue

                pkg_type = self.var_maps[self.manifest_type][var]
                for pkg in pkg_list.split():
                    manifest.write("%s,%s\n" % (pkg_type, pkg))

    def create_final(self):
        pass

    def create_full(self, pm):
        pass
328 | |||
329 | |||
def create_manifest(d, final_manifest=False, manifest_dir=None,
                    manifest_type=Manifest.MANIFEST_TYPE_IMAGE):
    """Instantiate the manifest backend matching IMAGE_PKGTYPE and write
    either the final or the initial manifest."""
    backends = {'rpm': RpmManifest,
                'ipk': OpkgManifest,
                'deb': DpkgManifest}

    backend_cls = backends[d.getVar('IMAGE_PKGTYPE', True)]
    manifest = backend_cls(d, manifest_dir, manifest_type)

    if final_manifest:
        manifest.create_final()
    else:
        manifest.create_initial()
342 | |||
343 | |||
if __name__ == "__main__":
    # No standalone behaviour; this module is only used from the OE classes.
    pass
diff --git a/meta/lib/oe/package.py b/meta/lib/oe/package.py new file mode 100644 index 0000000000..f8b532220a --- /dev/null +++ b/meta/lib/oe/package.py | |||
@@ -0,0 +1,99 @@ | |||
def runstrip(arg):
    # Function to strip a single file, called from split_and_strip_files below
    # A working 'file' (one which works on the target architecture)
    #
    # The elftype is a bit pattern (explained in split_and_strip_files) to tell
    # us what type of file we're processing...
    # 4 - executable
    # 8 - shared library
    # 16 - kernel module

    # Removed the unused, Python-2-only 'commands' import.
    import stat, subprocess

    (file, elftype, strip) = arg

    # Make sure the file is writable/readable for strip; restore the
    # original mode afterwards.
    newmode = None
    # NOTE(review): this condition is true for any readable file, so the
    # chmod almost always runs.  The intent may have been
    # "not writable or not readable" -- confirm before changing; the
    # original mode is restored below either way, so this is harmless.
    if not os.access(file, os.W_OK) or os.access(file, os.R_OK):
        origmode = os.stat(file)[stat.ST_MODE]
        newmode = origmode | stat.S_IWRITE | stat.S_IREAD
        os.chmod(file, newmode)

    extraflags = ""

    # kernel module
    if elftype & 16:
        extraflags = "--strip-debug --remove-section=.comment --remove-section=.note --preserve-dates"
    # .so and shared library
    elif ".so" in file and elftype & 8:
        extraflags = "--remove-section=.comment --remove-section=.note --strip-unneeded"
    # shared or executable:
    elif elftype & 8 or elftype & 4:
        extraflags = "--remove-section=.comment --remove-section=.note"

    # NOTE(review): shell=True with single-quoted interpolation; paths
    # containing a single quote would break the command line.
    stripcmd = "'%s' %s '%s'" % (strip, extraflags, file)
    bb.debug(1, "runstrip: %s" % stripcmd)

    ret = subprocess.call(stripcmd, shell=True)

    if newmode:
        os.chmod(file, origmode)

    if ret:
        bb.error("runstrip: '%s' strip command failed" % stripcmd)

    return
45 | |||
46 | |||
def file_translate(file):
    """Encode characters that are awkward in rpm/package metadata
    (@, space, tab, brackets, underscore) as @token@ sequences."""
    translated = file
    for char, token in (("@", "@at@"),
                        (" ", "@space@"),
                        ("\t", "@tab@"),
                        ("[", "@openbrace@"),
                        ("]", "@closebrace@"),
                        ("_", "@underscore@")):
        translated = translated.replace(char, token)
    return translated
55 | |||
def filedeprunner(arg):
    """Run the rpmdeps helper over a package's files and collect per-file
    Provides:/Requires: data.

    Called from a process pool, so all inputs arrive packed in a single
    tuple (pkg, pkgfiles, rpmdeps, pkgdest).  Returns
    (pkg, provides, requires) where the latter two map translated file
    paths to lists of dependency strings.
    """
    import re, subprocess, shlex

    (pkg, pkgfiles, rpmdeps, pkgdest) = arg
    provides = {}
    requires = {}

    # Matches version constraints like ">= 1.2" so they can be wrapped in
    # parentheses below.
    r = re.compile(r'[<>=]+ +[^ ]*')

    def process_deps(pipe, pkg, pkgdest, provides, requires):
        # Each output line is "<filename> <Requires:|Provides:> <deps>".
        for line in pipe:
            f = line.split(" ", 1)[0].strip()
            line = line.split(" ", 1)[1].strip()

            if line.startswith("Requires:"):
                i = requires
            elif line.startswith("Provides:"):
                i = provides
            else:
                continue

            # Strip the per-package staging prefix to get the packaged path.
            file = f.replace(pkgdest + "/" + pkg, "")
            file = file_translate(file)
            value = line.split(":", 1)[1].strip()
            value = r.sub(r'(\g<0>)', value)

            # Skip rpmlib() internal capabilities and the bare "python"
            # dependency.
            if value.startswith("rpmlib("):
                continue
            if value == "python":
                continue
            if file not in i:
                i[file] = []
            i[file].append(value)

        return provides, requires

    try:
        # NOTE(review): dep_popen is never wait()ed on/communicate()d, so
        # the child's exit status is not collected here -- confirm intended.
        dep_popen = subprocess.Popen(shlex.split(rpmdeps) + pkgfiles, stdout=subprocess.PIPE)
        provides, requires = process_deps(dep_popen.stdout, pkg, pkgdest, provides, requires)
    except OSError as e:
        bb.error("rpmdeps: '%s' command failed, '%s'" % (shlex.split(rpmdeps) + pkgfiles, e))
        raise e

    return (pkg, provides, requires)
diff --git a/meta/lib/oe/package_manager.py b/meta/lib/oe/package_manager.py new file mode 100644 index 0000000000..505509543d --- /dev/null +++ b/meta/lib/oe/package_manager.py | |||
@@ -0,0 +1,1797 @@ | |||
1 | from abc import ABCMeta, abstractmethod | ||
2 | import os | ||
3 | import glob | ||
4 | import subprocess | ||
5 | import shutil | ||
6 | import multiprocessing | ||
7 | import re | ||
8 | import bb | ||
9 | import tempfile | ||
10 | import oe.utils | ||
11 | |||
12 | |||
# this can be used by all PM backends to create the index files in parallel
def create_index(arg):
    """Run a single index-creation shell command.

    Returns None on success, or a description string on failure -- the
    multiprocess driver collects these strings instead of exceptions.
    """
    index_cmd = arg

    try:
        bb.note("Executing '%s' ..." % index_cmd)
        output = subprocess.check_output(index_cmd, stderr=subprocess.STDOUT, shell=True)
    except subprocess.CalledProcessError as e:
        return ("Index creation command '%s' failed with return code %d:\n%s"
                % (e.cmd, e.returncode, e.output))

    if output:
        bb.note(output)

    return None
28 | |||
29 | |||
class Indexer(object):
    """
    Abstract base class for package feed indexers (one subclass per
    packaging backend: rpm, ipk, deb).

    Subclasses implement write_index() to (re)generate the package index
    files underneath deploy_dir.
    """
    __metaclass__ = ABCMeta

    def __init__(self, d, deploy_dir):
        # d: the bitbake datastore; deploy_dir: top-level package feed dir.
        self.d = d
        self.deploy_dir = deploy_dir

    @abstractmethod
    def write_index(self):
        """Create or update the index files for this backend's feed."""
        pass
40 | |||
41 | |||
class RpmIndexer(Indexer):
    """Generates createrepo indexes for the rpm package feed, covering
    target, SDK and multilib architectures."""

    def get_ml_prefix_and_os_list(self, arch_var=None, os_var=None):
        """
        Return (ml_prefix_list, target_os): per-multilib lists of package
        archs (prefixed with the multilib name, except for 'all'/'noarch'/
        'any') and the matching TARGET_OS values.

        If both arch_var and os_var are given, only those two variables are
        consulted; otherwise PACKAGE_ARCHS/TARGET_OS are used and the
        MULTILIBS variants are expanded via per-tune datastore copies.
        """
        package_archs = {
            'default': [],
        }

        target_os = {
            'default': "",
        }

        if arch_var is not None and os_var is not None:
            package_archs['default'] = self.d.getVar(arch_var, True).split()
            package_archs['default'].reverse()
            target_os['default'] = self.d.getVar(os_var, True).strip()
        else:
            package_archs['default'] = self.d.getVar("PACKAGE_ARCHS", True).split()
            # arch order is reversed.  This ensures the -best- match is
            # listed first!
            package_archs['default'].reverse()
            target_os['default'] = self.d.getVar("TARGET_OS", True).strip()
            multilibs = self.d.getVar('MULTILIBS', True) or ""
            for ext in multilibs.split():
                # MULTILIBS entries look like "multilib:<variant>".
                eext = ext.split(':')
                if len(eext) > 1 and eext[0] == 'multilib':
                    localdata = bb.data.createCopy(self.d)
                    default_tune_key = "DEFAULTTUNE_virtclass-multilib-" + eext[1]
                    default_tune = localdata.getVar(default_tune_key, False)
                    if default_tune is None:
                        # older naming scheme fallback
                        default_tune_key = "DEFAULTTUNE_ML_" + eext[1]
                        default_tune = localdata.getVar(default_tune_key, False)
                    if default_tune:
                        localdata.setVar("DEFAULTTUNE", default_tune)
                        bb.data.update_data(localdata)
                        package_archs[eext[1]] = localdata.getVar('PACKAGE_ARCHS',
                                                                  True).split()
                        package_archs[eext[1]].reverse()
                        target_os[eext[1]] = localdata.getVar("TARGET_OS",
                                                              True).strip()

        ml_prefix_list = dict()
        for mlib in package_archs:
            if mlib == 'default':
                ml_prefix_list[mlib] = package_archs[mlib]
            else:
                ml_prefix_list[mlib] = list()
                for arch in package_archs[mlib]:
                    if arch in ['all', 'noarch', 'any']:
                        # arch-independent archs are never multilib-prefixed
                        ml_prefix_list[mlib].append(arch)
                    else:
                        ml_prefix_list[mlib].append(mlib + "_" + arch)

        return (ml_prefix_list, target_os)

    def write_index(self):
        """Run 'createrepo --update' (in parallel) on every existing
        per-arch directory under the rpm deploy dir."""
        sdk_pkg_archs = (self.d.getVar('SDK_PACKAGE_ARCHS', True) or "").replace('-', '_').split()
        all_mlb_pkg_archs = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").replace('-', '_').split()

        mlb_prefix_list = self.get_ml_prefix_and_os_list()[0]

        archs = set()
        for item in mlb_prefix_list:
            archs = archs.union(set(i.replace('-', '_') for i in mlb_prefix_list[item]))

        # Fall back to the full multilib arch set when nothing was found.
        if len(archs) == 0:
            archs = archs.union(set(all_mlb_pkg_archs))

        archs = archs.union(set(sdk_pkg_archs))

        rpm_createrepo = bb.utils.which(os.getenv('PATH'), "createrepo")
        index_cmds = []
        rpm_dirs_found = False
        for arch in archs:
            arch_dir = os.path.join(self.deploy_dir, arch)
            if not os.path.isdir(arch_dir):
                continue

            index_cmds.append("%s --update -q %s" % (rpm_createrepo, arch_dir))

            rpm_dirs_found = True

        if not rpm_dirs_found:
            bb.note("There are no packages in %s" % self.deploy_dir)
            return

        result = oe.utils.multiprocess_exec(index_cmds, create_index)
        if result:
            bb.fatal('%s' % ('\n'.join(result)))
129 | |||
130 | |||
class OpkgIndexer(Indexer):
    """Generates 'Packages' index files for the ipk feed using
    opkg-make-index."""

    def write_index(self):
        """Build (in parallel) a Packages index for every per-arch feed
        directory named by the arch variables below."""
        arch_vars = ["ALL_MULTILIB_PACKAGE_ARCHS",
                     "SDK_PACKAGE_ARCHS",
                     "MULTILIB_ARCHS"]

        opkg_index_cmd = bb.utils.which(os.getenv('PATH'), "opkg-make-index")

        # opkg-make-index expects the top-level Packages file to exist.
        top_level_index = os.path.join(self.deploy_dir, "Packages")
        if not os.path.exists(top_level_index):
            open(top_level_index, "w").close()

        index_cmds = []
        for var in arch_vars:
            arch_value = self.d.getVar(var, True)
            if arch_value is None:
                continue

            for arch in arch_value.split():
                feed_dir = os.path.join(self.deploy_dir, arch)
                if not os.path.isdir(feed_dir):
                    continue

                feed_index = os.path.join(feed_dir, "Packages")
                if not os.path.exists(feed_index):
                    open(feed_index, "w").close()

                index_cmds.append('%s -r %s -p %s -m %s' %
                                  (opkg_index_cmd, feed_index, feed_index, feed_dir))

        if not index_cmds:
            bb.note("There are no packages in %s!" % self.deploy_dir)
            return

        result = oe.utils.multiprocess_exec(index_cmds, create_index)
        if result:
            bb.fatal('%s' % ('\n'.join(result)))
168 | |||
169 | |||
170 | |||
class DpkgIndexer(Indexer):
    """Generates Packages, Packages.gz and Release files for the deb feed,
    one set per architecture directory, using apt-ftparchive."""

    def write_index(self):
        """Index every existing per-arch deb feed directory in parallel."""
        # Collect target, SDK and multilib archs without duplicates.
        # arch_list must be initialized here: previously it was only
        # assigned inside the 'is not None' branch, so an unset
        # PACKAGE_ARCHS raised NameError further down.
        arch_list = []
        pkg_archs = self.d.getVar('PACKAGE_ARCHS', True)
        if pkg_archs is not None:
            arch_list = pkg_archs.split()
        sdk_pkg_archs = self.d.getVar('SDK_PACKAGE_ARCHS', True)
        if sdk_pkg_archs is not None:
            for a in sdk_pkg_archs.split():
                # List membership, not 'a not in pkg_archs': that was a
                # substring test on the raw string and could wrongly skip
                # an SDK arch whose name is contained in another arch.
                if a not in arch_list:
                    arch_list.append(a)

        all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").replace('-', '_').split()
        arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in arch_list)

        apt_ftparchive = bb.utils.which(os.getenv('PATH'), "apt-ftparchive")
        gzip = bb.utils.which(os.getenv('PATH'), "gzip")

        index_cmds = []
        deb_dirs_found = False
        for arch in arch_list:
            arch_dir = os.path.join(self.deploy_dir, arch)
            if not os.path.isdir(arch_dir):
                continue

            cmd = "cd %s; PSEUDO_UNLOAD=1 %s packages . > Packages;" % (arch_dir, apt_ftparchive)

            cmd += "%s -fc Packages > Packages.gz;" % gzip

            # Seed the Release file; the apt-ftparchive command below
            # appends (>>) to it.
            with open(os.path.join(arch_dir, "Release"), "w+") as release:
                release.write("Label: %s\n" % arch)

            cmd += "PSEUDO_UNLOAD=1 %s release . >> Release" % apt_ftparchive

            index_cmds.append(cmd)

            deb_dirs_found = True

        if not deb_dirs_found:
            bb.note("There are no packages in %s" % self.deploy_dir)
            return

        result = oe.utils.multiprocess_exec(index_cmds, create_index)
        if result:
            bb.fatal('%s' % ('\n'.join(result)))
215 | |||
216 | |||
217 | |||
class PkgsList(object):
    """
    Abstract base class for querying the set of packages installed in a
    root filesystem (one subclass per packaging backend).
    """
    __metaclass__ = ABCMeta

    def __init__(self, d, rootfs_dir):
        # d: the bitbake datastore; rootfs_dir: root of the image filesystem.
        self.d = d
        self.rootfs_dir = rootfs_dir

    @abstractmethod
    def list(self, format=None):
        """
        Return the installed packages as a string; 'format' selects the
        layout (the concrete subclasses accept e.g. "arch", "file", "ver",
        "deps", or None for a plain name list).
        """
        pass
228 | |||
229 | |||
class RpmPkgsList(PkgsList):
    """Queries the packages installed in an rpm-managed rootfs."""

    def __init__(self, d, rootfs_dir, arch_var=None, os_var=None):
        super(RpmPkgsList, self).__init__(d, rootfs_dir)

        self.rpm_cmd = bb.utils.which(os.getenv('PATH'), "rpm")
        self.image_rpmlib = os.path.join(self.rootfs_dir, 'var/lib/rpm')

        self.ml_prefix_list, self.ml_os_list = \
            RpmIndexer(d, rootfs_dir).get_ml_prefix_and_os_list(arch_var, os_var)

        # Determine rpm version
        cmd = "%s --version" % self.rpm_cmd
        try:
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
        except subprocess.CalledProcessError as e:
            bb.fatal("Getting rpm version failed. Command '%s' "
                     "returned %d:\n%s" % (cmd, e.returncode, e.output))
        # e.g. "RPM version 5.4.14" -> major version 5; behavior differs
        # between rpm 4 and rpm 5 in several places below.
        self.rpm_version = int(output.split()[-1].split('.')[0])

    '''
    Translate the RPM/Smart format names to the OE multilib format names
    '''
    def _pkg_translate_smart_to_oe(self, pkg, arch):
        new_pkg = pkg
        new_arch = arch
        # Compare archs with '-' and '_' normalized to the same character.
        fixed_arch = arch.replace('_', '-')
        found = 0
        for mlib in self.ml_prefix_list:
            for cmp_arch in self.ml_prefix_list[mlib]:
                fixed_cmp_arch = cmp_arch.replace('_', '-')
                if fixed_arch == fixed_cmp_arch:
                    if mlib == 'default':
                        new_pkg = pkg
                        new_arch = cmp_arch
                    else:
                        new_pkg = mlib + '-' + pkg
                        # We need to strip off the ${mlib}_ prefix on the arch
                        new_arch = cmp_arch.replace(mlib + '_', '')

                    # Workaround for bug 3565. Simply look to see if we
                    # know of a package with that name, if not try again!
                    filename = os.path.join(self.d.getVar('PKGDATA_DIR', True),
                                            'runtime-reverse',
                                            new_pkg)
                    if os.path.exists(filename):
                        found = 1
                        break

            if found == 1 and fixed_arch == fixed_cmp_arch:
                break
        #bb.note('%s, %s -> %s, %s' % (pkg, arch, new_pkg, new_arch))
        return new_pkg, new_arch

    def _list_pkg_deps(self):
        # rpmresolve dumps the dependency graph of the image rpmlib database.
        cmd = [bb.utils.which(os.getenv('PATH'), "rpmresolve"),
               "-t", self.image_rpmlib]

        try:
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip()
        except subprocess.CalledProcessError as e:
            bb.fatal("Cannot get the package dependencies. Command '%s' "
                     "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output))

        return output

    def list(self, format=None):
        """Query the image rpm database; see PkgsList.list for formats."""
        if format == "deps":
            if self.rpm_version == 4:
                bb.fatal("'deps' format dependency listings are not supported with rpm 4 since rpmresolve does not work")
            return self._list_pkg_deps()

        cmd = self.rpm_cmd + ' --root ' + self.rootfs_dir
        cmd += ' -D "_dbpath /var/lib/rpm" -qa'
        # rpm 4 has no PACKAGEORIGIN tag.
        if self.rpm_version == 4:
            cmd += " --qf '[%{NAME} %{ARCH} %{VERSION}\n]'"
        else:
            cmd += " --qf '[%{NAME} %{ARCH} %{VERSION} %{PACKAGEORIGIN}\n]'"

        try:
            # bb.note(cmd)
            tmp_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip()

        except subprocess.CalledProcessError as e:
            bb.fatal("Cannot get the installed packages list. Command '%s' "
                     "returned %d:\n%s" % (cmd, e.returncode, e.output))

        output = list()
        for line in tmp_output.split('\n'):
            if len(line.strip()) == 0:
                continue
            pkg = line.split()[0]
            arch = line.split()[1]
            ver = line.split()[2]
            if self.rpm_version == 4:
                pkgorigin = "unknown"
            else:
                pkgorigin = line.split()[3]
            new_pkg, new_arch = self._pkg_translate_smart_to_oe(pkg, arch)

            if format == "arch":
                output.append('%s %s' % (new_pkg, new_arch))
            elif format == "file":
                output.append('%s %s %s' % (new_pkg, pkgorigin, new_arch))
            elif format == "ver":
                output.append('%s %s %s' % (new_pkg, new_arch, ver))
            else:
                output.append('%s' % (new_pkg))

        output.sort()

        return '\n'.join(output)
341 | |||
342 | |||
class OpkgPkgsList(PkgsList):
    """Queries the packages installed in an opkg-managed rootfs."""

    def __init__(self, d, rootfs_dir, config_file):
        super(OpkgPkgsList, self).__init__(d, rootfs_dir)

        self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg-cl")
        self.opkg_args = "-f %s -o %s " % (config_file, rootfs_dir)
        self.opkg_args += self.d.getVar("OPKG_ARGS", True)

    def list(self, format=None):
        """Return the installed package list; detailed formats are produced
        by piping 'opkg status' through opkg-query-helper.py."""
        opkg_query_cmd = bb.utils.which(os.getenv('PATH'), "opkg-query-helper.py")

        # Helper option per detailed format; the plain name list comes
        # straight from opkg itself.
        helper_opts = {"arch": " -a", "file": " -f", "ver": " -v", "deps": ""}
        if format in helper_opts:
            cmd = "%s %s status | %s%s" % \
                (self.opkg_cmd, self.opkg_args, opkg_query_cmd, helper_opts[format])
        else:
            cmd = "%s %s list_installed | cut -d' ' -f1" % \
                (self.opkg_cmd, self.opkg_args)

        try:
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip()
        except subprocess.CalledProcessError as e:
            bb.fatal("Cannot get the installed packages list. Command '%s' "
                     "returned %d:\n%s" % (cmd, e.returncode, e.output))

        if output and format == "file":
            # Substitute the bare package file name with its absolute path
            # whenever the file actually exists in the feed.
            resolved_lines = []
            for line in output.split('\n'):
                pkg, pkg_file, pkg_arch = line.split()
                full_path = os.path.join(self.rootfs_dir, pkg_arch, pkg_file)
                if os.path.exists(full_path):
                    resolved_lines.append("%s %s %s\n" % (pkg, full_path, pkg_arch))
                else:
                    resolved_lines.append("%s %s %s\n" % (pkg, pkg_file, pkg_arch))

            output = "".join(resolved_lines)

        return output
389 | |||
390 | |||
class DpkgPkgsList(PkgsList):
    """Queries the packages installed in a dpkg-managed rootfs."""

    def list(self, format=None):
        """Return the installed package list via dpkg-query; the 'deps'
        format is post-processed by opkg-query-helper.py."""
        cmd = [bb.utils.which(os.getenv('PATH'), "dpkg-query"),
               "--admindir=%s/var/lib/dpkg" % self.rootfs_dir,
               "-W"]

        if format == "arch":
            cmd.append("-f=${Package} ${PackageArch}\n")
        elif format == "file":
            cmd.append("-f=${Package} ${Package}_${Version}_${Architecture}.deb ${PackageArch}\n")
        elif format == "ver":
            cmd.append("-f=${Package} ${PackageArch} ${Version}\n")
        elif format == "deps":
            cmd.append("-f=Package: ${Package}\nDepends: ${Depends}\nRecommends: ${Recommends}\n\n")
        else:
            cmd.append("-f=${Package}\n")

        try:
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip()
        except subprocess.CalledProcessError as e:
            bb.fatal("Cannot get the installed packages list. Command '%s' "
                     "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output))

        if format == "file":
            tmp_output = ""
            for line in output.split('\n'):
                # When no packages are installed, output is "" and split
                # yields a single empty line; skip it instead of letting
                # the 3-way unpack below raise ValueError.
                if not line.strip():
                    continue
                pkg, pkg_file, pkg_arch = line.split()
                full_path = os.path.join(self.rootfs_dir, pkg_arch, pkg_file)
                if os.path.exists(full_path):
                    tmp_output += "%s %s %s\n" % (pkg, full_path, pkg_arch)
                else:
                    tmp_output += "%s %s %s\n" % (pkg, pkg_file, pkg_arch)

            output = tmp_output
        elif format == "deps":
            opkg_query_cmd = bb.utils.which(os.getenv('PATH'), "opkg-query-helper.py")
            # The helper reads from stdin; feed it via a temp file.
            file_out = tempfile.NamedTemporaryFile()
            file_out.write(output)
            file_out.flush()

            try:
                output = subprocess.check_output("cat %s | %s" %
                                                 (file_out.name, opkg_query_cmd),
                                                 stderr=subprocess.STDOUT,
                                                 shell=True)
            except subprocess.CalledProcessError as e:
                bb.fatal("Cannot compute packages dependencies. Command '%s' "
                         "returned %d:\n%s" % (e.cmd, e.returncode, e.output))
            finally:
                # Close (and thereby delete) the temp file on both paths;
                # the original duplicated the close in the except branch.
                file_out.close()

        return output
444 | |||
445 | |||
class PackageManager(object):
    """
    This is an abstract class. Do not instantiate this directly.

    Common base for the rpm/opkg/dpkg backends: holds the bitbake
    datastore, the deploy-directory lock helpers and the shared
    complementary-package installation logic.
    """
    __metaclass__ = ABCMeta

    def __init__(self, d):
        self.d = d
        # Set by each backend subclass; required by deploy_dir_lock().
        self.deploy_dir = None
        self.deploy_lock = None
        self.feed_uris = self.d.getVar('PACKAGE_FEED_URIS', True) or ""

    @abstractmethod
    def update(self):
        """
        Update the package manager package database.
        """
        pass

    @abstractmethod
    def install(self, pkgs, attempt_only=False):
        """
        Install a list of packages. 'pkgs' is a list object. If 'attempt_only'
        is True, installation failures are ignored.
        """
        pass

    @abstractmethod
    def remove(self, pkgs, with_dependencies=True):
        """
        Remove a list of packages. 'pkgs' is a list object. If
        'with_dependencies' is False, any dependencies are left in place.
        """
        pass

    @abstractmethod
    def write_index(self):
        """
        This function creates the index files
        """
        pass

    @abstractmethod
    def remove_packaging_data(self):
        """
        Remove the package manager's own metadata from the rootfs.
        """
        pass

    @abstractmethod
    def list_installed(self, format=None):
        """
        Return the installed packages, formatted according to 'format'.
        """
        pass

    @abstractmethod
    def insert_feeds_uris(self):
        """
        Configure the package manager to use the PACKAGE_FEED_URIS feeds.
        """
        pass

    def install_complementary(self, globs=None):
        """
        Install complementary packages based upon the list of currently
        installed packages e.g. locales, *-dev, *-dbg, etc. This will only
        attempt to install these packages, if they don't exist then no error
        will occur. Note: every backend needs to call this function
        explicitly after the normal package installation.
        """
        # we need to write the list of installed packages to a file because the
        # oe-pkgdata-util reads it from a file
        installed_pkgs_file = os.path.join(self.d.getVar('WORKDIR', True),
                                           "installed_pkgs.txt")
        with open(installed_pkgs_file, "w+") as installed_pkgs:
            installed_pkgs.write(self.list_installed("arch"))

        if globs is None:
            globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY', True)
            if globs is None:
                # Nothing configured: bail out before the locale globs are
                # appended below. (Previously the None check came after the
                # append, so 'globs += ...' raised TypeError whenever
                # IMAGE_LINGUAS was non-empty.)
                return

            split_linguas = set()

            # Both the full locale name and its language prefix are wanted,
            # e.g. "en-gb" pulls in *-locale-en-gb and *-locale-en.
            for translation in self.d.getVar('IMAGE_LINGUAS', True).split():
                split_linguas.add(translation)
                split_linguas.add(translation.split('-')[0])

            split_linguas = sorted(split_linguas)

            for lang in split_linguas:
                globs += " *-locale-%s" % lang

        cmd = [bb.utils.which(os.getenv('PATH'), "oe-pkgdata-util"),
               "glob", self.d.getVar('PKGDATA_DIR', True), installed_pkgs_file,
               globs]
        exclude = self.d.getVar('PACKAGE_EXCLUDE_COMPLEMENTARY', True)
        if exclude:
            cmd.extend(['-x', exclude])
        try:
            bb.note("Installing complementary packages ...")
            complementary_pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            bb.fatal("Could not compute complementary packages list. Command "
                     "'%s' returned %d:\n%s" %
                     (' '.join(cmd), e.returncode, e.output))

        self.install(complementary_pkgs.split(), attempt_only=True)

    def deploy_dir_lock(self):
        """Take the exclusive lock protecting the deploy directory."""
        if self.deploy_dir is None:
            raise RuntimeError("deploy_dir is not set!")

        lock_file_name = os.path.join(self.deploy_dir, "deploy.lock")

        self.deploy_lock = bb.utils.lockfile(lock_file_name)

    def deploy_dir_unlock(self):
        """Release the deploy directory lock, if held."""
        if self.deploy_lock is None:
            return

        bb.utils.unlockfile(self.deploy_lock)

        self.deploy_lock = None
562 | |||
563 | |||
564 | class RpmPM(PackageManager): | ||
    def __init__(self,
                 d,
                 target_rootfs,
                 target_vendor,
                 task_name='target',
                 providename=None,
                 arch_var=None,
                 os_var=None):
        """
        RPM/Smart backend of PackageManager.

        d: bitbake datastore; target_rootfs: root of the image filesystem;
        target_vendor: vendor string used to build the rpm platform id;
        task_name: used to namespace the saved state under ${T}/saved;
        providename: optional list of extra system 'Providename' entries;
        arch_var/os_var: optional variable names overriding the default
        PACKAGE_ARCHS/TARGET_OS lookups for multilib resolution.
        """
        super(RpmPM, self).__init__(d)
        self.target_rootfs = target_rootfs
        self.target_vendor = target_vendor
        self.task_name = task_name
        self.providename = providename
        # Filled later with every available package ('name-ver-rev@arch').
        self.fullpkglist = list()
        self.deploy_dir = self.d.getVar('DEPLOY_DIR_RPM', True)
        self.etcrpm_dir = os.path.join(self.target_rootfs, "etc/rpm")
        self.install_dir = os.path.join(self.target_rootfs, "install")
        self.rpm_cmd = bb.utils.which(os.getenv('PATH'), "rpm")
        self.smart_cmd = bb.utils.which(os.getenv('PATH'), "smart")
        self.smart_opt = "--quiet --data-dir=" + os.path.join(target_rootfs,
                                                              'var/lib/smart')
        self.scriptlet_wrapper = self.d.expand('${WORKDIR}/scriptlet_wrapper')
        # Per-task saved state (solution manifest and rpmlib snapshot).
        self.solution_manifest = self.d.expand('${T}/saved/%s_solution' %
                                               self.task_name)
        self.saved_rpmlib = self.d.expand('${T}/saved/%s' % self.task_name)
        self.image_rpmlib = os.path.join(self.target_rootfs, 'var/lib/rpm')

        if not os.path.exists(self.d.expand('${T}/saved')):
            bb.utils.mkdirhier(self.d.expand('${T}/saved'))

        self.indexer = RpmIndexer(self.d, self.deploy_dir)
        self.pkgs_list = RpmPkgsList(self.d, self.target_rootfs, arch_var, os_var)
        # rpm 4 vs rpm 5: several code paths below depend on this.
        self.rpm_version = self.pkgs_list.rpm_version

        self.ml_prefix_list, self.ml_os_list = self.indexer.get_ml_prefix_and_os_list(arch_var, os_var)
600 | |||
601 | def insert_feeds_uris(self): | ||
602 | if self.feed_uris == "": | ||
603 | return | ||
604 | |||
605 | # List must be prefered to least preferred order | ||
606 | default_platform_extra = set() | ||
607 | platform_extra = set() | ||
608 | bbextendvariant = self.d.getVar('BBEXTENDVARIANT', True) or "" | ||
609 | for mlib in self.ml_os_list: | ||
610 | for arch in self.ml_prefix_list[mlib]: | ||
611 | plt = arch.replace('-', '_') + '-.*-' + self.ml_os_list[mlib] | ||
612 | if mlib == bbextendvariant: | ||
613 | default_platform_extra.add(plt) | ||
614 | else: | ||
615 | platform_extra.add(plt) | ||
616 | |||
617 | platform_extra = platform_extra.union(default_platform_extra) | ||
618 | |||
619 | arch_list = [] | ||
620 | for canonical_arch in platform_extra: | ||
621 | arch = canonical_arch.split('-')[0] | ||
622 | if not os.path.exists(os.path.join(self.deploy_dir, arch)): | ||
623 | continue | ||
624 | arch_list.append(arch) | ||
625 | |||
626 | uri_iterator = 0 | ||
627 | channel_priority = 10 + 5 * len(self.feed_uris.split()) * len(arch_list) | ||
628 | |||
629 | for uri in self.feed_uris.split(): | ||
630 | for arch in arch_list: | ||
631 | bb.note('Note: adding Smart channel url%d%s (%s)' % | ||
632 | (uri_iterator, arch, channel_priority)) | ||
633 | self._invoke_smart('channel --add url%d-%s type=rpm-md baseurl=%s/rpm/%s -y' | ||
634 | % (uri_iterator, arch, uri, arch)) | ||
635 | self._invoke_smart('channel --set url%d-%s priority=%d' % | ||
636 | (uri_iterator, arch, channel_priority)) | ||
637 | channel_priority -= 5 | ||
638 | uri_iterator += 1 | ||
639 | |||
640 | ''' | ||
641 | Create configs for rpm and smart, and multilib is supported | ||
642 | ''' | ||
643 | def create_configs(self): | ||
644 | target_arch = self.d.getVar('TARGET_ARCH', True) | ||
645 | platform = '%s%s-%s' % (target_arch.replace('-', '_'), | ||
646 | self.target_vendor, | ||
647 | self.ml_os_list['default']) | ||
648 | |||
649 | # List must be prefered to least preferred order | ||
650 | default_platform_extra = list() | ||
651 | platform_extra = list() | ||
652 | bbextendvariant = self.d.getVar('BBEXTENDVARIANT', True) or "" | ||
653 | for mlib in self.ml_os_list: | ||
654 | for arch in self.ml_prefix_list[mlib]: | ||
655 | plt = arch.replace('-', '_') + '-.*-' + self.ml_os_list[mlib] | ||
656 | if mlib == bbextendvariant: | ||
657 | if plt not in default_platform_extra: | ||
658 | default_platform_extra.append(plt) | ||
659 | else: | ||
660 | if plt not in platform_extra: | ||
661 | platform_extra.append(plt) | ||
662 | platform_extra = default_platform_extra + platform_extra | ||
663 | |||
664 | self._create_configs(platform, platform_extra) | ||
665 | |||
666 | def _invoke_smart(self, args): | ||
667 | cmd = "%s %s %s" % (self.smart_cmd, self.smart_opt, args) | ||
668 | # bb.note(cmd) | ||
669 | try: | ||
670 | complementary_pkgs = subprocess.check_output(cmd, | ||
671 | stderr=subprocess.STDOUT, | ||
672 | shell=True) | ||
673 | # bb.note(complementary_pkgs) | ||
674 | return complementary_pkgs | ||
675 | except subprocess.CalledProcessError as e: | ||
676 | bb.fatal("Could not invoke smart. Command " | ||
677 | "'%s' returned %d:\n%s" % (cmd, e.returncode, e.output)) | ||
678 | |||
679 | def _search_pkg_name_in_feeds(self, pkg, feed_archs): | ||
680 | for arch in feed_archs: | ||
681 | arch = arch.replace('-', '_') | ||
682 | for p in self.fullpkglist: | ||
683 | regex_match = r"^%s-[^-]*-[^-]*@%s$" % \ | ||
684 | (re.escape(pkg), re.escape(arch)) | ||
685 | if re.match(regex_match, p) is not None: | ||
686 | # First found is best match | ||
687 | # bb.note('%s -> %s' % (pkg, pkg + '@' + arch)) | ||
688 | return pkg + '@' + arch | ||
689 | |||
690 | return "" | ||
691 | |||
692 | ''' | ||
693 | Translate the OE multilib format names to the RPM/Smart format names | ||
    It searches for the RPM/Smart format names in the probable multilib
    feeds first, and then searches the default base feed.
696 | ''' | ||
    def _pkg_translate_oe_to_smart(self, pkgs, attempt_only=False):
        """
        Map each OE package name in 'pkgs' to its 'name@arch' form as known
        by RPM/Smart and return the translated list.

        Multilib-prefixed names are looked up in the matching multilib feed
        first; everything else falls back to the default feed. A package
        that cannot be found is fatal unless attempt_only is True, in which
        case it is only warned about and dropped from the result.
        """
        new_pkgs = list()

        for pkg in pkgs:
            new_pkg = pkg
            # Search new_pkg in probable multilibs first
            for mlib in self.ml_prefix_list:
                # Jump the default archs
                if mlib == 'default':
                    continue

                subst = pkg.replace(mlib + '-', '')
                # if the pkg in this multilib feed
                if subst != pkg:
                    feed_archs = self.ml_prefix_list[mlib]
                    new_pkg = self._search_pkg_name_in_feeds(subst, feed_archs)
                    if not new_pkg:
                        # Failed to translate, package not found!
                        err_msg = '%s not found in the %s feeds (%s).\n' % \
                                  (pkg, mlib, " ".join(feed_archs))
                        if not attempt_only:
                            err_msg += " ".join(self.fullpkglist)
                            bb.fatal(err_msg)
                        bb.warn(err_msg)
                    else:
                        new_pkgs.append(new_pkg)

                    break

            # Apparently not a multilib package...
            if pkg == new_pkg:
                # Search new_pkg in default archs
                default_archs = self.ml_prefix_list['default']
                new_pkg = self._search_pkg_name_in_feeds(pkg, default_archs)
                if not new_pkg:
                    err_msg = '%s not found in the base feeds (%s).\n' % \
                              (pkg, ' '.join(default_archs))
                    if not attempt_only:
                        err_msg += " ".join(self.fullpkglist)
                        bb.fatal(err_msg)
                    bb.warn(err_msg)
                else:
                    new_pkgs.append(new_pkg)

        return new_pkgs
742 | |||
743 | def _create_configs(self, platform, platform_extra): | ||
744 | # Setup base system configuration | ||
745 | bb.note("configuring RPM platform settings") | ||
746 | |||
747 | # Configure internal RPM environment when using Smart | ||
748 | os.environ['RPM_ETCRPM'] = self.etcrpm_dir | ||
749 | bb.utils.mkdirhier(self.etcrpm_dir) | ||
750 | |||
751 | # Setup temporary directory -- install... | ||
752 | if os.path.exists(self.install_dir): | ||
753 | bb.utils.remove(self.install_dir, True) | ||
754 | bb.utils.mkdirhier(os.path.join(self.install_dir, 'tmp')) | ||
755 | |||
756 | channel_priority = 5 | ||
757 | platform_dir = os.path.join(self.etcrpm_dir, "platform") | ||
758 | sdkos = self.d.getVar("SDK_OS", True) | ||
759 | with open(platform_dir, "w+") as platform_fd: | ||
760 | platform_fd.write(platform + '\n') | ||
761 | for pt in platform_extra: | ||
762 | channel_priority += 5 | ||
763 | if sdkos: | ||
764 | tmp = re.sub("-%s$" % sdkos, "-%s\n" % sdkos, pt) | ||
765 | tmp = re.sub("-linux.*$", "-linux.*\n", tmp) | ||
766 | platform_fd.write(tmp) | ||
767 | |||
768 | # Tell RPM that the "/" directory exist and is available | ||
769 | bb.note("configuring RPM system provides") | ||
770 | sysinfo_dir = os.path.join(self.etcrpm_dir, "sysinfo") | ||
771 | bb.utils.mkdirhier(sysinfo_dir) | ||
772 | with open(os.path.join(sysinfo_dir, "Dirnames"), "w+") as dirnames: | ||
773 | dirnames.write("/\n") | ||
774 | |||
775 | if self.providename: | ||
776 | providename_dir = os.path.join(sysinfo_dir, "Providename") | ||
777 | if not os.path.exists(providename_dir): | ||
778 | providename_content = '\n'.join(self.providename) | ||
779 | providename_content += '\n' | ||
780 | open(providename_dir, "w+").write(providename_content) | ||
781 | |||
782 | # Configure RPM... we enforce these settings! | ||
783 | bb.note("configuring RPM DB settings") | ||
784 | # After change the __db.* cache size, log file will not be | ||
785 | # generated automatically, that will raise some warnings, | ||
786 | # so touch a bare log for rpm write into it. | ||
787 | if self.rpm_version == 5: | ||
788 | rpmlib_log = os.path.join(self.image_rpmlib, 'log', 'log.0000000001') | ||
789 | if not os.path.exists(rpmlib_log): | ||
790 | bb.utils.mkdirhier(os.path.join(self.image_rpmlib, 'log')) | ||
791 | open(rpmlib_log, 'w+').close() | ||
792 | |||
793 | DB_CONFIG_CONTENT = "# ================ Environment\n" \ | ||
794 | "set_data_dir .\n" \ | ||
795 | "set_create_dir .\n" \ | ||
796 | "set_lg_dir ./log\n" \ | ||
797 | "set_tmp_dir ./tmp\n" \ | ||
798 | "set_flags db_log_autoremove on\n" \ | ||
799 | "\n" \ | ||
800 | "# -- thread_count must be >= 8\n" \ | ||
801 | "set_thread_count 64\n" \ | ||
802 | "\n" \ | ||
803 | "# ================ Logging\n" \ | ||
804 | "\n" \ | ||
805 | "# ================ Memory Pool\n" \ | ||
806 | "set_cachesize 0 1048576 0\n" \ | ||
807 | "set_mp_mmapsize 268435456\n" \ | ||
808 | "\n" \ | ||
809 | "# ================ Locking\n" \ | ||
810 | "set_lk_max_locks 16384\n" \ | ||
811 | "set_lk_max_lockers 16384\n" \ | ||
812 | "set_lk_max_objects 16384\n" \ | ||
813 | "mutex_set_max 163840\n" \ | ||
814 | "\n" \ | ||
815 | "# ================ Replication\n" | ||
816 | |||
817 | db_config_dir = os.path.join(self.image_rpmlib, 'DB_CONFIG') | ||
818 | if not os.path.exists(db_config_dir): | ||
819 | open(db_config_dir, 'w+').write(DB_CONFIG_CONTENT) | ||
820 | |||
821 | # Create database so that smart doesn't complain (lazy init) | ||
822 | opt = "-qa" | ||
823 | if self.rpm_version == 4: | ||
824 | opt = "--initdb" | ||
825 | cmd = "%s --root %s --dbpath /var/lib/rpm %s > /dev/null" % ( | ||
826 | self.rpm_cmd, self.target_rootfs, opt) | ||
827 | try: | ||
828 | subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) | ||
829 | except subprocess.CalledProcessError as e: | ||
830 | bb.fatal("Create rpm database failed. Command '%s' " | ||
831 | "returned %d:\n%s" % (cmd, e.returncode, e.output)) | ||
832 | |||
833 | # Configure smart | ||
834 | bb.note("configuring Smart settings") | ||
835 | bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'), | ||
836 | True) | ||
837 | self._invoke_smart('config --set rpm-root=%s' % self.target_rootfs) | ||
838 | self._invoke_smart('config --set rpm-dbpath=/var/lib/rpm') | ||
839 | self._invoke_smart('config --set rpm-extra-macros._var=%s' % | ||
840 | self.d.getVar('localstatedir', True)) | ||
841 | cmd = 'config --set rpm-extra-macros._tmppath=/install/tmp' | ||
842 | |||
843 | prefer_color = self.d.getVar('RPM_PREFER_ELF_ARCH', True) | ||
844 | if prefer_color: | ||
845 | if prefer_color not in ['0', '1', '2', '4']: | ||
846 | bb.fatal("Invalid RPM_PREFER_ELF_ARCH: %s, it should be one of:\n" | ||
847 | "\t1: ELF32 wins\n" | ||
848 | "\t2: ELF64 wins\n" | ||
849 | "\t4: ELF64 N32 wins (mips64 or mips64el only)" % | ||
850 | prefer_color) | ||
851 | if prefer_color == "4" and self.d.getVar("TUNE_ARCH", True) not in \ | ||
852 | ['mips64', 'mips64el']: | ||
853 | bb.fatal("RPM_PREFER_ELF_ARCH = \"4\" is for mips64 or mips64el " | ||
854 | "only.") | ||
855 | self._invoke_smart('config --set rpm-extra-macros._prefer_color=%s' | ||
856 | % prefer_color) | ||
857 | |||
858 | self._invoke_smart(cmd) | ||
859 | |||
860 | # Write common configuration for host and target usage | ||
861 | self._invoke_smart('config --set rpm-nolinktos=1') | ||
862 | self._invoke_smart('config --set rpm-noparentdirs=1') | ||
863 | check_signature = self.d.getVar('RPM_CHECK_SIGNATURES', True) | ||
864 | if check_signature and check_signature.strip() == "0": | ||
865 | self._invoke_smart('config --set rpm-check-signatures=false') | ||
866 | for i in self.d.getVar('BAD_RECOMMENDATIONS', True).split(): | ||
867 | self._invoke_smart('flag --set ignore-recommends %s' % i) | ||
868 | |||
869 | # Do the following configurations here, to avoid them being | ||
870 | # saved for field upgrade | ||
871 | if self.d.getVar('NO_RECOMMENDATIONS', True).strip() == "1": | ||
872 | self._invoke_smart('config --set ignore-all-recommends=1') | ||
873 | pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE', True) or "" | ||
874 | for i in pkg_exclude.split(): | ||
875 | self._invoke_smart('flag --set exclude-packages %s' % i) | ||
876 | |||
877 | # Optional debugging | ||
878 | # self._invoke_smart('config --set rpm-log-level=debug') | ||
879 | # cmd = 'config --set rpm-log-file=/tmp/smart-debug-logfile' | ||
880 | # self._invoke_smart(cmd) | ||
881 | ch_already_added = [] | ||
882 | for canonical_arch in platform_extra: | ||
883 | arch = canonical_arch.split('-')[0] | ||
884 | arch_channel = os.path.join(self.deploy_dir, arch) | ||
885 | if os.path.exists(arch_channel) and not arch in ch_already_added: | ||
886 | bb.note('Note: adding Smart channel %s (%s)' % | ||
887 | (arch, channel_priority)) | ||
888 | self._invoke_smart('channel --add %s type=rpm-md baseurl=%s -y' | ||
889 | % (arch, arch_channel)) | ||
890 | self._invoke_smart('channel --set %s priority=%d' % | ||
891 | (arch, channel_priority)) | ||
892 | channel_priority -= 5 | ||
893 | |||
894 | ch_already_added.append(arch) | ||
895 | |||
896 | bb.note('adding Smart RPM DB channel') | ||
897 | self._invoke_smart('channel --add rpmsys type=rpm-sys -y') | ||
898 | |||
899 | # Construct install scriptlet wrapper. | ||
900 | # Scripts need to be ordered when executed, this ensures numeric order. | ||
901 | # If we ever run into needing more the 899 scripts, we'll have to. | ||
902 | # change num to start with 1000. | ||
903 | # | ||
904 | if self.rpm_version == 4: | ||
905 | scriptletcmd = "$2 $3 $4\n" | ||
906 | else: | ||
907 | scriptletcmd = "$2 $1/$3 $4\n" | ||
908 | |||
909 | SCRIPTLET_FORMAT = "#!/bin/bash\n" \ | ||
910 | "\n" \ | ||
911 | "export PATH=%s\n" \ | ||
912 | "export D=%s\n" \ | ||
913 | 'export OFFLINE_ROOT="$D"\n' \ | ||
914 | 'export IPKG_OFFLINE_ROOT="$D"\n' \ | ||
915 | 'export OPKG_OFFLINE_ROOT="$D"\n' \ | ||
916 | "export INTERCEPT_DIR=%s\n" \ | ||
917 | "export NATIVE_ROOT=%s\n" \ | ||
918 | "\n" \ | ||
919 | + scriptletcmd + \ | ||
920 | "if [ $? -ne 0 ]; then\n" \ | ||
921 | " if [ $4 -eq 1 ]; then\n" \ | ||
922 | " mkdir -p $1/etc/rpm-postinsts\n" \ | ||
923 | " num=100\n" \ | ||
924 | " while [ -e $1/etc/rpm-postinsts/${num}-* ]; do num=$((num + 1)); done\n" \ | ||
925 | " name=`head -1 $1/$3 | cut -d\' \' -f 2`\n" \ | ||
926 | ' echo "#!$2" > $1/etc/rpm-postinsts/${num}-${name}\n' \ | ||
927 | ' echo "# Arg: $4" >> $1/etc/rpm-postinsts/${num}-${name}\n' \ | ||
928 | " cat $1/$3 >> $1/etc/rpm-postinsts/${num}-${name}\n" \ | ||
929 | " chmod +x $1/etc/rpm-postinsts/${num}-${name}\n" \ | ||
930 | " else\n" \ | ||
931 | ' echo "Error: pre/post remove scriptlet failed"\n' \ | ||
932 | " fi\n" \ | ||
933 | "fi\n" | ||
934 | |||
935 | intercept_dir = self.d.expand('${WORKDIR}/intercept_scripts') | ||
936 | native_root = self.d.getVar('STAGING_DIR_NATIVE', True) | ||
937 | scriptlet_content = SCRIPTLET_FORMAT % (os.environ['PATH'], | ||
938 | self.target_rootfs, | ||
939 | intercept_dir, | ||
940 | native_root) | ||
941 | open(self.scriptlet_wrapper, 'w+').write(scriptlet_content) | ||
942 | |||
943 | bb.note("Note: configuring RPM cross-install scriptlet_wrapper") | ||
944 | os.chmod(self.scriptlet_wrapper, 0755) | ||
945 | cmd = 'config --set rpm-extra-macros._cross_scriptlet_wrapper=%s' % \ | ||
946 | self.scriptlet_wrapper | ||
947 | self._invoke_smart(cmd) | ||
948 | |||
949 | # Debug to show smart config info | ||
950 | # bb.note(self._invoke_smart('config --show')) | ||
951 | |||
    def update(self):
        """Refresh smart's view of the rpm system database (rpmsys channel)."""
        self._invoke_smart('update rpmsys')
954 | |||
955 | ''' | ||
956 | Install pkgs with smart, the pkg name is oe format | ||
957 | ''' | ||
958 | def install(self, pkgs, attempt_only=False): | ||
959 | |||
960 | bb.note("Installing the following packages: %s" % ' '.join(pkgs)) | ||
961 | if attempt_only and len(pkgs) == 0: | ||
962 | return | ||
963 | pkgs = self._pkg_translate_oe_to_smart(pkgs, attempt_only) | ||
964 | |||
965 | if not attempt_only: | ||
966 | bb.note('to be installed: %s' % ' '.join(pkgs)) | ||
967 | cmd = "%s %s install -y %s" % \ | ||
968 | (self.smart_cmd, self.smart_opt, ' '.join(pkgs)) | ||
969 | bb.note(cmd) | ||
970 | else: | ||
971 | bb.note('installing attempt only packages...') | ||
972 | bb.note('Attempting %s' % ' '.join(pkgs)) | ||
973 | cmd = "%s %s install --attempt -y %s" % \ | ||
974 | (self.smart_cmd, self.smart_opt, ' '.join(pkgs)) | ||
975 | try: | ||
976 | output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) | ||
977 | bb.note(output) | ||
978 | except subprocess.CalledProcessError as e: | ||
979 | bb.fatal("Unable to install packages. Command '%s' " | ||
980 | "returned %d:\n%s" % (cmd, e.returncode, e.output)) | ||
981 | |||
982 | ''' | ||
983 | Remove pkgs with smart, the pkg name is smart/rpm format | ||
984 | ''' | ||
985 | def remove(self, pkgs, with_dependencies=True): | ||
986 | bb.note('to be removed: ' + ' '.join(pkgs)) | ||
987 | |||
988 | if not with_dependencies: | ||
989 | cmd = "%s -e --nodeps " % self.rpm_cmd | ||
990 | cmd += "--root=%s " % self.target_rootfs | ||
991 | cmd += "--dbpath=/var/lib/rpm " | ||
992 | cmd += "--define='_cross_scriptlet_wrapper %s' " % \ | ||
993 | self.scriptlet_wrapper | ||
994 | cmd += "--define='_tmppath /install/tmp' %s" % ' '.join(pkgs) | ||
995 | else: | ||
996 | # for pkg in pkgs: | ||
997 | # bb.note('Debug: What required: %s' % pkg) | ||
998 | # bb.note(self._invoke_smart('query %s --show-requiredby' % pkg)) | ||
999 | |||
1000 | cmd = "%s %s remove -y %s" % (self.smart_cmd, | ||
1001 | self.smart_opt, | ||
1002 | ' '.join(pkgs)) | ||
1003 | |||
1004 | try: | ||
1005 | bb.note(cmd) | ||
1006 | output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) | ||
1007 | bb.note(output) | ||
1008 | except subprocess.CalledProcessError as e: | ||
1009 | bb.note("Unable to remove packages. Command '%s' " | ||
1010 | "returned %d:\n%s" % (cmd, e.returncode, e.output)) | ||
1011 | |||
    def upgrade(self):
        """Upgrade all installed packages via smart."""
        bb.note('smart upgrade')
        self._invoke_smart('upgrade')
1015 | |||
1016 | def write_index(self): | ||
1017 | result = self.indexer.write_index() | ||
1018 | |||
1019 | if result is not None: | ||
1020 | bb.fatal(result) | ||
1021 | |||
1022 | def remove_packaging_data(self): | ||
1023 | bb.utils.remove(self.image_rpmlib, True) | ||
1024 | bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'), | ||
1025 | True) | ||
1026 | bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/opkg'), True) | ||
1027 | |||
1028 | # remove temp directory | ||
1029 | bb.utils.remove(self.d.expand('${IMAGE_ROOTFS}/install'), True) | ||
1030 | |||
1031 | def backup_packaging_data(self): | ||
1032 | # Save the rpmlib for increment rpm image generation | ||
1033 | if os.path.exists(self.saved_rpmlib): | ||
1034 | bb.utils.remove(self.saved_rpmlib, True) | ||
1035 | shutil.copytree(self.image_rpmlib, | ||
1036 | self.saved_rpmlib, | ||
1037 | symlinks=True) | ||
1038 | |||
1039 | def recovery_packaging_data(self): | ||
1040 | # Move the rpmlib back | ||
1041 | if os.path.exists(self.saved_rpmlib): | ||
1042 | if os.path.exists(self.image_rpmlib): | ||
1043 | bb.utils.remove(self.image_rpmlib, True) | ||
1044 | |||
1045 | bb.note('Recovery packaging data') | ||
1046 | shutil.copytree(self.saved_rpmlib, | ||
1047 | self.image_rpmlib, | ||
1048 | symlinks=True) | ||
1049 | |||
    def list_installed(self, format=None):
        """Return the installed-package listing, in the given output format.

        'format' is passed straight through to the pkgs_list helper.
        """
        return self.pkgs_list.list(format)
1052 | |||
1053 | ''' | ||
1054 | If incremental install, we need to determine what we've got, | ||
1055 | what we need to add, and what to remove... | ||
1056 | The dump_install_solution will dump and save the new install | ||
1057 | solution. | ||
1058 | ''' | ||
1059 | def dump_install_solution(self, pkgs): | ||
1060 | bb.note('creating new install solution for incremental install') | ||
1061 | if len(pkgs) == 0: | ||
1062 | return | ||
1063 | |||
1064 | pkgs = self._pkg_translate_oe_to_smart(pkgs, False) | ||
1065 | install_pkgs = list() | ||
1066 | |||
1067 | cmd = "%s %s install -y --dump %s 2>%s" % \ | ||
1068 | (self.smart_cmd, | ||
1069 | self.smart_opt, | ||
1070 | ' '.join(pkgs), | ||
1071 | self.solution_manifest) | ||
1072 | try: | ||
1073 | # Disable rpmsys channel for the fake install | ||
1074 | self._invoke_smart('channel --disable rpmsys') | ||
1075 | |||
1076 | subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) | ||
1077 | with open(self.solution_manifest, 'r') as manifest: | ||
1078 | for pkg in manifest.read().split('\n'): | ||
1079 | if '@' in pkg: | ||
1080 | install_pkgs.append(pkg) | ||
1081 | except subprocess.CalledProcessError as e: | ||
1082 | bb.note("Unable to dump install packages. Command '%s' " | ||
1083 | "returned %d:\n%s" % (cmd, e.returncode, e.output)) | ||
1084 | # Recovery rpmsys channel | ||
1085 | self._invoke_smart('channel --enable rpmsys') | ||
1086 | return install_pkgs | ||
1087 | |||
1088 | ''' | ||
1089 | If incremental install, we need to determine what we've got, | ||
1090 | what we need to add, and what to remove... | ||
1091 | The load_old_install_solution will load the previous install | ||
1092 | solution | ||
1093 | ''' | ||
1094 | def load_old_install_solution(self): | ||
1095 | bb.note('load old install solution for incremental install') | ||
1096 | installed_pkgs = list() | ||
1097 | if not os.path.exists(self.solution_manifest): | ||
1098 | bb.note('old install solution not exist') | ||
1099 | return installed_pkgs | ||
1100 | |||
1101 | with open(self.solution_manifest, 'r') as manifest: | ||
1102 | for pkg in manifest.read().split('\n'): | ||
1103 | if '@' in pkg: | ||
1104 | installed_pkgs.append(pkg.strip()) | ||
1105 | |||
1106 | return installed_pkgs | ||
1107 | |||
1108 | ''' | ||
1109 | Dump all available packages in feeds, it should be invoked after the | ||
1110 | newest rpm index was created | ||
1111 | ''' | ||
1112 | def dump_all_available_pkgs(self): | ||
1113 | available_manifest = self.d.expand('${T}/saved/available_pkgs.txt') | ||
1114 | available_pkgs = list() | ||
1115 | cmd = "%s %s query --output %s" % \ | ||
1116 | (self.smart_cmd, self.smart_opt, available_manifest) | ||
1117 | try: | ||
1118 | subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) | ||
1119 | with open(available_manifest, 'r') as manifest: | ||
1120 | for pkg in manifest.read().split('\n'): | ||
1121 | if '@' in pkg: | ||
1122 | available_pkgs.append(pkg.strip()) | ||
1123 | except subprocess.CalledProcessError as e: | ||
1124 | bb.note("Unable to list all available packages. Command '%s' " | ||
1125 | "returned %d:\n%s" % (cmd, e.returncode, e.output)) | ||
1126 | |||
1127 | self.fullpkglist = available_pkgs | ||
1128 | |||
1129 | return | ||
1130 | |||
1131 | def save_rpmpostinst(self, pkg): | ||
1132 | mlibs = (self.d.getVar('MULTILIB_GLOBAL_VARIANTS') or "").split() | ||
1133 | |||
1134 | new_pkg = pkg | ||
1135 | # Remove any multilib prefix from the package name | ||
1136 | for mlib in mlibs: | ||
1137 | if mlib in pkg: | ||
1138 | new_pkg = pkg.replace(mlib + '-', '') | ||
1139 | break | ||
1140 | |||
1141 | bb.note(' * postponing %s' % new_pkg) | ||
1142 | saved_dir = self.target_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/') + new_pkg | ||
1143 | |||
1144 | cmd = self.rpm_cmd + ' -q --scripts --root ' + self.target_rootfs | ||
1145 | cmd += ' --dbpath=/var/lib/rpm ' + new_pkg | ||
1146 | cmd += ' | sed -n -e "/^postinstall scriptlet (using .*):$/,/^.* scriptlet (using .*):$/ {/.*/p}"' | ||
1147 | cmd += ' | sed -e "/postinstall scriptlet (using \(.*\)):$/d"' | ||
1148 | cmd += ' -e "/^.* scriptlet (using .*):$/d" > %s' % saved_dir | ||
1149 | |||
1150 | try: | ||
1151 | bb.note(cmd) | ||
1152 | output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip() | ||
1153 | bb.note(output) | ||
1154 | os.chmod(saved_dir, 0755) | ||
1155 | except subprocess.CalledProcessError as e: | ||
1156 | bb.fatal("Invoke save_rpmpostinst failed. Command '%s' " | ||
1157 | "returned %d:\n%s" % (cmd, e.returncode, e.output)) | ||
1158 | |||
1159 | '''Write common configuration for target usage''' | ||
1160 | def rpm_setup_smart_target_config(self): | ||
1161 | bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'), | ||
1162 | True) | ||
1163 | |||
1164 | self._invoke_smart('config --set rpm-nolinktos=1') | ||
1165 | self._invoke_smart('config --set rpm-noparentdirs=1') | ||
1166 | for i in self.d.getVar('BAD_RECOMMENDATIONS', True).split(): | ||
1167 | self._invoke_smart('flag --set ignore-recommends %s' % i) | ||
1168 | self._invoke_smart('channel --add rpmsys type=rpm-sys -y') | ||
1169 | |||
1170 | ''' | ||
1171 | The rpm db lock files were produced after invoking rpm to query on | ||
1172 | build system, and they caused the rpm on target didn't work, so we | ||
1173 | need to unlock the rpm db by removing the lock files. | ||
1174 | ''' | ||
1175 | def unlock_rpm_db(self): | ||
1176 | # Remove rpm db lock files | ||
1177 | rpm_db_locks = glob.glob('%s/var/lib/rpm/__db.*' % self.target_rootfs) | ||
1178 | for f in rpm_db_locks: | ||
1179 | bb.utils.remove(f, True) | ||
1180 | |||
1181 | |||
class OpkgPM(PackageManager):
    """PackageManager backend for ipk packages, driven by opkg-cl.

    Operates on 'target_rootfs' using the opkg configuration written to
    'config_file' and the ipk feeds under DEPLOY_DIR_IPK.
    """
    def __init__(self, d, target_rootfs, config_file, archs, task_name='target'):
        # archs: space-separated list of package architectures
        # task_name: used to namespace the saved opkg state under ${T}/saved
        super(OpkgPM, self).__init__(d)

        self.target_rootfs = target_rootfs
        self.config_file = config_file
        self.pkg_archs = archs
        self.task_name = task_name

        self.deploy_dir = self.d.getVar("DEPLOY_DIR_IPK", True)
        self.deploy_lock_file = os.path.join(self.deploy_dir, "deploy.lock")
        self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg-cl")
        self.opkg_args = "-f %s -o %s " % (self.config_file, target_rootfs)
        self.opkg_args += self.d.getVar("OPKG_ARGS", True)

        # Strip the leading '/' so os.path.join does not discard
        # target_rootfs when composing the opkg state directory path.
        opkg_lib_dir = self.d.getVar('OPKGLIBDIR', True)
        if opkg_lib_dir[0] == "/":
            opkg_lib_dir = opkg_lib_dir[1:]

        self.opkg_dir = os.path.join(target_rootfs, opkg_lib_dir, "opkg")

        bb.utils.mkdirhier(self.opkg_dir)

        # Per-task backup location used by backup/recover_packaging_data()
        self.saved_opkg_dir = self.d.expand('${T}/saved/%s' % self.task_name)
        if not os.path.exists(self.d.expand('${T}/saved')):
            bb.utils.mkdirhier(self.d.expand('${T}/saved'))

        # Feed-based builds get a config pointing at external feeds;
        # otherwise the config points at the local deploy directory.
        if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") != "1":
            self._create_config()
        else:
            self._create_custom_config()

        self.indexer = OpkgIndexer(self.d, self.deploy_dir)

    """
    This function will change a package's status in /var/lib/opkg/status file.
    If 'packages' is None then the new_status will be applied to all
    packages
    """
    def mark_packages(self, status_tag, packages=None):
        status_file = os.path.join(self.opkg_dir, "status")

        # Rewrite the status file into a temp copy, appending status_tag to
        # the Status line of the selected entries, then rename over the
        # original.
        with open(status_file, "r") as sf:
            with open(status_file + ".tmp", "w+") as tmp_sf:
                if packages is None:
                    tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
                                        r"Package: \1\n\2Status: \3%s" % status_tag,
                                        sf.read()))
                else:
                    if type(packages).__name__ != "list":
                        raise TypeError("'packages' should be a list object")

                    status = sf.read()
                    for pkg in packages:
                        status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
                                        r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
                                        status)

                    tmp_sf.write(status)

        os.rename(status_file + ".tmp", status_file)

    def _create_custom_config(self):
        """Write an opkg config that pulls packages from the external feeds
        listed in IPK_FEED_URIS instead of the local deploy directory."""
        bb.note("Building from feeds activated!")

        with open(self.config_file, "w+") as config_file:
            priority = 1
            for arch in self.pkg_archs.split():
                config_file.write("arch %s %d\n" % (arch, priority))
                priority += 5

            # IPK_FEED_URIS entries have the form "<name>##<uri>"
            for line in (self.d.getVar('IPK_FEED_URIS', True) or "").split():
                feed_match = re.match("^[ \t]*(.*)##([^ \t]*)[ \t]*$", line)

                if feed_match is not None:
                    feed_name = feed_match.group(1)
                    feed_uri = feed_match.group(2)

                    bb.note("Add %s feed with URL %s" % (feed_name, feed_uri))

                    config_file.write("src/gz %s %s\n" % (feed_name, feed_uri))

        """
        Allow to use package deploy directory contents as quick devel-testing
        feed. This creates individual feed configs for each arch subdir of those
        specified as compatible for the current machine.
        NOTE: Development-helper feature, NOT a full-fledged feed.
        """
        if (self.d.getVar('FEED_DEPLOYDIR_BASE_URI', True) or "") != "":
            for arch in self.pkg_archs.split():
                cfg_file_name = os.path.join(self.target_rootfs,
                                             self.d.getVar("sysconfdir", True),
                                             "opkg",
                                             "local-%s-feed.conf" % arch)

                with open(cfg_file_name, "w+") as cfg_file:
                    cfg_file.write("src/gz local-%s %s/%s" %
                                   (arch,
                                    self.d.getVar('FEED_DEPLOYDIR_BASE_URI', True),
                                    arch))

    def _create_config(self):
        """Write the default opkg config: one 'arch' line per package arch
        (priority increasing by 5) plus local 'src oe*' feeds pointing at
        the deploy directory subdirs that exist."""
        with open(self.config_file, "w+") as config_file:
            priority = 1
            for arch in self.pkg_archs.split():
                config_file.write("arch %s %d\n" % (arch, priority))
                priority += 5

            config_file.write("src oe file:%s\n" % self.deploy_dir)

            for arch in self.pkg_archs.split():
                pkgs_dir = os.path.join(self.deploy_dir, arch)
                if os.path.isdir(pkgs_dir):
                    config_file.write("src oe-%s file:%s\n" %
                                      (arch, pkgs_dir))

    def insert_feeds_uris(self):
        """Write /etc/opkg/base-feeds.conf inside the target rootfs so the
        image itself can fetch packages from the FEED_URIS remote feeds."""
        if self.feed_uris == "":
            return

        rootfs_config = os.path.join('%s/etc/opkg/base-feeds.conf'
                                  % self.target_rootfs)

        with open(rootfs_config, "w+") as config_file:
            uri_iterator = 0
            for uri in self.feed_uris.split():
                config_file.write("src/gz url-%d %s/ipk\n" %
                                  (uri_iterator, uri))

                # One additional per-arch feed for each arch subdir that
                # actually exists in the deploy directory.
                for arch in self.pkg_archs.split():
                    if not os.path.exists(os.path.join(self.deploy_dir, arch)):
                        continue
                    bb.note('Note: adding opkg channel url-%s-%d (%s)' %
                        (arch, uri_iterator, uri))

                    # NOTE(review): the feed written here is named
                    # "uri-%s-%d" while the log line above and the base feed
                    # use "url-..." -- looks like an inconsistency; confirm
                    # which spelling is intended.
                    config_file.write("src/gz uri-%s-%d %s/ipk/%s\n" %
                                      (arch, uri_iterator, uri, arch))
                uri_iterator += 1

    def update(self):
        """Refresh opkg's package lists from the configured feeds, holding
        the deploy-dir lock for the duration."""
        self.deploy_dir_lock()

        cmd = "%s %s update" % (self.opkg_cmd, self.opkg_args)

        try:
            subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            # Release the lock before aborting the build.
            self.deploy_dir_unlock()
            bb.fatal("Unable to update the package index files. Command '%s' "
                     "returned %d:\n%s" % (cmd, e.returncode, e.output))

        self.deploy_dir_unlock()

    def install(self, pkgs, attempt_only=False):
        """Install 'pkgs' into the target rootfs; when attempt_only is set,
        failure is only logged instead of aborting the build."""
        if attempt_only and len(pkgs) == 0:
            return

        cmd = "%s %s install %s" % (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))

        # Environment consumed by the packages' maintainer scripts and the
        # postinstall intercept machinery.
        os.environ['D'] = self.target_rootfs
        os.environ['OFFLINE_ROOT'] = self.target_rootfs
        os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
        os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
        os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR', True),
                                                   "intercept_scripts")
        os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE', True)

        try:
            bb.note("Installing the following packages: %s" % ' '.join(pkgs))
            bb.note(cmd)
            output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
            bb.note(output)
        except subprocess.CalledProcessError as e:
            # Index selects bb.fatal for normal installs, bb.note for
            # attempt-only ones.
            (bb.fatal, bb.note)[attempt_only]("Unable to install packages. "
                                              "Command '%s' returned %d:\n%s" %
                                              (cmd, e.returncode, e.output))

    def remove(self, pkgs, with_dependencies=True):
        """Remove 'pkgs'; with_dependencies also force-removes packages
        depending on them, otherwise dependencies are force-ignored."""
        if with_dependencies:
            cmd = "%s %s --force-depends --force-remove --force-removal-of-dependent-packages remove %s" % \
                (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
        else:
            cmd = "%s %s --force-depends remove %s" % \
                (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))

        try:
            bb.note(cmd)
            output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
            bb.note(output)
        except subprocess.CalledProcessError as e:
            bb.fatal("Unable to remove packages. Command '%s' "
                     "returned %d:\n%s" % (e.cmd, e.returncode, e.output))

    def write_index(self):
        """Build/refresh the ipk feed index under the deploy-dir lock."""
        self.deploy_dir_lock()

        result = self.indexer.write_index()

        self.deploy_dir_unlock()

        if result is not None:
            bb.fatal(result)

    def remove_packaging_data(self):
        """Delete opkg's state directory from the image (recreated empty)."""
        bb.utils.remove(self.opkg_dir, True)
        # create the directory back, it's needed by PM lock
        bb.utils.mkdirhier(self.opkg_dir)

    def list_installed(self, format=None):
        """Return the installed-package listing in the requested format."""
        return OpkgPkgsList(self.d, self.target_rootfs, self.config_file).list(format)

    def handle_bad_recommendations(self):
        """Pre-seed opkg's status file marking each BAD_RECOMMENDATIONS
        package 'deinstall hold not-installed' so it is never pulled in as
        a recommendation."""
        bad_recommendations = self.d.getVar("BAD_RECOMMENDATIONS", True) or ""
        if bad_recommendations.strip() == "":
            return

        status_file = os.path.join(self.opkg_dir, "status")

        # If status file existed, it means the bad recommendations has already
        # been handled
        if os.path.exists(status_file):
            return

        cmd = "%s %s info " % (self.opkg_cmd, self.opkg_args)

        with open(status_file, "w+") as status:
            for pkg in bad_recommendations.split():
                pkg_info = cmd + pkg

                try:
                    output = subprocess.check_output(pkg_info.split(), stderr=subprocess.STDOUT).strip()
                except subprocess.CalledProcessError as e:
                    bb.fatal("Cannot get package info. Command '%s' "
                             "returned %d:\n%s" % (pkg_info, e.returncode, e.output))

                if output == "":
                    bb.note("Ignored bad recommendation: '%s' is "
                            "not a package" % pkg)
                    continue

                # Copy the package entry, rewriting only its Status line.
                for line in output.split('\n'):
                    if line.startswith("Status:"):
                        status.write("Status: deinstall hold not-installed\n")
                    else:
                        status.write(line + "\n")

                # Append a blank line after each package entry to ensure that it
                # is separated from the following entry
                status.write("\n")

    '''
    The following function dummy installs pkgs and returns the log of output.
    '''
    def dummy_install(self, pkgs):
        if len(pkgs) == 0:
            return

        # Create an temp dir as opkg root for dummy installation
        temp_rootfs = self.d.expand('${T}/opkg')
        temp_opkg_dir = os.path.join(temp_rootfs, 'var/lib/opkg')
        bb.utils.mkdirhier(temp_opkg_dir)

        opkg_args = "-f %s -o %s " % (self.config_file, temp_rootfs)
        opkg_args += self.d.getVar("OPKG_ARGS", True)

        cmd = "%s %s update" % (self.opkg_cmd, opkg_args)
        try:
            subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
        except subprocess.CalledProcessError as e:
            bb.fatal("Unable to update. Command '%s' "
                     "returned %d:\n%s" % (cmd, e.returncode, e.output))

        # Dummy installation
        cmd = "%s %s --noaction install %s " % (self.opkg_cmd,
                                                opkg_args,
                                                ' '.join(pkgs))
        try:
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
        except subprocess.CalledProcessError as e:
            bb.fatal("Unable to dummy install packages. Command '%s' "
                     "returned %d:\n%s" % (cmd, e.returncode, e.output))

        bb.utils.remove(temp_rootfs, True)

        return output

    def backup_packaging_data(self):
        # Save the opkglib for increment ipk image generation
        if os.path.exists(self.saved_opkg_dir):
            bb.utils.remove(self.saved_opkg_dir, True)
        shutil.copytree(self.opkg_dir,
                        self.saved_opkg_dir,
                        symlinks=True)

    def recover_packaging_data(self):
        # Move the opkglib back
        if os.path.exists(self.saved_opkg_dir):
            if os.path.exists(self.opkg_dir):
                bb.utils.remove(self.opkg_dir, True)

            bb.note('Recover packaging data')
            shutil.copytree(self.saved_opkg_dir,
                            self.opkg_dir,
                            symlinks=True)
1486 | |||
1487 | |||
1488 | class DpkgPM(PackageManager): | ||
    def __init__(self, d, target_rootfs, archs, base_archs, apt_conf_dir=None):
        # target_rootfs: image root that apt/dpkg operate on
        # archs: space-separated package architecture list
        # base_archs: primary architecture, passed to _create_configs()
        # apt_conf_dir: optional override of the apt configuration directory
        super(DpkgPM, self).__init__(d)
        self.target_rootfs = target_rootfs
        self.deploy_dir = self.d.getVar('DEPLOY_DIR_DEB', True)
        if apt_conf_dir is None:
            self.apt_conf_dir = self.d.expand("${APTCONF_TARGET}/apt")
        else:
            self.apt_conf_dir = apt_conf_dir
        self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf")
        self.apt_get_cmd = bb.utils.which(os.getenv('PATH'), "apt-get")

        self.apt_args = d.getVar("APT_ARGS", True)

        # Extend the arch list with the multilib package archs ('-' mapped
        # to '_'), skipping any already present.
        self.all_arch_list = archs.split()
        all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").replace('-', '_').split()
        self.all_arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in self.all_arch_list)

        self._create_configs(archs, base_archs)

        self.indexer = DpkgIndexer(self.d, self.deploy_dir)
1509 | |||
1510 | """ | ||
1511 | This function will change a package's status in /var/lib/dpkg/status file. | ||
1512 | If 'packages' is None then the new_status will be applied to all | ||
1513 | packages | ||
1514 | """ | ||
1515 | def mark_packages(self, status_tag, packages=None): | ||
1516 | status_file = self.target_rootfs + "/var/lib/dpkg/status" | ||
1517 | |||
1518 | with open(status_file, "r") as sf: | ||
1519 | with open(status_file + ".tmp", "w+") as tmp_sf: | ||
1520 | if packages is None: | ||
1521 | tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)", | ||
1522 | r"Package: \1\n\2Status: \3%s" % status_tag, | ||
1523 | sf.read())) | ||
1524 | else: | ||
1525 | if type(packages).__name__ != "list": | ||
1526 | raise TypeError("'packages' should be a list object") | ||
1527 | |||
1528 | status = sf.read() | ||
1529 | for pkg in packages: | ||
1530 | status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg, | ||
1531 | r"Package: %s\n\1Status: \2%s" % (pkg, status_tag), | ||
1532 | status) | ||
1533 | |||
1534 | tmp_sf.write(status) | ||
1535 | |||
1536 | os.rename(status_file + ".tmp", status_file) | ||
1537 | |||
1538 | """ | ||
1539 | Run the pre/post installs for package "package_name". If package_name is | ||
1540 | None, then run all pre/post install scriptlets. | ||
1541 | """ | ||
1542 | def run_pre_post_installs(self, package_name=None): | ||
1543 | info_dir = self.target_rootfs + "/var/lib/dpkg/info" | ||
1544 | suffixes = [(".preinst", "Preinstall"), (".postinst", "Postinstall")] | ||
1545 | status_file = self.target_rootfs + "/var/lib/dpkg/status" | ||
1546 | installed_pkgs = [] | ||
1547 | |||
1548 | with open(status_file, "r") as status: | ||
1549 | for line in status.read().split('\n'): | ||
1550 | m = re.match("^Package: (.*)", line) | ||
1551 | if m is not None: | ||
1552 | installed_pkgs.append(m.group(1)) | ||
1553 | |||
1554 | if package_name is not None and not package_name in installed_pkgs: | ||
1555 | return | ||
1556 | |||
1557 | os.environ['D'] = self.target_rootfs | ||
1558 | os.environ['OFFLINE_ROOT'] = self.target_rootfs | ||
1559 | os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs | ||
1560 | os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs | ||
1561 | os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR', True), | ||
1562 | "intercept_scripts") | ||
1563 | os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE', True) | ||
1564 | |||
1565 | failed_pkgs = [] | ||
1566 | for pkg_name in installed_pkgs: | ||
1567 | for suffix in suffixes: | ||
1568 | p_full = os.path.join(info_dir, pkg_name + suffix[0]) | ||
1569 | if os.path.exists(p_full): | ||
1570 | try: | ||
1571 | bb.note("Executing %s for package: %s ..." % | ||
1572 | (suffix[1].lower(), pkg_name)) | ||
1573 | subprocess.check_output(p_full, stderr=subprocess.STDOUT) | ||
1574 | except subprocess.CalledProcessError as e: | ||
1575 | bb.note("%s for package %s failed with %d:\n%s" % | ||
1576 | (suffix[1], pkg_name, e.returncode, e.output)) | ||
1577 | failed_pkgs.append(pkg_name) | ||
1578 | break | ||
1579 | |||
1580 | if len(failed_pkgs): | ||
1581 | self.mark_packages("unpacked", failed_pkgs) | ||
1582 | |||
1583 | def update(self): | ||
1584 | os.environ['APT_CONFIG'] = self.apt_conf_file | ||
1585 | |||
1586 | self.deploy_dir_lock() | ||
1587 | |||
1588 | cmd = "%s update" % self.apt_get_cmd | ||
1589 | |||
1590 | try: | ||
1591 | subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) | ||
1592 | except subprocess.CalledProcessError as e: | ||
1593 | bb.fatal("Unable to update the package index files. Command '%s' " | ||
1594 | "returned %d:\n%s" % (e.cmd, e.returncode, e.output)) | ||
1595 | |||
1596 | self.deploy_dir_unlock() | ||
1597 | |||
1598 | def install(self, pkgs, attempt_only=False): | ||
1599 | if attempt_only and len(pkgs) == 0: | ||
1600 | return | ||
1601 | |||
1602 | os.environ['APT_CONFIG'] = self.apt_conf_file | ||
1603 | |||
1604 | cmd = "%s %s install --force-yes --allow-unauthenticated %s" % \ | ||
1605 | (self.apt_get_cmd, self.apt_args, ' '.join(pkgs)) | ||
1606 | |||
1607 | try: | ||
1608 | bb.note("Installing the following packages: %s" % ' '.join(pkgs)) | ||
1609 | subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) | ||
1610 | except subprocess.CalledProcessError as e: | ||
1611 | (bb.fatal, bb.note)[attempt_only]("Unable to install packages. " | ||
1612 | "Command '%s' returned %d:\n%s" % | ||
1613 | (cmd, e.returncode, e.output)) | ||
1614 | |||
1615 | # rename *.dpkg-new files/dirs | ||
1616 | for root, dirs, files in os.walk(self.target_rootfs): | ||
1617 | for dir in dirs: | ||
1618 | new_dir = re.sub("\.dpkg-new", "", dir) | ||
1619 | if dir != new_dir: | ||
1620 | os.rename(os.path.join(root, dir), | ||
1621 | os.path.join(root, new_dir)) | ||
1622 | |||
1623 | for file in files: | ||
1624 | new_file = re.sub("\.dpkg-new", "", file) | ||
1625 | if file != new_file: | ||
1626 | os.rename(os.path.join(root, file), | ||
1627 | os.path.join(root, new_file)) | ||
1628 | |||
1629 | |||
1630 | def remove(self, pkgs, with_dependencies=True): | ||
1631 | if with_dependencies: | ||
1632 | os.environ['APT_CONFIG'] = self.apt_conf_file | ||
1633 | cmd = "%s remove %s" % (self.apt_get_cmd, ' '.join(pkgs)) | ||
1634 | else: | ||
1635 | cmd = "%s --admindir=%s/var/lib/dpkg --instdir=%s" \ | ||
1636 | " -r --force-depends %s" % \ | ||
1637 | (bb.utils.which(os.getenv('PATH'), "dpkg"), | ||
1638 | self.target_rootfs, self.target_rootfs, ' '.join(pkgs)) | ||
1639 | |||
1640 | try: | ||
1641 | subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) | ||
1642 | except subprocess.CalledProcessError as e: | ||
1643 | bb.fatal("Unable to remove packages. Command '%s' " | ||
1644 | "returned %d:\n%s" % (e.cmd, e.returncode, e.output)) | ||
1645 | |||
1646 | def write_index(self): | ||
1647 | self.deploy_dir_lock() | ||
1648 | |||
1649 | result = self.indexer.write_index() | ||
1650 | |||
1651 | self.deploy_dir_unlock() | ||
1652 | |||
1653 | if result is not None: | ||
1654 | bb.fatal(result) | ||
1655 | |||
1656 | def insert_feeds_uris(self): | ||
1657 | if self.feed_uris == "": | ||
1658 | return | ||
1659 | |||
1660 | sources_conf = os.path.join("%s/etc/apt/sources.list" | ||
1661 | % self.target_rootfs) | ||
1662 | arch_list = [] | ||
1663 | |||
1664 | for arch in self.all_arch_list: | ||
1665 | if not os.path.exists(os.path.join(self.deploy_dir, arch)): | ||
1666 | continue | ||
1667 | arch_list.append(arch) | ||
1668 | |||
1669 | with open(sources_conf, "w+") as sources_file: | ||
1670 | for uri in self.feed_uris.split(): | ||
1671 | for arch in arch_list: | ||
1672 | bb.note('Note: adding dpkg channel at (%s)' % uri) | ||
1673 | sources_file.write("deb %s/deb/%s ./\n" % | ||
1674 | (uri, arch)) | ||
1675 | |||
    def _create_configs(self, archs, base_archs):
        """
        Create the apt configuration used for rootfs construction: apt.conf
        (from the staged sample), preferences (arch pinning and
        PACKAGE_EXCLUDE), sources.list (local deploy-dir feeds), and the
        skeleton /var/lib/dpkg state in the target rootfs.
        """
        # apt uses '-' where bitbake arch names use '_'.
        base_archs = re.sub("_", "-", base_archs)

        # Start from a clean apt configuration directory.
        if os.path.exists(self.apt_conf_dir):
            bb.utils.remove(self.apt_conf_dir, True)

        bb.utils.mkdirhier(self.apt_conf_dir)
        bb.utils.mkdirhier(self.apt_conf_dir + "/lists/partial/")
        bb.utils.mkdirhier(self.apt_conf_dir + "/apt.conf.d/")

        # Only consider architectures that actually have deployed packages.
        arch_list = []
        for arch in self.all_arch_list:
            if not os.path.exists(os.path.join(self.deploy_dir, arch)):
                continue
            arch_list.append(arch)

        with open(os.path.join(self.apt_conf_dir, "preferences"), "w+") as prefs_file:
            # Later (more specific) architectures get a higher pin priority.
            priority = 801
            for arch in arch_list:
                prefs_file.write(
                    "Package: *\n"
                    "Pin: release l=%s\n"
                    "Pin-Priority: %d\n\n" % (arch, priority))

                priority += 5

            # Pin excluded packages to -1 so apt will never install them.
            pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE', True) or ""
            for pkg in pkg_exclude.split():
                prefs_file.write(
                    "Package: %s\n"
                    "Pin: release *\n"
                    "Pin-Priority: -1\n\n" % pkg)

        # sources.list is written most-specific-first.
        arch_list.reverse()

        with open(os.path.join(self.apt_conf_dir, "sources.list"), "w+") as sources_file:
            for arch in arch_list:
                sources_file.write("deb file:%s/ ./\n" %
                                   os.path.join(self.deploy_dir, arch))

        # Map multilib variants onto the Debian architecture names apt knows.
        base_arch_list = base_archs.split()
        multilib_variants = self.d.getVar("MULTILIB_VARIANTS", True);
        for variant in multilib_variants.split():
            if variant == "lib32":
                base_arch_list.append("i386")
            elif variant == "lib64":
                base_arch_list.append("amd64")

        # Instantiate apt.conf from the sample shipped in the native sysroot,
        # expanding the Architecture line and the #ROOTFS#/#APTCONF# markers.
        with open(self.apt_conf_file, "w+") as apt_conf:
            with open(self.d.expand("${STAGING_ETCDIR_NATIVE}/apt/apt.conf.sample")) as apt_conf_sample:
                for line in apt_conf_sample.read().split("\n"):
                    match_arch = re.match(" Architecture \".*\";$", line)
                    architectures = ""
                    if match_arch:
                        for base_arch in base_arch_list:
                            architectures += "\"%s\";" % base_arch
                        apt_conf.write(" Architectures {%s};\n" % architectures);
                        apt_conf.write(" Architecture \"%s\";\n" % base_archs)
                    else:
                        line = re.sub("#ROOTFS#", self.target_rootfs, line)
                        line = re.sub("#APTCONF#", self.apt_conf_dir, line)
                        apt_conf.write(line + "\n")

        # Minimal dpkg database skeleton so dpkg/apt can operate on the
        # target rootfs.
        target_dpkg_dir = "%s/var/lib/dpkg" % self.target_rootfs
        bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "info"))

        bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "updates"))

        if not os.path.exists(os.path.join(target_dpkg_dir, "status")):
            open(os.path.join(target_dpkg_dir, "status"), "w+").close()
        if not os.path.exists(os.path.join(target_dpkg_dir, "available")):
            open(os.path.join(target_dpkg_dir, "available"), "w+").close()
1748 | |||
1749 | def remove_packaging_data(self): | ||
1750 | bb.utils.remove(os.path.join(self.target_rootfs, | ||
1751 | self.d.getVar('opkglibdir', True)), True) | ||
1752 | bb.utils.remove(self.target_rootfs + "/var/lib/dpkg/", True) | ||
1753 | |||
1754 | def fix_broken_dependencies(self): | ||
1755 | os.environ['APT_CONFIG'] = self.apt_conf_file | ||
1756 | |||
1757 | cmd = "%s %s -f install" % (self.apt_get_cmd, self.apt_args) | ||
1758 | |||
1759 | try: | ||
1760 | subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) | ||
1761 | except subprocess.CalledProcessError as e: | ||
1762 | bb.fatal("Cannot fix broken dependencies. Command '%s' " | ||
1763 | "returned %d:\n%s" % (cmd, e.returncode, e.output)) | ||
1764 | |||
    def list_installed(self, format=None):
        """List the packages installed in the target rootfs."""
        # 'format' is accepted for API symmetry with the other package
        # manager backends but is not used by the dpkg listing class.
        return DpkgPkgsList(self.d, self.target_rootfs).list()
1767 | |||
1768 | |||
def generate_index_files(d):
    """Write package feed indexes for every backend named in
    PACKAGE_CLASSES (rpm/ipk/deb); abort the build if an indexer fails."""
    classes = d.getVar('PACKAGE_CLASSES', True).replace("package_", "").split()

    indexer_map = {
        "rpm": (RpmIndexer, d.getVar('DEPLOY_DIR_RPM', True)),
        "ipk": (OpkgIndexer, d.getVar('DEPLOY_DIR_IPK', True)),
        "deb": (DpkgIndexer, d.getVar('DEPLOY_DIR_DEB', True))
    }

    result = None

    for pkg_class in classes:
        if pkg_class not in indexer_map:
            continue

        indexer_cls, deploy_dir = indexer_map[pkg_class]
        # Only index feeds that have actually been deployed.
        if os.path.exists(deploy_dir):
            result = indexer_cls(d, deploy_dir).write_index()

        if result is not None:
            bb.fatal(result)
1789 | |||
1790 | if __name__ == "__main__": | ||
1791 | """ | ||
1792 | We should be able to run this as a standalone script, from outside bitbake | ||
1793 | environment. | ||
1794 | """ | ||
1795 | """ | ||
1796 | TBD | ||
1797 | """ | ||
diff --git a/meta/lib/oe/packagedata.py b/meta/lib/oe/packagedata.py new file mode 100644 index 0000000000..cd5f0445f5 --- /dev/null +++ b/meta/lib/oe/packagedata.py | |||
@@ -0,0 +1,94 @@ | |||
1 | import codecs | ||
2 | |||
def packaged(pkg, d):
    """Return True if package 'pkg' has been packaged (a readable
    .packaged stamp exists next to its runtime pkgdata)."""
    stamp = get_subpkgedata_fn(pkg, d) + '.packaged'
    return os.access(stamp, os.R_OK)
5 | |||
def read_pkgdatafile(fn):
    """
    Parse a pkgdata file into a dict.

    Each line has the form "KEY: value"; values were written
    backslash-escaped, so they are unescaped here.  Returns an empty dict
    when the file is unreadable.
    """
    import re

    pkgdata = {}

    def decode(s):
        # Values are stored escaped (e.g. "\n" as two characters).  The
        # "string_escape" codec only exists on Python 2; fall back to
        # "unicode_escape" so the parser also works on Python 3.
        try:
            return codecs.decode(s, "string_escape")
        except LookupError:
            return codecs.decode(s, "unicode_escape")

    if os.access(fn, os.R_OK):
        # Compile once instead of on every call/line.
        line_re = re.compile(r"([^:]+):\s*(.*)")
        # Context manager guarantees the handle is closed.
        with open(fn, 'r') as f:
            for line in f:
                m = line_re.match(line)
                if m:
                    pkgdata[m.group(1)] = decode(m.group(2))

    return pkgdata
25 | |||
def get_subpkgedata_fn(pkg, d):
    """Return the runtime pkgdata file path for package 'pkg'."""
    return d.expand('${PKGDATA_DIR}/runtime/' + pkg)
28 | |||
def has_subpkgdata(pkg, d):
    """Return True if readable runtime pkgdata exists for package 'pkg'."""
    fn = get_subpkgedata_fn(pkg, d)
    return os.access(fn, os.R_OK)
31 | |||
def read_subpkgdata(pkg, d):
    """Parse and return the runtime pkgdata dict for package 'pkg'."""
    fn = get_subpkgedata_fn(pkg, d)
    return read_pkgdatafile(fn)
34 | |||
def has_pkgdata(pn, d):
    """Return True if recipe-level pkgdata exists for recipe 'pn'."""
    return os.access(d.expand('${PKGDATA_DIR}/%s' % pn), os.R_OK)
38 | |||
def read_pkgdata(pn, d):
    """Parse and return the recipe-level pkgdata dict for recipe 'pn'."""
    return read_pkgdatafile(d.expand('${PKGDATA_DIR}/%s' % pn))
42 | |||
43 | # | ||
44 | # Collapse FOO_pkg variables into FOO | ||
45 | # | ||
46 | def read_subpkgdata_dict(pkg, d): | ||
47 | ret = {} | ||
48 | subd = read_pkgdatafile(get_subpkgedata_fn(pkg, d)) | ||
49 | for var in subd: | ||
50 | newvar = var.replace("_" + pkg, "") | ||
51 | if newvar == var and var + "_" + pkg in subd: | ||
52 | continue | ||
53 | ret[newvar] = subd[var] | ||
54 | return ret | ||
55 | |||
def _pkgmap(d):
    """Return a dictionary mapping package to recipe name."""

    pkgdatadir = d.getVar("PKGDATA_DIR", True)

    try:
        entries = os.listdir(pkgdatadir)
    except OSError:
        bb.warn("No files in %s?" % pkgdatadir)
        entries = []

    pkgmap = {}
    for pn in entries:
        # Recipe-level pkgdata are the plain files; skip subdirectories
        # (e.g. runtime/).
        if os.path.isdir(os.path.join(pkgdatadir, pn)):
            continue
        try:
            pkgdata = read_pkgdatafile(os.path.join(pkgdatadir, pn))
        except OSError:
            continue

        for pkg in (pkgdata.get("PACKAGES") or "").split():
            pkgmap[pkg] = pn

    return pkgmap
79 | |||
def pkgmap(d):
    """Return a dictionary mapping package to recipe name.
    Cache the mapping in the metadata"""

    cached = d.getVar("__pkgmap_data", False)
    if cached is None:
        cached = _pkgmap(d)
        d.setVar("__pkgmap_data", cached)
    return cached
90 | |||
def recipename(pkg, d):
    """Return the recipe name for the given binary package name (None if
    the package is unknown)."""
    mapping = pkgmap(d)
    return mapping.get(pkg)
diff --git a/meta/lib/oe/packagegroup.py b/meta/lib/oe/packagegroup.py new file mode 100644 index 0000000000..12eb4212ff --- /dev/null +++ b/meta/lib/oe/packagegroup.py | |||
@@ -0,0 +1,36 @@ | |||
1 | import itertools | ||
2 | |||
def is_optional(feature, d):
    """Return True if 'feature's package list carries the 'optional' flag.

    FEATURE_PACKAGES_<feature> takes precedence; the legacy
    PACKAGE_GROUP_<feature> variable is consulted otherwise."""
    if d.getVar("FEATURE_PACKAGES_%s" % feature, True):
        flag = d.getVarFlag("FEATURE_PACKAGES_%s" % feature, "optional")
    else:
        flag = d.getVarFlag("PACKAGE_GROUP_%s" % feature, "optional")
    return bool(flag)
9 | |||
def packages(features, d):
    """Yield every package listed for the given features, preferring
    FEATURE_PACKAGES_<feature> over the legacy PACKAGE_GROUP_<feature>."""
    for feature in features:
        pkglist = (d.getVar("FEATURE_PACKAGES_%s" % feature, True)
                   or d.getVar("PACKAGE_GROUP_%s" % feature, True)
                   or "")
        for pkg in pkglist.split():
            yield pkg
17 | |||
def required_packages(features, d):
    """Return the packages of all non-optional features."""
    mandatory = [feature for feature in features
                 if not is_optional(feature, d)]
    return packages(mandatory, d)
21 | |||
def optional_packages(features, d):
    """Return the packages of all optional features."""
    opt_features = [feature for feature in features
                    if is_optional(feature, d)]
    return packages(opt_features, d)
25 | |||
def active_packages(features, d):
    """Return an iterator over all packages of the given features:
    required packages first, then optional ones."""
    streams = (required_packages(features, d),
               optional_packages(features, d))
    return itertools.chain(*streams)
29 | |||
def active_recipes(features, d):
    """Yield the recipe name providing each active package (packages with
    no known recipe are skipped)."""
    import oe.packagedata

    for pkg_name in active_packages(features, d):
        provider = oe.packagedata.recipename(pkg_name, d)
        if not provider:
            continue
        yield provider
diff --git a/meta/lib/oe/patch.py b/meta/lib/oe/patch.py new file mode 100644 index 0000000000..b085c9d6b5 --- /dev/null +++ b/meta/lib/oe/patch.py | |||
@@ -0,0 +1,447 @@ | |||
1 | import oe.path | ||
2 | |||
class NotFoundError(bb.BBHandledException):
    """Raised when a required path does not exist."""

    def __init__(self, path):
        self.path = path

    def __str__(self):
        return "Error: {0} not found.".format(self.path)
9 | |||
class CmdError(bb.BBHandledException):
    """Raised when a shell command exits with a non-zero status."""

    def __init__(self, exitstatus, output):
        self.status = exitstatus
        self.output = output

    def __str__(self):
        return "Command Error: exit status: {0} Output:\n{1}".format(
            self.status, self.output)
17 | |||
18 | |||
def runcmd(args, dir = None):
    """
    Shell-quote 'args', join them into a command line and run it,
    optionally from directory 'dir' (the previous cwd is restored
    afterwards).

    Returns the command's combined output; raises NotFoundError when
    'dir' does not exist and CmdError on a non-zero exit status.
    """
    import pipes

    if dir:
        olddir = os.path.abspath(os.curdir)
        if not os.path.exists(dir):
            raise NotFoundError(dir)
        os.chdir(dir)
        # print("cwd: %s -> %s" % (olddir, dir))

    try:
        args = [ pipes.quote(str(arg)) for arg in args ]
        cmd = " ".join(args)
        # print("cmd: %s" % cmd)
        (exitstatus, output) = oe.utils.getstatusoutput(cmd)
        if exitstatus != 0:
            # NOTE(review): the >> 8 implies getstatusoutput returns a raw
            # wait status (exit code in the high byte) -- confirm against
            # oe.utils.getstatusoutput before changing.
            raise CmdError(exitstatus >> 8, output)
        return output

    finally:
        # Restore the caller's working directory even on failure.
        if dir:
            os.chdir(olddir)
41 | |||
class PatchError(Exception):
    """Generic patch application/refresh failure."""

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return "Patch Error: {0}".format(self.msg)
48 | |||
class PatchSet(object):
    """
    Abstract base class for an ordered collection of patches applied to a
    source tree in 'dir'.  Patches are plain dicts with keys such as
    'file', 'remote', 'strippath' and 'filemd5'; subclasses provide the
    actual apply/unapply machinery (Clean/Push/Pop/Refresh).
    """
    defaults = {
        # Default -p strip level for patch application.
        # NOTE(review): stored as an int, but some subclasses concatenate
        # strippath with strings (see PatchTree._appendPatchFile) --
        # confirm callers normally supply it as a string.
        "strippath": 1
    }

    def __init__(self, dir, d):
        self.dir = dir
        self.d = d
        # Ordered list of patch dicts; _current is the index of the
        # topmost applied patch (None when nothing is applied).
        self.patches = []
        self._current = None

    def current(self):
        # Index of the topmost applied patch, or None.
        return self._current

    def Clean(self):
        """
        Clean out the patch set. Generally includes unapplying all
        patches and wiping out all associated metadata.
        """
        raise NotImplementedError()

    def Import(self, patch, force):
        """Validate 'patch', fill in defaults and record its md5 sum;
        subclasses extend this to actually register the patch."""
        if not patch.get("file"):
            if not patch.get("remote"):
                raise PatchError("Patch file must be specified in patch import.")
            else:
                # Fetch the remote patch to a local path first.
                patch["file"] = bb.fetch2.localpath(patch["remote"], self.d)

        for param in PatchSet.defaults:
            if not patch.get(param):
                patch[param] = PatchSet.defaults[param]

        # For remote patches the local path is (re)computed with variable
        # expansion applied.
        if patch.get("remote"):
            patch["file"] = bb.data.expand(bb.fetch2.localpath(patch["remote"], self.d), self.d)

        patch["filemd5"] = bb.utils.md5_file(patch["file"])

    def Push(self, force):
        raise NotImplementedError()

    def Pop(self, force):
        raise NotImplementedError()

    def Refresh(self, remote = None, all = None):
        raise NotImplementedError()
94 | |||
95 | |||
class PatchTree(PatchSet):
    """
    PatchSet applied with patch(1), tracked in <dir>/patches/series as
    "filename,strippath" lines with the patch files copied alongside.
    """
    def __init__(self, dir, d):
        PatchSet.__init__(self, dir, d)
        self.patchdir = os.path.join(self.dir, 'patches')
        self.seriespath = os.path.join(self.dir, 'patches', 'series')
        bb.utils.mkdirhier(self.patchdir)

    def _appendPatchFile(self, patch, strippath):
        # Record the applied patch in the series file and keep a copy of
        # the patch next to it.
        # NOTE(review): "," + strippath assumes strippath is a string, but
        # PatchSet.defaults supplies an int (1) -- confirm callers.
        with open(self.seriespath, 'a') as f:
            f.write(os.path.basename(patch) + "," + strippath + "\n")
        shellcmd = ["cat", patch, ">" , self.patchdir + "/" + os.path.basename(patch)]
        runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)

    def _removePatch(self, p):
        # 'p' is a "file,strippath" series entry; apply it in reverse.
        patch = {}
        patch['file'] = p.split(",")[0]
        patch['strippath'] = p.split(",")[1]
        self._applypatch(patch, False, True)

    def _removePatchFile(self, all = False):
        # Unapply the topmost patch (or all of them) and rewrite the
        # series file accordingly.
        if not os.path.exists(self.seriespath):
            return
        patches = open(self.seriespath, 'r+').readlines()
        if all:
            for p in reversed(patches):
                self._removePatch(os.path.join(self.patchdir, p.strip()))
            patches = []
        else:
            self._removePatch(os.path.join(self.patchdir, patches[-1].strip()))
            patches.pop()
        with open(self.seriespath, 'w') as f:
            for p in patches:
                f.write(p)

    def Import(self, patch, force = None):
        """Insert 'patch' into the in-memory list just after the current
        patch (it is not applied here)."""
        PatchSet.Import(self, patch, force)

        if self._current is not None:
            i = self._current + 1
        else:
            i = 0
        self.patches.insert(i, patch)

    def _applypatch(self, patch, force = False, reverse = False, run = True):
        # Apply (or with reverse=True, unapply) a single patch with
        # patch(1); a dry run is attempted first unless force is set.
        shellcmd = ["cat", patch['file'], "|", "patch", "-p", patch['strippath']]
        if reverse:
            shellcmd.append('-R')

        if not run:
            # NOTE(review): this concatenation produces "sh-c<cmd>" with no
            # separators rather than a runnable command line -- looks
            # broken; see also GitApplyTree and UserResolver.Resolve.
            return "sh" + "-c" + " ".join(shellcmd)

        if not force:
            shellcmd.append('--dry-run')

        output = runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)

        if force:
            return

        # Dry run succeeded: drop --dry-run and apply for real.
        shellcmd.pop(len(shellcmd) - 1)
        output = runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)

        if not reverse:
            self._appendPatchFile(patch['file'], patch['strippath'])

        return output

    def Push(self, force = False, all = False, run = True):
        """Apply the next patch, or all remaining patches when all=True."""
        # NOTE(review): the 'run' argument is accepted but never forwarded
        # to _applypatch, and the all=True branch sets _current to the
        # patch dict rather than its index -- confirm before relying on
        # either behaviour.
        bb.note("self._current is %s" % self._current)
        bb.note("patches is %s" % self.patches)
        if all:
            for i in self.patches:
                bb.note("applying patch %s" % i)
                self._applypatch(i, force)
                self._current = i
        else:
            if self._current is not None:
                next = self._current + 1
            else:
                next = 0

            bb.note("applying patch %s" % self.patches[next])
            ret = self._applypatch(self.patches[next], force)

            self._current = next
            return ret

    def Pop(self, force = None, all = None):
        """Unapply the topmost patch, or all patches when all=True."""
        if all:
            self._removePatchFile(True)
            self._current = None
        else:
            self._removePatchFile(False)

        if self._current == 0:
            self._current = None

        if self._current is not None:
            self._current = self._current - 1

    def Clean(self):
        """Unapply everything tracked in the series file."""
        self.Pop(all=True)
200 | |||
class GitApplyTree(PatchTree):
    """
    PatchTree variant that applies patches with git: "git am -3" first,
    falling back to "git apply" when am fails.
    """
    def __init__(self, dir, d):
        PatchTree.__init__(self, dir, d)

    def _applypatch(self, patch, force = False, reverse = False, run = True):
        def _applypatchhelper(shellcmd, patch, force = False, reverse = False, run = True):
            if reverse:
                shellcmd.append('-R')

            shellcmd.append(patch['file'])

            if not run:
                # NOTE(review): same missing-separator concatenation as
                # PatchTree._applypatch ("sh-c<cmd>") -- looks broken.
                return "sh" + "-c" + " ".join(shellcmd)

            return runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)

        try:
            # Prefer a 3-way merge apply; it preserves authorship metadata.
            shellcmd = ["git", "--work-tree=.", "am", "-3", "-p%s" % patch['strippath']]
            return _applypatchhelper(shellcmd, patch, force, reverse, run)
        except CmdError:
            # Fall back to a plain apply for patches "git am" rejects.
            shellcmd = ["git", "--git-dir=.", "apply", "-p%s" % patch['strippath']]
            return _applypatchhelper(shellcmd, patch, force, reverse, run)
223 | |||
224 | |||
class QuiltTree(PatchSet):
    """
    PatchSet implementation backed by the quilt(1) tool: patches live in
    <dir>/patches with a quilt-format series file.
    """
    def _runcmd(self, args, run = True):
        # Always use the distro-provided quiltrc so behaviour does not
        # depend on the user's own quilt configuration.
        quiltrc = self.d.getVar('QUILTRCFILE', True)
        if not run:
            return ["quilt"] + ["--quiltrc"] + [quiltrc] + args
        runcmd(["quilt"] + ["--quiltrc"] + [quiltrc] + args, self.dir)

    def _quiltpatchpath(self, file):
        return os.path.join(self.dir, "patches", os.path.basename(file))


    def __init__(self, dir, d):
        PatchSet.__init__(self, dir, d)
        self.initialized = False
        p = os.path.join(self.dir, 'patches')
        if not os.path.exists(p):
            os.makedirs(p)

    def Clean(self):
        """Unapply all patches and remove the series file."""
        try:
            self._runcmd(["pop", "-a", "-f"])
            oe.path.remove(os.path.join(self.dir, "patches", "series"))
        except Exception:
            # Best effort: nothing applied / no series file yet is fine.
            pass
        self.initialized = True

    def InitFromDir(self):
        """Populate self.patches and self._current from an existing quilt
        tree on disk."""
        # read series -> self.patches
        seriespath = os.path.join(self.dir, 'patches', 'series')
        if not os.path.exists(self.dir):
            raise NotFoundError(self.dir)
        if os.path.exists(seriespath):
            # Was file(seriespath, 'r'): file() does not exist on Python 3
            # and the handle was never closed.
            with open(seriespath, 'r') as series:
                for line in series.readlines():
                    patch = {}
                    parts = line.strip().split()
                    patch["quiltfile"] = self._quiltpatchpath(parts[0])
                    patch["quiltfilemd5"] = bb.utils.md5_file(patch["quiltfile"])
                    if len(parts) > 1:
                        # Series entries look like "name.patch -pN".
                        patch["strippath"] = parts[1][2:]
                    self.patches.append(patch)

        # determine which patches are applied -> self._current
        try:
            output = runcmd(["quilt", "applied"], self.dir)
        except CmdError as e:
            # Was sys.exc_value, which is Python 2 only (and unreliable
            # with nested exceptions); inspect the caught object directly.
            if e.output.strip() == "No patches applied":
                return
            else:
                raise
        output = [val for val in output.split('\n') if not val.startswith('#')]
        for patch in self.patches:
            if os.path.basename(patch["quiltfile"]) == output[-1]:
                self._current = self.patches.index(patch)
        self.initialized = True

    def Import(self, patch, force = None):
        """Register 'patch' with quilt (symlink it into patches/ and append
        it to the series file) without applying it."""
        if not self.initialized:
            self.InitFromDir()
        PatchSet.Import(self, patch, force)
        oe.path.symlink(patch["file"], self._quiltpatchpath(patch["file"]), force=True)
        with open(os.path.join(self.dir, "patches", "series"), "a") as f:
            f.write(os.path.basename(patch["file"]) + " -p" + patch["strippath"] + "\n")
        patch["quiltfile"] = self._quiltpatchpath(patch["file"])
        patch["quiltfilemd5"] = bb.utils.md5_file(patch["quiltfile"])

        # TODO: determine if the file being imported:
        #      1) is already imported, and is the same
        #      2) is already imported, but differs

        self.patches.insert(self._current or 0, patch)


    def Push(self, force = False, all = False, run = True):
        """Apply the next patch (or all remaining ones) via "quilt push"."""
        # quilt push [-f]

        args = ["push"]
        if force:
            args.append("-f")
        if all:
            args.append("-a")
        if not run:
            return self._runcmd(args, run)

        self._runcmd(args)

        if self._current is not None:
            self._current = self._current + 1
        else:
            self._current = 0

    def Pop(self, force = None, all = None):
        """Unapply the topmost patch (or all of them) via "quilt pop"."""
        # quilt pop [-f]
        args = ["pop"]
        if force:
            args.append("-f")
        if all:
            args.append("-a")

        self._runcmd(args)

        if self._current == 0:
            self._current = None

        if self._current is not None:
            self._current = self._current - 1

    def Refresh(self, **kwargs):
        """Refresh a patch: with remote=True copy the quilt-refreshed file
        back to its original location, otherwise run "quilt refresh"."""
        if kwargs.get("remote"):
            patch = self.patches[kwargs["patch"]]
            if not patch:
                raise PatchError("No patch found at index %s in patchset." % kwargs["patch"])
            (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(patch["remote"])
            if type == "file":
                import shutil
                if not patch.get("file") and patch.get("remote"):
                    patch["file"] = bb.fetch2.localpath(patch["remote"], self.d)

                shutil.copyfile(patch["quiltfile"], patch["file"])
            else:
                raise PatchError("Unable to do a remote refresh of %s, unsupported remote url scheme %s." % (os.path.basename(patch["quiltfile"]), type))
        else:
            # quilt refresh
            args = ["refresh"]
            if kwargs.get("quiltfile"):
                args.append(os.path.basename(kwargs["quiltfile"]))
            elif kwargs.get("patch"):
                args.append(os.path.basename(self.patches[kwargs["patch"]]["quiltfile"]))
            self._runcmd(args)
357 | |||
class Resolver(object):
    """
    Abstract interface for resolving failed patch applications; concrete
    implementations are NOOPResolver and UserResolver below.
    """
    def __init__(self, patchset, terminal):
        raise NotImplementedError()

    def Resolve(self):
        raise NotImplementedError()

    def Revert(self):
        raise NotImplementedError()

    def Finalize(self):
        raise NotImplementedError()
370 | |||
class NOOPResolver(Resolver):
    """
    Resolver that simply attempts to apply the patch set and re-raises on
    failure; no interactive resolution is performed.
    """
    def __init__(self, patchset, terminal):
        self.patchset = patchset
        self.terminal = terminal

    def Resolve(self):
        """Apply the patch set from its own directory, restoring the
        previous working directory afterwards."""
        olddir = os.path.abspath(os.curdir)
        os.chdir(self.patchset.dir)
        try:
            self.patchset.Push()
        finally:
            # Previously the old cwd was restored only on failure, leaking
            # the chdir on success; restore it unconditionally (matching
            # UserResolver.Resolve).
            os.chdir(olddir)
385 | |||
# Patch resolver which relies on the user doing all the work involved in the
# resolution, with the exception of refreshing the remote copy of the patch
# files (the urls).
class UserResolver(Resolver):
    def __init__(self, patchset, terminal):
        self.patchset = patchset
        self.terminal = terminal

    # Force a push in the patchset, then drop to a shell for the user to
    # resolve any rejected hunks
    def Resolve(self):
        olddir = os.path.abspath(os.curdir)
        os.chdir(self.patchset.dir)
        try:
            self.patchset.Push(False)
        except CmdError as v:
            # Patch application failed
            # Re-request the failing push as a command line (run=False).
            # NOTE(review): PatchTree.Push ignores 'run' and its run=False
            # _applypatch path returns a malformed string; the
            # " ".join(patchcmd) below assumes a list -- joined over a
            # string it space-separates every character.  Works only for
            # patchset classes returning a proper command list.
            patchcmd = self.patchset.Push(True, False, False)

            t = self.patchset.d.getVar('T', True)
            if not t:
                bb.msg.fatal("Build", "T not set")
            bb.utils.mkdirhier(t)
            import random
            # Unique rcfile so concurrent resolutions don't collide.
            rcfile = "%s/bashrc.%s.%s" % (t, str(os.getpid()), random.random())
            f = open(rcfile, "w")
            f.write("echo '*** Manual patch resolution mode ***'\n")
            f.write("echo 'Dropping to a shell, so patch rejects can be fixed manually.'\n")
            f.write("echo 'Run \"quilt refresh\" when patch is corrected, press CTRL+D to exit.'\n")
            f.write("echo ''\n")
            f.write(" ".join(patchcmd) + "\n")
            f.close()
            # NOTE(review): 0775 is Python 2 octal syntax (0o775 on
            # Python 3) -- this file is Python 2 only as written.
            os.chmod(rcfile, 0775)

            self.terminal("bash --rcfile " + rcfile, 'Patch Rejects: Please fix patch rejects manually', self.patchset.d)

            # Construct a new PatchSet after the user's changes, compare the
            # sets, checking patches for modifications, and doing a remote
            # refresh on each.
            oldpatchset = self.patchset
            self.patchset = oldpatchset.__class__(self.patchset.dir, self.patchset.d)

            for patch in self.patchset.patches:
                oldpatch = None
                for opatch in oldpatchset.patches:
                    if opatch["quiltfile"] == patch["quiltfile"]:
                        oldpatch = opatch

                if oldpatch:
                    patch["remote"] = oldpatch["remote"]
                    if patch["quiltfile"] == oldpatch["quiltfile"]:
                        if patch["quiltfilemd5"] != oldpatch["quiltfilemd5"]:
                            bb.note("Patch %s has changed, updating remote url %s" % (os.path.basename(patch["quiltfile"]), patch["remote"]))
                            # user change?  remote refresh
                            self.patchset.Refresh(remote=True, patch=self.patchset.patches.index(patch))
                        else:
                            # User did not fix the problem.  Abort.
                            raise PatchError("Patch application failed, and user did not fix and refresh the patch.")
        except Exception:
            os.chdir(olddir)
            raise
        os.chdir(olddir)
diff --git a/meta/lib/oe/path.py b/meta/lib/oe/path.py new file mode 100644 index 0000000000..413ebfb395 --- /dev/null +++ b/meta/lib/oe/path.py | |||
@@ -0,0 +1,243 @@ | |||
1 | import errno | ||
2 | import glob | ||
3 | import shutil | ||
4 | import subprocess | ||
5 | import os.path | ||
6 | |||
def join(*paths):
    """Join path components with '/', unlike os.path.join an absolute
    right-hand component does not discard the components before it."""
    joined = "/".join(paths)
    return os.path.normpath(joined)
10 | |||
def relative(src, dest):
    """Return the relative path that leads from 'src' to 'dest'.

    >>> relative("/usr/bin", "/tmp/foo/bar")
    ../../tmp/foo/bar

    >>> relative("/usr/bin", "/usr/lib")
    ../lib

    >>> relative("/tmp", "/tmp/foo/bar")
    foo/bar
    """
    # Thin wrapper kept for API compatibility; note the argument order
    # is swapped with respect to os.path.relpath(path, start).
    return os.path.relpath(dest, src)
25 | |||
def make_relative_symlink(path):
    """ Convert an absolute symlink at 'path' into a relative one.

    No-op when 'path' is not a symlink or its target is already
    relative.  The link is re-created pointing at the same target,
    expressed relative to the link's own directory.
    """
    if not os.path.islink(path):
        return
    link = os.readlink(path)
    if not os.path.isabs(link):
        return

    # Compute the target relative to the directory containing the link.
    # os.path.relpath works on whole path components, which fixes two
    # defects of the previous prefix-string scan: a partial component
    # could match (e.g. '/a/b' prefixing '/a/bc'), and '/' was never
    # considered as the common ancestor, so links whose only shared
    # ancestor was the root were left absolute with an error printed.
    base = os.path.relpath(link, os.path.dirname(path))

    os.remove(path)
    os.symlink(base, path)
52 | |||
def format_display(path, metadata):
    """ Prepare a path for display to the user. """
    # Prefer the form relative to TOPDIR when it is shorter than the
    # absolute path, otherwise keep the absolute form.
    topdir = metadata.getVar("TOPDIR", True)
    rel = os.path.relpath(path, topdir)
    return rel if len(rel) <= len(path) else path
60 | |||
def copytree(src, dst):
    """Copy the directory tree 'src' into 'dst' (created if missing).

    We could use something like shutil.copytree here but it turns out to
    to be slow. It takes twice as long copying to an empty directory.
    If dst already has contents performance can be 15 time slower
    This way we also preserve hardlinks between files in the tree.
    """
    # NOTE(review): 'bb' is not imported by this module; it appears to be
    # importable in the calling BitBake environment — confirm.
    bb.utils.mkdirhier(dst)
    # tar -p preserves permissions; piping avoids an intermediate archive
    cmd = 'tar -cf - -C %s -p . | tar -xf - -C %s' % (src, dst)
    check_output(cmd, shell=True, stderr=subprocess.STDOUT)
70 | |||
def copyhardlinktree(src, dst):
    """ Make the hard link when possible, otherwise copy. """
    bb.utils.mkdirhier(dst)
    # nothing to do for an empty source directory
    if os.path.isdir(src) and not len(os.listdir(src)):
        return

    # hardlinking only works within one filesystem
    if (os.stat(src).st_dev == os.stat(dst).st_dev):
        # Need to copy directories only with tar first since cp will error if two
        # writers try and create a directory at the same time
        cmd = 'cd %s; find . -type d -print | tar -cf - -C %s -p --files-from - --no-recursion | tar -xf - -C %s' % (src, src, dst)
        check_output(cmd, shell=True, stderr=subprocess.STDOUT)
        # cpio -l creates hard links to the originals instead of copying
        cmd = 'cd %s; find . -print0 | cpio --null -pdlu %s' % (src, dst)
        check_output(cmd, shell=True, stderr=subprocess.STDOUT)
    else:
        copytree(src, dst)
86 | |||
def remove(path, recurse=True):
    """Equivalent to rm -f or rm -rf"""
    # 'path' may be a glob pattern; every match is deleted.  Directories
    # are detected by the EISDIR failure of unlink (EAFP) and removed
    # recursively when 'recurse' is set; vanished entries are ignored.
    for entry in glob.glob(path):
        try:
            os.unlink(entry)
        except OSError as exc:
            if exc.errno == errno.ENOENT:
                continue
            if recurse and exc.errno == errno.EISDIR:
                shutil.rmtree(entry)
            else:
                raise
97 | |||
def symlink(source, destination, force=False):
    """Create a symbolic link"""
    # With force=True any existing destination is removed first.  An
    # EEXIST failure is tolerated only when the existing link already
    # points at 'source'; anything else propagates.
    try:
        if force:
            remove(destination)
        os.symlink(source, destination)
    except OSError as e:
        if e.errno == errno.EEXIST and os.readlink(destination) == source:
            return
        raise
107 | |||
class CalledProcessError(Exception):
    """Raised by check_output() when a command exits non-zero.

    Mirrors subprocess.CalledProcessError (which check_output() here
    predates) but also carries the captured output.
    """

    def __init__(self, retcode, cmd, output=None):
        self.retcode = retcode   # exit status of the command
        self.cmd = cmd           # command that was executed
        self.output = output     # captured stdout (and stderr if redirected)

    def __str__(self):
        return "Command '%s' returned non-zero exit status %d with output %s" % (
            self.cmd, self.retcode, self.output)
115 | |||
# Not needed when we move to python 2.7
def check_output(*popenargs, **kwargs):
    r"""Run a command and return its standard output as a byte string.

    A CalledProcessError (carrying the exit status and the captured
    output) is raised if the command exits with a non-zero status.
    Arguments are the same as for subprocess.Popen; pass
    stderr=subprocess.STDOUT to fold standard error into the result.
    Stand-in for subprocess.check_output, which exists from 2.7 on.
    """
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
    output = process.communicate()[0]
    retcode = process.poll()
    if retcode == 0:
        return output
    # reconstruct the command for the error report
    cmd = kwargs.get("args")
    if cmd is None:
        cmd = popenargs[0]
    raise CalledProcessError(retcode, cmd, output=output)
148 | |||
def find(dir, **walkoptions):
    """Recurse into 'dir' and yield every file found, joined with the
    root it was found under.  Extra keyword arguments are forwarded to
    os.walk()."""
    for root, _, filenames in os.walk(dir, **walkoptions):
        for entry in filenames:
            yield os.path.join(root, entry)
156 | |||
157 | |||
## realpath() related functions
def __is_path_below(file, root):
    # 'root' ends in os.path.sep (realpath() guarantees this);
    # suffixing 'file' with a separator makes an exact match of the
    # root directory itself count as "below" as well.
    suffixed = file + os.path.sep
    return suffixed.startswith(root)
161 | |||
def __realpath_rel(start, rel_path, root, loop_cnt, assume_dir):
    """Calculates real path of symlink 'start' + 'rel_path' below
    'root'; no part of 'start' below 'root' must contain symlinks.

    Walks 'rel_path' one component at a time, resolving each step with
    __realpath(); 'loop_cnt' bounds the symlink indirections and
    'assume_dir' suppresses the missing-directory check."""
    have_dir = True

    for d in rel_path.split(os.path.sep):
        if not have_dir and not assume_dir:
            # the previous component resolved to a non-directory, so
            # descending any further cannot succeed
            raise OSError(errno.ENOENT, "no such directory %s" % start)

        if d == os.path.pardir: # '..'
            if len(start) >= len(root):
                # do not follow '..' before root
                start = os.path.dirname(start)
            else:
                # emit warning?
                pass
        else:
            (start, have_dir) = __realpath(os.path.join(start, d),
                                           root, loop_cnt, assume_dir)

    # resolution must never escape the virtual root
    assert(__is_path_below(start, root))

    return start
185 | |||
def __realpath(file, root, loop_cnt, assume_dir):
    """Resolve symlinks of 'file' (which must lie below 'root'),
    treating 'root' as the filesystem root.  Returns a tuple
    (resolved_path, is_directory).  'loop_cnt' bounds the number of
    link indirections followed before ELOOP is raised."""
    while os.path.islink(file) and len(file) >= len(root):
        if loop_cnt == 0:
            raise OSError(errno.ELOOP, file)

        loop_cnt -= 1
        target = os.path.normpath(os.readlink(file))

        if not os.path.isabs(target):
            # relative link: resolve against the link's own directory
            tdir = os.path.dirname(file)
            assert(__is_path_below(tdir, root))
        else:
            # absolute link: re-anchor at the virtual root
            tdir = root

        file = __realpath_rel(tdir, target, root, loop_cnt, assume_dir)

    try:
        is_dir = os.path.isdir(file)
    except OSError:
        # bug fix: this previously assigned the undefined name 'false'
        # (a NameError), and used a bare 'except'; report "not a dir"
        # on an OS-level failure instead.
        is_dir = False

    return (file, is_dir)
208 | |||
def realpath(file, root, use_physdir = True, loop_cnt = 100, assume_dir = False):
    """ Returns the canonical path of 'file' with assuming a
    toplevel 'root' directory. When 'use_physdir' is set, all
    preceding path components of 'file' will be resolved first;
    this flag should be set unless it is guaranteed that there is
    no symlink in the path. When 'assume_dir' is not set, missing
    path components will raise an ENOENT error"""

    root = os.path.normpath(root)
    file = os.path.normpath(file)

    if not root.endswith(os.path.sep):
        # letting root end with '/' makes some things easier
        root = root + os.path.sep

    # refuse paths that do not live under the virtual root
    if not __is_path_below(file, root):
        raise OSError(errno.EINVAL, "file '%s' is not below root" % file)

    try:
        if use_physdir:
            # resolve every component of the path, starting at root
            file = __realpath_rel(root, file[(len(root) - 1):], root, loop_cnt, assume_dir)
        else:
            # only resolve the final component (cheaper)
            file = __realpath(file, root, loop_cnt, assume_dir)[0]
    except OSError as e:
        if e.errno == errno.ELOOP:
            # make ELOOP more readable; without catching it, there will
            # be printed a backtrace with 100s of OSError exceptions
            # else
            raise OSError(errno.ELOOP,
                          "too much recursions while resolving '%s'; loop in '%s'" %
                          (file, e.strerror))

        raise

    return file
diff --git a/meta/lib/oe/prservice.py b/meta/lib/oe/prservice.py new file mode 100644 index 0000000000..b0cbcb1fbc --- /dev/null +++ b/meta/lib/oe/prservice.py | |||
@@ -0,0 +1,126 @@ | |||
1 | |||
def prserv_make_conn(d, check = False):
    """Open a connection to the PR service named by PRSERV_HOST
    ("host:port"), optionally pinging it first when 'check' is set.
    The connection is cached in the datastore as __PRSERV_CONN and
    returned; any failure aborts the build via bb.fatal()."""
    import prserv.serv
    # list() so the result stays indexable under Python 3's lazy filter
    host_params = list(filter(None, (d.getVar("PRSERV_HOST", True) or '').split(':')))
    try:
        conn = None
        conn = prserv.serv.PRServerConnection(host_params[0], int(host_params[1]))
        if check:
            if not conn.ping():
                raise Exception('service not available')
        d.setVar("__PRSERV_CONN",conn)
    except Exception as exc:
        # bug fix: 'except Exception, exc' is Python-2-only syntax;
        # 'as' works on both 2.6+ and 3.x
        bb.fatal("Connecting to PR service %s:%s failed: %s" % (host_params[0], host_params[1], str(exc)))

    return conn
16 | |||
def prserv_dump_db(d):
    """Export the PR database from the network PR service.

    Returns whatever the server's export call produces, or None when no
    PRSERV_HOST is configured or a connection cannot be obtained.  The
    PRSERV_DUMPOPT_* variables filter and shape the dump."""
    if not d.getVar('PRSERV_HOST', True):
        bb.error("Not using network based PR service")
        return None

    # reuse a connection cached by prserv_make_conn(), if any
    conn = d.getVar("__PRSERV_CONN", True)
    if conn is None:
        conn = prserv_make_conn(d)
        if conn is None:
            bb.error("Making connection failed to remote PR service")
            return None

    #dump db
    opt_version = d.getVar('PRSERV_DUMPOPT_VERSION', True)
    opt_pkgarch = d.getVar('PRSERV_DUMPOPT_PKGARCH', True)
    opt_checksum = d.getVar('PRSERV_DUMPOPT_CHECKSUM', True)
    # PRSERV_DUMPOPT_COL == "1" selects column output
    opt_col = ("1" == d.getVar('PRSERV_DUMPOPT_COL', True))
    return conn.export(opt_version, opt_pkgarch, opt_checksum, opt_col)
35 | |||
def prserv_import_db(d, filter_version=None, filter_pkgarch=None, filter_checksum=None):
    """Import PRAUTO$version$pkgarch$checksum entries from the datastore
    into the network PR service, optionally filtered by version,
    pkgarch and/or checksum.  Returns the list of
    (version, pkgarch, checksum, value) tuples actually imported, or
    None when no PR service is configured/reachable."""
    if not d.getVar('PRSERV_HOST', True):
        bb.error("Not using network based PR service")
        return None

    conn = d.getVar("__PRSERV_CONN", True)
    if conn is None:
        conn = prserv_make_conn(d)
        if conn is None:
            bb.error("Making connection failed to remote PR service")
            return None
    #get the entry values
    imported = []
    prefix = "PRAUTO$"
    for v in d.keys():
        if v.startswith(prefix):
            # split the key from the right: checksum, pkgarch, version
            (remain, sep, checksum) = v.rpartition('$')
            (remain, sep, pkgarch) = remain.rpartition('$')
            (remain, sep, version) = remain.rpartition('$')
            if (remain + '$' != prefix) or \
               (filter_version and filter_version != version) or \
               (filter_pkgarch and filter_pkgarch != pkgarch) or \
               (filter_checksum and filter_checksum != checksum):
                continue
            try:
                value = int(d.getVar(remain + '$' + version + '$' + pkgarch + '$' + checksum, True))
            except BaseException as exc:
                # bug fix: bb.debug() takes the log level as its first
                # argument; it was previously called with the message only
                bb.debug(1, "Not valid value of %s:%s" % (v,str(exc)))
                continue
            ret = conn.importone(version,pkgarch,checksum,value)
            if ret != value:
                bb.error("importing(%s,%s,%s,%d) failed. DB may have larger value %d" % (version,pkgarch,checksum,value,ret))
            else:
                imported.append((version,pkgarch,checksum,value))
    return imported
71 | |||
def prserv_export_tofile(d, metainfo, datainfo, lockdown, nomax=False):
    """Append a PR database dump to PRSERV_DUMPFILE (in PRSERV_DUMPDIR).

    'metainfo' (table/column description) is written as comments,
    'datainfo' rows as PRAUTO$... assignments; 'lockdown' additionally
    emits PRSERV_LOCKDOWN = "1", and unless 'nomax' is set a
    PRAUTO_<version>_<pkgarch> line records the maximum value seen for
    each pkgarch.  The dump file is protected by a lockfile."""
    import bb.utils
    #initialize the output file
    bb.utils.mkdirhier(d.getVar('PRSERV_DUMPDIR', True))
    df = d.getVar('PRSERV_DUMPFILE', True)
    #write data
    lf = bb.utils.lockfile("%s.lock" % df)
    f = open(df, "a")
    if metainfo:
        #dump column info
        f.write("#PR_core_ver = \"%s\"\n\n" % metainfo['core_ver']);
        f.write("#Table: %s\n" % metainfo['tbl_name'])
        f.write("#Columns:\n")
        f.write("#name \t type \t notn \t dflt \t pk\n")
        f.write("#----------\t --------\t --------\t --------\t ----\n")
        for i in range(len(metainfo['col_info'])):
            f.write("#%10s\t %8s\t %8s\t %8s\t %4s\n" %
                    (metainfo['col_info'][i]['name'],
                     metainfo['col_info'][i]['type'],
                     metainfo['col_info'][i]['notnull'],
                     metainfo['col_info'][i]['dflt_value'],
                     metainfo['col_info'][i]['pk']))
        f.write("\n")

    if lockdown:
        f.write("PRSERV_LOCKDOWN = \"1\"\n\n")

    if datainfo:
        # idx maps pkgarch -> index of the row with the largest value
        idx = {}
        for i in range(len(datainfo)):
            pkgarch = datainfo[i]['pkgarch']
            value = datainfo[i]['value']
            if pkgarch not in idx:
                idx[pkgarch] = i
            elif value > datainfo[idx[pkgarch]]['value']:
                idx[pkgarch] = i
            f.write("PRAUTO$%s$%s$%s = \"%s\"\n" %
                    (str(datainfo[i]['version']), pkgarch, str(datainfo[i]['checksum']), str(value)))
        if not nomax:
            for i in idx:
                f.write("PRAUTO_%s_%s = \"%s\"\n" % (str(datainfo[idx[i]]['version']),str(datainfo[idx[i]]['pkgarch']),str(datainfo[idx[i]]['value'])))
    f.close()
    bb.utils.unlockfile(lf)
115 | |||
def prserv_check_avail(d):
    """Validate PRSERV_HOST ("host:port") and probe the PR service,
    aborting the build via bb.fatal() on a malformed value or an
    unreachable server."""
    # list() so len() works under Python 3's lazy filter as well
    host_params = list(filter(None, (d.getVar("PRSERV_HOST", True) or '').split(':')))
    try:
        if len(host_params) != 2:
            raise TypeError
        else:
            int(host_params[1])
    except (TypeError, ValueError):
        # bug fix: int() raises ValueError for a non-numeric port, which
        # previously escaped the 'except TypeError' handler and crashed
        # instead of producing this friendly message
        bb.fatal('Undefined/incorrect PRSERV_HOST value. Format: "host:port"')
    else:
        prserv_make_conn(d, True)
diff --git a/meta/lib/oe/qa.py b/meta/lib/oe/qa.py new file mode 100644 index 0000000000..d5cdaa0fcd --- /dev/null +++ b/meta/lib/oe/qa.py | |||
@@ -0,0 +1,111 @@ | |||
class ELFFile:
    """Minimal reader for the ELF identification header of a binary.

    Only the first EI_NIDENT+4 bytes are read; that is enough to learn
    the word size (32/64 bit), endianness, OS ABI and machine type.
    run_objdump() shells out to $OBJDUMP for anything deeper.
    """
    EI_NIDENT = 16

    # byte offsets within e_ident
    EI_CLASS = 4
    EI_DATA = 5
    EI_VERSION = 6
    EI_OSABI = 7
    EI_ABIVERSION = 8

    # possible values for EI_CLASS
    ELFCLASSNONE = 0
    ELFCLASS32 = 1
    ELFCLASS64 = 2

    # possible value for EI_VERSION
    EV_CURRENT = 1

    # possible values for EI_DATA
    ELFDATANONE = 0
    ELFDATA2LSB = 1
    ELFDATA2MSB = 2

    def my_assert(self, expectation, result):
        """Raise if 'result' differs from 'expectation' (header check)."""
        if not expectation == result:
            #print "'%x','%x' %s" % (ord(expectation), ord(result), self.name)
            raise Exception("This does not work as expected")

    def __init__(self, name, bits = 0):
        # name: path of the candidate ELF file
        # bits: expected word size (0 = detect from the header)
        self.name = name
        self.bits = bits
        self.objdump_output = {}  # cache of run_objdump() results per cmd

    def open(self):
        """Read and validate the ELF identification header.

        Raises via my_assert()/Exception when the file is not ELF, has
        an unexpected class, version or byte order.  Sets self.bits and
        self.sex ("<" little-endian, ">" big-endian, struct format).
        """
        # NOTE(review): file() is the Python 2 builtin; the handle is
        # kept on self and never explicitly closed.
        self.file = file(self.name, "r")
        self.data = self.file.read(ELFFile.EI_NIDENT+4)

        self.my_assert(len(self.data), ELFFile.EI_NIDENT+4)
        self.my_assert(self.data[0], chr(0x7f) )
        self.my_assert(self.data[1], 'E')
        self.my_assert(self.data[2], 'L')
        self.my_assert(self.data[3], 'F')
        if self.bits == 0:
            if self.data[ELFFile.EI_CLASS] == chr(ELFFile.ELFCLASS32):
                self.bits = 32
            elif self.data[ELFFile.EI_CLASS] == chr(ELFFile.ELFCLASS64):
                self.bits = 64
            else:
                # Not 32-bit or 64.. lets assert
                raise Exception("ELF but not 32 or 64 bit.")
        elif self.bits == 32:
            self.my_assert(self.data[ELFFile.EI_CLASS], chr(ELFFile.ELFCLASS32))
        elif self.bits == 64:
            self.my_assert(self.data[ELFFile.EI_CLASS], chr(ELFFile.ELFCLASS64))
        else:
            raise Exception("Must specify unknown, 32 or 64 bit size.")
        self.my_assert(self.data[ELFFile.EI_VERSION], chr(ELFFile.EV_CURRENT) )

        self.sex = self.data[ELFFile.EI_DATA]
        if self.sex == chr(ELFFile.ELFDATANONE):
            raise Exception("self.sex == ELFDATANONE")
        elif self.sex == chr(ELFFile.ELFDATA2LSB):
            self.sex = "<"
        elif self.sex == chr(ELFFile.ELFDATA2MSB):
            self.sex = ">"
        else:
            raise Exception("Unknown self.sex")

    def osAbi(self):
        """Return the EI_OSABI byte of the header."""
        return ord(self.data[ELFFile.EI_OSABI])

    def abiVersion(self):
        """Return the EI_ABIVERSION byte of the header."""
        return ord(self.data[ELFFile.EI_ABIVERSION])

    def abiSize(self):
        """Return the word size in bits (32 or 64)."""
        return self.bits

    def isLittleEndian(self):
        return self.sex == "<"

    def isBigEngian(self):
        # (sic) name kept for compatibility with existing callers
        return self.sex == ">"

    def machine(self):
        """
        We know the sex stored in self.sex and we
        know the position
        """
        import struct
        # e_machine is the 16-bit field at offset 18
        (a,) = struct.unpack(self.sex+"H", self.data[18:20])
        return a

    def run_objdump(self, cmd, d):
        """Run '$OBJDUMP <cmd> <file>' with a C locale and return its
        stdout, caching the result per cmd; returns "" on failure."""
        import bb.process
        # bug fix: this module never imported 'os', so os.environ below
        # raised NameError; import it locally alongside the others
        import os
        import sys

        if cmd in self.objdump_output:
            return self.objdump_output[cmd]

        objdump = d.getVar('OBJDUMP', True)

        env = os.environ.copy()
        env["LC_ALL"] = "C"
        env["PATH"] = d.getVar('PATH', True)

        try:
            bb.note("%s %s %s" % (objdump, cmd, self.name))
            self.objdump_output[cmd] = bb.process.run([objdump, cmd, self.name], env=env, shell=False)[0]
            return self.objdump_output[cmd]
        except Exception as e:
            bb.note("%s %s %s failed: %s" % (objdump, cmd, self.name, e))
            return ""
diff --git a/meta/lib/oe/rootfs.py b/meta/lib/oe/rootfs.py new file mode 100644 index 0000000000..67ed9ef03d --- /dev/null +++ b/meta/lib/oe/rootfs.py | |||
@@ -0,0 +1,800 @@ | |||
1 | from abc import ABCMeta, abstractmethod | ||
2 | from oe.utils import execute_pre_post_process | ||
3 | from oe.package_manager import * | ||
4 | from oe.manifest import * | ||
5 | import oe.path | ||
6 | import filecmp | ||
7 | import shutil | ||
8 | import os | ||
9 | import subprocess | ||
10 | import re | ||
11 | |||
12 | |||
13 | class Rootfs(object): | ||
14 | """ | ||
15 | This is an abstract class. Do not instantiate this directly. | ||
16 | """ | ||
17 | __metaclass__ = ABCMeta | ||
18 | |||
    def __init__(self, d):
        """'d' is the BitBake datastore of the image recipe being built."""
        self.d = d
        self.pm = None  # package-manager backend; assigned by subclasses
        self.image_rootfs = self.d.getVar('IMAGE_ROOTFS', True)
        self.deploy_dir_image = self.d.getVar('DEPLOY_DIR_IMAGE', True)

        self.install_order = Manifest.INSTALL_ORDER
26 | |||
    @abstractmethod
    def _create(self):
        """Backend-specific: install the packages into the rootfs."""
        pass
30 | |||
    @abstractmethod
    def _get_delayed_postinsts(self):
        """Backend-specific: return postinstall scripts deferred to first
        boot, or None when there are none."""
        pass
34 | |||
    @abstractmethod
    def _save_postinsts(self):
        """Backend-specific: persist deferred postinstalls in the image."""
        pass
38 | |||
    @abstractmethod
    def _log_check(self):
        """Backend-specific: scan the rootfs log for warnings/errors."""
        pass
42 | |||
    def _insert_feed_uris(self):
        """Write package-feed URIs into the image, but only when the
        'package-management' IMAGE_FEATURE is enabled."""
        if bb.utils.contains("IMAGE_FEATURES", "package-management",
                         True, False, self.d):
            self.pm.insert_feeds_uris()
47 | |||
    @abstractmethod
    def _handle_intercept_failure(self, failed_script):
        """Backend-specific: react to a failed postinstall intercept."""
        pass
51 | |||
52 | """ | ||
53 | The _cleanup() method should be used to clean-up stuff that we don't really | ||
54 | want to end up on target. For example, in the case of RPM, the DB locks. | ||
55 | The method is called, once, at the end of create() method. | ||
56 | """ | ||
    @abstractmethod
    def _cleanup(self):
        """Backend-specific: strip build-time residue (see note above the
        method in the class body) before the rootfs is finalized."""
        pass
60 | |||
    def _exec_shell_cmd(self, cmd):
        """Run 'cmd' via subprocess, optionally wrapped by the FAKEROOT
        command.  Returns None on success or a string describing the
        failure — it does not raise."""
        fakerootcmd = self.d.getVar('FAKEROOT', True)
        if fakerootcmd is not None:
            # NOTE(review): when 'cmd' is a list this builds a nested
            # list [fakeroot, [args...]], which subprocess rejects;
            # callers in this class pass lists, so this branch looks
            # usable only with a string cmd — confirm.
            exec_cmd = [fakerootcmd, cmd]
        else:
            exec_cmd = cmd

        try:
            subprocess.check_output(exec_cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            return("Command '%s' returned %d:\n%s" % (e.cmd, e.returncode, e.output))

        return None
74 | |||
    def create(self):
        """Drive the whole rootfs generation: pre-process commands,
        backend package install, postinstall intercepts, post-process
        commands, devfs creation and final cleanup."""
        bb.note("###### Generate rootfs #######")
        pre_process_cmds = self.d.getVar("ROOTFS_PREPROCESS_COMMAND", True)
        post_process_cmds = self.d.getVar("ROOTFS_POSTPROCESS_COMMAND", True)

        intercepts_dir = os.path.join(self.d.getVar('WORKDIR', True),
                                      "intercept_scripts")

        # start from a fresh copy of the postinst-intercept scripts
        bb.utils.remove(intercepts_dir, True)

        bb.utils.mkdirhier(self.image_rootfs)

        bb.utils.mkdirhier(self.deploy_dir_image)

        shutil.copytree(self.d.expand("${COREBASE}/scripts/postinst-intercepts"),
                        intercepts_dir)

        shutil.copy(self.d.expand("${COREBASE}/meta/files/deploydir_readme.txt"),
                    self.deploy_dir_image +
                    "/README_-_DO_NOT_DELETE_FILES_IN_THIS_DIRECTORY.txt")

        execute_pre_post_process(self.d, pre_process_cmds)

        # call the package manager dependent create method
        self._create()

        # record the build name in ${sysconfdir}/version
        sysconfdir = self.image_rootfs + self.d.getVar('sysconfdir', True)
        bb.utils.mkdirhier(sysconfdir)
        with open(sysconfdir + "/version", "w+") as ver:
            ver.write(self.d.getVar('BUILDNAME', True) + "\n")

        self._run_intercepts()

        execute_pre_post_process(self.d, post_process_cmds)

        # a read-only rootfs cannot run postinstalls on first boot
        if bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
                             True, False, self.d):
            delayed_postinsts = self._get_delayed_postinsts()
            if delayed_postinsts is not None:
                bb.fatal("The following packages could not be configured "
                         "offline and rootfs is read-only: %s" %
                         delayed_postinsts)

        if self.d.getVar('USE_DEVFS', True) != "1":
            self._create_devfs()

        self._uninstall_uneeded()

        self._insert_feed_uris()

        self._run_ldconfig()

        self._generate_kernel_module_deps()

        self._cleanup()
130 | |||
131 | def _uninstall_uneeded(self): | ||
132 | # Remove unneeded init script symlinks | ||
133 | delayed_postinsts = self._get_delayed_postinsts() | ||
134 | if delayed_postinsts is None: | ||
135 | if os.path.exists(self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/init.d/run-postinsts")): | ||
136 | self._exec_shell_cmd(["update-rc.d", "-f", "-r", | ||
137 | self.d.getVar('IMAGE_ROOTFS', True), | ||
138 | "run-postinsts", "remove"]) | ||
139 | |||
140 | # Remove unneeded package-management related components | ||
141 | if bb.utils.contains("IMAGE_FEATURES", "package-management", | ||
142 | True, False, self.d): | ||
143 | return | ||
144 | |||
145 | if delayed_postinsts is None: | ||
146 | installed_pkgs_dir = self.d.expand('${WORKDIR}/installed_pkgs.txt') | ||
147 | pkgs_to_remove = list() | ||
148 | with open(installed_pkgs_dir, "r+") as installed_pkgs: | ||
149 | pkgs_installed = installed_pkgs.read().split('\n') | ||
150 | for pkg_installed in pkgs_installed[:]: | ||
151 | pkg = pkg_installed.split()[0] | ||
152 | if pkg in ["update-rc.d", | ||
153 | "base-passwd", | ||
154 | self.d.getVar("ROOTFS_BOOTSTRAP_INSTALL", True) | ||
155 | ]: | ||
156 | pkgs_to_remove.append(pkg) | ||
157 | pkgs_installed.remove(pkg_installed) | ||
158 | |||
159 | if len(pkgs_to_remove) > 0: | ||
160 | self.pm.remove(pkgs_to_remove, False) | ||
161 | # Update installed_pkgs.txt | ||
162 | open(installed_pkgs_dir, "w+").write('\n'.join(pkgs_installed)) | ||
163 | |||
164 | else: | ||
165 | self._save_postinsts() | ||
166 | |||
167 | self.pm.remove_packaging_data() | ||
168 | |||
    def _run_intercepts(self):
        """Execute every executable postinst-intercept script copied into
        WORKDIR/intercept_scripts, with D pointing at the rootfs.  A
        failing script is only warned about; the packages it registered
        (the ##PKGS: line) are handed to the backend so their
        postinstalls can be postponed to first boot."""
        intercepts_dir = os.path.join(self.d.getVar('WORKDIR', True),
                                      "intercept_scripts")

        bb.note("Running intercept scripts:")
        os.environ['D'] = self.image_rootfs
        for script in os.listdir(intercepts_dir):
            script_full = os.path.join(intercepts_dir, script)

            # skip the helper itself and anything not executable
            if script == "postinst_intercept" or not os.access(script_full, os.X_OK):
                continue

            bb.note("> Executing %s intercept ..." % script)

            try:
                subprocess.check_output(script_full)
            except subprocess.CalledProcessError as e:
                bb.warn("The postinstall intercept hook '%s' failed (exit code: %d)! See log for details!" %
                        (script, e.returncode))

                # find which packages registered this intercept
                with open(script_full) as intercept:
                    registered_pkgs = None
                    for line in intercept.read().split("\n"):
                        m = re.match("^##PKGS:(.*)", line)
                        if m is not None:
                            registered_pkgs = m.group(1).strip()
                            break

                    if registered_pkgs is not None:
                        bb.warn("The postinstalls for the following packages "
                                "will be postponed for first boot: %s" %
                                registered_pkgs)

                    # call the backend dependent handler
                    self._handle_intercept_failure(registered_pkgs)
204 | |||
    def _run_ldconfig(self):
        """Rebuild the dynamic-linker cache inside the rootfs when
        LDCONFIGDEPEND is set."""
        if self.d.getVar('LDCONFIGDEPEND', True):
            # NOTE(review): the log message lacks spaces around the
            # rootfs path ("-r<path>-c new") — cosmetic only
            bb.note("Executing: ldconfig -r" + self.image_rootfs + "-c new -v")
            self._exec_shell_cmd(['ldconfig', '-r', self.image_rootfs, '-c',
                                  'new', '-v'])
210 | |||
    def _generate_kernel_module_deps(self):
        """Run depmod (via depmodwrapper) for the kernel version recorded
        in STAGING_KERNEL_DIR/kernel-abiversion; skipped when no kernel
        has been staged."""
        kernel_abi_ver_file = os.path.join(self.d.getVar('STAGING_KERNEL_DIR', True),
                                           'kernel-abiversion')
        if os.path.exists(kernel_abi_ver_file):
            kernel_ver = open(kernel_abi_ver_file).read().strip(' \n')
            modules_dir = os.path.join(self.image_rootfs, 'lib', 'modules', kernel_ver)

            # depmod needs the directory to exist even if no modules do
            bb.utils.mkdirhier(modules_dir)

            self._exec_shell_cmd(['depmodwrapper', '-a', '-b', self.image_rootfs,
                                  kernel_ver])
222 | |||
223 | """ | ||
224 | Create devfs: | ||
225 | * IMAGE_DEVICE_TABLE is the old name to an absolute path to a device table file | ||
    * IMAGE_DEVICE_TABLES is a new name for a file, or list of files, searched
227 | for in the BBPATH | ||
228 | If neither are specified then the default name of files/device_table-minimal.txt | ||
229 | is searched for in the BBPATH (same as the old version.) | ||
230 | """ | ||
    def _create_devfs(self):
        """Populate /dev from device table file(s) using makedevs; see the
        IMAGE_DEVICE_TABLE/IMAGE_DEVICE_TABLES note above the method."""
        devtable_list = []
        devtable = self.d.getVar('IMAGE_DEVICE_TABLE', True)
        if devtable is not None:
            # old-style: a single absolute path
            devtable_list.append(devtable)
        else:
            # new-style: file name(s) looked up in BBPATH
            devtables = self.d.getVar('IMAGE_DEVICE_TABLES', True)
            if devtables is None:
                devtables = 'files/device_table-minimal.txt'
            for devtable in devtables.split():
                devtable_list.append("%s" % bb.utils.which(self.d.getVar('BBPATH', True), devtable))

        for devtable in devtable_list:
            self._exec_shell_cmd(["makedevs", "-r",
                                  self.image_rootfs, "-D", devtable])
246 | |||
247 | |||
248 | class RpmRootfs(Rootfs): | ||
    def __init__(self, d, manifest_dir):
        """RPM-backed rootfs; wires up RpmManifest/RpmPM and, unless
        incremental image generation (INC_RPM_IMAGE_GEN) is enabled,
        starts from an empty rootfs."""
        super(RpmRootfs, self).__init__(d)

        self.manifest = RpmManifest(d, manifest_dir)

        self.pm = RpmPM(d,
                        d.getVar('IMAGE_ROOTFS', True),
                        self.d.getVar('TARGET_VENDOR', True)
                        )

        self.inc_rpm_image_gen = self.d.getVar('INC_RPM_IMAGE_GEN', True)
        if self.inc_rpm_image_gen != "1":
            bb.utils.remove(self.image_rootfs, True)
        else:
            # incremental build: reuse the previous rootfs and restore
            # the saved packaging data
            self.pm.recovery_packaging_data()
        bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True)

        self.pm.create_configs()
267 | |||
268 | ''' | ||
269 | While rpm incremental image generation is enabled, it will remove the | ||
270 | unneeded pkgs by comparing the new install solution manifest and the | ||
271 | old installed manifest. | ||
272 | ''' | ||
    def _create_incremental(self, pkgs_initial_install):
        """Incremental image generation: compare the new install solution
        with the previously installed manifest, remove packages no
        longer wanted, then upgrade the rest in place."""
        if self.inc_rpm_image_gen == "1":

            pkgs_to_install = list()
            for pkg_type in pkgs_initial_install:
                pkgs_to_install += pkgs_initial_install[pkg_type]

            installed_manifest = self.pm.load_old_install_solution()
            solution_manifest = self.pm.dump_install_solution(pkgs_to_install)

            # anything installed before but absent from the new solution
            pkg_to_remove = list()
            for pkg in installed_manifest:
                if pkg not in solution_manifest:
                    pkg_to_remove.append(pkg)

            self.pm.update()

            bb.note('incremental update -- upgrade packages in place ')
            self.pm.upgrade()
            if pkg_to_remove != []:
                bb.note('incremental removed: %s' % ' '.join(pkg_to_remove))
                self.pm.remove(pkg_to_remove)
295 | |||
    def _create(self):
        """Install all packages from the initial manifest: mandatory ones
        strictly, attempt-only ones best-effort, then complementary
        packages; finally check the log and set up smart's target
        config."""
        pkgs_to_install = self.manifest.parse_initial_manifest()

        # update PM index files
        self.pm.write_index()

        self.pm.dump_all_available_pkgs()

        if self.inc_rpm_image_gen == "1":
            self._create_incremental(pkgs_to_install)

        self.pm.update()

        # split the manifest into hard requirements and attempt-only
        pkgs = []
        pkgs_attempt = []
        for pkg_type in pkgs_to_install:
            if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY:
                pkgs_attempt += pkgs_to_install[pkg_type]
            else:
                pkgs += pkgs_to_install[pkg_type]

        self.pm.install(pkgs)

        # True => attempt-only; failures here are not fatal
        self.pm.install(pkgs_attempt, True)

        self.pm.install_complementary()

        self._log_check()

        if self.inc_rpm_image_gen == "1":
            self.pm.backup_packaging_data()

        self.pm.rpm_setup_smart_target_config()
329 | |||
    @staticmethod
    def _depends_list():
        """Variables whose values the RPM rootfs generation depends on."""
        return ['DEPLOY_DIR_RPM', 'INC_RPM_IMAGE_GEN', 'RPM_PREPROCESS_COMMANDS',
                'RPM_POSTPROCESS_COMMANDS', 'RPM_PREFER_ELF_ARCH']
334 | |||
    def _get_delayed_postinsts(self):
        """Return the scriptlets saved under ${sysconfdir}/rpm-postinsts
        (to run on first boot), or None when the directory is absent."""
        postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/rpm-postinsts")
        if os.path.isdir(postinst_dir):
            files = os.listdir(postinst_dir)
            for f in files:
                bb.note('Delayed package scriptlet: %s' % f)
            return files

        return None
344 | |||
    def _save_postinsts(self):
        """No-op for RPM (see comment below)."""
        # this is just a stub. For RPM, the failed postinstalls are
        # already saved in /etc/rpm-postinsts
        pass
349 | |||
    def _log_check_warn(self):
        """Surface any '(warn|Warn)' lines from ${T}/log.do_rootfs as
        bb.warn messages; lines containing 'log_check' are skipped to
        avoid re-matching our own output."""
        r = re.compile('(warn|Warn)')
        log_path = self.d.expand("${T}/log.do_rootfs")
        with open(log_path, 'r') as log:
            for line in log:
                if 'log_check' in line:
                    continue

                m = r.search(line)
                if m:
                    bb.warn('[log_check] %s: found a warning message in the logfile (keyword \'%s\'):\n[log_check] %s'
                            % (self.d.getVar('PN', True), m.group(), line))
362 | |||
363 | def _log_check_error(self): | ||
364 | r = re.compile('(unpacking of archive failed|Cannot find package|exit 1|ERR|Fail)') | ||
365 | log_path = self.d.expand("${T}/log.do_rootfs") | ||
366 | with open(log_path, 'r') as log: | ||
367 | found_error = 0 | ||
368 | message = "\n" | ||
369 | for line in log: | ||
370 | if 'log_check' in line: | ||
371 | continue | ||
372 | |||
373 | m = r.search(line) | ||
374 | if m: | ||
375 | found_error = 1 | ||
376 | bb.warn('[log_check] %s: found an error message in the logfile (keyword \'%s\'):\n[log_check] %s' | ||
377 | % (self.d.getVar('PN', True), m.group(), line)) | ||
378 | |||
379 | if found_error >= 1 and found_error <= 5: | ||
380 | message += line + '\n' | ||
381 | found_error += 1 | ||
382 | |||
383 | if found_error == 6: | ||
384 | bb.fatal(message) | ||
385 | |||
386 | def _log_check(self): | ||
387 | self._log_check_warn() | ||
388 | self._log_check_error() | ||
389 | |||
390 | def _handle_intercept_failure(self, registered_pkgs): | ||
391 | rpm_postinsts_dir = self.image_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/') | ||
392 | bb.utils.mkdirhier(rpm_postinsts_dir) | ||
393 | |||
394 | # Save the package postinstalls in /etc/rpm-postinsts | ||
395 | for pkg in registered_pkgs.split(): | ||
396 | self.pm.save_rpmpostinst(pkg) | ||
397 | |||
398 | def _cleanup(self): | ||
399 | # during the execution of postprocess commands, rpm is called several | ||
400 | # times to get the files installed, dependencies, etc. This creates the | ||
401 | # __db.00* (Berkeley DB files that hold locks, rpm specific environment | ||
402 | # settings, etc.), that should not get into the final rootfs | ||
403 | self.pm.unlock_rpm_db() | ||
404 | bb.utils.remove(self.image_rootfs + "/install", True) | ||
405 | |||
406 | |||
class DpkgRootfs(Rootfs):
    """Rootfs backend driven by dpkg/apt (.deb packages)."""

    def __init__(self, d, manifest_dir):
        super(DpkgRootfs, self).__init__(d)

        # Always start from a clean rootfs and multilib scratch area.
        bb.utils.remove(self.image_rootfs, True)
        bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True)
        self.manifest = DpkgManifest(d, manifest_dir)
        self.pm = DpkgPM(d, d.getVar('IMAGE_ROOTFS', True),
                         d.getVar('PACKAGE_ARCHS', True),
                         d.getVar('DPKG_ARCH', True))

    def _create(self):
        """Install all manifest packages and run their maintainer scripts."""
        pkgs_to_install = self.manifest.parse_initial_manifest()

        # dpkg expects the alternatives directory to exist up front.
        alt_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/alternatives")
        bb.utils.mkdirhier(alt_dir)

        # Update PM index files.
        self.pm.write_index()

        self.pm.update()

        for pkg_type in self.install_order:
            if pkg_type in pkgs_to_install:
                # Attempt-only packages are allowed to fail installation.
                self.pm.install(pkgs_to_install[pkg_type],
                                [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])

        self.pm.install_complementary()

        self.pm.fix_broken_dependencies()

        self.pm.mark_packages("installed")

        self.pm.run_pre_post_installs()

    @staticmethod
    def _depends_list():
        # Variables whose value changes should invalidate cached rootfs state.
        return ['DEPLOY_DIR_DEB', 'DEB_SDK_ARCH', 'APTCONF_TARGET', 'APT_ARGS', 'DPKG_ARCH', 'DEB_PREPROCESS_COMMANDS', 'DEB_POSTPROCESS_COMMAND']

    def _get_delayed_postinsts(self):
        """Return names of packages left in the 'unpacked' state, or None."""
        pkg_list = []
        pkg_name = None
        with open(self.image_rootfs + "/var/lib/dpkg/status") as status:
            for line in status:
                m_pkg = re.match("^Package: (.*)", line)
                m_status = re.match("^Status:.*unpacked", line)
                if m_pkg is not None:
                    pkg_name = m_pkg.group(1)
                elif m_status is not None and pkg_name is not None:
                    # Fix: a malformed status file whose first stanza has a
                    # Status line before any Package field previously raised
                    # UnboundLocalError on pkg_name.
                    pkg_list.append(pkg_name)

        if len(pkg_list) == 0:
            return None

        return pkg_list

    def _save_postinsts(self):
        """Copy deferred postinst scripts to ${sysconfdir}/deb-postinsts,
        numbered to preserve execution order."""
        num = 0
        # Fix: _get_delayed_postinsts() returns None when nothing is
        # deferred; iterate an empty list instead of crashing.
        for p in self._get_delayed_postinsts() or []:
            dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/deb-postinsts")
            src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/info")

            bb.utils.mkdirhier(dst_postinst_dir)

            if os.path.exists(os.path.join(src_postinst_dir, p + ".postinst")):
                shutil.copy(os.path.join(src_postinst_dir, p + ".postinst"),
                            os.path.join(dst_postinst_dir, "%03d-%s" % (num, p)))

            num += 1

    def _handle_intercept_failure(self, registered_pkgs):
        # Postinstall intercept failed: mark the packages as merely unpacked
        # so their maintainer scripts run on first boot instead.
        self.pm.mark_packages("unpacked", registered_pkgs.split())

    def _log_check(self):
        pass

    def _cleanup(self):
        pass
485 | |||
486 | |||
class OpkgRootfs(Rootfs):
    # Rootfs backend driven by opkg (.ipk packages), with optional
    # incremental image generation (INC_IPK_IMAGE_GEN).
    def __init__(self, d, manifest_dir):
        super(OpkgRootfs, self).__init__(d)

        self.manifest = OpkgManifest(d, manifest_dir)
        self.opkg_conf = self.d.getVar("IPKGCONF_TARGET", True)
        self.pkg_archs = self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS", True)

        self.inc_opkg_image_gen = self.d.getVar('INC_IPK_IMAGE_GEN', True) or ""
        if self._remove_old_rootfs():
            # Fresh start: the old rootfs is stale or incremental mode is off.
            bb.utils.remove(self.image_rootfs, True)
            self.pm = OpkgPM(d,
                             self.image_rootfs,
                             self.opkg_conf,
                             self.pkg_archs)
        else:
            # Incremental mode: keep the old rootfs and restore the saved
            # packaging data into it.
            self.pm = OpkgPM(d,
                             self.image_rootfs,
                             self.opkg_conf,
                             self.pkg_archs)
            self.pm.recover_packaging_data()

        bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True)

    def _prelink_file(self, root_dir, filename):
        # Prelink one file in root_dir, first seeding a prelink.conf from the
        # native sysroot if the tree does not have one yet.
        bb.note('prelink %s in %s' % (filename, root_dir))
        prelink_cfg = oe.path.join(root_dir,
                                   self.d.expand('${sysconfdir}/prelink.conf'))
        if not os.path.exists(prelink_cfg):
            shutil.copy(self.d.expand('${STAGING_DIR_NATIVE}${sysconfdir_native}/prelink.conf'),
                        prelink_cfg)

        cmd_prelink = self.d.expand('${STAGING_DIR_NATIVE}${sbindir_native}/prelink')
        self._exec_shell_cmd([cmd_prelink,
                              '--root',
                              root_dir,
                              '-amR',
                              '-N',
                              '-c',
                              self.d.expand('${sysconfdir}/prelink.conf')])

    '''
    Compare two files with the same key twice to see if they are equal.
    If they are not equal, it means they are duplicated and come from
    different packages.
    1st: Compare them directly;
    2nd: While incremental image creation is enabled, one of the
         files could probably have been prelinked in the previous image
         creation and so changed; in that case prelink the other one
         and compare them again.
    '''
    def _file_equal(self, key, f1, f2):

        # Both of them are not prelinked
        if filecmp.cmp(f1, f2):
            return True

        # Only prelink files that live outside the image rootfs (the rootfs
        # copy is assumed to be the already-prelinked one).
        if self.image_rootfs not in f1:
            self._prelink_file(f1.replace(key, ''), f1)

        if self.image_rootfs not in f2:
            self._prelink_file(f2.replace(key, ''), f2)

        # Both of them are prelinked
        if filecmp.cmp(f1, f2):
            return True

        # Not equal
        return False

    """
    This function was reused from the old implementation.
    See commit: "image.bbclass: Added variables for multilib support." by
    Lianhao Lu.
    """
    def _multilib_sanity_test(self, dirs):
        # Walk every tree in dirs and bb.fatal() on duplicate paths whose
        # contents differ, unless the path matches MULTILIBRE_ALLOW_REP.

        allow_replace = self.d.getVar("MULTILIBRE_ALLOW_REP", True)
        if allow_replace is None:
            allow_replace = ""

        allow_rep = re.compile(re.sub("\|$", "", allow_replace))
        error_prompt = "Multilib check error:"

        files = {}
        for dir in dirs:
            for root, subfolders, subfiles in os.walk(dir):
                for file in subfiles:
                    item = os.path.join(root, file)
                    key = str(os.path.join("/", os.path.relpath(item, dir)))

                    valid = True
                    if key in files:
                        # check whether the file is allowed to be replaced
                        if allow_rep.match(key):
                            valid = True
                        else:
                            if os.path.exists(files[key]) and \
                               os.path.exists(item) and \
                               not self._file_equal(key, files[key], item):
                                valid = False
                                bb.fatal("%s duplicate files %s %s is not the same\n" %
                                         (error_prompt, item, files[key]))

                    # passed the check, add to the list
                    if valid:
                        files[key] = item

    def _multilib_test_install(self, pkgs):
        # Install pkgs into a scratch rootfs per multilib variant, then run
        # the duplicate-file sanity test across all resulting trees.
        ml_temp = self.d.getVar("MULTILIB_TEMP_ROOTFS", True)
        bb.utils.mkdirhier(ml_temp)

        dirs = [self.image_rootfs]

        for variant in self.d.getVar("MULTILIB_VARIANTS", True).split():
            ml_target_rootfs = os.path.join(ml_temp, variant)

            bb.utils.remove(ml_target_rootfs, True)

            ml_opkg_conf = os.path.join(ml_temp,
                                        variant + "-" + os.path.basename(self.opkg_conf))

            ml_pm = OpkgPM(self.d, ml_target_rootfs, ml_opkg_conf, self.pkg_archs)

            ml_pm.update()
            ml_pm.install(pkgs)

            dirs.append(ml_target_rootfs)

        self._multilib_sanity_test(dirs)

    '''
    While ipk incremental image generation is enabled, remove the packages
    that are no longer needed by comparing the old full manifest from the
    previous image with the new full manifest of the current image.
    '''
    def _remove_extra_packages(self, pkgs_initial_install):
        if self.inc_opkg_image_gen == "1":
            # Parse full manifest in previous existing image creation session
            old_full_manifest = self.manifest.parse_full_manifest()

            # Create full manifest for the current image session, the old one
            # will be replaced by the new one.
            self.manifest.create_full(self.pm)

            # Parse full manifest in current image creation session
            new_full_manifest = self.manifest.parse_full_manifest()

            pkg_to_remove = list()
            for pkg in old_full_manifest:
                if pkg not in new_full_manifest:
                    pkg_to_remove.append(pkg)

            if pkg_to_remove != []:
                bb.note('decremental removed: %s' % ' '.join(pkg_to_remove))
                self.pm.remove(pkg_to_remove)

    '''
    Compare with the previous image creation session: if any of
    PACKAGE_EXCLUDE, NO_RECOMMENDATIONS or BAD_RECOMMENDATIONS has changed,
    the previous old image should be removed (return True).
    '''
    def _remove_old_rootfs(self):
        if self.inc_opkg_image_gen != "1":
            return True

        vars_list_file = self.d.expand('${T}/vars_list')

        old_vars_list = ""
        if os.path.exists(vars_list_file):
            old_vars_list = open(vars_list_file, 'r+').read()

        new_vars_list = '%s:%s:%s\n' % \
                ((self.d.getVar('BAD_RECOMMENDATIONS', True) or '').strip(),
                 (self.d.getVar('NO_RECOMMENDATIONS', True) or '').strip(),
                 (self.d.getVar('PACKAGE_EXCLUDE', True) or '').strip())
        open(vars_list_file, 'w+').write(new_vars_list)

        if old_vars_list != new_vars_list:
            return True

        return False

    def _create(self):
        # Populate the ipk-based rootfs from the initial manifest.
        pkgs_to_install = self.manifest.parse_initial_manifest()
        opkg_pre_process_cmds = self.d.getVar('OPKG_PREPROCESS_COMMANDS', True)
        opkg_post_process_cmds = self.d.getVar('OPKG_POSTPROCESS_COMMANDS', True)
        rootfs_post_install_cmds = self.d.getVar('ROOTFS_POSTINSTALL_COMMAND', True)

        # update PM index files, unless users provide their own feeds
        if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") != "1":
            self.pm.write_index()

        execute_pre_post_process(self.d, opkg_pre_process_cmds)

        self.pm.update()

        self.pm.handle_bad_recommendations()

        if self.inc_opkg_image_gen == "1":
            self._remove_extra_packages(pkgs_to_install)

        for pkg_type in self.install_order:
            if pkg_type in pkgs_to_install:
                # For multilib, we perform a sanity test before final install
                # If sanity test fails, it will automatically do a bb.fatal()
                # and the installation will stop
                if pkg_type == Manifest.PKG_TYPE_MULTILIB:
                    self._multilib_test_install(pkgs_to_install[pkg_type])

                self.pm.install(pkgs_to_install[pkg_type],
                                [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])

        self.pm.install_complementary()

        execute_pre_post_process(self.d, opkg_post_process_cmds)
        execute_pre_post_process(self.d, rootfs_post_install_cmds)

        if self.inc_opkg_image_gen == "1":
            self.pm.backup_packaging_data()

    @staticmethod
    def _depends_list():
        # Variables whose value changes should invalidate cached rootfs state.
        return ['IPKGCONF_SDK', 'IPK_FEED_URIS', 'DEPLOY_DIR_IPK', 'IPKGCONF_TARGET', 'INC_IPK_IMAGE_GEN', 'OPKG_ARGS', 'OPKGLIBDIR', 'OPKG_PREPROCESS_COMMANDS', 'OPKG_POSTPROCESS_COMMANDS', 'OPKGLIBDIR']

    def _get_delayed_postinsts(self):
        # Return names of packages left in the "unpacked" state (their
        # postinstalls were deferred to first boot), or None if there are none.
        pkg_list = []
        status_file = os.path.join(self.image_rootfs,
                                   self.d.getVar('OPKGLIBDIR', True).strip('/'),
                                   "opkg", "status")

        with open(status_file) as status:
            for line in status:
                m_pkg = re.match("^Package: (.*)", line)
                m_status = re.match("^Status:.*unpacked", line)
                if m_pkg is not None:
                    # NOTE(review): assumes a Package field always precedes
                    # its Status field in the opkg status file — verify.
                    pkg_name = m_pkg.group(1)
                elif m_status is not None:
                    pkg_list.append(pkg_name)

        if len(pkg_list) == 0:
            return None

        return pkg_list

    def _save_postinsts(self):
        # Copy deferred postinstall scripts into ${sysconfdir}/ipk-postinsts,
        # numbered to preserve execution order.
        # NOTE(review): _get_delayed_postinsts() may return None; this
        # presumably is only called when delayed postinsts exist — confirm.
        num = 0
        for p in self._get_delayed_postinsts():
            dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/ipk-postinsts")
            src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${OPKGLIBDIR}/opkg/info")

            bb.utils.mkdirhier(dst_postinst_dir)

            if os.path.exists(os.path.join(src_postinst_dir, p + ".postinst")):
                shutil.copy(os.path.join(src_postinst_dir, p + ".postinst"),
                            os.path.join(dst_postinst_dir, "%03d-%s" % (num, p)))

            num += 1

    def _handle_intercept_failure(self, registered_pkgs):
        # Postinstall intercept failed: mark the packages as merely unpacked
        # so their scriptlets run on first boot instead.
        self.pm.mark_packages("unpacked", registered_pkgs.split())

    def _log_check(self):
        pass

    def _cleanup(self):
        pass
755 | |||
def get_class_for_type(imgtype):
    """Map a package type ("rpm", "ipk" or "deb") to its Rootfs subclass.

    Raises KeyError for an unknown imgtype.
    """
    type_map = {
        "rpm": RpmRootfs,
        "ipk": OpkgRootfs,
        "deb": DpkgRootfs,
    }
    return type_map[imgtype]
760 | |||
def variable_depends(d, manifest_dir=None):
    """Return the variables the configured IMAGE_PKGTYPE backend depends on."""
    backend = get_class_for_type(d.getVar('IMAGE_PKGTYPE', True))
    return backend._depends_list()
765 | |||
def create_rootfs(d, manifest_dir=None):
    """Create the image rootfs with the backend selected by IMAGE_PKGTYPE.

    The process environment is snapshotted and restored afterwards, since
    the package managers modify os.environ while they run.
    """
    env_bkp = os.environ.copy()

    img_type = d.getVar('IMAGE_PKGTYPE', True)
    try:
        if img_type == "rpm":
            RpmRootfs(d, manifest_dir).create()
        elif img_type == "ipk":
            OpkgRootfs(d, manifest_dir).create()
        elif img_type == "deb":
            DpkgRootfs(d, manifest_dir).create()
    finally:
        # Bug fix: restore the environment even when rootfs creation raises,
        # so a failure cannot leak package-manager variables into later code
        # running in this process.
        os.environ.clear()
        os.environ.update(env_bkp)
779 | |||
780 | |||
def image_list_installed_packages(d, format=None, rootfs_dir=None):
    """List packages installed in *rootfs_dir* (defaults to IMAGE_ROOTFS)."""
    rootfs_dir = rootfs_dir or d.getVar('IMAGE_ROOTFS', True)

    img_type = d.getVar('IMAGE_PKGTYPE', True)
    if img_type == "rpm":
        lister = RpmPkgsList(d, rootfs_dir)
    elif img_type == "ipk":
        lister = OpkgPkgsList(d, rootfs_dir, d.getVar("IPKGCONF_TARGET", True))
    elif img_type == "deb":
        lister = DpkgPkgsList(d, rootfs_dir)
    else:
        return None
    return lister.list(format)
792 | |||
793 | if __name__ == "__main__": | ||
794 | """ | ||
795 | We should be able to run this as a standalone script, from outside bitbake | ||
796 | environment. | ||
797 | """ | ||
798 | """ | ||
799 | TBD | ||
800 | """ | ||
diff --git a/meta/lib/oe/sdk.py b/meta/lib/oe/sdk.py new file mode 100644 index 0000000000..c57a441941 --- /dev/null +++ b/meta/lib/oe/sdk.py | |||
@@ -0,0 +1,326 @@ | |||
1 | from abc import ABCMeta, abstractmethod | ||
2 | from oe.utils import execute_pre_post_process | ||
3 | from oe.manifest import * | ||
4 | from oe.package_manager import * | ||
5 | import os | ||
6 | import shutil | ||
7 | import glob | ||
8 | |||
9 | |||
class Sdk(object):
    """Abstract base for SDK population backends (rpm/ipk/deb).

    Subclasses implement _populate(); populate() drives the common flow.
    """
    __metaclass__ = ABCMeta

    def __init__(self, d, manifest_dir):
        self.d = d
        self.sdk_output = self.d.getVar('SDK_OUTPUT', True)
        self.sdk_native_path = self.d.getVar('SDKPATHNATIVE', True).strip('/')
        self.target_path = self.d.getVar('SDKTARGETSYSROOT', True).strip('/')
        self.sysconfdir = self.d.getVar('sysconfdir', True).strip('/')

        self.sdk_target_sysroot = os.path.join(self.sdk_output, self.target_path)
        self.sdk_host_sysroot = self.sdk_output

        self.manifest_dir = self.d.getVar("SDK_DIR", True) \
            if manifest_dir is None else manifest_dir

        # Always start from a clean SDK output tree.
        bb.utils.remove(self.sdk_output, True)

        self.install_order = Manifest.INSTALL_ORDER

    @abstractmethod
    def _populate(self):
        pass

    def populate(self):
        bb.utils.mkdirhier(self.sdk_output)

        # Backend-specific package installation.
        self._populate()

        native_libdir = os.path.join(
            self.sdk_output, self.sdk_native_path,
            self.d.getVar('libdir_nativesdk', True).strip('/'))

        # Don't ship any libGL in the SDK.
        bb.utils.remove(os.path.join(native_libdir, "libGL*"))

        # Fix or remove broken .la files.
        bb.utils.remove(os.path.join(native_libdir, "*.la"))

        # Link the ld.so.cache file into the host's filesystem.
        link_name = os.path.join(self.sdk_output, self.sdk_native_path,
                                 self.sysconfdir, "ld.so.cache")
        bb.utils.mkdirhier(os.path.dirname(link_name))
        os.symlink("/etc/ld.so.cache", link_name)

        execute_pre_post_process(self.d, self.d.getVar('SDK_POSTPROCESS_COMMAND', True))
59 | |||
60 | |||
class RpmSdk(Sdk):
    # SDK populated from RPM packages, with separate target and host
    # (nativesdk) package managers.
    def __init__(self, d, manifest_dir=None):
        super(RpmSdk, self).__init__(d, manifest_dir)

        self.target_manifest = RpmManifest(d, self.manifest_dir,
                                           Manifest.MANIFEST_TYPE_SDK_TARGET)
        self.host_manifest = RpmManifest(d, self.manifest_dir,
                                         Manifest.MANIFEST_TYPE_SDK_HOST)

        # Capabilities treated as provided inside the target sysroot so
        # dependency resolution succeeds.
        target_providename = ['/bin/sh',
                              '/bin/bash',
                              '/usr/bin/env',
                              '/usr/bin/perl',
                              'pkgconfig'
                              ]

        self.target_pm = RpmPM(d,
                               self.sdk_target_sysroot,
                               self.d.getVar('TARGET_VENDOR', True),
                               'target',
                               target_providename
                               )

        # Same idea for the host (nativesdk) side; libGL is stripped from the
        # SDK later, so it is declared as provided here.
        sdk_providename = ['/bin/sh',
                           '/bin/bash',
                           '/usr/bin/env',
                           '/usr/bin/perl',
                           'pkgconfig',
                           'libGL.so()(64bit)',
                           'libGL.so'
                           ]

        self.host_pm = RpmPM(d,
                             self.sdk_host_sysroot,
                             self.d.getVar('SDK_VENDOR', True),
                             'host',
                             sdk_providename,
                             "SDK_PACKAGE_ARCHS",
                             "SDK_OS"
                             )

    def _populate_sysroot(self, pm, manifest):
        # Install every package listed in the manifest into the sysroot
        # managed by pm, honouring Manifest.INSTALL_ORDER; attempt-only
        # packages are allowed to fail.
        pkgs_to_install = manifest.parse_initial_manifest()

        pm.create_configs()
        pm.write_index()
        pm.dump_all_available_pkgs()
        pm.update()

        for pkg_type in self.install_order:
            if pkg_type in pkgs_to_install:
                pm.install(pkgs_to_install[pkg_type],
                           [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])

    def _populate(self):
        bb.note("Installing TARGET packages")
        self._populate_sysroot(self.target_pm, self.target_manifest)

        self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY', True))

        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND", True))

        self.target_pm.remove_packaging_data()

        bb.note("Installing NATIVESDK packages")
        self._populate_sysroot(self.host_pm, self.host_manifest)

        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND", True))

        self.host_pm.remove_packaging_data()

        # Move host RPM library data from /var/lib/rpm into the nativesdk
        # prefix, then drop the now-empty /var tree.
        native_rpm_state_dir = os.path.join(self.sdk_output,
                                            self.sdk_native_path,
                                            self.d.getVar('localstatedir_nativesdk', True).strip('/'),
                                            "lib",
                                            "rpm"
                                            )
        bb.utils.mkdirhier(native_rpm_state_dir)
        for f in glob.glob(os.path.join(self.sdk_output,
                                        "var",
                                        "lib",
                                        "rpm",
                                        "*")):
            bb.utils.movefile(f, native_rpm_state_dir)

        bb.utils.remove(os.path.join(self.sdk_output, "var"), True)

        # Move host sysconfig data from /etc into the nativesdk prefix.
        native_sysconf_dir = os.path.join(self.sdk_output,
                                          self.sdk_native_path,
                                          self.d.getVar('sysconfdir',
                                                        True).strip('/'),
                                          )
        bb.utils.mkdirhier(native_sysconf_dir)
        for f in glob.glob(os.path.join(self.sdk_output, "etc", "*")):
            bb.utils.movefile(f, native_sysconf_dir)
        bb.utils.remove(os.path.join(self.sdk_output, "etc"), True)
159 | |||
160 | |||
class OpkgSdk(Sdk):
    # SDK populated from ipk packages via opkg, with separate target and
    # host (nativesdk) package managers.
    def __init__(self, d, manifest_dir=None):
        super(OpkgSdk, self).__init__(d, manifest_dir)

        self.target_conf = self.d.getVar("IPKGCONF_TARGET", True)
        self.host_conf = self.d.getVar("IPKGCONF_SDK", True)

        self.target_manifest = OpkgManifest(d, self.manifest_dir,
                                            Manifest.MANIFEST_TYPE_SDK_TARGET)
        self.host_manifest = OpkgManifest(d, self.manifest_dir,
                                          Manifest.MANIFEST_TYPE_SDK_HOST)

        self.target_pm = OpkgPM(d, self.sdk_target_sysroot, self.target_conf,
                                self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS", True))

        self.host_pm = OpkgPM(d, self.sdk_host_sysroot, self.host_conf,
                              self.d.getVar("SDK_PACKAGE_ARCHS", True))

    def _populate_sysroot(self, pm, manifest):
        # Install every package listed in the manifest into the sysroot
        # managed by pm, honouring Manifest.INSTALL_ORDER; attempt-only
        # packages are allowed to fail.
        pkgs_to_install = manifest.parse_initial_manifest()

        # Skip index generation when users provide their own feeds.
        if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") != "1":
            pm.write_index()

        pm.update()

        for pkg_type in self.install_order:
            if pkg_type in pkgs_to_install:
                pm.install(pkgs_to_install[pkg_type],
                           [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])

    def _populate(self):
        bb.note("Installing TARGET packages")
        self._populate_sysroot(self.target_pm, self.target_manifest)

        self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY', True))

        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND", True))

        bb.note("Installing NATIVESDK packages")
        self._populate_sysroot(self.host_pm, self.host_manifest)

        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND", True))

        # Ship the opkg configuration files inside the SDK, world-readable.
        target_sysconfdir = os.path.join(self.sdk_target_sysroot, self.sysconfdir)
        host_sysconfdir = os.path.join(self.sdk_host_sysroot, self.sysconfdir)

        bb.utils.mkdirhier(target_sysconfdir)
        shutil.copy(self.target_conf, target_sysconfdir)
        os.chmod(os.path.join(target_sysconfdir,
                              os.path.basename(self.target_conf)), 0644)

        bb.utils.mkdirhier(host_sysconfdir)
        shutil.copy(self.host_conf, host_sysconfdir)
        os.chmod(os.path.join(host_sysconfdir,
                              os.path.basename(self.host_conf)), 0644)

        # Move host opkg state from /var/lib/opkg into the nativesdk prefix,
        # then drop the now-empty /var tree.
        native_opkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
                                             self.d.getVar('localstatedir_nativesdk', True).strip('/'),
                                             "lib", "opkg")
        bb.utils.mkdirhier(native_opkg_state_dir)
        for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "opkg", "*")):
            bb.utils.movefile(f, native_opkg_state_dir)

        bb.utils.remove(os.path.join(self.sdk_output, "var"), True)
226 | |||
227 | |||
class DpkgSdk(Sdk):
    # SDK populated from deb packages via dpkg/apt, with separate target and
    # host (nativesdk) package managers.
    def __init__(self, d, manifest_dir=None):
        super(DpkgSdk, self).__init__(d, manifest_dir)

        self.target_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET", True), "apt")
        self.host_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET", True), "apt-sdk")

        self.target_manifest = DpkgManifest(d, self.manifest_dir,
                                            Manifest.MANIFEST_TYPE_SDK_TARGET)
        self.host_manifest = DpkgManifest(d, self.manifest_dir,
                                          Manifest.MANIFEST_TYPE_SDK_HOST)

        self.target_pm = DpkgPM(d, self.sdk_target_sysroot,
                                self.d.getVar("PACKAGE_ARCHS", True),
                                self.d.getVar("DPKG_ARCH", True),
                                self.target_conf_dir)

        self.host_pm = DpkgPM(d, self.sdk_host_sysroot,
                              self.d.getVar("SDK_PACKAGE_ARCHS", True),
                              self.d.getVar("DEB_SDK_ARCH", True),
                              self.host_conf_dir)

    def _copy_apt_dir_to(self, dst_dir):
        # Replace dst_dir with a copy of the native staging apt configuration.
        staging_etcdir_native = self.d.getVar("STAGING_ETCDIR_NATIVE", True)

        bb.utils.remove(dst_dir, True)

        shutil.copytree(os.path.join(staging_etcdir_native, "apt"), dst_dir)

    def _populate_sysroot(self, pm, manifest):
        # Install every package listed in the manifest into the sysroot
        # managed by pm, honouring Manifest.INSTALL_ORDER; attempt-only
        # packages are allowed to fail.
        pkgs_to_install = manifest.parse_initial_manifest()

        pm.write_index()
        pm.update()

        for pkg_type in self.install_order:
            if pkg_type in pkgs_to_install:
                pm.install(pkgs_to_install[pkg_type],
                           [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])

    def _populate(self):
        bb.note("Installing TARGET packages")
        self._populate_sysroot(self.target_pm, self.target_manifest)

        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND", True))

        self._copy_apt_dir_to(os.path.join(self.sdk_target_sysroot, "etc", "apt"))

        bb.note("Installing NATIVESDK packages")
        self._populate_sysroot(self.host_pm, self.host_manifest)

        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND", True))

        self._copy_apt_dir_to(os.path.join(self.sdk_output, self.sdk_native_path,
                                           "etc", "apt"))

        # Move host dpkg state from /var/lib/dpkg into the nativesdk prefix,
        # then drop the now-empty /var tree.
        native_dpkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
                                             "var", "lib", "dpkg")
        bb.utils.mkdirhier(native_dpkg_state_dir)
        for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "dpkg", "*")):
            bb.utils.movefile(f, native_dpkg_state_dir)

        bb.utils.remove(os.path.join(self.sdk_output, "var"), True)
291 | |||
292 | |||
def sdk_list_installed_packages(d, target, format=None, rootfs_dir=None):
    """List packages in the SDK host (target != True) or target (target is
    True) sysroot, using the backend selected by IMAGE_PKGTYPE."""
    if rootfs_dir is None:
        sdk_output = d.getVar('SDK_OUTPUT', True)
        target_path = d.getVar('SDKTARGETSYSROOT', True).strip('/')

        if target is True:
            rootfs_dir = os.path.join(sdk_output, target_path)
        else:
            rootfs_dir = sdk_output

    img_type = d.getVar('IMAGE_PKGTYPE', True)
    if img_type == "rpm":
        if target is True:
            arch_var, os_var = None, None
        else:
            arch_var, os_var = "SDK_PACKAGE_ARCHS", "SDK_OS"
        return RpmPkgsList(d, rootfs_dir, arch_var, os_var).list(format)
    elif img_type == "ipk":
        conf_file_var = "IPKGCONF_TARGET" if target is True else "IPKGCONF_SDK"
        return OpkgPkgsList(d, rootfs_dir, d.getVar(conf_file_var, True)).list(format)
    elif img_type == "deb":
        return DpkgPkgsList(d, rootfs_dir).list(format)
310 | |||
def populate_sdk(d, manifest_dir=None):
    """Populate the SDK using the backend selected by IMAGE_PKGTYPE.

    The process environment is snapshotted and restored afterwards, since
    the package managers modify os.environ while they run.
    """
    env_bkp = os.environ.copy()

    img_type = d.getVar('IMAGE_PKGTYPE', True)
    try:
        if img_type == "rpm":
            RpmSdk(d, manifest_dir).populate()
        elif img_type == "ipk":
            OpkgSdk(d, manifest_dir).populate()
        elif img_type == "deb":
            DpkgSdk(d, manifest_dir).populate()
    finally:
        # Bug fix: restore the environment even when SDK population raises,
        # so a failure cannot leak package-manager variables into later code
        # running in this process.
        os.environ.clear()
        os.environ.update(env_bkp)
324 | |||
325 | if __name__ == "__main__": | ||
326 | pass | ||
diff --git a/meta/lib/oe/sstatesig.py b/meta/lib/oe/sstatesig.py new file mode 100644 index 0000000000..af7617ee61 --- /dev/null +++ b/meta/lib/oe/sstatesig.py | |||
@@ -0,0 +1,276 @@ | |||
import os

import bb.siggen
2 | |||
def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache):
    """Filter runtime dependencies for signature purposes.

    Return True if the dependency 'dep'/'depname' should contribute to the
    signature of 'task' in recipe 'recipename' (file 'fn'), False to drop
    it. The checks below are order-sensitive: each early return relies on
    the preceding cases having already been handled.
    """
    def isNative(x):
        return x.endswith("-native")
    def isCross(x):
        return "-cross-" in x
    def isNativeSDK(x):
        return x.startswith("nativesdk-")
    def isKernel(fn):
        # Kernel or out-of-tree module recipe, judged by inherited classes
        inherits = " ".join(dataCache.inherits[fn])
        return inherits.find("/module-base.bbclass") != -1 or inherits.find("/linux-kernel-base.bbclass") != -1
    def isPackageGroup(fn):
        inherits = " ".join(dataCache.inherits[fn])
        return "/packagegroup.bbclass" in inherits
    def isAllArch(fn):
        inherits = " ".join(dataCache.inherits[fn])
        return "/allarch.bbclass" in inherits
    def isImage(fn):
        return "/image.bbclass" in " ".join(dataCache.inherits[fn])

    # Always include our own inter-task dependencies
    if recipename == depname:
        return True

    # Quilt (patch application) changing isn't likely to affect anything,
    # nor are the other pure build-time helper tools listed here
    excludelist = ['quilt-native', 'subversion-native', 'git-native']
    if depname in excludelist and recipename != depname:
        return False

    # Exclude well defined recipe->dependency pairs listed in
    # SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS (see init_rundepcheck below)
    if "%s->%s" % (recipename, depname) in siggen.saferecipedeps:
        return False

    # Don't change native/cross/nativesdk recipe dependencies any further
    if isNative(recipename) or isCross(recipename) or isNativeSDK(recipename):
        return True

    # Only target packages beyond here

    # allarch packagegroups are assumed to have well behaved names which don't change between architecures/tunes
    if isPackageGroup(fn) and isAllArch(fn):
        return False

    # Exclude well defined machine specific configurations which don't change ABI
    # (SIGGEN_EXCLUDERECIPES_ABISAFE), unless we are building an image
    if depname in siggen.abisaferecipes and not isImage(fn):
        return False

    # Kernel modules are well namespaced. We don't want to depend on the kernel's checksum
    # if we're just doing an RRECOMMENDS_xxx = "kernel-module-*", not least because the checksum
    # is machine specific.
    # Therefore if we're not a kernel or a module recipe (inheriting the kernel classes)
    # and we recommend a kernel-module, we exclude the dependency.
    depfn = dep.rsplit(".", 1)[0]
    if dataCache and isKernel(depfn) and not isKernel(fn):
        for pkg in dataCache.runrecs[fn]:
            if " ".join(dataCache.runrecs[fn][pkg]).find("kernel-module-") != -1:
                return False

    # Default to keep dependencies
    return True
63 | |||
def sstate_lockedsigs(d):
    """Parse the SIGGEN_LOCKEDSIGS_* variables into {pn: {task: hash}}.

    SIGGEN_LOCKEDSIGS_TYPES lists the type suffixes; each
    SIGGEN_LOCKEDSIGS_<type> holds whitespace-separated "pn:task:hash"
    entries (the hash may itself contain colons).
    """
    locked = {}
    for sigtype in (d.getVar("SIGGEN_LOCKEDSIGS_TYPES", True) or "").split():
        entries = (d.getVar("SIGGEN_LOCKEDSIGS_%s" % sigtype, True) or "").split()
        for entry in entries:
            pn, taskname, sighash = entry.split(":", 2)
            locked.setdefault(pn, {})[taskname] = sighash
    return locked
75 | |||
class SignatureGeneratorOEBasic(bb.siggen.SignatureGeneratorBasic):
    """OE variant of the basic signature generator: runtime dependencies
    are filtered through sstate_rundepfilter."""
    name = "OEBasic"

    def init_rundepcheck(self, data):
        # Cache the exclusion lists that sstate_rundepfilter consults
        self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE", True) or "").split()
        self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", True) or "").split()

    def rundep_check(self, fn, recipename, task, dep, depname, dataCache=None):
        return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache)
84 | |||
class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
    """OE signature generator including task hashes, with support for
    "locked" signatures: SIGGEN_LOCKEDSIGS_* pins a recipe:task to a fixed
    hash, and mismatches are collected and reported according to
    SIGGEN_LOCKEDSIGS_CHECK_LEVEL (see checkhashes)."""
    name = "OEBasicHash"

    def init_rundepcheck(self, data):
        # Exclusion lists consumed by sstate_rundepfilter
        self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE", True) or "").split()
        self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", True) or "").split()
        self.lockedsigs = sstate_lockedsigs(data)  # {pn: {task: hash}}
        self.lockedhashes = {}   # "fn.task" -> locked hash actually applied
        self.lockedpnmap = {}    # fn -> recipe name (PN)
        self.lockedhashfn = {}   # fn -> hashfn string from the datacache
        self.machine = data.getVar("MACHINE", True)
        self.mismatch_msgs = []

    def rundep_check(self, fn, recipename, task, dep, depname, dataCache = None):
        return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache)

    def get_taskdata(self):
        # NOTE: super() is anchored on the parent class itself, so method
        # resolution starts at bb.siggen.SignatureGeneratorBasic; this
        # mirrors the original code and is deliberately preserved.
        data = super(bb.siggen.SignatureGeneratorBasicHash, self).get_taskdata()
        return (data, self.lockedpnmap, self.lockedhashfn)

    def set_taskdata(self, data):
        coredata, self.lockedpnmap, self.lockedhashfn = data
        super(bb.siggen.SignatureGeneratorBasicHash, self).set_taskdata(coredata)

    def dump_sigs(self, dataCache, options):
        self.dump_lockedsigs()
        return super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigs(dataCache, options)

    def get_taskhash(self, fn, task, deps, dataCache):
        h = super(bb.siggen.SignatureGeneratorBasicHash, self).get_taskhash(fn, task, deps, dataCache)

        recipename = dataCache.pkg_fn[fn]
        self.lockedpnmap[fn] = recipename
        self.lockedhashfn[fn] = dataCache.hashfn[fn]
        if recipename in self.lockedsigs:
            if task in self.lockedsigs[recipename]:
                # A locked signature overrides whatever was computed; a
                # difference is recorded and reported later via checkhashes
                k = fn + "." + task
                h_locked = self.lockedsigs[recipename][task]
                self.lockedhashes[k] = h_locked
                self.taskhash[k] = h_locked

                if h != h_locked:
                    # Fixed wording: was "use locked sig %s to instead"
                    self.mismatch_msgs.append('The %s:%s sig (%s) changed, using locked sig %s instead'
                                              % (recipename, task, h, h_locked))

                return h_locked
        return h

    def dump_sigtask(self, fn, task, stampbase, runtime):
        # Tasks running under a locked signature get no siginfo dump; the
        # computed signature data would not match the hash in force
        k = fn + "." + task
        if k in self.lockedhashes:
            return
        super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigtask(fn, task, stampbase, runtime)

    def dump_lockedsigs(self, sigfile=None):
        """Write the current task hashes as SIGGEN_LOCKEDSIGS_* assignments
        to sigfile (default ./locked-sigs.inc), grouped by a type string
        derived from the task's hashfn."""
        if not sigfile:
            sigfile = os.getcwd() + "/locked-sigs.inc"

        bb.plain("Writing locked sigs to %s" % sigfile)
        types = {}
        for k in self.runtaskdeps:
            fn = k.rsplit(".",1)[0]
            # Field 5 of the colon-separated hashfn identifies the group
            t = self.lockedhashfn[fn].split(" ")[1].split(":")[5]
            t = 't-' + t.replace('_', '-')
            types.setdefault(t, []).append(k)

        with open(sigfile, "w") as f:
            for t in types:
                f.write('SIGGEN_LOCKEDSIGS_%s = "\\\n' % t)
                types[t].sort()
                # Sort entries by recipe name for a stable, readable file
                sortedk = sorted(types[t], key=lambda k: self.lockedpnmap[k.rsplit(".",1)[0]])
                for k in sortedk:
                    fn = k.rsplit(".",1)[0]
                    task = k.rsplit(".",1)[1]
                    if k not in self.taskhash:
                        continue
                    f.write("    " + self.lockedpnmap[fn] + ":" + task + ":" + self.taskhash[k] + " \\\n")
                f.write('    "\n')
            f.write('SIGGEN_LOCKEDSIGS_TYPES_%s = "%s"' % (self.machine, " ".join(types.keys())))

    def checkhashes(self, missed, ret, sq_fn, sq_task, sq_hash, sq_hashfn, d):
        """Report locked signatures missing from the sstate cache plus any
        mismatches recorded by get_taskhash, as a warning or fatal error
        depending on SIGGEN_LOCKEDSIGS_CHECK_LEVEL."""
        checklevel = d.getVar("SIGGEN_LOCKEDSIGS_CHECK_LEVEL", True)
        for task in range(len(sq_fn)):
            if task not in ret:
                for pn in self.lockedsigs:
                    # .itervalues(): Python 2 idiom, consistent with module
                    if sq_hash[task] in self.lockedsigs[pn].itervalues():
                        self.mismatch_msgs.append("Locked sig is set for %s:%s (%s) yet not in sstate cache?"
                                                  % (pn, sq_task[task], sq_hash[task]))

        if self.mismatch_msgs and checklevel == 'warn':
            bb.warn("\n".join(self.mismatch_msgs))
        elif self.mismatch_msgs and checklevel == 'error':
            bb.fatal("\n".join(self.mismatch_msgs))
181 | |||
182 | |||
# Insert these classes into siggen's namespace so it can see and select them
# when the configured signature handler names one of them
bb.siggen.SignatureGeneratorOEBasic = SignatureGeneratorOEBasic
bb.siggen.SignatureGeneratorOEBasicHash = SignatureGeneratorOEBasicHash
186 | |||
187 | |||
def find_siginfo(pn, taskname, taskhashlist, d):
    """Find signature data files for comparison purposes.

    pn may either be a recipe name, or a full task key such as
    "/path/to/recipe.bb.do_compile" (optionally prefixed with
    "virtual:native:"), in which case taskname should be None and both are
    derived from the key.

    Returns {taskhash: filepath} when taskhashlist is given, otherwise
    {filepath: mtime} for every matching sigdata/siginfo file found.
    """

    import fnmatch
    import glob

    if taskhashlist:
        hashfiles = {}

    if not taskname:
        # We have to derive pn and taskname from the combined key
        key = pn
        splitit = key.split('.bb.')
        taskname = splitit[1]
        pn = os.path.basename(splitit[0]).split('_')[0]
        if key.startswith('virtual:native:'):
            pn = pn + '-native'

    if taskname in ['do_fetch', 'do_unpack', 'do_patch', 'do_populate_lic']:
        # These tasks are shared between native and target variants, so
        # strip the -native suffix to find the common stamps/sstate.
        # (Bug fix: the original discarded the result of str.replace().)
        pn = pn.replace("-native", "")

    filedates = {}

    # First search in stamps dir
    localdata = d.createCopy()
    localdata.setVar('MULTIMACH_TARGET_SYS', '*')
    localdata.setVar('PN', pn)
    localdata.setVar('PV', '*')
    localdata.setVar('PR', '*')
    localdata.setVar('EXTENDPE', '')
    stamp = localdata.getVar('STAMP', True)
    filespec = '%s.%s.sigdata.*' % (stamp, taskname)
    foundall = False
    for fullpath in glob.glob(filespec):
        if taskhashlist:
            for taskhash in taskhashlist:
                if fullpath.endswith('.%s' % taskhash):
                    hashfiles[taskhash] = fullpath
                    if len(hashfiles) == len(taskhashlist):
                        foundall = True
                        break
        else:
            try:
                filedates[fullpath] = os.stat(fullpath).st_mtime
            except OSError:
                # File vanished between glob and stat - skip it
                continue

    if not taskhashlist or (len(filedates) < 2 and not foundall):
        # That didn't work, look in sstate-cache
        hashes = taskhashlist or ['*']
        localdata = bb.data.createCopy(d)
        for hashval in hashes:
            localdata.setVar('PACKAGE_ARCH', '*')
            localdata.setVar('TARGET_VENDOR', '*')
            localdata.setVar('TARGET_OS', '*')
            localdata.setVar('PN', pn)
            localdata.setVar('PV', '*')
            localdata.setVar('PR', '*')
            localdata.setVar('BB_TASKHASH', hashval)
            if pn.endswith('-native') or "-cross-" in pn or "-crosssdk-" in pn:
                localdata.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
            sstatename = taskname[3:]  # strip the "do_" prefix
            filespec = '%s_%s.*.siginfo' % (localdata.getVar('SSTATE_PKG', True), sstatename)

            # With a concrete hash we can narrow the walk to the two-char
            # hash-prefix subdirectory of the sstate cache
            if hashval != '*':
                sstatedir = "%s/%s" % (d.getVar('SSTATE_DIR', True), hashval[:2])
            else:
                sstatedir = d.getVar('SSTATE_DIR', True)

            for root, dirs, files in os.walk(sstatedir):
                for fn in files:
                    fullpath = os.path.join(root, fn)
                    if fnmatch.fnmatch(fullpath, filespec):
                        if taskhashlist:
                            hashfiles[hashval] = fullpath
                        else:
                            try:
                                filedates[fullpath] = os.stat(fullpath).st_mtime
                            except OSError:
                                continue

    if taskhashlist:
        return hashfiles
    else:
        return filedates

bb.siggen.find_siginfo = find_siginfo
diff --git a/meta/lib/oe/terminal.py b/meta/lib/oe/terminal.py new file mode 100644 index 0000000000..0a623c75b1 --- /dev/null +++ b/meta/lib/oe/terminal.py | |||
@@ -0,0 +1,208 @@ | |||
1 | import logging | ||
2 | import oe.classutils | ||
3 | import shlex | ||
4 | from bb.process import Popen, ExecutionError | ||
5 | |||
6 | logger = logging.getLogger('BitBake.OE.Terminal') | ||
7 | |||
8 | |||
class UnsupportedTerminal(Exception):
    """Raised when a specific terminal cannot be used in the current
    environment (not installed, no DISPLAY, unsupported version, ...)."""
    pass
11 | |||
class NoSupportedTerminals(Exception):
    """Raised by spawn_preferred() when every registered terminal turned
    out to be unsupported."""
    pass
14 | |||
15 | |||
class Registry(oe.classutils.ClassRegistry):
    """Metaclass registering every Terminal subclass under its lower-cased
    class name (see oe.classutils.ClassRegistry)."""
    command = None

    def __init__(cls, name, bases, attrs):
        super(Registry, cls).__init__(name.lower(), bases, attrs)

    @property
    def implemented(cls):
        # A terminal class is usable only if it defines a command template
        return bool(cls.command)
25 | |||
26 | |||
class Terminal(Popen):
    """Base class for spawnable terminals.

    Subclasses are collected automatically through the Registry metaclass
    and provide a 'command' template (string or list) containing {title}
    and {command} placeholders.
    """
    __metaclass__ = Registry

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        fmt_sh_cmd = self.format_command(sh_cmd, title)
        try:
            Popen.__init__(self, fmt_sh_cmd, env=env)
        except OSError as exc:
            import errno
            # A missing terminal binary is reported as "unsupported"
            # rather than a generic OSError
            if exc.errno == errno.ENOENT:
                raise UnsupportedTerminal(self.name)
            else:
                raise

    def format_command(self, sh_cmd, title):
        # Substitute {title}/{command}; a string template is split
        # shell-style, a list template is formatted element-wise
        fmt = {'title': title or 'Terminal', 'command': sh_cmd}
        if isinstance(self.command, basestring):
            return shlex.split(self.command.format(**fmt))
        else:
            return [element.format(**fmt) for element in self.command]
47 | |||
class XTerminal(Terminal):
    """Base class for X11 terminal emulators; requires a DISPLAY."""
    def __init__(self, sh_cmd, title=None, env=None, d=None):
        # Check for an X display up front so unsupported terminals are
        # rejected before a child process is ever spawned (the original
        # only checked after Terminal.__init__ had already launched it)
        if not os.environ.get('DISPLAY'):
            raise UnsupportedTerminal(self.name)
        Terminal.__init__(self, sh_cmd, title, env, d)
53 | |||
class Gnome(XTerminal):
    """gnome-terminal wrapper."""
    command = 'gnome-terminal -t "{title}" -x {command}'
    priority = 2
57 | |||
class Mate(XTerminal):
    """mate-terminal wrapper."""
    command = 'mate-terminal -t "{title}" -x {command}'
    priority = 2
61 | |||
class Xfce(XTerminal):
    """xfce4-terminal wrapper."""
    command = 'xfce4-terminal -T "{title}" -e "{command}"'
    priority = 2
65 | |||
class Konsole(XTerminal):
    """KDE Konsole wrapper; Konsole 2.x (KDE 4) is rejected because it
    cannot be used as a devshell (see version check below)."""
    command = 'konsole -T "{title}" -e {command}'
    priority = 2

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        # Check version
        vernum = check_konsole_version("konsole")
        if vernum:
            if vernum.split('.')[0] == "2":
                logger.debug(1, 'Konsole from KDE 4.x will not work as devshell, skipping')
                raise UnsupportedTerminal(self.name)
        XTerminal.__init__(self, sh_cmd, title, env, d)
78 | |||
class XTerm(XTerminal):
    """Plain xterm fallback."""
    command = 'xterm -T "{title}" -e {command}'
    priority = 1
82 | |||
class Rxvt(XTerminal):
    """rxvt fallback."""
    command = 'rxvt -T "{title}" -e {command}'
    priority = 1
86 | |||
class Screen(Terminal):
    """GNU screen started detached; the user is told how to attach from
    another terminal."""
    command = 'screen -D -m -t "{title}" -S devshell {command}'

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        s_id = "devshell_%i" % os.getpid()
        # Override the class-level template with a session name unique to
        # this process so multiple devshells can coexist
        self.command = "screen -D -m -t \"{title}\" -S %s {command}" % s_id
        Terminal.__init__(self, sh_cmd, title, env, d)
        # Tell the user how to attach: through the event system when a
        # datastore is available, otherwise via plain logging
        msg = 'Screen started. Please connect in another terminal with ' \
            '"screen -r %s"' % s_id
        if (d):
            bb.event.fire(bb.event.LogExecTTY(msg, "screen -r %s" % s_id,
                                              0.5, 10), d)
        else:
            logger.warn(msg)
101 | |||
class TmuxRunning(Terminal):
    """Open a new pane in the current running tmux window"""
    name = 'tmux-running'
    command = 'tmux split-window "{command}"'
    priority = 2.75

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        # Usable only when tmux is installed AND we are already inside a
        # tmux session (the server sets TMUX in the environment)
        if not bb.utils.which(os.getenv('PATH'), 'tmux'):
            raise UnsupportedTerminal('tmux is not installed')

        if not os.getenv('TMUX'):
            raise UnsupportedTerminal('tmux is not running')

        Terminal.__init__(self, sh_cmd, title, env, d)
116 | |||
class Tmux(Terminal):
    """Start a new tmux session and window"""
    command = 'tmux new -d -s devshell -n devshell "{command}"'
    priority = 0.75

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        if not bb.utils.which(os.getenv('PATH'), 'tmux'):
            raise UnsupportedTerminal('tmux is not installed')

        # TODO: consider using a 'devshell' session shared amongst all
        # devshells, if it's already there, add a new window to it.
        window_name = 'devshell-%i' % os.getpid()

        # Override the class-level template with a per-process session name
        self.command = 'tmux new -d -s {0} -n {0} "{{command}}"'.format(window_name)
        Terminal.__init__(self, sh_cmd, title, env, d)

        # Tell the user how to attach: through the event system when a
        # datastore is available, otherwise via plain logging
        attach_cmd = 'tmux att -t {0}'.format(window_name)
        msg = 'Tmux started. Please connect in another terminal with `tmux att -t {0}`'.format(window_name)
        if d:
            bb.event.fire(bb.event.LogExecTTY(msg, attach_cmd, 0.5, 10), d)
        else:
            logger.warn(msg)
139 | |||
class Custom(Terminal):
    """User-defined terminal taken from OE_TERMINAL_CUSTOMCMD.

    ' {command}' is appended if the custom template does not already
    contain the placeholder."""
    command = 'false' # This is a placeholder
    priority = 3

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        self.command = d and d.getVar('OE_TERMINAL_CUSTOMCMD', True)
        if self.command:
            if not '{command}' in self.command:
                self.command += ' {command}'
            Terminal.__init__(self, sh_cmd, title, env, d)
            logger.warn('Custom terminal was started.')
        else:
            logger.debug(1, 'No custom terminal (OE_TERMINAL_CUSTOMCMD) set')
            raise UnsupportedTerminal('OE_TERMINAL_CUSTOMCMD not set')
154 | |||
155 | |||
def prioritized():
    """Return the registered terminal classes in priority order (as
    provided by oe.classutils.ClassRegistry.prioritized)."""
    return Registry.prioritized()
158 | |||
def spawn_preferred(sh_cmd, title=None, env=None, d=None):
    """Spawn the first supported terminal, trying each registered terminal
    in priority order; raise NoSupportedTerminals if none works."""
    for candidate in prioritized():
        try:
            spawn(candidate.name, sh_cmd, title, env, d)
        except UnsupportedTerminal:
            # This terminal cannot run here - try the next one
            continue
        else:
            return
    raise NoSupportedTerminals()
169 | |||
def spawn(name, sh_cmd, title=None, env=None, d=None):
    """Spawn the specified terminal, by name"""
    logger.debug(1, 'Attempting to spawn terminal "%s"', name)
    try:
        terminal = Registry.registry[name]
    except KeyError:
        raise UnsupportedTerminal(name)

    # Block until the terminal exits; a non-zero exit status is surfaced
    # as an ExecutionError carrying any captured output
    pipe = terminal(sh_cmd, title, env, d)
    output = pipe.communicate()[0]
    if pipe.returncode != 0:
        raise ExecutionError(sh_cmd, pipe.returncode, output)
182 | |||
def check_konsole_version(konsole):
    """Return the version number from `<konsole> --version` output, or None
    when no "Konsole ..." line is reported (or no shell is available)."""
    import subprocess as sub
    try:
        # universal_newlines=True so communicate() returns text on both
        # Python 2 and 3 (the original split bytes with a str separator,
        # which raises TypeError on Python 3)
        p = sub.Popen(['sh', '-c', '%s --version' % konsole],
                      stdout=sub.PIPE, stderr=sub.PIPE,
                      universal_newlines=True)
        out, err = p.communicate()
        ver_info = out.rstrip().splitlines()
    except OSError as exc:
        import errno
        if exc.errno == errno.ENOENT:
            # sh itself is missing - treat as "no version"
            return None
        else:
            raise
    vernum = None
    for ver in ver_info:
        if ver.startswith('Konsole'):
            # Version is the last whitespace-separated token on the line
            vernum = ver.split(' ')[-1]
    return vernum
200 | |||
def distro_name():
    """Return the lower-cased distributor ID from `lsb_release -i`, or
    "unknown" when it cannot be determined."""
    try:
        p = Popen(['lsb_release', '-i'])
        out, err = p.communicate()
        distro = out.split(':')[1].strip().lower()
    except Exception:
        # Any failure (missing lsb_release, unexpected output) just means
        # we cannot identify the distro. Narrowed from a bare except so
        # SystemExit/KeyboardInterrupt are no longer swallowed.
        distro = "unknown"
    return distro
208 | return distro | ||
diff --git a/meta/lib/oe/tests/__init__.py b/meta/lib/oe/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/meta/lib/oe/tests/__init__.py | |||
diff --git a/meta/lib/oe/tests/test_license.py b/meta/lib/oe/tests/test_license.py new file mode 100644 index 0000000000..c388886184 --- /dev/null +++ b/meta/lib/oe/tests/test_license.py | |||
@@ -0,0 +1,68 @@ | |||
1 | import unittest | ||
2 | import oe.license | ||
3 | |||
class SeenVisitor(oe.license.LicenseVisitor):
    """License AST visitor that records every license name it visits."""
    def __init__(self):
        self.seen = []
        oe.license.LicenseVisitor.__init__(self)

    def visit_Str(self, node):
        # Each license name appears as a string node in the parsed
        # license expression
        self.seen.append(node.s)
11 | |||
class TestSingleLicense(unittest.TestCase):
    """Parsing of single-license strings (no combination operators)."""
    licenses = [
        "GPLv2",
        "LGPL-2.0",
        "Artistic",
        "MIT",
        "GPLv3+",
        "FOO_BAR",
    ]
    # '/' is not a valid operator in a license expression
    invalid_licenses = ["GPL/BSD"]

    @staticmethod
    def parse(licensestr):
        visitor = SeenVisitor()
        visitor.visit_string(licensestr)
        return visitor.seen

    def test_single_licenses(self):
        for license in self.licenses:
            licenses = self.parse(license)
            self.assertListEqual(licenses, [license])

    def test_invalid_licenses(self):
        for license in self.invalid_licenses:
            with self.assertRaises(oe.license.InvalidLicense) as cm:
                self.parse(license)
            self.assertEqual(cm.exception.license, license)
39 | |||
class TestSimpleCombinations(unittest.TestCase):
    """Flattening of simple '&'/'|' license expressions.

    Maps each input expression to the expected flattened license list
    when alternatives are resolved against self.preferred.
    """
    tests = {
        "FOO&BAR": ["FOO", "BAR"],
        "BAZ & MOO": ["BAZ", "MOO"],
        "ALPHA|BETA": ["ALPHA"],
        "BAZ&MOO|FOO": ["FOO"],
        "FOO&BAR|BAZ": ["FOO", "BAR"],
    }
    preferred = ["ALPHA", "FOO", "BAR"]

    def test_tests(self):
        # Pick the alternative whose licenses are all preferred,
        # otherwise keep the first one
        def choose(first, second):
            if all(lic in self.preferred for lic in second):
                return second
            return first

        for expression, expected in self.tests.items():
            flattened = oe.license.flattened_licenses(expression, choose)
            self.assertListEqual(flattened, expected)
60 | |||
class TestComplexCombinations(TestSimpleCombinations):
    """Nested/parenthesised license expressions, reusing the inherited
    test_tests() driver with a different data table."""
    tests = {
        "FOO & (BAR | BAZ)&MOO": ["FOO", "BAR", "MOO"],
        "(ALPHA|(BETA&THETA)|OMEGA)&DELTA": ["OMEGA", "DELTA"],
        "((ALPHA|BETA)&FOO)|BAZ": ["BETA", "FOO"],
        "(GPL-2.0|Proprietary)&BSD-4-clause&MIT": ["GPL-2.0", "BSD-4-clause", "MIT"],
    }
    preferred = ["BAR", "OMEGA", "BETA", "GPL-2.0"]
diff --git a/meta/lib/oe/tests/test_path.py b/meta/lib/oe/tests/test_path.py new file mode 100644 index 0000000000..3d41ce157a --- /dev/null +++ b/meta/lib/oe/tests/test_path.py | |||
@@ -0,0 +1,89 @@ | |||
1 | import unittest | ||
2 | import oe, oe.path | ||
3 | import tempfile | ||
4 | import os | ||
5 | import errno | ||
6 | import shutil | ||
7 | |||
class TestRealPath(unittest.TestCase):
    """Tests for oe.path.realpath() against a symlink tree built in a
    temporary directory.

    Each LINKS entry is (link path, link target, expected resolution
    relative to the root); an expected value of None marks links whose
    resolution must fail (see EXCEPTIONS for the expected errno).
    """
    DIRS = [ "a", "b", "etc", "sbin", "usr", "usr/bin", "usr/binX", "usr/sbin", "usr/include", "usr/include/gdbm" ]
    FILES = [ "etc/passwd", "b/file" ]
    LINKS = [
        ( "bin", "/usr/bin", "/usr/bin" ),
        ( "binX", "usr/binX", "/usr/binX" ),
        ( "c", "broken", "/broken" ),
        ( "etc/passwd-1", "passwd", "/etc/passwd" ),
        ( "etc/passwd-2", "passwd-1", "/etc/passwd" ),
        ( "etc/passwd-3", "/etc/passwd-1", "/etc/passwd" ),
        ( "etc/shadow-1", "/etc/shadow", "/etc/shadow" ),
        ( "etc/shadow-2", "/etc/shadow-1", "/etc/shadow" ),
        ( "prog-A", "bin/prog-A", "/usr/bin/prog-A" ),
        ( "prog-B", "/bin/prog-B", "/usr/bin/prog-B" ),
        ( "usr/bin/prog-C", "../../sbin/prog-C", "/sbin/prog-C" ),
        ( "usr/bin/prog-D", "/sbin/prog-D", "/sbin/prog-D" ),
        ( "usr/binX/prog-E", "../sbin/prog-E", None ),
        ( "usr/bin/prog-F", "../../../sbin/prog-F", "/sbin/prog-F" ),
        ( "loop", "a/loop", None ),
        ( "a/loop", "../loop", None ),
        ( "b/test", "file/foo", "/b/file/foo" ),
    ]

    LINKS_PHYS = [
        ( "./", "/", "" ),
        ( "binX/prog-E", "/usr/sbin/prog-E", "/sbin/prog-E" ),
    ]

    EXCEPTIONS = [
        ( "loop", errno.ELOOP ),
        ( "b/test", errno.ENOENT ),
    ]

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix = "oe-test_path")
        self.root = os.path.join(self.tmpdir, "R")

        # The root itself is a symlink so symlink-through-root is covered
        os.mkdir(os.path.join(self.tmpdir, "_real"))
        os.symlink("_real", self.root)

        for d in self.DIRS:
            os.mkdir(os.path.join(self.root, d))
        for f in self.FILES:
            # Touch the file; open().close() instead of the Python-2-only
            # file() builtin, and the handle is closed deterministically
            open(os.path.join(self.root, f), "w").close()
        for l in self.LINKS:
            os.symlink(l[1], os.path.join(self.root, l[0]))

    def tearDown(self):
        # Deterministic per-test cleanup. The original used __del__, which
        # only runs when the GC collects the instance and so could remove
        # the tree late or not at all.
        try:
            shutil.rmtree(self.tmpdir)
        except OSError:
            pass

    def __realpath(self, file, use_physdir, assume_dir = True):
        return oe.path.realpath(os.path.join(self.root, file), self.root,
                                use_physdir, assume_dir = assume_dir)

    def test_norm(self):
        for l in self.LINKS:
            if l[2] is None:
                continue

            target_p = self.__realpath(l[0], True)
            target_l = self.__realpath(l[0], False)

            if l[2] != False:
                self.assertEqual(target_p, target_l)
                self.assertEqual(l[2], target_p[len(self.root):])

    def test_phys(self):
        for l in self.LINKS_PHYS:
            target_p = self.__realpath(l[0], True)
            target_l = self.__realpath(l[0], False)

            self.assertEqual(l[1], target_p[len(self.root):])
            self.assertEqual(l[2], target_l[len(self.root):])

    def test_loop(self):
        # Symlink loops and dangling chains must raise OSError with the
        # expected errno embedded in the message
        for e in self.EXCEPTIONS:
            self.assertRaisesRegexp(OSError, r'\[Errno %u\]' % e[1],
                                    self.__realpath, e[0], False, False)
diff --git a/meta/lib/oe/tests/test_types.py b/meta/lib/oe/tests/test_types.py new file mode 100644 index 0000000000..367cc30e45 --- /dev/null +++ b/meta/lib/oe/tests/test_types.py | |||
@@ -0,0 +1,62 @@ | |||
1 | import unittest | ||
2 | from oe.maketype import create, factory | ||
3 | |||
class TestTypes(unittest.TestCase):
    """Common helpers for the oe.maketype tests.

    NOTE(review): assertIsInstance/assertIsNot re-implement helpers that
    newer unittest versions provide natively; these definitions shadow
    them, presumably for compatibility with older Python versions.
    """
    def assertIsInstance(self, obj, cls):
        return self.assertTrue(isinstance(obj, cls))

    def assertIsNot(self, obj, other):
        return self.assertFalse(obj is other)

    def assertFactoryCreated(self, value, type, **flags):
        # A factory must exist for 'type' and create() must return an
        # instance of the class the factory yields
        cls = factory(type)
        self.assertIsNot(cls, None)
        self.assertIsInstance(create(value, type, **flags), cls)
15 | |||
class TestBooleanType(TestTypes):
    """Behaviour of the 'boolean' variable type."""

    def test_invalid(self):
        # Empty and unrecognised strings are ValueError; non-strings are
        # TypeError
        self.assertRaises(ValueError, create, '', 'boolean')
        self.assertRaises(ValueError, create, 'foo', 'boolean')
        self.assertRaises(TypeError, create, object(), 'boolean')

    def test_true(self):
        for text in ('y', 'yes', '1', 't', 'true', 'TRUE', 'truE'):
            self.assertTrue(create(text, 'boolean'))

    def test_false(self):
        for text in ('n', 'no', '0', 'f', 'false', 'FALSE', 'faLse'):
            self.assertFalse(create(text, 'boolean'))

    def test_bool_equality(self):
        self.assertEqual(create('n', 'boolean'), False)
        self.assertNotEqual(create('n', 'boolean'), True)
        self.assertEqual(create('y', 'boolean'), True)
        self.assertNotEqual(create('y', 'boolean'), False)
45 | |||
class TestList(TestTypes):
    """Behaviour of the 'list' variable type."""

    def assertListEqual(self, value, valid, sep=None):
        # Build the list and check its contents; when a separator is
        # given, also check that it round-trips through str()
        parsed = create(value, 'list', separator=sep)
        self.assertEqual(parsed, valid)
        if sep is not None:
            self.assertEqual(parsed.separator, sep)
        self.assertEqual(str(parsed), parsed.separator.join(parsed))

    def test_list_nosep(self):
        expected = ['alpha', 'beta', 'theta']
        self.assertListEqual('alpha beta theta', expected)
        self.assertListEqual('alpha beta\ttheta', expected)
        self.assertListEqual('alpha', ['alpha'])

    def test_list_usersep(self):
        self.assertListEqual('foo:bar', ['foo', 'bar'], ':')
        self.assertListEqual('foo:bar:baz', ['foo', 'bar', 'baz'], ':')
diff --git a/meta/lib/oe/tests/test_utils.py b/meta/lib/oe/tests/test_utils.py new file mode 100644 index 0000000000..5d9ac52e7d --- /dev/null +++ b/meta/lib/oe/tests/test_utils.py | |||
@@ -0,0 +1,51 @@ | |||
1 | import unittest | ||
2 | from oe.utils import packages_filter_out_system | ||
3 | |||
class TestPackagesFilterOutSystem(unittest.TestCase):
    def test_filter(self):
        """
        Test that oe.utils.packages_filter_out_system works.
        """
        try:
            import bb
        except ImportError:
            self.skipTest("Cannot import bb")

        d = bb.data_smart.DataSmart()
        d.setVar("PN", "foo")

        # Per these assertions, PN itself plus -doc/-dev/-locale-*
        # variants count as "system" packages and are filtered out
        d.setVar("PACKAGES", "foo foo-doc foo-dev")
        pkgs = packages_filter_out_system(d)
        self.assertEqual(pkgs, [])

        d.setVar("PACKAGES", "foo foo-doc foo-data foo-dev")
        pkgs = packages_filter_out_system(d)
        self.assertEqual(pkgs, ["foo-data"])

        d.setVar("PACKAGES", "foo foo-locale-en-gb")
        pkgs = packages_filter_out_system(d)
        self.assertEqual(pkgs, [])

        d.setVar("PACKAGES", "foo foo-data foo-locale-en-gb")
        pkgs = packages_filter_out_system(d)
        self.assertEqual(pkgs, ["foo-data"])
32 | |||
33 | |||
class TestTrimVersion(unittest.TestCase):
    """Tests for oe.utils.trim_version().

    trim_version was never imported at module level (only
    packages_filter_out_system is), so every test raised NameError; it is
    imported locally in each method to fix that without touching the
    module's import block.
    """

    def test_version_exception(self):
        from oe.utils import trim_version
        # Non-string versions are rejected
        with self.assertRaises(TypeError):
            trim_version(None, 2)
        with self.assertRaises(TypeError):
            trim_version((1, 2, 3), 2)

    def test_num_exception(self):
        from oe.utils import trim_version
        # The number of components to keep must be positive
        with self.assertRaises(ValueError):
            trim_version("1.2.3", 0)
        with self.assertRaises(ValueError):
            trim_version("1.2.3", -1)

    def test_valid(self):
        from oe.utils import trim_version
        self.assertEqual(trim_version("1.2.3", 1), "1")
        self.assertEqual(trim_version("1.2.3", 2), "1.2")
        self.assertEqual(trim_version("1.2.3", 3), "1.2.3")
        # Asking for more components than exist returns the full version
        self.assertEqual(trim_version("1.2.3", 4), "1.2.3")
diff --git a/meta/lib/oe/types.py b/meta/lib/oe/types.py new file mode 100644 index 0000000000..7f47c17d0e --- /dev/null +++ b/meta/lib/oe/types.py | |||
@@ -0,0 +1,153 @@ | |||
1 | import errno | ||
2 | import re | ||
3 | import os | ||
4 | |||
5 | |||
class OEList(list):
    """OpenEmbedded 'list' type

    Behaves like a normal list but is built from a string split on a
    separator (whitespace when none is given), and str() re-joins the
    elements with that separator.  Set the variable type flag to 'list'
    to use this type; the 'separator' flag is optional."""

    name = "list"

    def __init__(self, value, separator = None):
        # A None value yields an empty list rather than an error.
        elements = value.split(separator) if value is not None else []
        super(OEList, self).__init__(elements)
        # Remember the join separator; default to a single space.
        self.separator = " " if separator is None else separator

    def __str__(self):
        return self.separator.join(self)
29 | |||
def choice(value, choices):
    """OpenEmbedded 'choice' type

    Acts as a multiple choice for the user.  To use this, set the variable
    type flag to 'choice', and set the 'choices' flag to a space separated
    list of valid values.

    Raises TypeError for a non-string value and ValueError when the
    (lowercased) value is not one of the allowed choices; returns the
    lowercased value otherwise."""
    # basestring only exists on Python 2 (covers str and unicode); fall
    # back to str so this module also works under Python 3.
    try:
        string_types = basestring
    except NameError:
        string_types = str
    if not isinstance(value, string_types):
        raise TypeError("choice accepts a string, not '%s'" % type(value))

    value = value.lower()
    choices = choices.lower()
    if value not in choices.split():
        raise ValueError("Invalid choice '%s'. Valid choices: %s" %
                         (value, choices))
    return value
45 | |||
class NoMatch(object):
    """Stub python regex pattern object which never matches anything

    Mimics the method set of a compiled regex pattern object, but every
    method simply reports "no match" by returning None.  Used by regex()
    as the result for an unset/empty variable.

    NOTE: unlike a real pattern object, findall/finditer/split return
    None rather than an empty list/iterator; callers must treat None as
    "no matches"."""
    def findall(self, string, flags=0):
        return None

    def finditer(self, string, flags=0):
        return None

    def match(self, string=None, flags=0):
        # Real pattern objects take the subject string; accept it here
        # too (optional, to stay compatible with existing no-arg calls).
        return None

    def search(self, string, flags=0):
        return None

    def split(self, string, maxsplit=0):
        return None

    # Bug fix: sub/subn were missing 'self', so on instance calls the
    # instance silently filled the first parameter slot.  Declare the
    # pattern-object-style signatures (repl, string, count) explicitly.
    def sub(self, repl=None, string=None, count=0):
        return None

    def subn(self, repl=None, string=None, count=0):
        return None

# Module-level singleton; deliberately shadows the class name, as only
# the one instance is ever needed.
NoMatch = NoMatch()
70 | |||
def regex(value, regexflags=None):
    """OpenEmbedded 'regex' type

    Compiles *value* and returns the pre-compiled regular expression
    pattern object.  To use this type, set the variable type flag to
    'regex'; the optional 'regexflags' flag is a space separated list of
    re module flag names (case-insensitive), e.g.
    FOO[regexflags] += 'ignorecase'.

    An unset/empty value yields the NoMatch stub so that an undefined
    variable matches nothing; an unknown flag or an uncompilable pattern
    raises ValueError."""

    flagval = 0
    for flag in (regexflags or "").split():
        flagname = flag.upper()
        if not hasattr(re, flagname):
            raise ValueError("Invalid regex flag '%s'" % flagname)
        flagval |= getattr(re, flagname)

    if not value:
        # Default behavior for an undefined or empty variable is to
        # match nothing; an explicit '.*' matches anything.
        return NoMatch

    try:
        return re.compile(value, flagval)
    except re.error as exc:
        raise ValueError("Invalid regex value '%s': %s" %
                         (value, exc.args[0]))
101 | |||
def boolean(value):
    """OpenEmbedded 'boolean' type

    Valid values for true: 'yes', 'y', 'true', 't', '1'
    Valid values for false: 'no', 'n', 'false', 'f', '0'

    Matching is case-insensitive.  Raises TypeError for a non-string
    value and ValueError for a string outside the two sets above.
    """
    # basestring only exists on Python 2 (covers str and unicode); fall
    # back to str so this module also works under Python 3.
    try:
        string_types = basestring
    except NameError:
        string_types = str
    if not isinstance(value, string_types):
        raise TypeError("boolean accepts a string, not '%s'" % type(value))

    value = value.lower()
    if value in ('yes', 'y', 'true', 't', '1'):
        return True
    elif value in ('no', 'n', 'false', 'f', '0'):
        return False
    raise ValueError("Invalid boolean value '%s'" % value)
118 | |||
def integer(value, numberbase=10):
    """OpenEmbedded 'integer' type

    Parses *value* as an integer.  Defaults to base 10; the optional
    'numberbase' flag selects another base and may itself be given as a
    string (it is converted with int() first)."""

    base = int(numberbase)
    return int(value, base)
126 | |||
# Keep a handle on the builtin before the 'float' name is shadowed below.
_float = float
def float(value, fromhex='false'):
    """OpenEmbedded floating point type

    To use this type, set the type flag to 'float', and optionally set
    the 'fromhex' flag to a true value (interpreted with the same rules
    as the 'boolean' type) when the value is hexadecimal (base 16)
    rather than decimal."""

    convert = _float.fromhex if boolean(fromhex) else _float
    return convert(value)
139 | |||
def path(value, relativeto='', normalize='true', mustexist='false'):
    """OpenEmbedded 'path' type

    Joins *value* onto *relativeto*, normalizes the result when the
    'normalize' flag is true (boolean-type rules), and when 'mustexist'
    is true raises ValueError if the path does not exist."""
    value = os.path.join(relativeto, value)

    if boolean(normalize):
        value = os.path.normpath(value)

    if boolean(mustexist):
        try:
            # Probe by opening (not os.path.exists) to preserve the
            # original semantics; 'with' fixes the original's leaked
            # file object.  Non-ENOENT errors are deliberately ignored.
            with open(value, 'r'):
                pass
        except IOError as exc:
            if exc.errno == errno.ENOENT:
                raise ValueError("{0}: {1}".format(value, os.strerror(errno.ENOENT)))

    return value
diff --git a/meta/lib/oe/utils.py b/meta/lib/oe/utils.py new file mode 100644 index 0000000000..35442568e2 --- /dev/null +++ b/meta/lib/oe/utils.py | |||
@@ -0,0 +1,182 @@ | |||
1 | try: | ||
2 | # Python 2 | ||
3 | import commands as cmdstatus | ||
4 | except ImportError: | ||
5 | # Python 3 | ||
6 | import subprocess as cmdstatus | ||
7 | |||
def read_file(filename):
    """Return the stripped contents of *filename*, or "" when the file
    cannot be opened.

    WARNING: can't raise an error on failure because of the new RDEPENDS
    handling. This is a bit ugly. :M:
    """
    try:
        # 'with' guarantees the handle is closed; the original also had
        # an unreachable 'return None' after the else-branch return.
        with open(filename, "r") as f:
            return f.read().strip()
    except IOError:
        return ""
18 | |||
def ifelse(condition, iftrue = True, iffalse = False):
    """Return *iftrue* when *condition* is truthy, *iffalse* otherwise."""
    return iftrue if condition else iffalse
24 | |||
def conditional(variable, checkvalue, truevalue, falsevalue, d):
    """Return *truevalue* when datastore variable *variable* equals
    *checkvalue*, *falsevalue* otherwise."""
    current = d.getVar(variable, 1)
    return truevalue if current == checkvalue else falsevalue
30 | |||
def less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
    """Return *truevalue* when datastore variable *variable*, compared
    numerically as a float, is <= *checkvalue*; *falsevalue* otherwise."""
    current = float(d.getVar(variable, 1))
    return truevalue if current <= float(checkvalue) else falsevalue
36 | |||
def version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
    """Return *truevalue* when datastore variable *variable*, compared
    as a version string via bb.utils.vercmp_string, is <= *checkvalue*;
    *falsevalue* otherwise."""
    current = d.getVar(variable, True)
    if bb.utils.vercmp_string(current, checkvalue) <= 0:
        return truevalue
    return falsevalue
43 | |||
def both_contain(variable1, variable2, checkvalue, d):
    """Return *checkvalue* when it occurs (as a substring, not a
    whitespace-delimited word) in both datastore variables, else ""."""
    first = d.getVar(variable1, 1)
    second = d.getVar(variable2, 1)
    if checkvalue in first and checkvalue in second:
        return checkvalue
    return ""
49 | |||
def prune_suffix(var, suffixes, d):
    """Remove any of the listed *suffixes* from the end of *var*, then
    strip a leading MLPREFIX (from the datastore) if present.

    Bug fix: the original used str.replace(), which also removed
    matching text from the *middle* of the string (e.g. an internal
    '-native'); slicing removes only the verified suffix/prefix.
    """
    for suffix in suffixes:
        # Guard against an empty suffix: ''.endswith matches everything
        # and var[:-0] would wrongly empty the string.
        if suffix and var.endswith(suffix):
            var = var[:-len(suffix)]

    prefix = d.getVar("MLPREFIX", True)
    if prefix and var.startswith(prefix):
        var = var[len(prefix):]

    return var
62 | |||
def str_filter(f, str, d):
    """Return the whitespace-separated words of *str* (parameter name
    kept for compatibility despite shadowing the builtin) whose start
    matches regex *f*, re-joined with single spaces."""
    from re import match
    kept = [word for word in str.split() if match(f, word, 0)]
    return " ".join(kept)
66 | |||
def str_filter_out(f, str, d):
    """Return the whitespace-separated words of *str* (parameter name
    kept for compatibility despite shadowing the builtin) whose start
    does NOT match regex *f*, re-joined with single spaces."""
    from re import match
    kept = [word for word in str.split() if not match(f, word, 0)]
    return " ".join(kept)
70 | |||
def param_bool(cfg, field, dflt = None):
    """Lookup <field> in <cfg> map and convert it to a boolean; take
    <dflt> when this <field> does not exist"""
    # Same truth table as oe.types.boolean, keyed on the lowercased
    # string form of the value (so dflt=True/False also works).
    truthmap = {'yes': True, 'y': True, 'true': True, 't': True, '1': True,
                'no': False, 'n': False, 'false': False, 'f': False, '0': False}
    value = cfg.get(field, dflt)
    try:
        return truthmap[str(value).lower()]
    except KeyError:
        raise ValueError("invalid value for boolean parameter '%s': '%s'" % (field, value))
81 | |||
def inherits(d, *classes):
    """Return True if the metadata inherits any of the specified classes"""
    for cls in classes:
        if bb.data.inherits_class(cls, d):
            return True
    return False
85 | |||
def features_backfill(var, d):
    # Backfill newly-introduced default features into the variable named
    # by var (e.g. DISTRO_FEATURES): any entry of <var>_BACKFILL that is
    # neither already present nor explicitly opted out of via
    # <var>_BACKFILL_CONSIDERED gets appended.  This lets new features
    # that would otherwise disable existing functionality be added
    # without disturbing configurations that already set the variable.
    current = (d.getVar(var, True) or "").split()
    backfill = (d.getVar(var + "_BACKFILL", True) or "").split()
    considered = (d.getVar(var + "_BACKFILL_CONSIDERED", True) or "").split()

    missing = [feature for feature in backfill
               if feature not in current and feature not in considered]

    if missing:
        d.appendVar(var, " " + " ".join(missing))
106 | |||
107 | |||
def packages_filter_out_system(d):
    """
    Return a list of packages from PACKAGES with the "system" packages such as
    PN-dbg PN-doc PN-locale-eb-gb removed.
    """
    pn = d.getVar('PN', True)
    # Build a real list rather than map(): under Python 3 map() returns a
    # one-shot iterator, which would be exhausted after the first
    # membership test and let every later "system" package through.
    blacklist = [pn + suffix for suffix in ('', '-dbg', '-dev', '-doc', '-locale', '-staticdev')]
    localepkg = pn + "-locale-"
    pkgs = []

    for pkg in d.getVar('PACKAGES', True).split():
        # Note: locale packages are excluded by substring, not prefix.
        if pkg not in blacklist and localepkg not in pkg:
            pkgs.append(pkg)
    return pkgs
122 | |||
def getstatusoutput(cmd):
    """Run *cmd* through the shell, returning (exitstatus, output) via
    the commands (Python 2) / subprocess (Python 3) module selected at
    import time."""
    return cmdstatus.getstatusoutput(cmd)
125 | |||
126 | |||
def trim_version(version, num_parts=2):
    """
    Return just the first <num_parts> of <version>, split by periods.  For
    example, trim_version("1.2.3", 2) will return "1.2".

    Raises TypeError when version is not a string and ValueError when
    num_parts is below 1.  If the version has fewer parts than requested
    the whole version is returned.
    """
    # isinstance rather than an exact type() comparison so that str
    # subclasses are accepted too.
    if not isinstance(version, str):
        raise TypeError("Version should be a string")
    if num_parts < 1:
        raise ValueError("Cannot split to parts < 1")

    parts = version.split(".")
    return ".".join(parts[:num_parts])
140 | |||
def cpu_count():
    """Return the number of CPUs reported for the build host."""
    from multiprocessing import cpu_count as _count
    return _count()
144 | |||
def execute_pre_post_process(d, cmds):
    """Run each ';'-separated function name in *cmds* with
    bb.build.exec_func against datastore *d*; a None *cmds* is a no-op
    and empty entries between separators are skipped."""
    if cmds is None:
        return

    for cmd in cmds.strip().split(';'):
        cmd = cmd.strip()
        if not cmd:
            continue
        bb.note("Executing %s ..." % cmd)
        bb.build.exec_func(cmd, d)
154 | |||
def multiprocess_exec(commands, function):
    """Map *function* over *commands* using a multiprocessing pool sized
    to min(cpu count, number of commands); returns the non-None results.

    Workers ignore SIGINT so that a Ctrl-C is handled in the parent,
    which terminates the pool and re-raises KeyboardInterrupt."""
    import signal
    import multiprocessing

    if not commands:
        return []

    def init_worker():
        signal.signal(signal.SIGINT, signal.SIG_IGN)

    nproc = min(multiprocessing.cpu_count(), len(commands))
    pool = bb.utils.multiprocessingpool(nproc, init_worker)
    imap = pool.imap(function, commands)

    try:
        res = list(imap)
        pool.close()
        pool.join()
        # Drop None entries (commands with nothing to report).
        return [result for result in res if result is not None]
    except KeyboardInterrupt:
        pool.terminate()
        pool.join()
        raise