diff options
Diffstat (limited to 'meta/lib')
63 files changed, 5027 insertions, 0 deletions
diff --git a/meta/lib/oe/__init__.py b/meta/lib/oe/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/meta/lib/oe/__init__.py | |||
diff --git a/meta/lib/oe/buildhistory_analysis.py b/meta/lib/oe/buildhistory_analysis.py new file mode 100644 index 0000000000..86b5a12347 --- /dev/null +++ b/meta/lib/oe/buildhistory_analysis.py | |||
@@ -0,0 +1,453 @@ | |||
1 | # Report significant differences in the buildhistory repository since a specific revision | ||
2 | # | ||
3 | # Copyright (C) 2012 Intel Corporation | ||
4 | # Author: Paul Eggleton <paul.eggleton@linux.intel.com> | ||
5 | # | ||
6 | # Note: requires GitPython 0.3.1+ | ||
7 | # | ||
8 | # You can use this from the command line by running scripts/buildhistory-diff | ||
9 | # | ||
10 | |||
11 | import sys | ||
12 | import os.path | ||
13 | import difflib | ||
14 | import git | ||
15 | import re | ||
16 | import bb.utils | ||
17 | |||
18 | |||
# How to display fields
# Fields treated as whitespace-separated lists (order-insensitive)
list_fields = ['DEPENDS', 'RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS', 'FILES', 'FILELIST', 'USER_CLASSES', 'IMAGE_CLASSES', 'IMAGE_FEATURES', 'IMAGE_LINGUAS', 'IMAGE_INSTALL', 'BAD_RECOMMENDATIONS']
# Fields treated as lists where a pure reordering is worth reporting
list_order_fields = ['PACKAGES']
# Fields that have an implicit default value when unset
defaultval_fields = ['PKG', 'PKGE', 'PKGV', 'PKGR']
# Fields compared numerically (percentage-change threshold applies)
numeric_fields = ['PKGSIZE', 'IMAGESIZE']
# Fields to monitor
monitor_fields = ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RREPLACES', 'RCONFLICTS', 'PACKAGES', 'FILELIST', 'PKGSIZE', 'IMAGESIZE', 'PKG', 'PKGE', 'PKGV', 'PKGR']
# Percentage change to alert for numeric fields
monitor_numeric_threshold = 10
# Image files to monitor (note that image-info.txt is handled separately)
img_monitor_files = ['installed-package-names.txt', 'files-in-image.txt']
# Related context fields for reporting (note: PE, PV & PR are always reported for monitored package fields)
related_fields = {
    'RDEPENDS': ['DEPENDS'],
    'RRECOMMENDS': ['DEPENDS'],
    'FILELIST': ['FILES'],
    'PKGSIZE': ['FILELIST'],
    'files-in-image.txt': ['installed-package-names.txt', 'USER_CLASSES', 'IMAGE_CLASSES', 'ROOTFS_POSTPROCESS_COMMAND', 'IMAGE_POSTPROCESS_COMMAND'],
    'installed-package-names.txt': ['IMAGE_FEATURES', 'IMAGE_LINGUAS', 'IMAGE_INSTALL', 'BAD_RECOMMENDATIONS'],
}
38 | |||
39 | |||
class ChangeRecord:
    """A single reported difference in the buildhistory repository.

    path: repository path of the file the change was found in
    fieldname: name of the changed field (or monitored file name)
    oldvalue/newvalue: values on each side of the comparison
    monitored: True if the field is in the monitor_fields list
    """
    def __init__(self, path, fieldname, oldvalue, newvalue, monitored):
        self.path = path
        self.fieldname = fieldname
        self.oldvalue = oldvalue
        self.newvalue = newvalue
        self.monitored = monitored
        # Other ChangeRecords providing context for this one (see related_fields)
        self.related = []
        # Optional list of FileChange objects, used for file-list comparisons
        self.filechanges = None

    def __str__(self):
        return self._str_internal(True)

    def _str_internal(self, outer):
        """Render the change as text.

        outer is False when this record is being printed as a related
        (nested) change, which suppresses the path prefix and PE/PV/PR noise.
        """
        if outer:
            if '/image-files/' in self.path:
                prefix = '%s: ' % self.path.split('/image-files/')[0]
            else:
                prefix = '%s: ' % self.path
        else:
            prefix = ''

        def pkglist_combine(depver):
            # Render a {package: version-constraint} dict as
            # "pkg (constraint)" / "pkg" items.
            # Use items() rather than the Python-2-only iteritems() so this
            # also works on Python 3.
            pkglist = []
            for k, v in depver.items():
                if v:
                    pkglist.append("%s (%s)" % (k, v))
                else:
                    pkglist.append(k)
            return pkglist

        if self.fieldname in list_fields or self.fieldname in list_order_fields:
            if self.fieldname in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
                # Versioned dependency lists get version-aware comparison
                (depvera, depverb) = compare_pkg_lists(self.oldvalue, self.newvalue)
                aitems = pkglist_combine(depvera)
                bitems = pkglist_combine(depverb)
            else:
                aitems = self.oldvalue.split()
                bitems = self.newvalue.split()
            removed = list(set(aitems) - set(bitems))
            added = list(set(bitems) - set(aitems))

            if removed or added:
                if removed and not bitems:
                    out = '%s: removed all items "%s"' % (self.fieldname, ' '.join(removed))
                else:
                    out = '%s:%s%s' % (self.fieldname, ' removed "%s"' % ' '.join(removed) if removed else '', ' added "%s"' % ' '.join(added) if added else '')
            else:
                # Same set of items on both sides - only the order changed
                out = '%s changed order' % self.fieldname
        elif self.fieldname in numeric_fields:
            aval = int(self.oldvalue or 0)
            bval = int(self.newvalue or 0)
            if aval != 0:
                percentchg = ((bval - aval) / float(aval)) * 100
            else:
                percentchg = 100
            out = '%s changed from %s to %s (%s%d%%)' % (self.fieldname, self.oldvalue or "''", self.newvalue or "''", '+' if percentchg > 0 else '', percentchg)
        elif self.fieldname in defaultval_fields:
            out = '%s changed from %s to %s' % (self.fieldname, self.oldvalue, self.newvalue)
            if self.fieldname == 'PKG' and '[default]' in self.newvalue:
                out += ' - may indicate debian renaming failure'
        elif self.fieldname in ['pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm']:
            # Package scripts: show a unified diff of the script text
            if self.oldvalue and self.newvalue:
                out = '%s changed:\n  ' % self.fieldname
            elif self.newvalue:
                out = '%s added:\n  ' % self.fieldname
            elif self.oldvalue:
                out = '%s cleared:\n  ' % self.fieldname
            alines = self.oldvalue.splitlines()
            blines = self.newvalue.splitlines()
            diff = difflib.unified_diff(alines, blines, self.fieldname, self.fieldname, lineterm='')
            # Skip the two '---'/'+++' header lines of the unified diff
            out += '\n  '.join(list(diff)[2:])
            out += '\n  --'
        elif self.fieldname in img_monitor_files or '/image-files/' in self.path:
            fieldname = self.fieldname
            if '/image-files/' in self.path:
                fieldname = os.path.join('/' + self.path.split('/image-files/')[1], self.fieldname)
                out = 'Changes to %s:\n  ' % fieldname
            else:
                if outer:
                    prefix = 'Changes to %s ' % self.path
                out = '(%s):\n  ' % self.fieldname
            if self.filechanges:
                out += '\n  '.join(['%s' % i for i in self.filechanges])
            else:
                alines = self.oldvalue.splitlines()
                blines = self.newvalue.splitlines()
                diff = difflib.unified_diff(alines, blines, fieldname, fieldname, lineterm='')
                out += '\n  '.join(list(diff))
                out += '\n  --'
        else:
            out = '%s changed from "%s" to "%s"' % (self.fieldname, self.oldvalue, self.newvalue)

        if self.related:
            for chg in self.related:
                # PE/PV/PR context is only interesting at the top level
                if not outer and chg.fieldname in ['PE', 'PV', 'PR']:
                    continue
                for line in chg._str_internal(False).splitlines():
                    out += '\n  * %s' % line

        return '%s%s' % (prefix, out)
141 | |||
class FileChange:
    """A single change to one file within an image file listing."""

    changetype_add = 'A'
    changetype_remove = 'R'
    changetype_type = 'T'
    changetype_perms = 'P'
    changetype_ownergroup = 'O'
    changetype_link = 'L'

    # Human-readable names for the single-character file type codes
    # found in the first column of an 'ls -l' style listing
    _ftype_names = {
        '-': 'file',
        'd': 'directory',
        'l': 'symlink',
        'c': 'char device',
        'b': 'block device',
        'p': 'fifo',
        's': 'socket',
    }

    def __init__(self, path, changetype, oldvalue = None, newvalue = None):
        self.path = path
        self.changetype = changetype
        self.oldvalue = oldvalue
        self.newvalue = newvalue

    def _ftype_str(self, ftype):
        """Return a readable name for a file type character."""
        return self._ftype_names.get(ftype, 'unknown (%s)' % ftype)

    def __str__(self):
        if self.changetype == self.changetype_add:
            return '%s was added' % self.path
        if self.changetype == self.changetype_remove:
            return '%s was removed' % self.path
        if self.changetype == self.changetype_type:
            return '%s changed type from %s to %s' % (self.path, self._ftype_str(self.oldvalue), self._ftype_str(self.newvalue))
        if self.changetype == self.changetype_perms:
            return '%s changed permissions from %s to %s' % (self.path, self.oldvalue, self.newvalue)
        if self.changetype == self.changetype_ownergroup:
            return '%s changed owner/group from %s to %s' % (self.path, self.oldvalue, self.newvalue)
        if self.changetype == self.changetype_link:
            return '%s changed symlink target from %s to %s' % (self.path, self.oldvalue, self.newvalue)
        return '%s changed (unknown)' % self.path
189 | |||
190 | |||
def blob_to_dict(blob):
    """Parse a git blob containing 'key = value' lines into a dict.

    Lines without an '=' are ignored; keys and values are stripped of
    surrounding whitespace.
    """
    variables = {}
    for line in blob.data_stream.read().splitlines():
        key, sep, value = line.partition('=')
        if sep:
            variables[key.strip()] = value.strip()
    return variables
199 | |||
200 | |||
def file_list_to_dict(lines):
    """Parse 'ls -l' style listing lines into a dict keyed by path.

    Each value is [permissions, owner, group]; for symlinks the link
    target is appended as a fourth element.
    """
    files = {}
    for line in lines:
        # Split into at most five fields so file names containing spaces
        # survive intact in the final (path) field
        fields = line.split(None, 4)
        # Drop the leading '.' from the recorded path
        entry = fields[4][1:].strip()
        if ' -> ' in entry:
            # Symbolic link: separate the path from its target
            parts = entry.split(' -> ')
            files[parts[0]] = fields[0:3] + [parts[1]]
        else:
            files[entry] = fields[0:3]
    return files
216 | |||
217 | |||
def compare_file_lists(alines, blines):
    """Compare two image file listings ('ls -l' style lines).

    alines/blines: old and new listing lines (see file_list_to_dict()).
    Returns a list of FileChange objects covering type, permission,
    owner/group and symlink-target changes plus additions and removals.
    """
    adict = file_list_to_dict(alines)
    bdict = file_list_to_dict(blines)
    filechanges = []
    # Use items() rather than the Python-2-only iteritems() so this also
    # works on Python 3 (items() behaves identically here on Python 2)
    for path, splitv in adict.items():
        # Remove matched entries from bdict so only additions remain afterwards
        newsplitv = bdict.pop(path, None)
        if newsplitv:
            # Check type (first character of the permissions field)
            oldvalue = splitv[0][0]
            newvalue = newsplitv[0][0]
            if oldvalue != newvalue:
                filechanges.append(FileChange(path, FileChange.changetype_type, oldvalue, newvalue))
            # Check permissions (rest of the permissions field)
            oldvalue = splitv[0][1:]
            newvalue = newsplitv[0][1:]
            if oldvalue != newvalue:
                filechanges.append(FileChange(path, FileChange.changetype_perms, oldvalue, newvalue))
            # Check owner/group
            oldvalue = '%s/%s' % (splitv[1], splitv[2])
            newvalue = '%s/%s' % (newsplitv[1], newsplitv[2])
            if oldvalue != newvalue:
                filechanges.append(FileChange(path, FileChange.changetype_ownergroup, oldvalue, newvalue))
            # Check symlink target (only present when the new entry is a symlink)
            if newsplitv[0][0] == 'l':
                if len(splitv) > 3:
                    oldvalue = splitv[3]
                else:
                    oldvalue = None
                newvalue = newsplitv[3]
                if oldvalue != newvalue:
                    filechanges.append(FileChange(path, FileChange.changetype_link, oldvalue, newvalue))
        else:
            filechanges.append(FileChange(path, FileChange.changetype_remove))

    # Whatever is left over has been added
    for path in bdict:
        filechanges.append(FileChange(path, FileChange.changetype_add))

    return filechanges
257 | |||
258 | |||
def compare_lists(alines, blines):
    """Compare two plain line lists (e.g. installed package names).

    Returns FileChange records for every removed and added entry.
    """
    aset = set(alines)
    bset = set(blines)

    changes = [FileChange(name, FileChange.changetype_remove)
               for name in aset - bset]
    changes.extend(FileChange(name, FileChange.changetype_add)
                   for name in bset - aset)

    return changes
270 | |||
271 | |||
def compare_pkg_lists(astr, bstr):
    """Compare two versioned dependency list strings.

    astr/bstr: old and new dependency strings, e.g. "foo (>= 1.0) bar".
    Returns a (depvera, depverb) tuple of exploded dependency dicts
    (package name -> list of version constraints), after filtering out
    entries whose (non-empty) constraint lists are equal or differ only
    by an increased version on a '>=' or '=' constraint.
    """
    depvera = bb.utils.explode_dep_versions2(astr)
    depverb = bb.utils.explode_dep_versions2(bstr)

    # Strip out changes where the version has increased
    remove = []
    for k in depvera:
        if k in depverb:
            dva = depvera[k]
            dvb = depverb[k]
            # Only comparable when both sides have the same number of
            # (non-empty) version constraints
            if dva and dvb and len(dva) == len(dvb):
                # Since length is the same, sort so that prefixes (e.g. >=) will line up
                dva.sort()
                dvb.sort()
                removeit = True
                for dvai, dvbi in zip(dva, dvb):
                    if dvai != dvbi:
                        # Constraints look like "<op> <version>"; compare only
                        # when the operator prefix matches and is >= or =
                        aiprefix = dvai.split(' ')[0]
                        biprefix = dvbi.split(' ')[0]
                        if aiprefix == biprefix and aiprefix in ['>=', '=']:
                            # NOTE(review): split_version is handed the full
                            # "<op> <version>" string including the operator -
                            # presumably bb.utils tolerates that; confirm
                            if bb.utils.vercmp(bb.utils.split_version(dvai), bb.utils.split_version(dvbi)) > 0:
                                # Version went backwards - keep this entry
                                removeit = False
                                break
                        else:
                            removeit = False
                            break
                if removeit:
                    remove.append(k)

    # Drop the uninteresting entries from both sides
    for k in remove:
        depvera.pop(k)
        depverb.pop(k)

    return (depvera, depverb)
306 | |||
307 | |||
def compare_dict_blobs(path, ablob, bblob, report_all):
    """Compare two 'key = value' git blobs (recipe/package info files).

    path: repository path of the file (its basename is the package name)
    ablob/bblob: old and new git blob objects
    report_all: if False, suppress changes considered insignificant
    Returns a list of ChangeRecord objects, one per changed field.
    """
    adict = blob_to_dict(ablob)
    bdict = blob_to_dict(bblob)

    pkgname = os.path.basename(path)
    # Values to substitute when a defaultval field is unset on one side,
    # tagged "[default]" so the reader can tell the value was implicit
    defaultvals = {}
    defaultvals['PKG'] = pkgname
    defaultvals['PKGE'] = adict.get('PE', '0')
    defaultvals['PKGV'] = adict.get('PV', '')
    defaultvals['PKGR'] = adict.get('PR', '')
    for key in defaultvals:
        defaultvals[key] = '%s [default]' % defaultvals[key]

    changes = []
    # Consider every field present on either side
    keys = list(set(adict.keys()) | set(bdict.keys()))
    for key in keys:
        astr = adict.get(key, '')
        bstr = bdict.get(key, '')
        if astr != bstr:
            if (not report_all) and key in numeric_fields:
                # Only report numeric changes above the percentage threshold
                aval = int(astr or 0)
                bval = int(bstr or 0)
                if aval != 0:
                    percentchg = ((bval - aval) / float(aval)) * 100
                else:
                    percentchg = 100
                if abs(percentchg) < monitor_numeric_threshold:
                    continue
            elif (not report_all) and key in list_fields:
                # Skip FILELIST changes for -dbg packages unless the new
                # list was emptied entirely
                if key == "FILELIST" and path.endswith("-dbg") and bstr.strip() != '':
                    continue
                if key in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
                    # Version-aware comparison: ignore plain version bumps
                    (depvera, depverb) = compare_pkg_lists(astr, bstr)
                    if depvera == depverb:
                        continue
                alist = astr.split()
                alist.sort()
                blist = bstr.split()
                blist.sort()
                # We don't care about the removal of self-dependencies
                if pkgname in alist and not pkgname in blist:
                    alist.remove(pkgname)
                if ' '.join(alist) == ' '.join(blist):
                    continue

            if key in defaultval_fields:
                # Substitute the implicit default for whichever side is unset
                if not astr:
                    astr = defaultvals[key]
                elif not bstr:
                    bstr = defaultvals[key]

            chg = ChangeRecord(path, key, astr, bstr, key in monitor_fields)
            changes.append(chg)
    return changes
362 | |||
363 | |||
def process_changes(repopath, revision1, revision2 = 'HEAD', report_all = False):
    """Collect significant buildhistory changes between two git revisions.

    repopath: path to the buildhistory git repository (must be non-bare)
    revision1/revision2: old and new revisions to compare
    report_all: if True, return all changes, not just monitored ones
    Returns a list of ChangeRecord objects.
    """
    repo = git.Repo(repopath)
    assert repo.bare == False
    commit = repo.commit(revision1)
    diff = commit.diff(revision2)

    changes = []
    # Modified files
    for d in diff.iter_change_type('M'):
        path = os.path.dirname(d.a_blob.path)
        if path.startswith('packages/'):
            filename = os.path.basename(d.a_blob.path)
            if filename == 'latest':
                # Recipe/package info file: compare field by field
                changes.extend(compare_dict_blobs(path, d.a_blob, d.b_blob, report_all))
            elif filename.startswith('latest.'):
                # Package script (latest.pkg_postinst etc.): keep full text
                chg = ChangeRecord(path, filename, d.a_blob.data_stream.read(), d.b_blob.data_stream.read(), True)
                changes.append(chg)
        elif path.startswith('images/'):
            filename = os.path.basename(d.a_blob.path)
            if filename in img_monitor_files:
                if filename == 'files-in-image.txt':
                    # Detailed per-file comparison of the image contents
                    alines = d.a_blob.data_stream.read().splitlines()
                    blines = d.b_blob.data_stream.read().splitlines()
                    filechanges = compare_file_lists(alines,blines)
                    if filechanges:
                        chg = ChangeRecord(path, filename, None, None, True)
                        chg.filechanges = filechanges
                        changes.append(chg)
                elif filename == 'installed-package-names.txt':
                    # Simple added/removed comparison of package names
                    alines = d.a_blob.data_stream.read().splitlines()
                    blines = d.b_blob.data_stream.read().splitlines()
                    filechanges = compare_lists(alines,blines)
                    if filechanges:
                        chg = ChangeRecord(path, filename, None, None, True)
                        chg.filechanges = filechanges
                        changes.append(chg)
                else:
                    chg = ChangeRecord(path, filename, d.a_blob.data_stream.read(), d.b_blob.data_stream.read(), True)
                    changes.append(chg)
            elif filename == 'image-info.txt':
                changes.extend(compare_dict_blobs(path, d.a_blob, d.b_blob, report_all))
        elif '/image-files/' in path:
            # NOTE(review): 'filename' is not assigned in this branch; it
            # carries over from a previous loop iteration (or is unbound on
            # the first one). Paths under images/.../image-files/ normally
            # match the 'images/' branch above instead - verify whether this
            # branch is reachable and, if so, whether filename is correct.
            chg = ChangeRecord(path, filename, d.a_blob.data_stream.read(), d.b_blob.data_stream.read(), True)
            changes.append(chg)

    # Look for added preinst/postinst/prerm/postrm
    # (without reporting newly added recipes)
    addedpkgs = []
    addedchanges = []
    for d in diff.iter_change_type('A'):
        path = os.path.dirname(d.b_blob.path)
        if path.startswith('packages/'):
            filename = os.path.basename(d.b_blob.path)
            if filename == 'latest':
                addedpkgs.append(path)
            elif filename.startswith('latest.'):
                # filename[7:] strips the 'latest.' prefix, leaving e.g. 'pkg_postinst'
                chg = ChangeRecord(path, filename[7:], '', d.b_blob.data_stream.read(), True)
                addedchanges.append(chg)
    for chg in addedchanges:
        # Drop script additions that belong to a newly added recipe
        found = False
        for pkg in addedpkgs:
            if chg.path.startswith(pkg):
                found = True
                break
        if not found:
            changes.append(chg)

    # Look for cleared preinst/postinst/prerm/postrm
    for d in diff.iter_change_type('D'):
        path = os.path.dirname(d.a_blob.path)
        if path.startswith('packages/'):
            filename = os.path.basename(d.a_blob.path)
            if filename != 'latest' and filename.startswith('latest.'):
                chg = ChangeRecord(path, filename[7:], d.a_blob.data_stream.read(), '', True)
                changes.append(chg)

    # Link related changes
    for chg in changes:
        if chg.monitored:
            for chg2 in changes:
                # (Check dirname in the case of fields from recipe info files)
                if chg.path == chg2.path or os.path.dirname(chg.path) == chg2.path:
                    if chg2.fieldname in related_fields.get(chg.fieldname, []):
                        chg.related.append(chg2)
                # NOTE(review): this elif can never match - equal paths are
                # always consumed by the branch above, so PE/PV/PR linking
                # for packages/ records looks unreachable; verify intent
                elif chg.path == chg2.path and chg.path.startswith('packages/') and chg2.fieldname in ['PE', 'PV', 'PR']:
                    chg.related.append(chg2)

    if report_all:
        return changes
    else:
        return [chg for chg in changes if chg.monitored]
diff --git a/meta/lib/oe/cachedpath.py b/meta/lib/oe/cachedpath.py new file mode 100644 index 0000000000..0840cc4c3f --- /dev/null +++ b/meta/lib/oe/cachedpath.py | |||
@@ -0,0 +1,233 @@ | |||
1 | # | ||
2 | # Based on standard python library functions but avoid | ||
3 | # repeated stat calls. It's assumed the files will not change from under us | ||
4 | # so we can cache stat calls. | ||
5 | # | ||
6 | |||
7 | import os | ||
8 | import errno | ||
9 | import stat as statmod | ||
10 | |||
class CachedPath(object):
    """Cached replacements for os/os.path stat-based queries.

    Results of stat(), lstat() and normpath() are cached per instance on
    the assumption that the filesystem will not change underneath us.
    Failed stat/lstat calls are cached as False.
    """
    def __init__(self):
        self.statcache = {}      # normalized path -> os.stat() result, or False
        self.lstatcache = {}     # normalized path -> os.lstat() result, or False
        self.normpathcache = {}  # raw path -> normalized path
        return

    def updatecache(self, x):
        """Invalidate any cached stat/lstat data for path x."""
        x = self.normpath(x)
        if x in self.statcache:
            del self.statcache[x]
        if x in self.lstatcache:
            del self.lstatcache[x]

    def normpath(self, path):
        """Cached equivalent of os.path.normpath()."""
        if path in self.normpathcache:
            return self.normpathcache[path]
        newpath = os.path.normpath(path)
        self.normpathcache[path] = newpath
        return newpath

    def _callstat(self, path):
        """stat() 'path' (assumed already normalized), caching the result.

        Returns (and caches) False if the stat fails.
        """
        if path in self.statcache:
            return self.statcache[path]
        try:
            st = os.stat(path)
            self.statcache[path] = st
            return st
        except os.error:
            self.statcache[path] = False
            return False

    # We might as well call lstat and then only
    # call stat as well in the symbolic link case
    # since this turns out to be much more optimal
    # in real world usage of this cache
    def callstat(self, path):
        """Cached equivalent of os.stat(); returns False on failure."""
        path = self.normpath(path)
        # calllstat() populates statcache as a side effect (see comment above)
        self.calllstat(path)
        return self.statcache[path]

    def calllstat(self, path):
        """Cached equivalent of os.lstat(); returns False on failure.

        For non-symlinks the lstat result doubles as the stat result;
        for symlinks the link target is stat()ed as well while we're here.
        """
        path = self.normpath(path)
        if path in self.lstatcache:
            return self.lstatcache[path]
        #bb.error("LStatpath:" + path)
        try:
            lst = os.lstat(path)
            self.lstatcache[path] = lst
            if not statmod.S_ISLNK(lst.st_mode):
                self.statcache[path] = lst
            else:
                self._callstat(path)
            return lst
        except (os.error, AttributeError):
            self.lstatcache[path] = False
            self.statcache[path] = False
            return False

    # This follows symbolic links, so both islink() and isdir() can be true
    # for the same path on systems that support symlinks
    def isfile(self, path):
        """Test whether a path is a regular file"""
        st = self.callstat(path)
        if not st:
            return False
        return statmod.S_ISREG(st.st_mode)

    # Is a path a directory?
    # This follows symbolic links, so both islink() and isdir()
    # can be true for the same path on systems that support symlinks
    def isdir(self, s):
        """Return true if the pathname refers to an existing directory."""
        st = self.callstat(s)
        if not st:
            return False
        return statmod.S_ISDIR(st.st_mode)

    def islink(self, path):
        """Test whether a path is a symbolic link"""
        st = self.calllstat(path)
        if not st:
            return False
        return statmod.S_ISLNK(st.st_mode)

    # Does a path exist?
    # This is false for dangling symbolic links on systems that support them.
    def exists(self, path):
        """Test whether a path exists.  Returns False for broken symbolic links"""
        if self.callstat(path):
            return True
        return False

    def lexists(self, path):
        """Test whether a path exists.  Returns True for broken symbolic links"""
        if self.calllstat(path):
            return True
        return False

    def stat(self, path):
        """Cached replacement for os.stat(); returns False on failure."""
        return self.callstat(path)

    def lstat(self, path):
        """Cached replacement for os.lstat(); returns False on failure."""
        return self.calllstat(path)

    def walk(self, top, topdown=True, onerror=None, followlinks=False):
        """Cached generator equivalent of os.walk()."""
        # Matches os.walk, not os.path.walk()

        # We may not have read permission for top, in which case we can't
        # get a list of the files the directory contains. os.path.walk
        # always suppressed the exception then, rather than blow up for a
        # minor reason when (say) a thousand readable directories are still
        # left to visit. That logic is copied here.
        try:
            names = os.listdir(top)
        except os.error as err:
            if onerror is not None:
                onerror(err)
            return

        dirs, nondirs = [], []
        for name in names:
            if self.isdir(os.path.join(top, name)):
                dirs.append(name)
            else:
                nondirs.append(name)

        if topdown:
            yield top, dirs, nondirs
        for name in dirs:
            new_path = os.path.join(top, name)
            # Recurse unless the entry is a symlink we've been told not to follow
            if followlinks or not self.islink(new_path):
                for x in self.walk(new_path, topdown, onerror, followlinks):
                    yield x
        if not topdown:
            yield top, dirs, nondirs

    ## realpath() related functions
    def __is_path_below(self, file, root):
        # 'root' is expected to end with os.path.sep (arranged by realpath())
        return (file + os.path.sep).startswith(root)

    def __realpath_rel(self, start, rel_path, root, loop_cnt, assume_dir):
        """Calculates real path of symlink 'start' + 'rel_path' below
        'root'; no part of 'start' below 'root' must contain symlinks. """
        have_dir = True

        # Resolve the relative path one component at a time
        for d in rel_path.split(os.path.sep):
            if not have_dir and not assume_dir:
                raise OSError(errno.ENOENT, "no such directory %s" % start)

            if d == os.path.pardir: # '..'
                if len(start) >= len(root):
                    # do not follow '..' before root
                    start = os.path.dirname(start)
                else:
                    # emit warning?
                    pass
            else:
                (start, have_dir) = self.__realpath(os.path.join(start, d),
                                                    root, loop_cnt, assume_dir)

            # Resolution must never escape the chroot-like root
            assert(self.__is_path_below(start, root))

        return start

    def __realpath(self, file, root, loop_cnt, assume_dir):
        """Resolve symlinks in 'file' (kept below 'root'); returns a
        (resolved_path, is_directory) tuple."""
        while self.islink(file) and len(file) >= len(root):
            if loop_cnt == 0:
                # Too many chained symlinks - treat as a loop
                raise OSError(errno.ELOOP, file)

            loop_cnt -= 1
            target = os.path.normpath(os.readlink(file))

            if not os.path.isabs(target):
                tdir = os.path.dirname(file)
                assert(self.__is_path_below(tdir, root))
            else:
                # Absolute symlink targets are interpreted relative to 'root'
                tdir = root

            file = self.__realpath_rel(tdir, target, root, loop_cnt, assume_dir)

        try:
            is_dir = self.isdir(file)
        except:
            # NOTE(review): bare except - isdir() is not expected to raise
            # here; consider narrowing this if it ever matters
            is_dir = False

        return (file, is_dir)

    def realpath(self, file, root, use_physdir = True, loop_cnt = 100, assume_dir = False):
        """ Returns the canonical path of 'file' with assuming a
        toplevel 'root' directory. When 'use_physdir' is set, all
        preceding path components of 'file' will be resolved first;
        this flag should be set unless it is guaranteed that there is
        no symlink in the path. When 'assume_dir' is not set, missing
        path components will raise an ENOENT error"""

        root = os.path.normpath(root)
        file = os.path.normpath(file)

        if not root.endswith(os.path.sep):
            # letting root end with '/' makes some things easier
            root = root + os.path.sep

        if not self.__is_path_below(file, root):
            raise OSError(errno.EINVAL, "file '%s' is not below root" % file)

        try:
            if use_physdir:
                # Resolve every component of the path relative to root
                file = self.__realpath_rel(root, file[(len(root) - 1):], root, loop_cnt, assume_dir)
            else:
                # Only resolve the final path itself
                file = self.__realpath(file, root, loop_cnt, assume_dir)[0]
        except OSError as e:
            if e.errno == errno.ELOOP:
                # make ELOOP more readable; without catching it, there will
                # be printed a backtrace with 100s of OSError exceptions
                # else
                raise OSError(errno.ELOOP,
                              "too much recursions while resolving '%s'; loop in '%s'" %
                              (file, e.strerror))

            raise

        return file
diff --git a/meta/lib/oe/classextend.py b/meta/lib/oe/classextend.py new file mode 100644 index 0000000000..e2ae7e9f94 --- /dev/null +++ b/meta/lib/oe/classextend.py | |||
@@ -0,0 +1,104 @@ | |||
class ClassExtender(object):
    """Map recipe, package and dependency names into an extended namespace.

    Used by class extension mechanisms (e.g. nativesdk) to prefix names
    with the extension name and remap dependency/package variables in the
    datastore accordingly.

    extname: the extension prefix (e.g. "nativesdk")
    d: the datastore to read/write variables from
    """
    def __init__(self, extname, d):
        self.extname = extname
        self.d = d
        # [originalname, extendedname] pairs recorded by rename_packages()
        self.pkgs_mapping = []

    def extend_name(self, name):
        """Return 'name' mapped into the extended namespace.

        Kernel and rtld names pass through unchanged; a trailing
        "-<extname>" suffix is converted into a leading prefix; virtual/
        providers are prefixed after the "virtual/" component.
        """
        if name.startswith("kernel-") or name == "virtual/kernel":
            return name
        if name.startswith("rtld"):
            return name
        if name.endswith("-" + self.extname):
            # Strip only the trailing "-<extname>" suffix. (str.replace()
            # would also remove any earlier occurrence of the substring,
            # corrupting names that contain the extension name mid-string.)
            name = name[:-len("-" + self.extname)]
        if name.startswith("virtual/"):
            subs = name.split("/", 1)[1]
            if not subs.startswith(self.extname):
                return "virtual/" + self.extname + "-" + subs
            return name
        if not name.startswith(self.extname):
            return self.extname + "-" + name
        return name

    def map_variable(self, varname, setvar = True):
        """Map every space-separated item of variable 'varname'.

        Writes the result back to the datastore unless setvar is False;
        returns the new value ("" if the variable was unset).
        """
        var = self.d.getVar(varname, True)
        if not var:
            return ""
        var = var.split()
        newvar = []
        for v in var:
            newvar.append(self.extend_name(v))
        newdata = " ".join(newvar)
        if setvar:
            self.d.setVar(varname, newdata)
        return newdata

    def map_regexp_variable(self, varname, setvar = True):
        """Like map_variable(), but items may be anchored regexps.

        Items starting with '^' keep the anchor and get the prefix
        inserted after it.
        """
        var = self.d.getVar(varname, True)
        if not var:
            return ""
        var = var.split()
        newvar = []
        for v in var:
            if v.startswith("^" + self.extname):
                # Already anchored and prefixed
                newvar.append(v)
            elif v.startswith("^"):
                newvar.append("^" + self.extname + "-" + v[1:])
            else:
                newvar.append(self.extend_name(v))
        newdata = " ".join(newvar)
        if setvar:
            self.d.setVar(varname, newdata)
        return newdata

    def map_depends(self, dep):
        """Map a single dependency name; native/SDK-style deps pass through."""
        if dep.endswith(("-native", "-native-runtime", "-crosssdk")) or ('nativesdk-' in dep) or ('cross-canadian' in dep):
            return dep
        else:
            return self.extend_name(dep)

    def map_depends_variable(self, varname, suffix = ""):
        """Map a versioned dependency variable (optionally per-package).

        suffix, when given, selects the per-package variant
        ("<varname>_<suffix>"). Version constraints are preserved.
        """
        if suffix:
            varname = varname + "_" + suffix
        deps = self.d.getVar(varname, True)
        if not deps:
            return
        deps = bb.utils.explode_dep_versions2(deps)
        newdeps = {}
        for dep in deps:
            newdeps[self.map_depends(dep)] = deps[dep]

        self.d.setVar(varname, bb.utils.join_deps(newdeps, False))

    def map_packagevars(self):
        """Map the runtime dependency variables for every package.

        The trailing "" entry also processes the bare (non-suffixed)
        variables.
        """
        for pkg in (self.d.getVar("PACKAGES", True).split() + [""]):
            self.map_depends_variable("RDEPENDS", pkg)
            self.map_depends_variable("RRECOMMENDS", pkg)
            self.map_depends_variable("RSUGGESTS", pkg)
            self.map_depends_variable("RPROVIDES", pkg)
            self.map_depends_variable("RREPLACES", pkg)
            self.map_depends_variable("RCONFLICTS", pkg)
            self.map_depends_variable("PKG", pkg)

    def rename_packages(self):
        """Rewrite PACKAGES into the extended namespace, recording the
        old->new name mapping in self.pkgs_mapping."""
        for pkg in (self.d.getVar("PACKAGES", True) or "").split():
            if pkg.startswith(self.extname):
                # Already prefixed: strip exactly the leading "<extname>-".
                # (str.split() on the prefix would truncate names containing
                # the extension name more than once.)
                self.pkgs_mapping.append([pkg[len(self.extname + "-"):], pkg])
                continue
            self.pkgs_mapping.append([pkg, self.extend_name(pkg)])

        self.d.setVar("PACKAGES", " ".join([row[1] for row in self.pkgs_mapping]))

    def rename_package_variables(self, variables):
        """Rename each per-package variable in 'variables' to follow the
        package renaming performed by rename_packages()."""
        for pkg_mapping in self.pkgs_mapping:
            for subs in variables:
                self.d.renameVar("%s_%s" % (subs, pkg_mapping[0]), "%s_%s" % (subs, pkg_mapping[1]))
96 | |||
class NativesdkClassExtender(ClassExtender):
    """ClassExtender variant with nativesdk-specific dependency mapping."""

    def map_depends(self, dep):
        """Map a single dependency name into the nativesdk namespace."""
        # Native/SDK/cross dependencies pass through untouched
        if ('nativesdk-' in dep) or dep.endswith(("-native", "-native-runtime", "-cross", "-crosssdk")):
            return dep
        # Compiler recipes get the crosssdk variant rather than a prefix
        if dep.endswith(("-gcc-intermediate", "-gcc-initial", "-gcc", "-g++")):
            return dep + "-crosssdk"
        return self.extend_name(dep)
diff --git a/meta/lib/oe/classutils.py b/meta/lib/oe/classutils.py new file mode 100644 index 0000000000..58188fdd6e --- /dev/null +++ b/meta/lib/oe/classutils.py | |||
@@ -0,0 +1,43 @@ | |||
class ClassRegistry(type):
    """Maintain a registry of classes, indexed by name.

    Note that this implementation requires that the names be unique, as it uses
    a dictionary to hold the classes by name.

    The name in the registry can be overridden via the 'name' attribute of the
    class, and the 'priority' attribute controls priority. The prioritized()
    method returns the registered classes in priority order.

    Subclasses of ClassRegistry may define an 'implemented' property to exert
    control over whether the class will be added to the registry (e.g. to keep
    abstract base classes out of the registry)."""
    # Default priority for registered classes that don't set their own.
    priority = 0
    class __metaclass__(type):
        """Give each ClassRegistry their own registry"""
        # Python 2 meta-metaclass: runs when a subclass of ClassRegistry is
        # itself created, giving it an independent 'registry' dict instead
        # of sharing this one.
        def __init__(cls, name, bases, attrs):
            cls.registry = {}
            type.__init__(cls, name, bases, attrs)

    def __init__(cls, name, bases, attrs):
        # Runs once for each class that uses this metaclass; adds it to
        # the registry unless it opts out.
        super(ClassRegistry, cls).__init__(name, bases, attrs)
        try:
            # Classes may opt out of registration via an 'implemented'
            # attribute/property that evaluates false.
            if not cls.implemented:
                return
        except AttributeError:
            pass

        try:
            cls.name
        except AttributeError:
            # Default the registry key to the Python class name.
            cls.name = name
        cls.registry[cls.name] = cls

    @classmethod
    def prioritized(tcls):
        # Highest-priority classes first.
        return sorted(tcls.registry.values(),
                      key=lambda v: v.priority, reverse=True)

    def unregister(cls):
        # Remove this class from the registry under whatever name it was
        # stored. keys() returns a list copy on Python 2, so deleting
        # while iterating is safe here (would break on Python 3).
        for key in cls.registry.keys():
            if cls.registry[key] is cls:
                del cls.registry[key]
diff --git a/meta/lib/oe/data.py b/meta/lib/oe/data.py new file mode 100644 index 0000000000..4cc0e02968 --- /dev/null +++ b/meta/lib/oe/data.py | |||
@@ -0,0 +1,17 @@ | |||
1 | import oe.maketype | ||
2 | |||
def typed_value(key, d):
    """Construct a value for the specified metadata variable, using its flags
    to determine the type and parameters for construction."""
    var_type = d.getVarFlag(key, 'type')
    flags = d.getVarFlags(key)
    if flags is None:
        flags = {}
    else:
        # Expand every flag value before handing it to the type factory.
        flags = dict((flag, d.expand(value))
                     for flag, value in flags.iteritems())

    try:
        return oe.maketype.create(d.getVar(key, True) or '', var_type, **flags)
    except (TypeError, ValueError) as exc:
        bb.msg.fatal("Data", "%s: %s" % (key, str(exc)))
diff --git a/meta/lib/oe/distro_check.py b/meta/lib/oe/distro_check.py new file mode 100644 index 0000000000..8ed5b0ec80 --- /dev/null +++ b/meta/lib/oe/distro_check.py | |||
@@ -0,0 +1,383 @@ | |||
def get_links_from_url(url):
    "Return all the href links found on the web location"

    import urllib, sgmllib

    class HrefCollector(sgmllib.SGMLParser):
        """SGML parser which simply accumulates <a href=...> targets."""

        def __init__(self, verbose=0):
            sgmllib.SGMLParser.__init__(self, verbose)
            self.hyperlinks = []

        def parse(self, markup):
            "Feed the whole document through the parser."
            self.feed(markup)
            self.close()

        def start_a(self, attributes):
            # Record the href of each anchor, minus surrounding slashes.
            for name, value in attributes:
                if name == "href":
                    self.hyperlinks.append(value.strip('/'))

    sock = urllib.urlopen(url)
    webpage = sock.read()
    sock.close()

    collector = HrefCollector()
    collector.parse(webpage)
    return collector.hyperlinks
34 | |||
def find_latest_numeric_release(url):
    """Find the latest listed numeric release on the given url.

    Scans the page's links for ones that parse as numbers and returns the
    text of the largest, or "" if no numeric link is found.
    """
    best = 0
    beststr = ""
    for link in get_links_from_url(url):
        try:
            release = float(link)
        except ValueError:
            # Non-numeric link (e.g. "README"); skip it. The original bare
            # 'except:' would also have swallowed unrelated errors.
            continue
        if release > best:
            best = release
            beststr = link
    return beststr
48 | |||
def is_src_rpm(name):
    "Check if the link is pointing to a src.rpm file"
    return name.endswith(".src.rpm")
55 | |||
def package_name_from_srpm(srpm):
    "Strip out the package name from the src.rpm filename"
    # Keep the first field plus every middle field that does not start
    # with a digit (digit-leading fields look like version components);
    # the final field (release + ".src.rpm") is always dropped.
    fields = srpm.split('-')
    kept = [fields[0]]
    kept.extend(f for f in fields[1:-1] if not f[0].isdigit())
    return '-'.join(kept)
65 | |||
def clean_package_list(package_list):
    """Remove duplicate entries from a package list and return it sorted.

    The previous implementation deduplicated through a dict but — despite
    its docstring — never sorted, so the generated list files were in
    arbitrary order; sorting makes them deterministic.
    """
    return sorted(set(package_list))
71 | |||
72 | |||
def get_latest_released_meego_source_package_list():
    "Returns list of all the name os packages in the latest meego distro"

    package_names = []
    try:
        with open("/tmp/Meego-1.1", "r") as f:
            for line in f:
                # Strip the trailing '\n' and tag with the section name.
                package_names.append(line[:-1] + ":" + "main")
    except IOError:
        # Best effort: no local data file means an empty list.
        pass
    # NOTE(review): the data file name says 1.1 but the reported release
    # is "1.0" -- looks inconsistent; confirm which release this is.
    return "1.0", clean_package_list(package_names)
84 | |||
def get_source_package_list_from_url(url, section):
    "Return a sectioned list of package names from a URL list"

    bb.note("Reading %s: %s" % (url, section))
    srpms = filter(is_src_rpm, get_links_from_url(url))
    # Tag each package name with the section it came from.
    return [package_name_from_srpm(srpm) + ":" + section for srpm in srpms]
98 | |||
def get_latest_released_fedora_source_package_list():
    "Returns list of all the name os packages in the latest fedora distro"
    latest = find_latest_numeric_release("http://archive.fedoraproject.org/pub/fedora/linux/releases/")

    # Release sources plus post-release updates.
    package_names = get_source_package_list_from_url("http://archive.fedoraproject.org/pub/fedora/linux/releases/%s/Fedora/source/SRPMS/" % latest, "main")
    package_names += get_source_package_list_from_url("http://archive.fedoraproject.org/pub/fedora/linux/updates/%s/SRPMS/" % latest, "updates")

    return latest, clean_package_list(package_names)
111 | |||
def get_latest_released_opensuse_source_package_list():
    "Returns list of all the name os packages in the latest opensuse distro"
    latest = find_latest_numeric_release("http://download.opensuse.org/source/distribution/")

    # Main distribution sources plus released updates.
    package_names = get_source_package_list_from_url("http://download.opensuse.org/source/distribution/%s/repo/oss/suse/src/" % latest, "main")
    package_names += get_source_package_list_from_url("http://download.opensuse.org/update/%s/rpm/src/" % latest, "updates")

    return latest, clean_package_list(package_names)
121 | |||
def get_latest_released_mandriva_source_package_list():
    "Returns list of all the name os packages in the latest mandriva distro"
    latest = find_latest_numeric_release("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/")

    # Main release sources plus their updates.
    package_names = get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/main/release/" % latest, "main")
    package_names += get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/main/updates/" % latest, "updates")

    return latest, clean_package_list(package_names)
131 | |||
def find_latest_debian_release(url):
    """Find the latest listed Debian release on the given url.

    Returns the version part of the lexicographically newest "DebianX.Y"
    link, or "_NotFound_" if no such link exists.
    """
    releases = [link for link in get_links_from_url(url)
                if link.startswith("Debian") and ';' not in link]
    releases.sort()
    try:
        # Largest entry, with the "Debian" prefix (6 chars) stripped off.
        return releases.pop()[6:]
    except IndexError:
        # Only an empty list can fail here; the original bare 'except:'
        # would also have hidden unrelated errors.
        return "_NotFound_"
145 | |||
def get_debian_style_source_package_list(url, section):
    """Return the list of package-names stored in the debian style Sources.gz file"""
    import urllib
    import tempfile
    import gzip

    # Download the compressed index to a named temp file so gzip can
    # re-open it by path.
    sock = urllib.urlopen(url)
    tmpfile = tempfile.NamedTemporaryFile(mode='wb', prefix='oecore.', suffix='.tmp', delete=False)
    tmpfilename = tmpfile.name
    try:
        tmpfile.write(sock.read())
    finally:
        sock.close()
        tmpfile.close()

    bb.note("Reading %s: %s" % (url, section))

    package_names = []
    try:
        f = gzip.open(tmpfilename)
        try:
            for line in f:
                if line.startswith("Package: "):
                    # Strip the trailing '\n' and tag with the section.
                    package_names.append(line[9:-1] + ":" + section)
        finally:
            # The original leaked the gzip handle.
            f.close()
    finally:
        # Remove the temp file even if parsing fails part-way.
        os.unlink(tmpfilename)

    return package_names
167 | |||
def get_latest_released_debian_source_package_list():
    "Returns list of all the name os packages in the latest debian distro"
    latest = find_latest_debian_release("http://ftp.debian.org/debian/dists/")
    # Note: the fetches below are pinned to the "stable" symlink rather
    # than the release number determined above.
    package_names = get_debian_style_source_package_list(
        "http://ftp.debian.org/debian/dists/stable/main/source/Sources.gz", "main")
    package_names += get_debian_style_source_package_list(
        "http://ftp.debian.org/debian/dists/stable-proposed-updates/main/source/Sources.gz", "updates")
    return latest, clean_package_list(package_names)
179 | |||
def find_latest_ubuntu_release(url):
    "Find the latest listed ubuntu release on the given url"
    url += "?C=M;O=D" # Descending Sort by Last Modified
    # The first "<release>-updates" link on the modification-sorted page
    # belongs to the newest release.
    for link in get_links_from_url(url):
        if link.endswith("-updates"):
            return link[:-8]
    return "_NotFound_"
187 | |||
def get_latest_released_ubuntu_source_package_list():
    "Returns list of all the name os packages in the latest ubuntu distro"
    latest = find_latest_ubuntu_release("http://archive.ubuntu.com/ubuntu/dists/")
    # Main release sources plus the -updates pocket.
    package_names = get_debian_style_source_package_list(
        "http://archive.ubuntu.com/ubuntu/dists/%s/main/source/Sources.gz" % latest, "main")
    package_names += get_debian_style_source_package_list(
        "http://archive.ubuntu.com/ubuntu/dists/%s-updates/main/source/Sources.gz" % latest, "updates")
    return latest, clean_package_list(package_names)
201 | |||
def create_distro_packages_list(distro_check_dir):
    """(Re)generate the per-distro package-name list files under
    <distro_check_dir>/package_lists, one "<Distro>-<release>" file per
    supported distribution."""
    pkglst_dir = os.path.join(distro_check_dir, "package_lists")
    if not os.path.isdir(pkglst_dir):
        os.makedirs(pkglst_dir)
    # first clear old stuff
    for fname in os.listdir(pkglst_dir):       # don't shadow builtin 'file'
        os.unlink(os.path.join(pkglst_dir, fname))

    per_distro_functions = [
        ("Debian", get_latest_released_debian_source_package_list),
        ("Ubuntu", get_latest_released_ubuntu_source_package_list),
        ("Fedora", get_latest_released_fedora_source_package_list),
        ("OpenSuSE", get_latest_released_opensuse_source_package_list),
        ("Mandriva", get_latest_released_mandriva_source_package_list),
        ("Meego", get_latest_released_meego_source_package_list)
    ]

    from datetime import datetime
    begin = datetime.now()
    for name, fetcher in per_distro_functions:
        release, package_list = fetcher()
        bb.note("Distro: %s, Latest Release: %s, # src packages: %d" % (name, release, len(package_list)))
        package_list_file = os.path.join(pkglst_dir, name + "-" + release)
        f = open(package_list_file, "w+b")
        try:
            for pkg in package_list:
                f.write(pkg + "\n")
        finally:
            # Close even if a write fails (original leaked the handle).
            f.close()
    delta = datetime.now() - begin
    # Fixed typo in the original message ("generatiosn").
    bb.note("package_list generation took this much time: %d seconds" % delta.seconds)
233 | |||
def update_distro_data(distro_check_dir, datetime):
    """
    If distro packages list data is old then rebuild it.
    The operation has to be protected by a lock so that
    only one thread performs it at a time.
    """
    if not os.path.isdir(distro_check_dir):
        try:
            bb.note("Making new directory: %s" % distro_check_dir)
            os.makedirs(distro_check_dir)
        except OSError:
            raise Exception('Unable to create directory %s' % (distro_check_dir))


    datetime_file = os.path.join(distro_check_dir, "build_datetime")
    saved_datetime = "_invalid_"
    import fcntl
    f = None
    try:
        if not os.path.exists(datetime_file):
            open(datetime_file, 'w+b').close() # touch the file so that the next open won't fail

        f = open(datetime_file, "r+b")
        fcntl.lockf(f, fcntl.LOCK_EX)
        saved_datetime = f.read()
        # Only compare to day granularity (YYYYMMDD prefix).
        if saved_datetime[0:8] != datetime[0:8]:
            bb.note("The build datetime did not match: saved:%s current:%s" % (saved_datetime, datetime))
            bb.note("Regenerating distro package lists")
            create_distro_packages_list(distro_check_dir)
            f.seek(0)
            f.write(datetime)

    except (OSError, IOError):
        # Python 2 raises IOError (not OSError) for failed file I/O, so
        # both must be caught; the original only caught OSError.
        raise Exception('Unable to read/write this file: %s' % (datetime_file))
    finally:
        # 'f' is never bound if the initial open() itself failed; guard it
        # so we don't mask the real error with a NameError.
        if f:
            fcntl.lockf(f, fcntl.LOCK_UN)
            f.close()
270 | |||
def compare_in_distro_packages_list(distro_check_dir, d):
    """Return the list of distro identifiers ("Distro-section" strings plus
    DISTRO_PN_ALIAS entries) whose generated package lists contain this
    recipe's (possibly aliased) name."""
    if not os.path.isdir(distro_check_dir):
        raise Exception("compare_in_distro_packages_list: invalid distro_check_dir passed")

    localdata = bb.data.createCopy(d)
    pkglst_dir = os.path.join(distro_check_dir, "package_lists")
    matching_distros = []
    pn = d.getVar('PN', True)
    recipe_name = d.getVar('PN', True)
    bb.note("Checking: %s" % pn)

    # NOTE(review): trim_dict is built but never used below -- presumably
    # intended for the suffix-stripping that follows; verify.
    trim_dict = dict({"-native":"-native", "-cross":"-cross", "-initial":"-initial"})

    # Strip recipe-type suffixes/prefixes so the comparison uses the plain
    # upstream name; OVERRIDES is extended so DISTRO_PN_ALIAS can be set
    # per base recipe.
    if pn.find("-native") != -1:
        pnstripped = pn.split("-native")
        localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
        bb.data.update_data(localdata)
        recipe_name = pnstripped[0]

    if pn.startswith("nativesdk-"):
        pnstripped = pn.split("nativesdk-")
        localdata.setVar('OVERRIDES', "pn-" + pnstripped[1] + ":" + d.getVar('OVERRIDES', True))
        bb.data.update_data(localdata)
        recipe_name = pnstripped[1]

    if pn.find("-cross") != -1:
        pnstripped = pn.split("-cross")
        localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
        bb.data.update_data(localdata)
        recipe_name = pnstripped[0]

    if pn.find("-initial") != -1:
        pnstripped = pn.split("-initial")
        localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
        bb.data.update_data(localdata)
        recipe_name = pnstripped[0]

    bb.note("Recipe: %s" % recipe_name)
    tmp = localdata.getVar('DISTRO_PN_ALIAS', True)

    distro_exceptions = dict({"OE-Core":'OE-Core', "OpenedHand":'OpenedHand', "Intel":'Intel', "Upstream":'Upstream', "Windriver":'Windriver', "OSPDT":'OSPDT Approved', "Poky":'poky'})

    # Alias entries without '=' are bare distro names; those present in
    # distro_exceptions count directly as matches.
    # NOTE(review): a bare name NOT in distro_exceptions raises KeyError
    # here -- confirm aliases are restricted to that set.
    if tmp:
        list = tmp.split(' ')
        for str in list:
            if str and str.find("=") == -1 and distro_exceptions[str]:
                matching_distros.append(str)

    # "Distro=pkgname" entries override the package name to search for in
    # that distro's list (keys lowercased for lookup).
    distro_pn_aliases = {}
    if tmp:
        list = tmp.split(' ')
        for str in list:
            if str.find("=") != -1:
                (dist, pn_alias) = str.split('=')
                distro_pn_aliases[dist.strip().lower()] = pn_alias.strip()

    # Scan each generated "<Distro>-<release>" list file; record at most
    # one match per file, then move on.
    for file in os.listdir(pkglst_dir):
        (distro, distro_release) = file.split("-")
        f = open(os.path.join(pkglst_dir, file), "rb")
        for line in f:
            (pkg, section) = line.split(":")
            if distro.lower() in distro_pn_aliases:
                pn = distro_pn_aliases[distro.lower()]
            else:
                pn = recipe_name
            if pn == pkg:
                matching_distros.append(distro + "-" + section[:-1]) # strip the \n at the end
                f.close()
                break
        f.close()


    # NOTE(review): this re-appends every raw alias entry (including the
    # "Distro=pkg" ones already handled above), which can duplicate items
    # in matching_distros -- verify this is intentional.
    if tmp != None:
        list = tmp.split(' ')
        for item in list:
            matching_distros.append(item)
    bb.note("Matching: %s" % matching_distros)
    return matching_distros
349 | |||
def create_log_file(d, logname):
    """Create a timestamped log file under LOG_DIR plus a "<logname>"
    symlink pointing at it; sets LOG_FILE in the datastore and returns
    the log file path."""
    logpath = d.getVar('LOG_DIR', True)
    bb.utils.mkdirhier(logpath)
    logfn, logsuffix = os.path.splitext(logname)
    logfile = os.path.join(logpath, "%s.%s%s" % (logfn, d.getVar('DATETIME', True), logsuffix))
    if not os.path.exists(logfile):
        slogfile = os.path.join(logpath, logname)
        if os.path.exists(slogfile):
            os.remove(slogfile)
        # Create the empty log file directly instead of shelling out to
        # 'touch' -- avoids a subprocess and interpolating the path into
        # a shell command line.
        open(logfile, 'w').close()
        os.symlink(logfile, slogfile)
    d.setVar('LOG_FILE', logfile)
    return logfile
364 | |||
365 | |||
def save_distro_check_result(result, datetime, result_file, d):
    """Append a CSV line "PN,<result items...>" to result_file, holding an
    exclusive lock so concurrent writers do not interleave.

    'datetime' is currently unused but kept for interface compatibility.
    """
    pn = d.getVar('PN', True)
    logdir = d.getVar('LOG_DIR', True)
    if not logdir:
        bb.error("LOG_DIR variable is not defined, can't write the distro_check results")
        return
    if not os.path.isdir(logdir):
        os.makedirs(logdir)
    line = pn
    for i in result:
        line = line + "," + i
    import fcntl
    f = open(result_file, "a")
    try:
        fcntl.lockf(f, fcntl.LOCK_EX)
        f.seek(0, os.SEEK_END) # seek to the end of file
        f.write(line + "\n")
        fcntl.lockf(f, fcntl.LOCK_UN)
    finally:
        # Close (and thereby release the lock) even if the write fails;
        # the original leaked the handle on error.
        f.close()
diff --git a/meta/lib/oe/license.py b/meta/lib/oe/license.py new file mode 100644 index 0000000000..340da61102 --- /dev/null +++ b/meta/lib/oe/license.py | |||
@@ -0,0 +1,116 @@ | |||
1 | # vi:sts=4:sw=4:et | ||
2 | """Code for parsing OpenEmbedded license strings""" | ||
3 | |||
4 | import ast | ||
5 | import re | ||
6 | from fnmatch import fnmatchcase as fnmatch | ||
7 | |||
class LicenseError(Exception):
    """Base class for errors raised while handling license strings."""
    pass
10 | |||
class LicenseSyntaxError(LicenseError):
    """Raised when a license string cannot be parsed as an expression."""
    def __init__(self, licensestr, exc):
        LicenseError.__init__(self)
        self.licensestr = licensestr
        self.exc = exc

    def __str__(self):
        return "error in '%s': %s" % (self.licensestr, self.exc)
19 | |||
class InvalidLicense(LicenseError):
    """Raised for a license token containing disallowed characters."""
    def __init__(self, license):
        LicenseError.__init__(self)
        self.license = license

    def __str__(self):
        return "invalid characters in license '%s'" % self.license
27 | |||
28 | license_operator = re.compile('([&|() ])') | ||
29 | license_pattern = re.compile('[a-zA-Z0-9.+_\-]+$') | ||
30 | |||
class LicenseVisitor(ast.NodeVisitor):
    """Syntax tree visitor which can accept OpenEmbedded license strings"""
    def visit_string(self, licensestr):
        """Normalize 'licensestr' into a Python expression (quote license
        names, insert an implicit '&' between adjacent names), parse it
        and visit the resulting AST."""
        elements = [e for e in license_operator.split(licensestr) if e.strip()]
        normalized = []
        for pos, element in enumerate(elements):
            if license_pattern.match(element):
                # Two adjacent license names imply an AND.
                if pos > 0 and license_pattern.match(elements[pos - 1]):
                    normalized.append('&')
                normalized.append('"' + element + '"')
            elif license_operator.match(element):
                normalized.append(element)
            else:
                raise InvalidLicense(element)

        self.visit(ast.parse(' '.join(normalized)))
46 | |||
class FlattenVisitor(LicenseVisitor):
    """Flatten a license tree (parsed from a string) by selecting one of each
    set of OR options, in the way the user specifies"""
    def __init__(self, choose_licenses):
        LicenseVisitor.__init__(self)
        self.choose_licenses = choose_licenses
        self.licenses = []

    def visit_Str(self, node):
        # A quoted license name: record it.
        self.licenses.append(node.s)

    def visit_BinOp(self, node):
        if not isinstance(node.op, ast.BitOr):
            # '&' (and anything else) just descends into both operands.
            self.generic_visit(node)
            return

        # For '|', flatten each side independently, then let the caller's
        # policy decide which side's licenses to keep.
        branches = []
        for subtree in (node.left, node.right):
            sub = FlattenVisitor(self.choose_licenses)
            sub.visit(subtree)
            branches.append(sub.licenses)
        self.licenses.extend(self.choose_licenses(branches[0], branches[1]))
70 | |||
def flattened_licenses(licensestr, choose_licenses):
    """Given a license string and choose_licenses function, return a flat list of licenses"""
    visitor = FlattenVisitor(choose_licenses)
    try:
        visitor.visit_string(licensestr)
    except SyntaxError as exc:
        # Wrap parse failures so callers see the offending license string.
        raise LicenseSyntaxError(licensestr, exc)
    return visitor.licenses
79 | |||
def is_included(licensestr, whitelist=None, blacklist=None):
    """Given a license string and whitelist and blacklist, determine if the
    license string matches the whitelist and does not match the blacklist.

    Returns a tuple holding the boolean state and a list of the applicable
    licenses which were excluded (or None, if the state is True)
    """

    def include_license(license):
        return any(fnmatch(license, pattern) for pattern in whitelist)

    def exclude_license(license):
        return any(fnmatch(license, pattern) for pattern in blacklist)

    def choose_licenses(alpha, beta):
        """Select the option in an OR which is the 'best' (has the most
        included licenses)."""
        alpha_weight = sum(1 for lic in alpha if include_license(lic))
        beta_weight = sum(1 for lic in beta if include_license(lic))
        return alpha if alpha_weight > beta_weight else beta

    # Default: everything whitelisted, nothing blacklisted.
    whitelist = whitelist or ['*']
    blacklist = blacklist or []

    licenses = flattened_licenses(licensestr, choose_licenses)
    excluded = [lic for lic in licenses if exclude_license(lic)]
    if excluded:
        return False, excluded
    return True, [lic for lic in licenses if include_license(lic)]
diff --git a/meta/lib/oe/lsb.py b/meta/lib/oe/lsb.py new file mode 100644 index 0000000000..b53f361035 --- /dev/null +++ b/meta/lib/oe/lsb.py | |||
@@ -0,0 +1,81 @@ | |||
def release_dict():
    """Return the output of lsb_release -ir as a dictionary"""
    from subprocess import PIPE

    try:
        output, err = bb.process.run(['lsb_release', '-ir'], stderr=PIPE)
    except bb.process.CmdError:
        # lsb_release missing or failed; caller falls back to /etc files.
        return None

    data = {}
    for line in output.splitlines():
        try:
            key, value = line.split(":\t", 1)
        except ValueError:
            # Line not in "Key:\tValue" form; skip it.
            continue
        data[key] = value
    return data
19 | |||
def release_dict_file():
    """ Try to gather LSB release information manually when lsb_release tool is unavailable """
    data = None
    try:
        if os.path.exists('/etc/lsb-release'):
            # KEY=value lines; keep everything verbatim.
            data = {}
            with open('/etc/lsb-release') as f:
                for line in f:
                    key, value = line.split("=", 1)
                    data[key] = value.strip()
        elif os.path.exists('/etc/redhat-release'):
            # Single line: "<distro> release <version> (<codename>)".
            data = {}
            with open('/etc/redhat-release') as f:
                distro = f.readline().strip()
            import re
            match = re.match(r'(.*) release (.*) \((.*)\)', distro)
            if match:
                data['DISTRIB_ID'] = match.group(1)
                data['DISTRIB_RELEASE'] = match.group(2)
        elif os.path.exists('/etc/SuSE-release'):
            data = {'DISTRIB_ID': 'SUSE LINUX'}
            with open('/etc/SuSE-release') as f:
                for line in f:
                    if line.startswith('VERSION = '):
                        data['DISTRIB_RELEASE'] = line[10:].rstrip()
                        break
        elif os.path.exists('/etc/os-release'):
            # NAME= / VERSION_ID= entries, possibly double-quoted.
            data = {}
            with open('/etc/os-release') as f:
                for line in f:
                    if line.startswith('NAME='):
                        data['DISTRIB_ID'] = line[5:].rstrip().strip('"')
                    if line.startswith('VERSION_ID='):
                        data['DISTRIB_RELEASE'] = line[11:].rstrip().strip('"')
    except IOError:
        return None
    return data
58 | |||
def distro_identifier(adjust_hook=None):
    """Return a distro identifier string based upon lsb_release -ri,
    with optional adjustment via a hook"""

    lsb_data = release_dict()
    if lsb_data:
        # Use .get() so partial lsb_release output (either field missing)
        # degrades to "Unknown"/no-release instead of a KeyError; this
        # also matches the file-based branch below.
        distro_id = lsb_data.get('Distributor ID')
        release = lsb_data.get('Release')
    else:
        lsb_data_file = release_dict_file()
        if lsb_data_file:
            distro_id = lsb_data_file.get('DISTRIB_ID')
            release = lsb_data_file.get('DISTRIB_RELEASE')
        else:
            distro_id, release = None, None

    if adjust_hook:
        distro_id, release = adjust_hook(distro_id, release)
    if not distro_id:
        return "Unknown"
    if release:
        id_str = '{0}-{1}'.format(distro_id, release)
    else:
        id_str = distro_id
    # Sanitize characters that would break filenames built from this id.
    return id_str.replace(' ','-').replace('/','-')
diff --git a/meta/lib/oe/maketype.py b/meta/lib/oe/maketype.py new file mode 100644 index 0000000000..139f333691 --- /dev/null +++ b/meta/lib/oe/maketype.py | |||
@@ -0,0 +1,99 @@ | |||
1 | """OpenEmbedded variable typing support | ||
2 | |||
3 | Types are defined in the metadata by name, using the 'type' flag on a | ||
4 | variable. Other flags may be utilized in the construction of the types. See | ||
5 | the arguments of the type's factory for details. | ||
6 | """ | ||
7 | |||
8 | import inspect | ||
9 | import types | ||
10 | |||
# Registry of type factories, keyed by type name (populated by register()).
available_types = {}
12 | |||
class MissingFlag(TypeError):
    """A particular flag is required to construct the type, but has not been
    provided."""
    def __init__(self, flag, type):
        TypeError.__init__(self)
        self.flag = flag
        self.type = type

    def __str__(self):
        return "Type '%s' requires flag '%s'" % (self.type, self.flag)
23 | |||
def factory(var_type):
    """Return the factory callable registered for 'var_type'.

    Raises TypeError when no type is specified or the name is unknown."""
    if var_type is None:
        raise TypeError("No type specified. Valid types: %s" %
                        ', '.join(available_types))
    try:
        return available_types[var_type]
    except KeyError:
        raise TypeError("Invalid type '%s':\n Valid types: %s" %
                        (var_type, ', '.join(available_types)))
34 | |||
def create(value, var_type, **flags):
    """Create an object of the specified type, given the specified flags and
    string value."""
    obj = factory(var_type)
    # Pass through only the flags this factory declares; a declared flag
    # that was not supplied is an error unless the factory marks it
    # optional.
    objflags = {}
    for flag in obj.flags:
        if flag in flags:
            objflags[flag] = flags[flag]
        elif flag not in obj.optflags:
            raise MissingFlag(flag, var_type)

    return obj(value, **objflags)
48 | |||
def get_callable_args(obj):
    """Grab all but the first argument of the specified callable.

    Returns (flaglist, optional): the list of argument names and the set
    of those which have default values. For a class, its __init__ is
    inspected and the implicit 'self' is skipped.
    """
    if type(obj) is type:
        obj = obj.__init__

    # inspect.getargspec() was removed in Python 3.11; prefer
    # getfullargspec() where it exists (the first four fields match).
    try:
        argspec = inspect.getfullargspec(obj)
    except AttributeError:
        argspec = inspect.getargspec(obj)
    args, defaults = argspec[0], argspec[3]

    flaglist = []
    if args:
        if len(args) > 1 and args[0] == 'self':
            args = args[1:]
        flaglist.extend(args)

    optional = set()
    if defaults:
        # The trailing len(defaults) arguments are the defaulted ones.
        optional |= set(flaglist[-len(defaults):])
    return flaglist, optional
67 | |||
def factory_setup(name, obj):
    """Prepare a factory for use."""
    args, optional = get_callable_args(obj)
    # The first argument receives the value itself; the rest are flags.
    extra_args = args[1:]
    if extra_args:
        obj.flags = extra_args
        obj.optflags = set(optional)
    else:
        obj.flags = obj.optflags = ()

    # Let a factory override the name it registers under.
    if not hasattr(obj, 'name'):
        obj.name = name
80 | |||
def register(name, factory):
    """Register a type, given its name and a factory callable.

    Determines the required and optional flags from the factory's
    arguments."""
    factory_setup(name, factory)
    # Index by the factory's own name, which factory_setup() defaulted to
    # 'name' when the callable didn't define one.
    available_types[factory.name] = factory
88 | |||
89 | |||
# Register all our included types
for name in dir(types):
    if name.startswith('_'):
        continue

    obj = getattr(types, name)
    # Only callables can act as type factories.
    if callable(obj):
        register(name, obj)
diff --git a/meta/lib/oe/package.py b/meta/lib/oe/package.py new file mode 100644 index 0000000000..9a0ddb8536 --- /dev/null +++ b/meta/lib/oe/package.py | |||
@@ -0,0 +1,96 @@ | |||
def runstrip(arg):
    # Function to strip a single file, called from split_and_strip_files below
    # A working 'file' (one which works on the target architecture)
    #
    # The elftype is a bit pattern (explained in split_and_strip_files) to tell
    # us what type of file we're processing...
    # 4 - executable
    # 8 - shared library
    # 16 - kernel module
    #
    # arg is a (path, elftype, strip-tool) tuple; 'os' and 'bb' come from the
    # bitbake execution environment.
    import stat, subprocess

    (file, elftype, strip) = arg

    # Temporarily grant read/write permission if either is missing so the
    # strip tool can operate; the original mode is restored afterwards.
    # (Fixed: the second test previously lacked 'not', so this fired for any
    # readable file and skipped genuinely unreadable ones.)
    newmode = None
    if not os.access(file, os.W_OK) or not os.access(file, os.R_OK):
        origmode = os.stat(file)[stat.ST_MODE]
        newmode = origmode | stat.S_IWRITE | stat.S_IREAD
        os.chmod(file, newmode)

    extraflags = ""

    # kernel module
    if elftype & 16:
        extraflags = "--strip-debug --remove-section=.comment --remove-section=.note --preserve-dates"
    # .so and shared library
    elif ".so" in file and elftype & 8:
        extraflags = "--remove-section=.comment --remove-section=.note --strip-unneeded"
    # shared or executable:
    elif elftype & 8 or elftype & 4:
        extraflags = "--remove-section=.comment --remove-section=.note"

    stripcmd = "'%s' %s '%s'" % (strip, extraflags, file)
    bb.debug(1, "runstrip: %s" % stripcmd)

    ret = subprocess.call(stripcmd, shell=True)

    if newmode:
        os.chmod(file, origmode)

    if ret:
        bb.error("runstrip: '%s' strip command failed" % stripcmd)

    return
45 | |||
46 | |||
47 | def file_translate(file): | ||
48 | ft = file.replace("@", "@at@") | ||
49 | ft = ft.replace(" ", "@space@") | ||
50 | ft = ft.replace("\t", "@tab@") | ||
51 | ft = ft.replace("[", "@openbrace@") | ||
52 | ft = ft.replace("]", "@closebrace@") | ||
53 | ft = ft.replace("_", "@underscore@") | ||
54 | return ft | ||
55 | |||
def filedeprunner(arg):
    """Compute per-file Provides:/Requires: entries for a package by running
    the rpmdeps helper over its files (single tuple arg so this can be used
    from a multiprocessing pool).

    arg is (pkg, pkgfiles, rpmdeps, pkgdest); returns (pkg, provides,
    requires) where provides/requires map a translated file name to a list
    of dependency strings.
    """
    import re

    (pkg, pkgfiles, rpmdeps, pkgdest) = arg
    provides = {}
    requires = {}

    # Used below to wrap version constraints ("<= 1.2" etc.) in parentheses.
    r = re.compile(r'[<>=]+ +[^ ]*')

    def process_deps(pipe, pkg, pkgdest, provides, requires):
        for line in pipe:
            # rpmdeps output lines look like "<file> Requires:/Provides: <value>"
            f = line.split(" ", 1)[0].strip()
            line = line.split(" ", 1)[1].strip()

            if line.startswith("Requires:"):
                i = requires
            elif line.startswith("Provides:"):
                i = provides
            else:
                continue

            # Strip the packaging root prefix and escape awkward characters.
            file = f.replace(pkgdest + "/" + pkg, "")
            file = file_translate(file)
            value = line.split(":", 1)[1].strip()
            value = r.sub(r'(\g<0>)', value)

            # Drop rpm-internal pseudo-dependencies and the bare python dep.
            if value.startswith("rpmlib("):
                continue
            if value == "python":
                continue
            if file not in i:
                i[file] = []
            i[file].append(value)

        return provides, requires

    # NOTE(review): os.popen is deprecated and the command line is built by
    # plain join -- file names with shell metacharacters would break; confirm
    # callers guarantee safe paths.  'os' is provided by the bitbake env.
    dep_pipe = os.popen(rpmdeps + " " + " ".join(pkgfiles))

    provides, requires = process_deps(dep_pipe, pkg, pkgdest, provides, requires)

    return (pkg, provides, requires)
diff --git a/meta/lib/oe/packagedata.py b/meta/lib/oe/packagedata.py new file mode 100644 index 0000000000..cd5f0445f5 --- /dev/null +++ b/meta/lib/oe/packagedata.py | |||
@@ -0,0 +1,94 @@ | |||
1 | import codecs | ||
2 | |||
def packaged(pkg, d):
    """Return True if package *pkg* has a readable '.packaged' marker file."""
    marker = get_subpkgedata_fn(pkg, d) + '.packaged'
    return os.access(marker, os.R_OK)
5 | |||
def read_pkgdatafile(fn):
    """Parse the pkgdata file *fn* into a {variable: value} dict.

    Lines have the form "NAME: value"; values are unescaped with the
    string-escape codec.  An unreadable/missing file yields an empty dict.
    """
    pkgdata = {}

    def decode(str):
        # "string_escape" only exists on python 2; "unicode_escape" is the
        # equivalent decoder on python 3.
        try:
            c = codecs.getdecoder("string_escape")
        except LookupError:
            c = codecs.getdecoder("unicode_escape")
        return c(str)[0]

    if os.access(fn, os.R_OK):
        import re
        r = re.compile("([^:]+):\s*(.*)")
        # Use a context manager so the file is closed even if decoding fails.
        with open(fn, 'r') as f:
            for l in f:
                m = r.match(l)
                if m:
                    pkgdata[m.group(1)] = decode(m.group(2))

    return pkgdata
25 | |||
def get_subpkgedata_fn(pkg, d):
    """Return the runtime pkgdata file path for binary package *pkg*."""
    return d.expand("${PKGDATA_DIR}/runtime/" + pkg)
28 | |||
def has_subpkgdata(pkg, d):
    """Return True if a readable runtime pkgdata file exists for *pkg*."""
    fn = get_subpkgedata_fn(pkg, d)
    return os.access(fn, os.R_OK)
31 | |||
def read_subpkgdata(pkg, d):
    """Parse and return the runtime pkgdata for binary package *pkg*."""
    fn = get_subpkgedata_fn(pkg, d)
    return read_pkgdatafile(fn)
34 | |||
def has_pkgdata(pn, d):
    """Return True if recipe-level pkgdata exists (and is readable) for
    recipe *pn*."""
    return os.access(d.expand('${PKGDATA_DIR}/%s' % pn), os.R_OK)
38 | |||
def read_pkgdata(pn, d):
    """Parse and return the recipe-level pkgdata for recipe *pn*."""
    return read_pkgdatafile(d.expand('${PKGDATA_DIR}/%s' % pn))
42 | |||
#
# Collapse FOO_pkg variables into FOO
#
def read_subpkgdata_dict(pkg, d):
    """Return the runtime pkgdata for *pkg* with the package-name suffix
    stripped from variable names (e.g. FILES_foo -> FILES)."""
    ret = {}
    subd = read_pkgdatafile(get_subpkgedata_fn(pkg, d))
    for var in subd:
        newvar = var.replace("_" + pkg, "")
        # If this variable had no suffix to strip but a suffixed variant
        # also exists, skip it: the package-specific value takes priority.
        if newvar == var and var + "_" + pkg in subd:
            continue
        ret[newvar] = subd[var]
    return ret
55 | |||
def _pkgmap(d):
    """Return a dictionary mapping package to recipe name."""

    pkgdatadir = d.getVar("PKGDATA_DIR", True)

    pkgmap = {}
    try:
        files = os.listdir(pkgdatadir)
    except OSError:
        bb.warn("No files in %s?" % pkgdatadir)
        files = []

    for pn in files:
        # Only the top-level per-recipe files matter; skip subdirectories
        # (e.g. runtime/).
        if os.path.isdir(os.path.join(pkgdatadir, pn)):
            continue
        try:
            pkgdata = read_pkgdatafile(os.path.join(pkgdatadir, pn))
        except OSError:
            continue

        for pkg in (pkgdata.get("PACKAGES") or "").split():
            pkgmap[pkg] = pn

    return pkgmap
79 | |||
def pkgmap(d):
    """Return a dictionary mapping package to recipe name.
    Cache the mapping in the metadata"""

    cached = d.getVar("__pkgmap_data", False)
    if cached is None:
        cached = _pkgmap(d)
        d.setVar("__pkgmap_data", cached)
    return cached
90 | |||
def recipename(pkg, d):
    """Return the recipe name for the given binary package name."""
    mapping = pkgmap(d)
    return mapping.get(pkg)
diff --git a/meta/lib/oe/packagegroup.py b/meta/lib/oe/packagegroup.py new file mode 100644 index 0000000000..b04c45a1af --- /dev/null +++ b/meta/lib/oe/packagegroup.py | |||
@@ -0,0 +1,29 @@ | |||
1 | import itertools | ||
2 | |||
def is_optional(group, d):
    """Return True if PACKAGE_GROUP_<group> carries the 'optional' flag."""
    flag = d.getVarFlag("PACKAGE_GROUP_%s" % group, "optional")
    return bool(flag)
5 | |||
def packages(groups, d):
    """Yield every package listed by the PACKAGE_GROUP_<group> variable of
    each group in *groups* (unset groups contribute nothing)."""
    for group in groups:
        value = d.getVar("PACKAGE_GROUP_%s" % group, True) or ""
        for pkg in value.split():
            yield pkg
10 | |||
def required_packages(groups, d):
    """Return the packages belonging to the non-optional groups."""
    mandatory = [group for group in groups if not is_optional(group, d)]
    return packages(mandatory, d)
14 | |||
def optional_packages(groups, d):
    """Return the packages belonging to the optional groups."""
    opt = [group for group in groups if is_optional(group, d)]
    return packages(opt, d)
18 | |||
def active_packages(features, d):
    """Yield the required packages for *features*, then the optional ones."""
    for pkg in itertools.chain(required_packages(features, d),
                               optional_packages(features, d)):
        yield pkg
22 | |||
def active_recipes(features, d):
    """Yield the recipe name for each active package, skipping packages
    with no known recipe."""
    import oe.packagedata

    lookup = oe.packagedata.recipename
    for pkg in active_packages(features, d):
        recipe = lookup(pkg, d)
        if recipe:
            yield recipe
diff --git a/meta/lib/oe/patch.py b/meta/lib/oe/patch.py new file mode 100644 index 0000000000..59abd0af19 --- /dev/null +++ b/meta/lib/oe/patch.py | |||
@@ -0,0 +1,441 @@ | |||
1 | import oe.path | ||
2 | |||
class NotFoundError(bb.BBHandledException):
    """Raised when a required path does not exist."""

    def __init__(self, path):
        self.path = path

    def __str__(self):
        return "Error: %s not found." % (self.path,)
9 | |||
class CmdError(bb.BBHandledException):
    """Raised when an external command exits with a non-zero status."""

    def __init__(self, exitstatus, output):
        self.status = exitstatus
        self.output = output

    def __str__(self):
        return "Command Error: exit status: %d Output:\n%s" % (
            self.status, self.output)
17 | |||
18 | |||
def runcmd(args, dir = None):
    """Run *args* (a list of command arguments) via the shell, optionally
    from directory *dir*, and return the command's output.

    Raises NotFoundError if *dir* does not exist and CmdError if the
    command exits non-zero.  The previous working directory is always
    restored."""
    import pipes

    if dir:
        olddir = os.path.abspath(os.curdir)
        if not os.path.exists(dir):
            raise NotFoundError(dir)
        os.chdir(dir)
        # print("cwd: %s -> %s" % (olddir, dir))

    try:
        # Quote each argument so the joined string survives the shell.
        args = [ pipes.quote(str(arg)) for arg in args ]
        cmd = " ".join(args)
        # print("cmd: %s" % cmd)
        (exitstatus, output) = oe.utils.getstatusoutput(cmd)
        if exitstatus != 0:
            # getstatusoutput() returns a wait()-style status word; the
            # high byte holds the actual exit code.
            raise CmdError(exitstatus >> 8, output)
        return output

    finally:
        if dir:
            os.chdir(olddir)
41 | |||
class PatchError(Exception):
    """Generic error while importing, applying or refreshing a patch."""

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return "Patch Error: %s" % (self.msg,)
48 | |||
class PatchSet(object):
    """Abstract base for an ordered collection of patches applied to a
    source directory.  Subclasses implement the actual apply/unapply
    mechanics (plain patch, git apply, quilt)."""

    # Default values filled into each imported patch dict.
    defaults = {
        "strippath": 1
    }

    def __init__(self, dir, d):
        self.dir = dir          # source tree the patches apply to
        self.d = d              # bitbake datastore
        self.patches = []       # patch dicts, in series order
        self._current = None    # index of the topmost applied patch

    def current(self):
        """Return the index of the topmost applied patch (None if none)."""
        return self._current

    def Clean(self):
        """
        Clean out the patch set.  Generally includes unapplying all
        patches and wiping out all associated metadata.
        """
        raise NotImplementedError()

    def Import(self, patch, force):
        """Validate and normalise *patch* (a dict): fetch remote patches,
        fill in defaults and record the patch file's md5."""
        if not patch.get("file"):
            if not patch.get("remote"):
                raise PatchError("Patch file must be specified in patch import.")
            else:
                # Fetch the remote patch to a local path first.
                patch["file"] = bb.fetch2.localpath(patch["remote"], self.d)

        for param in PatchSet.defaults:
            if not patch.get(param):
                patch[param] = PatchSet.defaults[param]

        if patch.get("remote"):
            patch["file"] = bb.data.expand(bb.fetch2.localpath(patch["remote"], self.d), self.d)

        patch["filemd5"] = bb.utils.md5_file(patch["file"])

    def Push(self, force):
        """Apply the next patch (subclass responsibility)."""
        raise NotImplementedError()

    def Pop(self, force):
        """Unapply the topmost patch (subclass responsibility)."""
        raise NotImplementedError()

    def Refresh(self, remote = None, all = None):
        """Refresh patch file(s) after modification (subclass responsibility)."""
        raise NotImplementedError()
94 | |||
95 | |||
class PatchTree(PatchSet):
    """PatchSet implementation using plain 'patch', tracking applied patches
    in <dir>/patches/series (one "<file>,<strippath>" entry per line)."""

    def __init__(self, dir, d):
        PatchSet.__init__(self, dir, d)
        self.patchdir = os.path.join(self.dir, 'patches')
        self.seriespath = os.path.join(self.dir, 'patches', 'series')
        bb.utils.mkdirhier(self.patchdir)

    def _appendPatchFile(self, patch, strippath):
        # Record the applied patch in the series file and keep a copy of it.
        with open(self.seriespath, 'a') as f:
            f.write(os.path.basename(patch) + "," + strippath + "\n")
        shellcmd = ["cat", patch, ">" , self.patchdir + "/" + os.path.basename(patch)]
        runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)

    def _removePatch(self, p):
        # p is a "<file>,<strippath>" series entry; reverse-apply it.
        patch = {}
        patch['file'] = p.split(",")[0]
        patch['strippath'] = p.split(",")[1]
        self._applypatch(patch, False, True)

    def _removePatchFile(self, all = False):
        """Reverse-apply the topmost patch (or all of them) and trim the
        series file accordingly."""
        if not os.path.exists(self.seriespath):
            return
        patches = open(self.seriespath, 'r+').readlines()
        if all:
            # Unapply in reverse order so each patch pops cleanly.
            for p in reversed(patches):
                self._removePatch(os.path.join(self.patchdir, p.strip()))
            patches = []
        else:
            self._removePatch(os.path.join(self.patchdir, patches[-1].strip()))
            patches.pop()
        with open(self.seriespath, 'w') as f:
            for p in patches:
                f.write(p)

    def Import(self, patch, force = None):
        """Insert *patch* after the current position without applying it."""
        PatchSet.Import(self, patch, force)

        if self._current is not None:
            i = self._current + 1
        else:
            i = 0
        self.patches.insert(i, patch)

    def _applypatch(self, patch, force = False, reverse = False, run = True):
        shellcmd = ["cat", patch['file'], "|", "patch", "-p", patch['strippath']]
        if reverse:
            shellcmd.append('-R')

        if not run:
            # NOTE(review): this concatenates without separators, yielding
            # "sh-ccat ..." -- looks broken, but no caller in this file uses
            # run=False on this class; confirm before relying on it.
            return "sh" + "-c" + " ".join(shellcmd)

        if not force:
            shellcmd.append('--dry-run')

        # First pass: dry run (unless forced) to check the patch applies.
        output = runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)

        if force:
            return

        # Second pass: drop --dry-run and apply for real.
        shellcmd.pop(len(shellcmd) - 1)
        output = runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)

        if not reverse:
            self._appendPatchFile(patch['file'], patch['strippath'])

        return output

    def Push(self, force = False, all = False, run = True):
        """Apply the next patch (or all remaining when *all* is set).
        Note the *run* parameter is accepted but unused here."""
        bb.note("self._current is %s" % self._current)
        bb.note("patches is %s" % self.patches)
        if all:
            for i in self.patches:
                bb.note("applying patch %s" % i)
                self._applypatch(i, force)
                # NOTE(review): this stores the patch dict itself, whereas
                # the single-patch branch stores an integer index -- confirm
                # nothing relies on _current after Push(all=True).
                self._current = i
        else:
            if self._current is not None:
                next = self._current + 1
            else:
                next = 0

            bb.note("applying patch %s" % self.patches[next])
            ret = self._applypatch(self.patches[next], force)

            self._current = next
            return ret

    def Pop(self, force = None, all = None):
        """Unapply the topmost patch (or everything when *all* is set)."""
        if all:
            self._removePatchFile(True)
            self._current = None
        else:
            self._removePatchFile(False)

        if self._current == 0:
            self._current = None

        if self._current is not None:
            self._current = self._current - 1

    def Clean(self):
        """Unapply all patches."""
        self.Pop(all=True)
200 | |||
class GitApplyTree(PatchTree):
    """PatchTree variant that applies patches with 'git apply' instead of
    'patch' (series bookkeeping is inherited from PatchTree)."""

    def __init__(self, dir, d):
        PatchTree.__init__(self, dir, d)

    def _applypatch(self, patch, force = False, reverse = False, run = True):
        shellcmd = ["git", "--git-dir=.", "apply", "-p%s" % patch['strippath']]

        if reverse:
            shellcmd.append('-R')

        shellcmd.append(patch['file'])

        if not run:
            # NOTE(review): concatenated without separators ("sh-cgit ...");
            # apparently unused with run=False -- confirm before relying on it.
            return "sh" + "-c" + " ".join(shellcmd)

        return runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
217 | |||
218 | |||
class QuiltTree(PatchSet):
    """PatchSet implementation that drives the 'quilt' tool; quilt itself
    keeps series/applied state under <dir>/patches."""

    def _runcmd(self, args, run = True):
        # All quilt invocations use the configured quiltrc so behaviour is
        # independent of the user's own ~/.quiltrc.
        quiltrc = self.d.getVar('QUILTRCFILE', True)
        if not run:
            return ["quilt"] + ["--quiltrc"] + [quiltrc] + args
        runcmd(["quilt"] + ["--quiltrc"] + [quiltrc] + args, self.dir)

    def _quiltpatchpath(self, file):
        # Location of the patch inside quilt's patches directory.
        return os.path.join(self.dir, "patches", os.path.basename(file))


    def __init__(self, dir, d):
        PatchSet.__init__(self, dir, d)
        self.initialized = False
        p = os.path.join(self.dir, 'patches')
        if not os.path.exists(p):
            os.makedirs(p)

    def Clean(self):
        """Unapply all patches and remove the series file (best-effort)."""
        try:
            self._runcmd(["pop", "-a", "-f"])
            oe.path.remove(os.path.join(self.dir, "patches","series"))
        except Exception:
            # Nothing applied / no series file is acceptable here.
            pass
        self.initialized = True

    def InitFromDir(self):
        """Populate self.patches and self._current from an existing quilt
        patches directory."""
        # read series -> self.patches
        seriespath = os.path.join(self.dir, 'patches', 'series')
        if not os.path.exists(self.dir):
            raise NotFoundError(self.dir)
        if os.path.exists(seriespath):
            # NOTE(review): file() is python-2-only (open() on python 3).
            series = file(seriespath, 'r')
            for line in series.readlines():
                patch = {}
                parts = line.strip().split()
                patch["quiltfile"] = self._quiltpatchpath(parts[0])
                patch["quiltfilemd5"] = bb.utils.md5_file(patch["quiltfile"])
                if len(parts) > 1:
                    # series lines look like "<name> -pN"; strip the "-p".
                    patch["strippath"] = parts[1][2:]
                self.patches.append(patch)
            series.close()

        # determine which patches are applied -> self._current
        try:
            output = runcmd(["quilt", "applied"], self.dir)
        except CmdError:
            import sys
            # NOTE(review): sys.exc_value is deprecated python 2 API; the
            # modern form is "except CmdError as e: e.output".
            if sys.exc_value.output.strip() == "No patches applied":
                return
            else:
                raise
        output = [val for val in output.split('\n') if not val.startswith('#')]
        for patch in self.patches:
            if os.path.basename(patch["quiltfile"]) == output[-1]:
                self._current = self.patches.index(patch)
        self.initialized = True

    def Import(self, patch, force = None):
        """Link the patch into quilt's patches dir, append it to the series
        file and insert it into self.patches (without applying it)."""
        if not self.initialized:
            self.InitFromDir()
        PatchSet.Import(self, patch, force)
        oe.path.symlink(patch["file"], self._quiltpatchpath(patch["file"]), force=True)
        f = open(os.path.join(self.dir, "patches","series"), "a");
        f.write(os.path.basename(patch["file"]) + " -p" + patch["strippath"]+"\n")
        f.close()
        patch["quiltfile"] = self._quiltpatchpath(patch["file"])
        patch["quiltfilemd5"] = bb.utils.md5_file(patch["quiltfile"])

        # TODO: determine if the file being imported:
        #      1) is already imported, and is the same
        #      2) is already imported, but differs

        self.patches.insert(self._current or 0, patch)


    def Push(self, force = False, all = False, run = True):
        """quilt push [-f] [-a]; with run=False return the command instead
        of executing it."""
        # quilt push [-f]

        args = ["push"]
        if force:
            args.append("-f")
        if all:
            args.append("-a")
        if not run:
            return self._runcmd(args, run)

        self._runcmd(args)

        if self._current is not None:
            self._current = self._current + 1
        else:
            self._current = 0

    def Pop(self, force = None, all = None):
        """quilt pop [-f] [-a], updating self._current to match."""
        # quilt pop [-f]
        args = ["pop"]
        if force:
            args.append("-f")
        if all:
            args.append("-a")

        self._runcmd(args)

        if self._current == 0:
            self._current = None

        if self._current is not None:
            self._current = self._current - 1

    def Refresh(self, **kwargs):
        """With remote=True, copy the (possibly user-edited) quilt patch
        back over the local file of its remote URL; otherwise run
        'quilt refresh' on the given patch."""
        if kwargs.get("remote"):
            patch = self.patches[kwargs["patch"]]
            if not patch:
                raise PatchError("No patch found at index %s in patchset." % kwargs["patch"])
            (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(patch["remote"])
            if type == "file":
                import shutil
                if not patch.get("file") and patch.get("remote"):
                    patch["file"] = bb.fetch2.localpath(patch["remote"], self.d)

                shutil.copyfile(patch["quiltfile"], patch["file"])
            else:
                raise PatchError("Unable to do a remote refresh of %s, unsupported remote url scheme %s." % (os.path.basename(patch["quiltfile"]), type))
        else:
            # quilt refresh
            args = ["refresh"]
            if kwargs.get("quiltfile"):
                args.append(os.path.basename(kwargs["quiltfile"]))
            elif kwargs.get("patch"):
                args.append(os.path.basename(self.patches[kwargs["patch"]]["quiltfile"]))
            self._runcmd(args)
351 | |||
class Resolver(object):
    """Interface for strategies that resolve patch application failures.
    Concrete resolvers (NOOPResolver, UserResolver) implement these."""

    def __init__(self, patchset, terminal):
        raise NotImplementedError()

    def Resolve(self):
        raise NotImplementedError()

    def Revert(self):
        raise NotImplementedError()

    def Finalize(self):
        raise NotImplementedError()
364 | |||
class NOOPResolver(Resolver):
    """Resolver that performs no interactive resolution: it simply retries
    the push and propagates any failure to the caller."""

    def __init__(self, patchset, terminal):
        self.patchset = patchset
        self.terminal = terminal

    def Resolve(self):
        """Apply the patchset from within its directory.

        The previous working directory is now restored on success as well
        as on failure (previously it was only restored when Push raised,
        unlike UserResolver which always restores it)."""
        olddir = os.path.abspath(os.curdir)
        os.chdir(self.patchset.dir)
        try:
            self.patchset.Push()
        finally:
            os.chdir(olddir)
379 | |||
# Patch resolver which relies on the user doing all the work involved in the
# resolution, with the exception of refreshing the remote copy of the patch
# files (the urls).
class UserResolver(Resolver):
    def __init__(self, patchset, terminal):
        self.patchset = patchset
        self.terminal = terminal

    # Force a push in the patchset, then drop to a shell for the user to
    # resolve any rejected hunks
    def Resolve(self):
        """Try to apply the patchset; on failure, drop the user into an
        interactive shell to fix rejects, then compare the refreshed
        patches against the originals and push changed ones back to their
        remote (file://) locations."""
        olddir = os.path.abspath(os.curdir)
        os.chdir(self.patchset.dir)
        try:
            self.patchset.Push(False)
        except CmdError as v:
            # Patch application failed
            patchcmd = self.patchset.Push(True, False, False)

            t = self.patchset.d.getVar('T', True)
            if not t:
                bb.msg.fatal("Build", "T not set")
            bb.utils.mkdirhier(t)
            import random
            rcfile = "%s/bashrc.%s.%s" % (t, str(os.getpid()), random.random())
            f = open(rcfile, "w")
            f.write("echo '*** Manual patch resolution mode ***'\n")
            f.write("echo 'Dropping to a shell, so patch rejects can be fixed manually.'\n")
            f.write("echo 'Run \"quilt refresh\" when patch is corrected, press CTRL+D to exit.'\n")
            f.write("echo ''\n")
            f.write(" ".join(patchcmd) + "\n")
            f.close()
            # 0o775: the old literal 0775 is python-2-only syntax; 0o775 is
            # the same value and valid on python 2.6+ and python 3.
            os.chmod(rcfile, 0o775)

            self.terminal("bash --rcfile " + rcfile, 'Patch Rejects: Please fix patch rejects manually', self.patchset.d)

            # Construct a new PatchSet after the user's changes, compare the
            # sets, checking patches for modifications, and doing a remote
            # refresh on each.
            oldpatchset = self.patchset
            self.patchset = oldpatchset.__class__(self.patchset.dir, self.patchset.d)

            for patch in self.patchset.patches:
                oldpatch = None
                for opatch in oldpatchset.patches:
                    if opatch["quiltfile"] == patch["quiltfile"]:
                        oldpatch = opatch

                if oldpatch:
                    patch["remote"] = oldpatch["remote"]
                    if patch["quiltfile"] == oldpatch["quiltfile"]:
                        if patch["quiltfilemd5"] != oldpatch["quiltfilemd5"]:
                            bb.note("Patch %s has changed, updating remote url %s" % (os.path.basename(patch["quiltfile"]), patch["remote"]))
                            # user change? remote refresh
                            self.patchset.Refresh(remote=True, patch=self.patchset.patches.index(patch))
                        else:
                            # User did not fix the problem. Abort.
                            raise PatchError("Patch application failed, and user did not fix and refresh the patch.")
        except Exception:
            os.chdir(olddir)
            raise
        os.chdir(olddir)
diff --git a/meta/lib/oe/path.py b/meta/lib/oe/path.py new file mode 100644 index 0000000000..1310e38fe1 --- /dev/null +++ b/meta/lib/oe/path.py | |||
@@ -0,0 +1,261 @@ | |||
1 | import errno | ||
2 | import glob | ||
3 | import shutil | ||
4 | import subprocess | ||
5 | import os.path | ||
6 | |||
def join(*paths):
    """Join path components with '/' and normalize the result.  Unlike
    os.path.join, an absolute right-hand component does not discard the
    components to its left."""
    joined = "/".join(paths)
    return os.path.normpath(joined)
10 | |||
def relative(src, dest):
    """Return a relative path from src to dest.

    >>> relative("/usr/bin", "/tmp/foo/bar")
    '../../tmp/foo/bar'

    >>> relative("/usr/bin", "/usr/lib")
    '../lib'

    >>> relative("/tmp", "/tmp/foo/bar")
    'foo/bar'
    """

    if hasattr(os.path, "relpath"):
        return os.path.relpath(dest, src)

    # Fallback for python < 2.6, which lacks os.path.relpath.
    dest_parts = os.path.normpath(dest).split(os.path.sep)
    src_parts = os.path.normpath(src).split(os.path.sep)

    # Number of leading path components the two paths share.
    shared = len(os.path.commonprefix([dest_parts, src_parts]))

    # Climb out of src up to the shared ancestor...
    parts = [os.path.pardir] * (len(src_parts) - shared)
    if shared < len(dest_parts):
        # ...then descend into dest.
        parts += dest_parts[shared:]

    return os.path.sep.join(parts)
41 | |||
def make_relative_symlink(path):
    """ Convert an absolute symlink to a relative one """
    if not os.path.islink(path):
        return
    link = os.readlink(path)
    if not os.path.isabs(link):
        return

    # find the common ancestor directory
    ancestor = path
    depth = 0
    while ancestor and not link.startswith(ancestor):
        ancestor = ancestor.rpartition('/')[0]
        depth += 1

    if not ancestor:
        # NOTE(review): plain print() for an error message; elsewhere the
        # bb.* loggers are used -- consider bb.warn here.
        print("make_relative_symlink() Error: unable to find the common ancestor of %s and its target" % path)
        return

    base = link.partition(ancestor)[2].strip('/')
    # depth counts components stripped from the symlink's own path; one
    # level is already accounted for by the link's final component, hence
    # "> 1" rather than "> 0".
    while depth > 1:
        base = "../" + base
        depth -= 1

    # Replace the absolute link with the computed relative one.
    os.remove(path)
    os.symlink(base, path)
68 | |||
def format_display(path, metadata):
    """Return *path* relative to TOPDIR when that form is shorter,
    otherwise return the path unchanged."""
    rel = relative(metadata.getVar("TOPDIR", True), path)
    return rel if len(rel) <= len(path) else path
76 | |||
def copytree(src, dst):
    """Copy the tree at *src* into *dst* via a tar pipeline, preserving
    permissions and hardlinks between files within the tree."""
    # We could use something like shutil.copytree here but it turns out to
    # to be slow. It takes twice as long copying to an empty directory.
    # If dst already has contents performance can be 15 time slower
    # This way we also preserve hardlinks between files in the tree.

    bb.utils.mkdirhier(dst)
    # NOTE(review): src/dst are interpolated unquoted into a shell command;
    # paths containing spaces or metacharacters would break -- confirm
    # callers only pass safe paths.
    cmd = 'tar -cf - -C %s -p . | tar -xf - -C %s' % (src, dst)
    check_output(cmd, shell=True, stderr=subprocess.STDOUT)
86 | |||
def copyhardlinktree(src, dst):
    """ Make the hard link when possible, otherwise copy. """
    bb.utils.mkdirhier(dst)
    # An empty source directory means there is nothing to do.
    if os.path.isdir(src) and not len(os.listdir(src)):
        return

    if (os.stat(src).st_dev == os.stat(dst).st_dev):
        # Need to copy directories only with tar first since cp will error if two
        # writers try and create a directory at the same time
        # NOTE(review): src/dst are interpolated unquoted into shell commands;
        # paths with spaces/metacharacters would break.
        cmd = 'cd %s; find . -type d -print | tar -cf - -C %s -p --files-from - | tar -xf - -C %s' % (src, src, dst)
        check_output(cmd, shell=True, stderr=subprocess.STDOUT)
        if os.path.isdir(src):
            src = src + "/*"
        # Same filesystem: hard-link file contents instead of copying them.
        cmd = 'cp -afl %s %s' % (src, dst)
        check_output(cmd, shell=True, stderr=subprocess.STDOUT)
    else:
        # Hard links cannot span filesystems; fall back to a real copy.
        copytree(src, dst)
104 | |||
def remove(path, recurse=True):
    """Equivalent to rm -f (or rm -rf when *recurse* is True).

    *path* may contain glob wildcards; non-existent paths are ignored.
    """
    for name in glob.glob(path):
        try:
            os.unlink(name)
        except OSError as exc:
            # Linux reports EISDIR when unlink() hits a directory, but POSIX
            # permits EPERM (e.g. macOS/BSD), so accept both for portability.
            if recurse and (exc.errno == errno.EISDIR or
                            (exc.errno == errno.EPERM and os.path.isdir(name))):
                shutil.rmtree(name)
            elif exc.errno != errno.ENOENT:
                raise
115 | |||
def symlink(source, destination, force=False):
    """Create a symlink at *destination* pointing to *source*.

    With *force*, any existing destination is removed first.  An existing
    link that already points at *source* is tolerated silently.
    """
    try:
        if force:
            remove(destination)
        os.symlink(source, destination)
    except OSError as e:
        # Re-raise unless the failure is "already exists and already
        # points at the requested target".
        if not (e.errno == errno.EEXIST and os.readlink(destination) == source):
            raise
125 | |||
class CalledProcessError(Exception):
    """Raised by check_output() when a command exits non-zero; carries the
    return code, the command and any captured output."""

    def __init__(self, retcode, cmd, output = None):
        self.retcode = retcode
        self.cmd = cmd
        self.output = output

    def __str__(self):
        return "Command '%s' returned non-zero exit status %d with output %s" % (
            self.cmd, self.retcode, self.output)
133 | |||
# Backport of subprocess.check_output(); not needed once python 2.7 is the
# minimum version.
def check_output(*popenargs, **kwargs):
    r"""Run command with arguments and return its output as a byte string.

    Raises CalledProcessError (with the return code in .retcode and the
    captured output in .output) if the command exits non-zero.  Arguments
    are the same as for the Popen constructor; stdout= is not allowed as it
    is used internally.  Use stderr=STDOUT to fold stderr into the result.

    >>> check_output(["ls", "-l", "/dev/null"])
    'crw-rw-rw- 1 root root 1, 3 Oct 18  2007 /dev/null\n'
    """
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    proc = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
    out, _ = proc.communicate()
    status = proc.poll()
    if status:
        command = kwargs.get("args")
        if command is None:
            command = popenargs[0]
        raise CalledProcessError(status, command, output=out)
    return out
166 | |||
def find(dir, **walkoptions):
    """Yield the path of every file below *dir* (keyword options are passed
    straight through to os.walk)."""
    for root, _, filenames in os.walk(dir, **walkoptions):
        for filename in filenames:
            yield os.path.join(root, filename)
174 | |||
175 | |||
## realpath() related functions
def __is_path_below(file, root):
    # True if *file* lies below *root*; callers guarantee *root* already
    # ends with os.path.sep (see realpath()).
    return (file + os.path.sep).startswith(root)
179 | |||
def __realpath_rel(start, rel_path, root, loop_cnt, assume_dir):
    """Calculates real path of symlink 'start' + 'rel_path' below
    'root'; no part of 'start' below 'root' must contain symlinks. """
    have_dir = True

    # Walk rel_path one component at a time, resolving each against the
    # accumulated 'start' prefix.
    for d in rel_path.split(os.path.sep):
        if not have_dir and not assume_dir:
            # Previous component resolved to a non-directory; cannot descend.
            raise OSError(errno.ENOENT, "no such directory %s" % start)

        if d == os.path.pardir: # '..'
            if len(start) >= len(root):
                # do not follow '..' before root
                start = os.path.dirname(start)
            else:
                # emit warning?
                pass
        else:
            # Resolve this component (following any symlinks) via __realpath;
            # the two functions are mutually recursive.
            (start, have_dir) = __realpath(os.path.join(start, d),
                                           root, loop_cnt, assume_dir)

    assert(__is_path_below(start, root))

    return start
203 | |||
def __realpath(file, root, loop_cnt, assume_dir):
    """Follow symlinks in *file* (keeping the result below *root*) and
    return (resolved_path, is_directory).  Raises OSError(ELOOP) when more
    than *loop_cnt* links are chained."""
    while os.path.islink(file) and len(file) >= len(root):
        if loop_cnt == 0:
            raise OSError(errno.ELOOP, file)

        loop_cnt -= 1
        target = os.path.normpath(os.readlink(file))

        if not os.path.isabs(target):
            # Relative link: resolve against the link's own directory.
            tdir = os.path.dirname(file)
            assert(__is_path_below(tdir, root))
        else:
            # Absolute link: re-anchor it below root.
            tdir = root

        file = __realpath_rel(tdir, target, root, loop_cnt, assume_dir)

    try:
        is_dir = os.path.isdir(file)
    except OSError:
        # Fixed: this previously assigned the undefined name 'false'
        # (a NameError) and used a bare except.
        is_dir = False

    return (file, is_dir)
226 | |||
def realpath(file, root, use_physdir = True, loop_cnt = 100, assume_dir = False):
    """ Returns the canonical path of 'file' with assuming a
    toplevel 'root' directory. When 'use_physdir' is set, all
    preceding path components of 'file' will be resolved first;
    this flag should be set unless it is guaranteed that there is
    no symlink in the path. When 'assume_dir' is not set, missing
    path components will raise an ENOENT error"""

    root = os.path.normpath(root)
    file = os.path.normpath(file)

    if not root.endswith(os.path.sep):
        # letting root end with '/' makes some things easier
        root = root + os.path.sep

    if not __is_path_below(file, root):
        raise OSError(errno.EINVAL, "file '%s' is not below root" % file)

    try:
        if use_physdir:
            # Resolve every component of the root-relative path.
            file = __realpath_rel(root, file[(len(root) - 1):], root, loop_cnt, assume_dir)
        else:
            # Only resolve the path itself, trusting its parents.
            file = __realpath(file, root, loop_cnt, assume_dir)[0]
    except OSError as e:
        if e.errno == errno.ELOOP:
            # make ELOOP more readable; without catching it, there will
            # be printed a backtrace with 100s of OSError exceptions
            # else
            # NOTE(review): message wording ("too much recursions") is
            # awkward; left unchanged here as it is a runtime string.
            raise OSError(errno.ELOOP,
                          "too much recursions while resolving '%s'; loop in '%s'" %
                          (file, e.strerror))

        raise

    return file
diff --git a/meta/lib/oe/prservice.py b/meta/lib/oe/prservice.py new file mode 100644 index 0000000000..b0cbcb1fbc --- /dev/null +++ b/meta/lib/oe/prservice.py | |||
@@ -0,0 +1,126 @@ | |||
1 | |||
def prserv_make_conn(d, check = False):
    """Open a connection to the PR service named by PRSERV_HOST ("host:port").

    Stores the connection in the datastore under __PRSERV_CONN and returns
    it.  When 'check' is set, pings the server and treats a failed ping as
    a fatal error.  Any failure ends the build via bb.fatal()."""
    import prserv.serv
    # list() keeps Python 2 behaviour while remaining indexable on Python 3,
    # where filter() returns a lazy iterator
    host_params = list(filter(None, (d.getVar("PRSERV_HOST", True) or '').split(':')))
    try:
        conn = None
        conn = prserv.serv.PRServerConnection(host_params[0], int(host_params[1]))
        if check:
            if not conn.ping():
                raise Exception('service not available')
        d.setVar("__PRSERV_CONN",conn)
    except Exception as exc:
        # was "except Exception, exc" -- Python 2-only syntax; "as" works on
        # Python 2.6+ and Python 3
        bb.fatal("Connecting to PR service %s:%s failed: %s" % (host_params[0], host_params[1], str(exc)))

    return conn
16 | |||
def prserv_dump_db(d):
    """Export the remote PR service database.

    Returns the result of the server-side export, or None when no network
    PR service is configured or a connection cannot be established."""
    if not d.getVar('PRSERV_HOST', True):
        bb.error("Not using network based PR service")
        return None

    conn = d.getVar("__PRSERV_CONN", True)
    if conn is None:
        conn = prserv_make_conn(d)
        if conn is None:
            bb.error("Making connection failed to remote PR service")
            return None

    # gather the dump filter options from the datastore and hand them to
    # the server-side export
    opt_version = d.getVar('PRSERV_DUMPOPT_VERSION', True)
    opt_pkgarch = d.getVar('PRSERV_DUMPOPT_PKGARCH', True)
    opt_checksum = d.getVar('PRSERV_DUMPOPT_CHECKSUM', True)
    opt_col = (d.getVar('PRSERV_DUMPOPT_COL', True) == "1")
    return conn.export(opt_version, opt_pkgarch, opt_checksum, opt_col)
35 | |||
def prserv_import_db(d, filter_version=None, filter_pkgarch=None, filter_checksum=None):
    """Import PRAUTO$version$pkgarch$checksum entries from the datastore
    into the remote PR service.

    The optional filter_* arguments restrict which entries are imported.
    Returns the list of (version, pkgarch, checksum, value) tuples that
    were imported, or None when no service is configured/reachable."""
    if not d.getVar('PRSERV_HOST', True):
        bb.error("Not using network based PR service")
        return None

    conn = d.getVar("__PRSERV_CONN", True)
    if conn is None:
        conn = prserv_make_conn(d)
        if conn is None:
            bb.error("Making connection failed to remote PR service")
            return None
    #get the entry values
    imported = []
    prefix = "PRAUTO$"
    for v in d.keys():
        if v.startswith(prefix):
            # split "PRAUTO$version$pkgarch$checksum" from the right so a
            # '$' inside the version does not break parsing
            (remain, sep, checksum) = v.rpartition('$')
            (remain, sep, pkgarch) = remain.rpartition('$')
            (remain, sep, version) = remain.rpartition('$')
            if (remain + '$' != prefix) or \
               (filter_version and filter_version != version) or \
               (filter_pkgarch and filter_pkgarch != pkgarch) or \
               (filter_checksum and filter_checksum != checksum):
               continue
            try:
                value = int(d.getVar(remain + '$' + version + '$' + pkgarch + '$' + checksum, True))
            except (TypeError, ValueError) as exc:
                # was "except BaseException" -- far too broad (it also caught
                # KeyboardInterrupt); int() can only raise these two here.
                # bb.debug() requires a debug level as its first argument.
                bb.debug(1, "Not valid value of %s:%s" % (v,str(exc)))
                continue
            ret = conn.importone(version,pkgarch,checksum,value)
            if ret != value:
                bb.error("importing(%s,%s,%s,%d) failed. DB may have larger value %d" % (version,pkgarch,checksum,value,ret))
            else:
               imported.append((version,pkgarch,checksum,value))
    return imported
71 | |||
def prserv_export_tofile(d, metainfo, datainfo, lockdown, nomax=False):
    """Append a PR database dump to PRSERV_DUMPFILE under a lock.

    'metainfo' describes the table columns (written as comments),
    'lockdown' emits PRSERV_LOCKDOWN = "1", and 'datainfo' rows are written
    as PRAUTO$... assignments; unless 'nomax' is set, the per-pkgarch
    maximum value is also written as PRAUTO_<version>_<pkgarch>."""
    import bb.utils
    #initialize the output file
    bb.utils.mkdirhier(d.getVar('PRSERV_DUMPDIR', True))
    df = d.getVar('PRSERV_DUMPFILE', True)
    #write data
    lf = bb.utils.lockfile("%s.lock" % df)
    try:
        # the original leaked both the file and the lock on any exception;
        # try/finally guarantees release
        f = open(df, "a")
        try:
            if metainfo:
                #dump column info
                f.write("#PR_core_ver = \"%s\"\n\n" % metainfo['core_ver'])
                f.write("#Table: %s\n" % metainfo['tbl_name'])
                f.write("#Columns:\n")
                f.write("#name \t type \t notn \t dflt \t pk\n")
                f.write("#----------\t --------\t --------\t --------\t ----\n")
                for i in range(len(metainfo['col_info'])):
                    f.write("#%10s\t %8s\t %8s\t %8s\t %4s\n" %
                            (metainfo['col_info'][i]['name'],
                             metainfo['col_info'][i]['type'],
                             metainfo['col_info'][i]['notnull'],
                             metainfo['col_info'][i]['dflt_value'],
                             metainfo['col_info'][i]['pk']))
                f.write("\n")

            if lockdown:
                f.write("PRSERV_LOCKDOWN = \"1\"\n\n")

            if datainfo:
                # idx remembers, per pkgarch, the row holding the largest value
                idx = {}
                for i in range(len(datainfo)):
                    pkgarch = datainfo[i]['pkgarch']
                    value = datainfo[i]['value']
                    if pkgarch not in idx:
                        idx[pkgarch] = i
                    elif value > datainfo[idx[pkgarch]]['value']:
                        idx[pkgarch] = i
                    f.write("PRAUTO$%s$%s$%s = \"%s\"\n" %
                            (str(datainfo[i]['version']), pkgarch, str(datainfo[i]['checksum']), str(value)))
                if not nomax:
                    for i in idx:
                        f.write("PRAUTO_%s_%s = \"%s\"\n" % (str(datainfo[idx[i]]['version']),str(datainfo[idx[i]]['pkgarch']),str(datainfo[idx[i]]['value'])))
        finally:
            f.close()
    finally:
        bb.utils.unlockfile(lf)
115 | |||
def prserv_check_avail(d):
    """Validate PRSERV_HOST ("host:port") and ping the PR service.

    An unparseable value is a fatal error; otherwise a checked connection
    is established via prserv_make_conn()."""
    # list comprehension instead of filter(): identical on Python 2, and
    # len() keeps working on Python 3
    host_params = [h for h in (d.getVar("PRSERV_HOST", True) or '').split(':') if h]
    try:
        if len(host_params) != 2:
            raise TypeError
        else:
            int(host_params[1])
    except (TypeError, ValueError):
        # int() raises ValueError for a non-numeric port; the original only
        # caught TypeError, so e.g. "host:abc" crashed instead of producing
        # this diagnostic
        bb.fatal('Undefined/incorrect PRSERV_HOST value. Format: "host:port"')
    else:
        prserv_make_conn(d, True)
diff --git a/meta/lib/oe/qa.py b/meta/lib/oe/qa.py new file mode 100644 index 0000000000..d5cdaa0fcd --- /dev/null +++ b/meta/lib/oe/qa.py | |||
@@ -0,0 +1,111 @@ | |||
class ELFFile:
    """Minimal ELF header reader used by the QA checks.

    Parses the e_ident block (plus e_machine) of an ELF file to expose its
    word size, endianness, OS ABI and machine type, and can run objdump
    against the file with cached output.

    NOTE(review): the header is indexed as a Python 2 *str*, so each element
    is a one-character string compared against chr() values; on Python 3 the
    file would need to be opened in binary mode instead -- confirm before
    porting."""
    EI_NIDENT = 16

    # offsets into e_ident[]
    EI_CLASS = 4
    EI_DATA = 5
    EI_VERSION = 6
    EI_OSABI = 7
    EI_ABIVERSION = 8

    # possible values for EI_CLASS
    ELFCLASSNONE = 0
    ELFCLASS32 = 1
    ELFCLASS64 = 2

    # possible value for EI_VERSION
    EV_CURRENT = 1

    # possible values for EI_DATA
    ELFDATANONE = 0
    ELFDATA2LSB = 1
    ELFDATA2MSB = 2

    def my_assert(self, expectation, result):
        """Raise a generic Exception when 'result' differs from 'expectation'."""
        if not expectation == result:
            #print "'%x','%x' %s" % (ord(expectation), ord(result), self.name)
            raise Exception("This does not work as expected")

    def __init__(self, name, bits = 0):
        """'name' is the path of the ELF file; 'bits' is 0 (autodetect on
        open()), 32 or 64."""
        self.name = name
        self.bits = bits
        self.objdump_output = {}   # per-command cache for run_objdump()

    def open(self):
        """Read and validate the ELF identification header.

        Raises Exception when the file is not ELF, has an unexpected class
        for the requested word size, or uses an unknown data encoding."""
        # was: file(self.name, "r") -- file() is a Python 2-only builtin;
        # open() behaves identically here and is portable
        self.file = open(self.name, "r")
        self.data = self.file.read(ELFFile.EI_NIDENT+4)

        self.my_assert(len(self.data), ELFFile.EI_NIDENT+4)
        self.my_assert(self.data[0], chr(0x7f) )
        self.my_assert(self.data[1], 'E')
        self.my_assert(self.data[2], 'L')
        self.my_assert(self.data[3], 'F')
        if self.bits == 0:
            # autodetect the word size from EI_CLASS
            if self.data[ELFFile.EI_CLASS] == chr(ELFFile.ELFCLASS32):
                self.bits = 32
            elif self.data[ELFFile.EI_CLASS] == chr(ELFFile.ELFCLASS64):
                self.bits = 64
            else:
                # Not 32-bit or 64.. lets assert
                raise Exception("ELF but not 32 or 64 bit.")
        elif self.bits == 32:
            self.my_assert(self.data[ELFFile.EI_CLASS], chr(ELFFile.ELFCLASS32))
        elif self.bits == 64:
            self.my_assert(self.data[ELFFile.EI_CLASS], chr(ELFFile.ELFCLASS64))
        else:
            raise Exception("Must specify unknown, 32 or 64 bit size.")
        self.my_assert(self.data[ELFFile.EI_VERSION], chr(ELFFile.EV_CURRENT) )

        # self.sex ends up as the struct byte-order prefix: "<" or ">"
        self.sex = self.data[ELFFile.EI_DATA]
        if self.sex == chr(ELFFile.ELFDATANONE):
            raise Exception("self.sex == ELFDATANONE")
        elif self.sex == chr(ELFFile.ELFDATA2LSB):
            self.sex = "<"
        elif self.sex == chr(ELFFile.ELFDATA2MSB):
            self.sex = ">"
        else:
            raise Exception("Unknown self.sex")

    def osAbi(self):
        """Return the EI_OSABI byte as an int."""
        return ord(self.data[ELFFile.EI_OSABI])

    def abiVersion(self):
        """Return the EI_ABIVERSION byte as an int."""
        return ord(self.data[ELFFile.EI_ABIVERSION])

    def abiSize(self):
        """Return the word size in bits (32 or 64)."""
        return self.bits

    def isLittleEndian(self):
        return self.sex == "<"

    def isBigEngian(self):
        # NOTE: method name has a historic typo ("Engian") -- kept because
        # it is part of the public interface used by callers
        return self.sex == ">"

    def machine(self):
        """
        Return e_machine, decoded with the byte order recorded in self.sex.
        e_machine sits at offset 18 in both 32- and 64-bit ELF headers.
        """
        import struct
        (a,) = struct.unpack(self.sex+"H", self.data[18:20])
        return a

    def run_objdump(self, cmd, d):
        """Run ${OBJDUMP} with flag string 'cmd' on this file and return its
        stdout, caching the result per cmd.  Returns "" on failure."""
        import bb.process
        import os  # bug fix: the module never imports os at the top level

        if cmd in self.objdump_output:
            return self.objdump_output[cmd]

        objdump = d.getVar('OBJDUMP', True)

        # force a stable locale and the build PATH for the tool invocation
        env = os.environ.copy()
        env["LC_ALL"] = "C"
        env["PATH"] = d.getVar('PATH', True)

        try:
            bb.note("%s %s %s" % (objdump, cmd, self.name))
            self.objdump_output[cmd] = bb.process.run([objdump, cmd, self.name], env=env, shell=False)[0]
            return self.objdump_output[cmd]
        except Exception as e:
            bb.note("%s %s %s failed: %s" % (objdump, cmd, self.name, e))
            return ""
diff --git a/meta/lib/oe/sstatesig.py b/meta/lib/oe/sstatesig.py new file mode 100644 index 0000000000..852fb7e64a --- /dev/null +++ b/meta/lib/oe/sstatesig.py | |||
@@ -0,0 +1,161 @@ | |||
import os

import bb.siggen
2 | |||
def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache):
    """Decide whether a runtime dependency should influence sstate
    signatures.  Returns True to keep the dependency, False to drop it."""

    def _is_native(name):
        return name.endswith("-native")

    def _is_cross(name):
        return name.endswith(("-cross", "-cross-initial", "-cross-intermediate"))

    def _is_nativesdk(name):
        return name.startswith("nativesdk-")

    def _is_kernel(filename):
        classes = " ".join(dataCache.inherits[filename])
        return "module-base.bbclass" in classes or "linux-kernel-base.bbclass" in classes

    # Always include our own inter-task dependencies
    if recipename == depname:
        return True

    # Quilt (patch application) changing isn't likely to affect anything
    excludelist = ['quilt-native', 'subversion-native', 'git-native']
    if depname in excludelist and recipename != depname:
        return False

    # Don't change native/cross/nativesdk recipe dependencies any further
    if _is_native(recipename) or _is_cross(recipename) or _is_nativesdk(recipename):
        return True

    # Only target packages beyond here

    # Drop native/cross/nativesdk dependencies from target recipes
    if _is_native(depname) or _is_cross(depname) or _is_nativesdk(depname):
        return False

    # Exclude well defined machine specific configurations which don't change ABI
    if depname in siggen.abisaferecipes:
        return False

    # Exclude well defined recipe->dependency pairs
    if "%s->%s" % (recipename, depname) in siggen.saferecipedeps:
        return False

    # Kernel modules are well namespaced. We don't want to depend on the
    # kernel's checksum if we're just doing an RRECOMMENDS_xxx =
    # "kernel-module-*", not least because the checksum is machine specific.
    # Therefore if we're not a kernel or a module recipe (inheriting the
    # kernel classes) and we recommend a kernel-module, drop the dependency.
    depfn = dep.rsplit(".", 1)[0]
    if dataCache and _is_kernel(depfn) and not _is_kernel(fn):
        for pkg in dataCache.runrecs[fn]:
            if "kernel-module-" in " ".join(dataCache.runrecs[fn][pkg]):
                return False

    # Default to keep dependencies
    return True
55 | |||
class SignatureGeneratorOEBasic(bb.siggen.SignatureGeneratorBasic):
    """OE variant of bitbake's basic signature generator that filters
    runtime dependencies through sstate_rundepfilter()."""
    name = "OEBasic"

    def init_rundepcheck(self, data):
        # Whitelists consulted by sstate_rundepfilter(): recipes whose
        # changes are ABI-safe, and explicit "recipe->dep" safe pairs.
        # (A stray trailing 'pass' was removed -- it was dead code.)
        self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE", True) or "").split()
        self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", True) or "").split()

    def rundep_check(self, fn, recipename, task, dep, depname, dataCache = None):
        return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache)
64 | |||
class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
    """OE variant of bitbake's basic-hash signature generator that filters
    runtime dependencies through sstate_rundepfilter()."""
    name = "OEBasicHash"

    def init_rundepcheck(self, data):
        # Same whitelists as SignatureGeneratorOEBasic; see
        # sstate_rundepfilter().  (Stray trailing 'pass' removed.)
        self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE", True) or "").split()
        self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", True) or "").split()

    def rundep_check(self, fn, recipename, task, dep, depname, dataCache = None):
        return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache)
73 | |||
# Insert these classes into siggen's namespace so it can see and select them
# NOTE(review): bb.siggen presumably looks generators up as attributes by
# name when choosing a signature handler -- confirm against bitbake's
# siggen module.
bb.siggen.SignatureGeneratorOEBasic = SignatureGeneratorOEBasic
bb.siggen.SignatureGeneratorOEBasicHash = SignatureGeneratorOEBasicHash
77 | |||
78 | |||
def find_siginfo(pn, taskname, taskhashlist, d):
    """ Find signature data files for comparison purposes.

    'pn' may alternatively be a full task key ("...recipe.bb.do_task") when
    'taskname' is None, in which case pn/taskname are derived from it.
    When 'taskhashlist' is given, returns a dict mapping taskhash ->
    matching file; otherwise returns a dict mapping file -> mtime.
    Searches the stamps directory first, then falls back to the
    sstate-cache. """

    import fnmatch
    import glob

    if taskhashlist:
        hashfiles = {}

    if not taskname:
        # We have to derive pn and taskname
        key = pn
        splitit = key.split('.bb.')
        taskname = splitit[1]
        pn = os.path.basename(splitit[0]).split('_')[0]
        if key.startswith('virtual:native:'):
            pn = pn + '-native'

    filedates = {}

    # First search in stamps dir
    localdata = d.createCopy()
    localdata.setVar('MULTIMACH_TARGET_SYS', '*')
    localdata.setVar('PN', pn)
    localdata.setVar('PV', '*')
    localdata.setVar('PR', '*')
    localdata.setVar('EXTENDPE', '')
    stamp = localdata.getVar('STAMP', True)
    filespec = '%s.%s.sigdata.*' % (stamp, taskname)
    foundall = False
    # (duplicate "import glob" and the unused 'match' local removed)
    for fullpath in glob.glob(filespec):
        if taskhashlist:
            for taskhash in taskhashlist:
                if fullpath.endswith('.%s' % taskhash):
                    hashfiles[taskhash] = fullpath
                    if len(hashfiles) == len(taskhashlist):
                        foundall = True
                        break
        else:
            filedates[fullpath] = os.stat(fullpath).st_mtime

    if len(filedates) < 2 and not foundall:
        # That didn't work, look in sstate-cache
        hashes = taskhashlist or ['*']
        localdata = bb.data.createCopy(d)
        for hashval in hashes:
            localdata.setVar('PACKAGE_ARCH', '*')
            localdata.setVar('TARGET_VENDOR', '*')
            localdata.setVar('TARGET_OS', '*')
            localdata.setVar('PN', pn)
            localdata.setVar('PV', '*')
            localdata.setVar('PR', '*')
            localdata.setVar('BB_TASKHASH', hashval)
            if pn.endswith('-native') or pn.endswith('-crosssdk') or pn.endswith('-cross'):
                localdata.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
            sstatename = d.getVarFlag(taskname, "sstate-name")
            if not sstatename:
                sstatename = taskname
            filespec = '%s_%s.*.siginfo' % (localdata.getVar('SSTATE_PKG', True), sstatename)

            # sstate-cache files are sharded by the first two hash characters
            if hashval != '*':
                sstatedir = "%s/%s" % (d.getVar('SSTATE_DIR', True), hashval[:2])
            else:
                sstatedir = d.getVar('SSTATE_DIR', True)

            filedates = {}
            for root, dirs, files in os.walk(sstatedir):
                for fn in files:
                    fullpath = os.path.join(root, fn)
                    if fnmatch.fnmatch(fullpath, filespec):
                        if taskhashlist:
                            hashfiles[hashval] = fullpath
                        else:
                            filedates[fullpath] = os.stat(fullpath).st_mtime

    if taskhashlist:
        return hashfiles
    else:
        return filedates
160 | |||
# Export the helper through bb.siggen so bitbake-side code can call it
bb.siggen.find_siginfo = find_siginfo
diff --git a/meta/lib/oe/terminal.py b/meta/lib/oe/terminal.py new file mode 100644 index 0000000000..be2a26bedd --- /dev/null +++ b/meta/lib/oe/terminal.py | |||
@@ -0,0 +1,218 @@ | |||
import logging
import os
import shlex

import oe.classutils
from bb.process import Popen, ExecutionError
5 | |||
# Module-level logger, parented under bitbake's 'BitBake' logger hierarchy
logger = logging.getLogger('BitBake.OE.Terminal')
7 | |||
8 | |||
class UnsupportedTerminal(Exception):
    """Raised when a specific terminal cannot be used (e.g. not installed
    or missing its required environment)."""
    pass
11 | |||
class NoSupportedTerminals(Exception):
    """Raised by spawn_preferred() when every registered terminal was
    rejected as unsupported."""
    pass
14 | |||
15 | |||
class Registry(oe.classutils.ClassRegistry):
    """Metaclass collecting Terminal subclasses, registered under their
    lowercased class name (see oe.classutils.ClassRegistry)."""
    command = None

    def __init__(cls, name, bases, attrs):
        super(Registry, cls).__init__(name.lower(), bases, attrs)

    @property
    def implemented(cls):
        # A terminal class is only usable if it defines a launch command
        return bool(cls.command)
25 | |||
26 | |||
class Terminal(Popen):
    """Base class for terminal launchers.  Subclasses provide 'command',
    a format string or argv list with {title} and {command} placeholders;
    instances spawn the terminal process on construction (Popen)."""
    __metaclass__ = Registry

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        # Build the concrete argv for this terminal, then launch it
        fmt_sh_cmd = self.format_command(sh_cmd, title)
        try:
            Popen.__init__(self, fmt_sh_cmd, env=env)
        except OSError as exc:
            import errno
            if exc.errno == errno.ENOENT:
                # the terminal binary is not installed
                raise UnsupportedTerminal(self.name)
            else:
                raise

    def format_command(self, sh_cmd, title):
        """Substitute {title}/{command} into self.command and return the
        argv list (a string command is split with shlex)."""
        fmt = {'title': title or 'Terminal', 'command': sh_cmd}
        if isinstance(self.command, basestring):
            return shlex.split(self.command.format(**fmt))
        else:
            return [element.format(**fmt) for element in self.command]
47 | |||
class XTerminal(Terminal):
    """Base class for X11 terminals; requires a DISPLAY."""
    def __init__(self, sh_cmd, title=None, env=None, d=None):
        # Bug fix: check for an X display *before* spawning.  The original
        # called Terminal.__init__ first and only then raised
        # UnsupportedTerminal, leaving a child process running without an
        # X server to talk to.
        if not os.environ.get('DISPLAY'):
            raise UnsupportedTerminal(self.name)
        Terminal.__init__(self, sh_cmd, title, env, d)
53 | |||
class Gnome(XTerminal):
    # {title}/{command} are filled in by Terminal.format_command()
    command = 'gnome-terminal --disable-factory -t "{title}" -x {command}'
    priority = 2
57 | |||
class Mate(XTerminal):
    # {title}/{command} are filled in by Terminal.format_command()
    command = 'mate-terminal --disable-factory -t "{title}" -x {command}'
    priority = 2
61 | |||
class Xfce(XTerminal):
    command = 'Terminal -T "{title}" -e "{command}"'
    priority = 2

    def __init__(self, command, title=None, env=None, d=None):
        # Upstream binary name is Terminal but Debian/Ubuntu use
        # xfce4-terminal to avoid possible(?) conflicts.
        #
        # Bug fix: the original substituted the terminal *template* for the
        # caller's shell command ('cmd') on Debian/Ubuntu, discarding the
        # command the user actually asked to run.  Override self.command
        # instead and always pass the caller's command through.
        # (The first parameter keeps its historic name 'command' for
        # backward compatibility; it is the shell command to run.)
        distro = distro_name()
        if distro == 'ubuntu' or distro == 'debian':
            self.command = 'xfce4-terminal -T "{title}" -e "{command}"'
        XTerminal.__init__(self, command, title, env, d)
75 | |||
class Konsole(XTerminal):
    command = 'konsole -T "{title}" -e {command}'
    priority = 2

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        # Check version: konsole 2.x (KDE 4.x) is rejected below
        vernum = check_konsole_version("konsole")
        if vernum:
            if vernum.split('.')[0] == "2":
                logger.debug(1, 'Konsole from KDE 4.x will not work as devshell, skipping')
                raise UnsupportedTerminal(self.name)
        XTerminal.__init__(self, sh_cmd, title, env, d)
88 | |||
class XTerm(XTerminal):
    # lower priority than the desktop-specific terminals above
    command = 'xterm -T "{title}" -e {command}'
    priority = 1
92 | |||
class Rxvt(XTerminal):
    # lower priority than the desktop-specific terminals above
    command = 'rxvt -T "{title}" -e {command}'
    priority = 1
96 | |||
class Screen(Terminal):
    # Class-level default; __init__ overrides self.command with a
    # pid-unique session name below.
    command = 'screen -D -m -t "{title}" -S devshell {command}'

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        # Per-process session name so multiple devshells can coexist
        s_id = "devshell_%i" % os.getpid()
        self.command = "screen -D -m -t \"{title}\" -S %s {command}" % s_id
        Terminal.__init__(self, sh_cmd, title, env, d)
        msg = 'Screen started. Please connect in another terminal with ' \
            '"screen -r %s"' % s_id
        if (d):
            # let the bitbake UI tell the user how to attach
            bb.event.fire(bb.event.LogExecTTY(msg, "screen -r %s" % s_id,
                                              0.5, 10), d)
        else:
            logger.warn(msg)
111 | |||
class TmuxRunning(Terminal):
    """Open a new pane in the current running tmux window"""
    name = 'tmux-running'
    command = 'tmux split-window "{command}"'
    priority = 2.75

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        if not bb.utils.which(os.getenv('PATH'), 'tmux'):
            raise UnsupportedTerminal('tmux is not installed')

        # $TMUX is only set inside a tmux session; without it we cannot
        # split the current window
        if not os.getenv('TMUX'):
            raise UnsupportedTerminal('tmux is not running')

        Terminal.__init__(self, sh_cmd, title, env, d)
126 | |||
class Tmux(Terminal):
    """Start a new tmux session and window"""
    command = 'tmux new -d -s devshell -n devshell "{command}"'
    priority = 0.75

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        if not bb.utils.which(os.getenv('PATH'), 'tmux'):
            raise UnsupportedTerminal('tmux is not installed')

        # TODO: consider using a 'devshell' session shared amongst all
        # devshells, if it's already there, add a new window to it.
        window_name = 'devshell-%i' % os.getpid()

        # Override the class-level command with a pid-unique session/window
        self.command = 'tmux new -d -s {0} -n {0} "{{command}}"'.format(window_name)
        Terminal.__init__(self, sh_cmd, title, env, d)

        attach_cmd = 'tmux att -t {0}'.format(window_name)
        msg = 'Tmux started. Please connect in another terminal with `tmux att -t {0}`'.format(window_name)
        if d:
            # let the bitbake UI tell the user how to attach
            bb.event.fire(bb.event.LogExecTTY(msg, attach_cmd, 0.5, 10), d)
        else:
            logger.warn(msg)
149 | |||
class Custom(Terminal):
    """User-defined terminal, configured via OE_TERMINAL_CUSTOMCMD."""
    command = 'false' # This is a placeholder
    priority = 3

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        self.command = d and d.getVar('OE_TERMINAL_CUSTOMCMD', True)
        if self.command:
            if not '{command}' in self.command:
                # append the placeholder if the user's command lacks one
                self.command += ' {command}'
            Terminal.__init__(self, sh_cmd, title, env, d)
            logger.warn('Custom terminal was started.')
        else:
            logger.debug(1, 'No custom terminal (OE_TERMINAL_CUSTOMCMD) set')
            raise UnsupportedTerminal('OE_TERMINAL_CUSTOMCMD not set')
164 | |||
165 | |||
def prioritized():
    """Return the registered terminal classes ordered by priority
    (delegates to oe.classutils.ClassRegistry.prioritized)."""
    return Registry.prioritized()
168 | |||
def spawn_preferred(sh_cmd, title=None, env=None, d=None):
    """Spawn the first supported terminal, by priority.

    Raises NoSupportedTerminals when every candidate is unsupported."""
    spawned = False
    for candidate in prioritized():
        try:
            spawn(candidate.name, sh_cmd, title, env, d)
        except UnsupportedTerminal:
            # this terminal cannot run here; try the next one
            continue
        spawned = True
        break
    if not spawned:
        raise NoSupportedTerminals()
179 | |||
def spawn(name, sh_cmd, title=None, env=None, d=None):
    """Spawn the specified terminal, by name.

    Raises UnsupportedTerminal for an unknown name and ExecutionError
    when the terminal exits with a non-zero status."""
    logger.debug(1, 'Attempting to spawn terminal "%s"', name)
    if name not in Registry.registry:
        raise UnsupportedTerminal(name)
    terminal_cls = Registry.registry[name]

    pipe = terminal_cls(sh_cmd, title, env, d)
    output = pipe.communicate()[0]
    if pipe.returncode != 0:
        raise ExecutionError(sh_cmd, pipe.returncode, output)
192 | |||
def check_konsole_version(konsole):
    """Return the version string reported by '<konsole> --version', or
    None when the shell itself is not available."""
    import subprocess as sub
    try:
        proc = sub.Popen(['sh', '-c', '%s --version' % konsole],
                         stdout=sub.PIPE, stderr=sub.PIPE)
        out, err = proc.communicate()
        ver_info = out.rstrip().split('\n')
    except OSError as exc:
        import errno
        # ENOENT means 'sh' could not be found; anything else is unexpected
        if exc.errno != errno.ENOENT:
            raise
        return None
    # take the version number from the last matching "Konsole ..." line
    vernum = None
    for ver in ver_info:
        if ver.startswith('Konsole'):
            vernum = ver.split(' ')[-1]
    return vernum
210 | |||
def distro_name():
    """Return the lowercased distributor ID from 'lsb_release -i', or
    'unknown' when it cannot be determined.

    Deliberately best-effort: any failure (lsb_release missing, unexpected
    output) falls back to 'unknown' rather than propagating."""
    try:
        p = Popen(['lsb_release', '-i'])
        out, err = p.communicate()
        distro = out.split(':')[1].strip().lower()
    except Exception:
        # was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt; keep the best-effort fallback but let
        # interpreter exits through
        distro = "unknown"
    return distro
diff --git a/meta/lib/oe/tests/__init__.py b/meta/lib/oe/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/meta/lib/oe/tests/__init__.py | |||
diff --git a/meta/lib/oe/tests/test_license.py b/meta/lib/oe/tests/test_license.py new file mode 100644 index 0000000000..c388886184 --- /dev/null +++ b/meta/lib/oe/tests/test_license.py | |||
@@ -0,0 +1,68 @@ | |||
1 | import unittest | ||
2 | import oe.license | ||
3 | |||
class SeenVisitor(oe.license.LicenseVisitor):
    """License visitor that records every license name it encounters."""
    def __init__(self):
        self.seen = []
        oe.license.LicenseVisitor.__init__(self)

    def visit_Str(self, node):
        # visit_Str follows the ast.NodeVisitor naming convention;
        # node.s holds the string value (the license name)
        self.seen.append(node.s)
11 | |||
class TestSingleLicense(unittest.TestCase):
    """Parsing of single (non-combined) license strings."""
    # valid single-license names
    licenses = [
        "GPLv2",
        "LGPL-2.0",
        "Artistic",
        "MIT",
        "GPLv3+",
        "FOO_BAR",
    ]
    # strings the parser should reject
    invalid_licenses = ["GPL/BSD"]

    @staticmethod
    def parse(licensestr):
        # return the list of license names seen while visiting the string
        visitor = SeenVisitor()
        visitor.visit_string(licensestr)
        return visitor.seen

    def test_single_licenses(self):
        for license in self.licenses:
            licenses = self.parse(license)
            self.assertListEqual(licenses, [license])

    def test_invalid_licenses(self):
        for license in self.invalid_licenses:
            with self.assertRaises(oe.license.InvalidLicense) as cm:
                self.parse(license)
            # the exception carries the offending license string
            self.assertEqual(cm.exception.license, license)
39 | |||
class TestSimpleCombinations(unittest.TestCase):
    """Flattening of simple '&' (and) / '|' (or) license combinations."""
    # mapping: license string -> expected flattened list, given 'preferred'
    tests = {
        "FOO&BAR": ["FOO", "BAR"],
        "BAZ & MOO": ["BAZ", "MOO"],
        "ALPHA|BETA": ["ALPHA"],
        "BAZ&MOO|FOO": ["FOO"],
        "FOO&BAR|BAZ": ["FOO", "BAR"],
    }
    # licenses the chooser below prefers when resolving '|' alternatives
    preferred = ["ALPHA", "FOO", "BAR"]

    def test_tests(self):
        # Choose the second alternative only when all of its licenses are
        # in the preferred list; otherwise keep the first
        def choose(a, b):
            if all(lic in self.preferred for lic in b):
                return b
            else:
                return a

        for license, expected in self.tests.items():
            licenses = oe.license.flattened_licenses(license, choose)
            self.assertListEqual(licenses, expected)
60 | |||
class TestComplexCombinations(TestSimpleCombinations):
    """Nested/parenthesised combinations; reuses test_tests() from
    TestSimpleCombinations with harder inputs."""
    tests = {
        "FOO & (BAR | BAZ)&MOO": ["FOO", "BAR", "MOO"],
        "(ALPHA|(BETA&THETA)|OMEGA)&DELTA": ["OMEGA", "DELTA"],
        "((ALPHA|BETA)&FOO)|BAZ": ["BETA", "FOO"],
        "(GPL-2.0|Proprietary)&BSD-4-clause&MIT": ["GPL-2.0", "BSD-4-clause", "MIT"],
    }
    preferred = ["BAR", "OMEGA", "BETA", "GPL-2.0"]
diff --git a/meta/lib/oe/tests/test_path.py b/meta/lib/oe/tests/test_path.py new file mode 100644 index 0000000000..3d41ce157a --- /dev/null +++ b/meta/lib/oe/tests/test_path.py | |||
@@ -0,0 +1,89 @@ | |||
1 | import unittest | ||
2 | import oe, oe.path | ||
3 | import tempfile | ||
4 | import os | ||
5 | import errno | ||
6 | import shutil | ||
7 | |||
class TestRealPath(unittest.TestCase):
    """Tests for oe.path.realpath() symlink resolution."""
    DIRS = [ "a", "b", "etc", "sbin", "usr", "usr/bin", "usr/binX", "usr/sbin", "usr/include", "usr/include/gdbm" ]
    FILES = [ "etc/passwd", "b/file" ]
    # (link path, link target, expected resolution; None marks links that
    # must not resolve, False marks links skipped in the equality check)
    LINKS = [
        ( "bin",             "/usr/bin",             "/usr/bin" ),
        ( "binX",            "usr/binX",             "/usr/binX" ),
        ( "c",               "broken",               "/broken" ),
        ( "etc/passwd-1",    "passwd",               "/etc/passwd" ),
        ( "etc/passwd-2",    "passwd-1",             "/etc/passwd" ),
        ( "etc/passwd-3",    "/etc/passwd-1",        "/etc/passwd" ),
        ( "etc/shadow-1",    "/etc/shadow",          "/etc/shadow" ),
        ( "etc/shadow-2",    "/etc/shadow-1",        "/etc/shadow" ),
        ( "prog-A",          "bin/prog-A",           "/usr/bin/prog-A" ),
        ( "prog-B",          "/bin/prog-B",          "/usr/bin/prog-B" ),
        ( "usr/bin/prog-C",  "../../sbin/prog-C",    "/sbin/prog-C" ),
        ( "usr/bin/prog-D",  "/sbin/prog-D",         "/sbin/prog-D" ),
        ( "usr/binX/prog-E", "../sbin/prog-E",       None ),
        ( "usr/bin/prog-F",  "../../../sbin/prog-F", "/sbin/prog-F" ),
        ( "loop",            "a/loop",               None ),
        ( "a/loop",          "../loop",              None ),
        ( "b/test",          "file/foo",             "/b/file/foo" ),
    ]

    # cases where physical and logical resolution differ
    LINKS_PHYS = [
        ( "./",              "/",                    "" ),
        ( "binX/prog-E",     "/usr/sbin/prog-E",     "/sbin/prog-E" ),
    ]

    # (path, expected errno) for paths that must raise
    EXCEPTIONS = [
        ( "loop",   errno.ELOOP ),
        ( "b/test", errno.ENOENT ),
    ]

    def setUp(self):
        """Build a scratch root full of dirs, files and symlinks."""
        self.tmpdir = tempfile.mkdtemp(prefix = "oe-test_path")
        self.root = os.path.join(self.tmpdir, "R")

        os.mkdir(os.path.join(self.tmpdir, "_real"))
        os.symlink("_real", self.root)

        for d in self.DIRS:
            os.mkdir(os.path.join(self.root, d))
        for f in self.FILES:
            # was: file(...) -- Python 2-only builtin which also leaked the
            # descriptor; open()+close() is portable and clean
            open(os.path.join(self.root, f), "w").close()
        for l in self.LINKS:
            os.symlink(l[1], os.path.join(self.root, l[0]))

    def tearDown(self):
        # was implemented as __del__, which is not guaranteed to run after
        # each test; tearDown is the unittest-sanctioned cleanup hook
        try:
            #os.system("tree -F %s" % self.tmpdir)
            shutil.rmtree(self.tmpdir)
        except OSError:
            pass

    def __realpath(self, file, use_physdir, assume_dir = True):
        # resolve 'file' relative to the scratch root
        return oe.path.realpath(os.path.join(self.root, file), self.root,
                                use_physdir, assume_dir = assume_dir)

    def test_norm(self):
        for l in self.LINKS:
            if l[2] == None:
                continue

            target_p = self.__realpath(l[0], True)
            target_l = self.__realpath(l[0], False)

            if l[2] != False:
                self.assertEqual(target_p, target_l)
                self.assertEqual(l[2], target_p[len(self.root):])

    def test_phys(self):
        for l in self.LINKS_PHYS:
            target_p = self.__realpath(l[0], True)
            target_l = self.__realpath(l[0], False)

            self.assertEqual(l[1], target_p[len(self.root):])
            self.assertEqual(l[2], target_l[len(self.root):])

    def test_loop(self):
        # loops and dangling components must raise the documented errnos
        for e in self.EXCEPTIONS:
            self.assertRaisesRegexp(OSError, r'\[Errno %u\]' % e[1],
                                    self.__realpath, e[0], False, False)
diff --git a/meta/lib/oe/tests/test_types.py b/meta/lib/oe/tests/test_types.py new file mode 100644 index 0000000000..367cc30e45 --- /dev/null +++ b/meta/lib/oe/tests/test_types.py | |||
@@ -0,0 +1,62 @@ | |||
1 | import unittest | ||
2 | from oe.maketype import create, factory | ||
3 | |||
class TestTypes(unittest.TestCase):
    """Shared helpers for the oe.maketype test cases below."""

    def assertIsInstance(self, obj, cls):
        # Backport for unittest versions lacking assertIsInstance.
        ok = isinstance(obj, cls)
        return self.assertTrue(ok)

    def assertIsNot(self, obj, other):
        # Backport for unittest versions lacking assertIsNot.
        same = obj is other
        return self.assertFalse(same)

    def assertFactoryCreated(self, value, type, **flags):
        # The factory must know 'type', and create() must return an
        # instance of the class the factory maps it to.
        cls = factory(type)
        self.assertIsNot(cls, None)
        self.assertIsInstance(create(value, type, **flags), cls)
15 | |||
class TestBooleanType(TestTypes):
    """Checks for the 'boolean' variable type."""

    TRUE_VALUES = ('y', 'yes', '1', 't', 'true', 'TRUE', 'truE')
    FALSE_VALUES = ('n', 'no', '0', 'f', 'false', 'FALSE', 'faLse')

    def test_invalid(self):
        for bad in ('', 'foo'):
            self.assertRaises(ValueError, create, bad, 'boolean')
        self.assertRaises(TypeError, create, object(), 'boolean')

    def test_true(self):
        # Truthy spellings are accepted case-insensitively.
        for val in self.TRUE_VALUES:
            self.assertTrue(create(val, 'boolean'))

    def test_false(self):
        for val in self.FALSE_VALUES:
            self.assertFalse(create(val, 'boolean'))

    def test_bool_equality(self):
        # Created booleans must compare equal to the plain bool values.
        self.assertEqual(create('n', 'boolean'), False)
        self.assertNotEqual(create('n', 'boolean'), True)
        self.assertEqual(create('y', 'boolean'), True)
        self.assertNotEqual(create('y', 'boolean'), False)
45 | |||
class TestList(TestTypes):
    """Checks for the 'list' variable type."""

    def assertListEqual(self, value, valid, sep=None):
        # Build the list and compare, then check the separator round-trips
        # through str().
        obj = create(value, 'list', separator=sep)
        self.assertEqual(obj, valid)
        if sep is not None:
            self.assertEqual(obj.separator, sep)
        self.assertEqual(str(obj), obj.separator.join(obj))

    def test_list_nosep(self):
        expected = ['alpha', 'beta', 'theta']
        # Default separator splits on any whitespace.
        self.assertListEqual('alpha beta theta', expected)
        self.assertListEqual('alpha beta\ttheta', expected)
        self.assertListEqual('alpha', ['alpha'])

    def test_list_usersep(self):
        self.assertListEqual('foo:bar', ['foo', 'bar'], ':')
        self.assertListEqual('foo:bar:baz', ['foo', 'bar', 'baz'], ':')
diff --git a/meta/lib/oe/tests/test_utils.py b/meta/lib/oe/tests/test_utils.py new file mode 100644 index 0000000000..5d9ac52e7d --- /dev/null +++ b/meta/lib/oe/tests/test_utils.py | |||
@@ -0,0 +1,51 @@ | |||
1 | import unittest | ||
from oe.utils import packages_filter_out_system, trim_version
3 | |||
class TestPackagesFilterOutSystem(unittest.TestCase):
    def test_filter(self):
        """
        Test that oe.utils.packages_filter_out_system works.
        """
        try:
            import bb
        except ImportError:
            self.skipTest("Cannot import bb")

        d = bb.data_smart.DataSmart()
        d.setVar("PN", "foo")

        # (PACKAGES value, expected non-system packages) pairs.
        cases = [
            ("foo foo-doc foo-dev", []),
            ("foo foo-doc foo-data foo-dev", ["foo-data"]),
            ("foo foo-locale-en-gb", []),
            ("foo foo-data foo-locale-en-gb", ["foo-data"]),
        ]
        for packages, expected in cases:
            d.setVar("PACKAGES", packages)
            self.assertEqual(packages_filter_out_system(d), expected)
32 | |||
33 | |||
class TestTrimVersion(unittest.TestCase):
    def test_version_exception(self):
        # Non-string versions are rejected outright.
        for bad_version in (None, (1, 2, 3)):
            with self.assertRaises(TypeError):
                trim_version(bad_version, 2)

    def test_num_exception(self):
        # num_parts must be at least 1.
        for bad_num in (0, -1):
            with self.assertRaises(ValueError):
                trim_version("1.2.3", bad_num)

    def test_valid(self):
        self.assertEqual(trim_version("1.2.3", 1), "1")
        self.assertEqual(trim_version("1.2.3", 2), "1.2")
        self.assertEqual(trim_version("1.2.3", 3), "1.2.3")
        # Asking for more parts than exist returns the whole version.
        self.assertEqual(trim_version("1.2.3", 4), "1.2.3")
diff --git a/meta/lib/oe/types.py b/meta/lib/oe/types.py new file mode 100644 index 0000000000..7f47c17d0e --- /dev/null +++ b/meta/lib/oe/types.py | |||
@@ -0,0 +1,153 @@ | |||
1 | import errno | ||
2 | import re | ||
3 | import os | ||
4 | |||
5 | |||
class OEList(list):
    """OpenEmbedded 'list' type

    Behaves like a normal list, but is built from a string value plus an
    optional separator, and str() re-joins the elements with that
    separator.  Set the variable type flag to 'list' to use this type;
    the 'separator' flag is optional and defaults to whitespace."""

    name = "list"

    def __init__(self, value, separator = None):
        items = value.split(separator) if value is not None else []
        list.__init__(self, items)
        # Remember the join separator; None means "split on whitespace,
        # join with a single space".
        self.separator = " " if separator is None else separator

    def __str__(self):
        return self.separator.join(self)
29 | |||
def choice(value, choices):
    """OpenEmbedded 'choice' type

    Acts as a multiple choice for the user. To use this, set the variable
    type flag to 'choice', and set the 'choices' flag to a space separated
    list of valid values."""
    # NOTE: basestring makes this Python-2-only as written.
    if not isinstance(value, basestring):
        raise TypeError("choice accepts a string, not '%s'" % type(value))

    value = value.lower()
    choices = choices.lower()
    if value in choices.split():
        return value
    raise ValueError("Invalid choice '%s'. Valid choices: %s" %
                     (value, choices))
45 | |||
class NoMatch(object):
    """Stub python regex pattern object which never matches anything.

    Mirrors the matching API of a compiled re pattern, but every method
    reports "no match"."""

    def findall(self, string, flags=0):
        return None

    def finditer(self, string, flags=0):
        return None

    def match(self, string, flags=0):
        # Fixed: the original signature was missing the 'string' argument,
        # so pattern-style calls like NoMatch.match(s, pos) would break.
        return None

    def search(self, string, flags=0):
        return None

    def split(self, string, maxsplit=0):
        return None

    def sub(self, repl, string, count=0):
        # Fixed: the first parameter was named 'pattern' instead of 'self'.
        return None

    def subn(self, repl, string, count=0):
        return None

# Replace the class with a singleton instance; users only ever need one.
NoMatch = NoMatch()
70 | |||
def regex(value, regexflags=None):
    """OpenEmbedded 'regex' type

    Acts as a regular expression, returning the pre-compiled regular
    expression pattern object. To use this type, set the variable type flag
    to 'regex', and optionally, set the 'regexflags' type to a space separated
    list of the flags to control the regular expression matching (e.g.
    FOO[regexflags] += 'ignorecase'). See the python documentation on the
    're' module for a list of valid flags."""

    flagval = 0
    for flagname in (regexflags or "").split():
        flag = flagname.upper()
        try:
            flagval |= getattr(re, flag)
        except AttributeError:
            raise ValueError("Invalid regex flag '%s'" % flag)

    if not value:
        # An undefined or empty variable must match nothing by default;
        # users who want match-anything can say '.*' explicitly.
        return NoMatch

    try:
        return re.compile(value, flagval)
    except re.error as exc:
        raise ValueError("Invalid regex value '%s': %s" %
                         (value, exc.args[0]))
101 | |||
def boolean(value):
    """OpenEmbedded 'boolean' type

    Valid values for true: 'yes', 'y', 'true', 't', '1'
    Valid values for false: 'no', 'n', 'false', 'f', '0'
    """
    # NOTE: basestring makes this Python-2-only as written.
    if not isinstance(value, basestring):
        raise TypeError("boolean accepts a string, not '%s'" % type(value))

    value = value.lower()
    if value in ('yes', 'y', 'true', 't', '1'):
        return True
    if value in ('no', 'n', 'false', 'f', '0'):
        return False
    raise ValueError("Invalid boolean value '%s'" % value)
118 | |||
def integer(value, numberbase=10):
    """OpenEmbedded 'integer' type

    Defaults to base 10, but this can be specified using the optional
    'numberbase' flag."""
    # The base may itself arrive as a string flag value, so coerce it.
    base = int(numberbase)
    return int(value, base)
126 | |||
# Keep a handle on the builtin before the type function shadows its name.
_float = float
def float(value, fromhex='false'):
    """OpenEmbedded floating point type

    To use this type, set the type flag to 'float', and optionally set the
    'fromhex' flag to a true value (obeying the same rules as for the
    'boolean' type) if the value is in base 16 rather than base 10."""
    use_hex = boolean(fromhex)
    return _float.fromhex(value) if use_hex else _float(value)
139 | |||
def path(value, relativeto='', normalize='true', mustexist='false'):
    """OpenEmbedded 'path' type

    Joins the value onto 'relativeto' (if set), optionally normalizes the
    result, and optionally requires that the path be openable.  The
    'normalize' and 'mustexist' flags obey the 'boolean' type rules."""
    value = os.path.join(relativeto, value)

    if boolean(normalize):
        value = os.path.normpath(value)

    if boolean(mustexist):
        try:
            # Probe by opening; close the handle (the original leaked it).
            open(value, 'r').close()
        except IOError as exc:
            # Only a missing file is fatal; other errors (e.g. EACCES)
            # are deliberately tolerated, as before.
            if exc.errno == errno.ENOENT:
                raise ValueError("{0}: {1}".format(value, os.strerror(errno.ENOENT)))

    return value
diff --git a/meta/lib/oe/utils.py b/meta/lib/oe/utils.py new file mode 100644 index 0000000000..82987e80d0 --- /dev/null +++ b/meta/lib/oe/utils.py | |||
@@ -0,0 +1,152 @@ | |||
1 | try: | ||
2 | # Python 2 | ||
3 | import commands as cmdstatus | ||
4 | except ImportError: | ||
5 | # Python 3 | ||
6 | import subprocess as cmdstatus | ||
7 | |||
def read_file(filename):
    """Return the stripped contents of <filename>, or "" if it cannot be read.

    WARNING: can't raise an error on failure because of the new RDEPENDS
    handling. This is a bit ugly. :M:
    """
    # Rewritten with a context manager (no leaked handle) and without the
    # unreachable trailing 'return None' the original carried.
    try:
        with open(filename, "r") as f:
            return f.read().strip()
    except IOError:
        return ""
18 | |||
def ifelse(condition, iftrue = True, iffalse = False):
    """Expression-style conditional: iftrue when condition holds, else iffalse."""
    return iftrue if condition else iffalse
24 | |||
def conditional(variable, checkvalue, truevalue, falsevalue, d):
    """Return truevalue when the datastore value of <variable> equals
    checkvalue, otherwise falsevalue."""
    return truevalue if d.getVar(variable, 1) == checkvalue else falsevalue
30 | |||
def less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
    """Numeric comparison: truevalue when float(<variable>) <= float(checkvalue)."""
    current = float(d.getVar(variable, 1))
    if current <= float(checkvalue):
        return truevalue
    return falsevalue
36 | |||
def version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
    """Version-aware comparison via bitbake's vercmp_string: truevalue when
    the version stored in <variable> is <= checkvalue."""
    # NOTE(review): relies on 'bb' being injected into the namespace by
    # bitbake; this module has no explicit 'import bb'.
    cmp_result = bb.utils.vercmp_string(d.getVar(variable, True), checkvalue)
    return truevalue if cmp_result <= 0 else falsevalue
43 | |||
def contains(variable, checkvalues, truevalue, falsevalue, d):
    """Return truevalue when every item of checkvalues is present in the
    whitespace-separated value of <variable>, otherwise falsevalue.

    checkvalues may be a string (split on whitespace) or any iterable."""
    val = d.getVar(variable, True)
    if not val:
        return falsevalue
    have = set(val.split())
    # NOTE: basestring makes this Python-2-only as written.
    if isinstance(checkvalues, basestring):
        want = set(checkvalues.split())
    else:
        want = set(checkvalues)
    return truevalue if want.issubset(have) else falsevalue
56 | |||
def both_contain(variable1, variable2, checkvalue, d):
    """Return checkvalue when it occurs (as a substring) in the values of
    both variables, otherwise the empty string."""
    first = d.getVar(variable1, 1)
    second = d.getVar(variable2, 1)
    if checkvalue in first and checkvalue in second:
        return checkvalue
    return ""
62 | |||
def prune_suffix(var, suffixes, d):
    """Strip any matching suffix in <suffixes> from the end of <var>, and a
    leading MLPREFIX (multilib prefix) from its start.

    Fixed to cut only the trailing/leading occurrence: the original used
    str.replace(), which also removed matches from the middle of the name
    (e.g. "x-dev-y-dev" lost both "-dev" substrings).
    """
    for suffix in suffixes:
        if suffix and var.endswith(suffix):
            var = var[:-len(suffix)]

    prefix = d.getVar("MLPREFIX", True)
    if prefix and var.startswith(prefix):
        var = var[len(prefix):]

    return var
75 | |||
def str_filter(f, str, d):
    """Keep only the whitespace-separated items of 'str' that match the
    regex 'f' (anchored at the start of each item)."""
    from re import match
    kept = [item for item in str.split() if match(f, item, 0)]
    return " ".join(kept)
79 | |||
def str_filter_out(f, str, d):
    """Drop the whitespace-separated items of 'str' that match the regex
    'f' (anchored at the start of each item)."""
    from re import match
    kept = [item for item in str.split() if not match(f, item, 0)]
    return " ".join(kept)
83 | |||
def param_bool(cfg, field, dflt = None):
    """Lookup <field> in <cfg> map and convert it to a boolean; take
    <dflt> when this <field> does not exist"""
    value = cfg.get(field, dflt)
    strvalue = str(value).lower()
    if strvalue in ('yes', 'y', 'true', 't', '1'):
        return True
    if strvalue in ('no', 'n', 'false', 'f', '0'):
        return False
    # Note: a missing field with the default dflt=None ends up here too
    # ("none" is not a valid boolean spelling).
    raise ValueError("invalid value for boolean parameter '%s': '%s'" % (field, value))
94 | |||
def inherits(d, *classes):
    """Return True if the metadata inherits any of the specified classes"""
    # NOTE(review): relies on 'bb' being injected by bitbake (no import here).
    for cls in classes:
        if bb.data.inherits_class(cls, d):
            return True
    return False
98 | |||
def features_backfill(var, d):
    """Append to <var> every feature in <var>_BACKFILL that is neither
    already present in <var> nor listed in <var>_BACKFILL_CONSIDERED.

    Example for var = "DISTRO_FEATURES": this lets newly introduced
    features default on (so existing functionality keeps working) without
    disturbing distros that already set DISTRO_FEATURES; a distro opts out
    of a backfilled value by adding it to DISTRO_FEATURES_BACKFILL_CONSIDERED.
    """
    current = (d.getVar(var, True) or "").split()
    backfill = (d.getVar(var + "_BACKFILL", True) or "").split()
    considered = (d.getVar(var + "_BACKFILL_CONSIDERED", True) or "").split()

    missing = [feature for feature in backfill
               if feature not in current and feature not in considered]

    if missing:
        d.appendVar(var, " " + " ".join(missing))
119 | |||
120 | |||
def packages_filter_out_system(d):
    """
    Return a list of packages from PACKAGES with the "system" packages such as
    PN-dbg PN-doc PN-locale-eb-gb removed.
    """
    pn = d.getVar('PN', True)
    # Materialized as a tuple: the original built this with map(), which on
    # Python 3 is a one-shot iterator and would be exhausted after the
    # first membership test in the loop below.
    blacklist = tuple(pn + suffix for suffix in
                      ('', '-dbg', '-dev', '-doc', '-locale', '-staticdev'))
    localepkg = pn + "-locale-"
    pkgs = []

    for pkg in d.getVar('PACKAGES', True).split():
        # Skip exact system-package names and any per-locale subpackage.
        if pkg not in blacklist and localepkg not in pkg:
            pkgs.append(pkg)
    return pkgs
135 | |||
def getstatusoutput(cmd):
    # Thin wrapper over commands.getstatusoutput (Python 2) or
    # subprocess.getstatusoutput (Python 3), per the compat import at the
    # top of this module; returns (exitstatus, output).
    return cmdstatus.getstatusoutput(cmd)
138 | |||
139 | |||
def trim_version(version, num_parts=2):
    """
    Return just the first <num_parts> of <version>, split by periods. For
    example, trim_version("1.2.3", 2) will return "1.2". If the version has
    fewer components than num_parts the whole string is returned.

    Raises TypeError if version is not a string and ValueError if
    num_parts is less than 1.
    """
    # isinstance instead of 'type(...) is not str' so str subclasses are
    # accepted too.
    if not isinstance(version, str):
        raise TypeError("Version should be a string")
    if num_parts < 1:
        raise ValueError("Cannot split to parts < 1")

    return ".".join(version.split(".")[:num_parts])
diff --git a/meta/lib/oeqa/__init__.py b/meta/lib/oeqa/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/meta/lib/oeqa/__init__.py | |||
diff --git a/meta/lib/oeqa/oetest.py b/meta/lib/oeqa/oetest.py new file mode 100644 index 0000000000..529abdc19a --- /dev/null +++ b/meta/lib/oeqa/oetest.py | |||
@@ -0,0 +1,120 @@ | |||
1 | # Copyright (C) 2013 Intel Corporation | ||
2 | # | ||
3 | # Released under the MIT license (see COPYING.MIT) | ||
4 | |||
5 | # Main unittest module used by testimage.bbclass | ||
6 | # This provides the oeRuntimeTest base class which is inherited by all tests in meta/lib/oeqa/runtime. | ||
7 | |||
8 | # It also has some helper functions and it's responsible for actually starting the tests | ||
9 | |||
10 | import os, re, mmap | ||
11 | import unittest | ||
12 | import inspect | ||
13 | import bb | ||
14 | from oeqa.utils.sshcontrol import SSHControl | ||
15 | |||
16 | |||
def runTests(tc):
    """Load and run the runtime test suite described by test context 'tc'.

    Injects the context into oeRuntimeTest, loads the modules named in
    tc.testslist in order, and returns the unittest result object."""

    # set the context object passed from the test class
    setattr(oeRuntimeTest, "tc", tc)
    # set ps command to use (busybox ps has no -ef)
    setattr(oeRuntimeTest, "pscmd", "ps -ef" if oeRuntimeTest.hasPackage("procps") else "ps")

    # Prepare loader and runner; keep test methods in definition order.
    # (The original also created an empty TestSuite here that was
    # immediately overwritten below -- dead code, removed.)
    testloader = unittest.TestLoader()
    testloader.sortTestMethodsUsing = None
    runner = unittest.TextTestRunner(verbosity=2)

    bb.note("Test modules %s" % tc.testslist)
    suite = testloader.loadTestsFromNames(tc.testslist)
    bb.note("Found %s tests" % suite.countTestCases())

    result = runner.run(suite)

    return result
36 | |||
37 | |||
38 | |||
class oeRuntimeTest(unittest.TestCase):
    """Base class for image runtime tests.

    The class attribute 'tc' (the test context) is injected by runTests()
    before any test executes; it carries the target connection, the
    bitbake datastore ('d') and the qemu handle used below.
    """

    longMessage = True
    # Class-wide records of test outcomes, consumed by the decorators in
    # oeqa.utils.decorators (e.g. skipUnlessPassed).
    testFailures = []
    testSkipped = []
    testErrors = []

    def __init__(self, methodName='runTest'):
        # Shortcut to the target controller from the injected test context.
        self.target = oeRuntimeTest.tc.target
        super(oeRuntimeTest, self).__init__(methodName)

    def run(self, result=None):
        super(oeRuntimeTest, self).run(result)

        # we add to our own lists the results, we use those for decorators
        # (a grown result list means this very test failed/skipped/errored;
        # only the test name, the first whitespace-separated token of the
        # test id string, is recorded)
        if len(result.failures) > len(oeRuntimeTest.testFailures):
            oeRuntimeTest.testFailures.append(str(result.failures[-1][0]).split()[0])
        if len(result.skipped) > len(oeRuntimeTest.testSkipped):
            oeRuntimeTest.testSkipped.append(str(result.skipped[-1][0]).split()[0])
        if len(result.errors) > len(oeRuntimeTest.testErrors):
            oeRuntimeTest.testErrors.append(str(result.errors[-1][0]).split()[0])

    @classmethod
    def hasPackage(self, pkg):
        # True if 'pkg' matches (as a regex) anywhere in the installed
        # packages manifest written into WORKDIR during the image build.
        pkgfile = os.path.join(oeRuntimeTest.tc.d.getVar("WORKDIR", True), "installed_pkgs.txt")

        with open(pkgfile) as f:
            # mmap so the manifest can be regex-searched without reading
            # it all into a Python string.
            data = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
            match = re.search(pkg, data)
            data.close()

        if match:
            return True

        return False

    @classmethod
    def hasFeature(self,feature):
        # True if 'feature' appears in IMAGE_FEATURES or DISTRO_FEATURES.
        if feature in oeRuntimeTest.tc.d.getVar("IMAGE_FEATURES", True).split() or \
                feature in oeRuntimeTest.tc.d.getVar("DISTRO_FEATURES", True).split():
            return True
        else:
            return False

    @classmethod
    def restartTarget(self,params=None):
        # Reboot the qemu target (optionally with extra qemu parameters,
        # e.g. "-m 512") and pick up its possibly-changed IP address.
        if oeRuntimeTest.tc.qemu.restart(params):
            oeRuntimeTest.tc.target.host = oeRuntimeTest.tc.qemu.ip
        else:
            raise Exception("Restarting target failed")
93 | |||
94 | |||
def getmodule(pos=2):
    """Return the module name of the frame at stack depth 'pos'
    (0 = this function, 1 = caller, 2 = caller's caller)."""
    # inspect.stack() returns frame-info tuples; index 1 is the filename.
    frame_file = inspect.stack()[pos][1]
    return inspect.getmodulename(frame_file)
102 | |||
def skipModule(reason, pos=2):
    """Skip the calling module's tests, unless the module was explicitly
    required via TEST_SUITES -- in that case raise a hard error, because a
    required module's prerequisites should have been present."""
    modname = getmodule(pos)
    if modname in oeRuntimeTest.tc.testsrequired:
        raise Exception("\nTest %s wants to be skipped.\nReason is: %s" \
                "\nTest was required in TEST_SUITES, so either the condition for skipping is wrong" \
                "\nor the image really doesn't have the requred feature/package when it should." % (modname, reason))
    raise unittest.SkipTest("%s: %s" % (modname, reason))
111 | |||
def skipModuleIf(cond, reason):
    """Skip the calling module when 'cond' is true."""
    if not cond:
        return
    # pos=3: skip the module of our caller, not of this helper.
    skipModule(reason, 3)
116 | |||
def skipModuleUnless(cond, reason):
    """Skip the calling module when 'cond' is false."""
    if cond:
        return
    # pos=3: skip the module of our caller, not of this helper.
    skipModule(reason, 3)
diff --git a/meta/lib/oeqa/runtime/__init__.py b/meta/lib/oeqa/runtime/__init__.py new file mode 100644 index 0000000000..4cf3fa76b6 --- /dev/null +++ b/meta/lib/oeqa/runtime/__init__.py | |||
@@ -0,0 +1,3 @@ | |||
# Enable other layers to have tests in the same named directory
# (pkgutil.extend_path turns this into a namespace-style package, so
# oeqa/runtime directories from multiple layers are merged).
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
diff --git a/meta/lib/oeqa/runtime/buildcvs.py b/meta/lib/oeqa/runtime/buildcvs.py new file mode 100644 index 0000000000..f024dfa99a --- /dev/null +++ b/meta/lib/oeqa/runtime/buildcvs.py | |||
@@ -0,0 +1,32 @@ | |||
# skipModule lives in oeqa.oetest; import it explicitly so setUpModule works.
from oeqa.oetest import oeRuntimeTest, skipModule
from oeqa.utils.decorators import *
from oeqa.utils.targetbuild import TargetBuildProject
4 | |||
def setUpModule():
    """Skip this module unless the image ships a toolchain (tools-sdk)."""
    has_toolchain = oeRuntimeTest.hasFeature("tools-sdk")
    if not has_toolchain:
        skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES")
8 | |||
class BuildCvsTest(oeRuntimeTest):
    # Builds CVS from source on the target to exercise the on-image
    # toolchain (tools-sdk is checked in setUpModule above).

    @classmethod
    def setUpClass(self):
        # Restart qemu with extra memory for the build, then fetch the
        # source archive onto the target.
        self.restartTarget("-m 512")
        self.project = TargetBuildProject(oeRuntimeTest.tc.target,
                        "http://ftp.gnu.org/non-gnu/cvs/source/feature/1.12.13/cvs-1.12.13.tar.bz2")
        self.project.download_archive()

    @skipUnlessPassed("test_ssh")
    def test_cvs(self):
        # configure / make / make install must all succeed on the target.
        self.assertEqual(self.project.run_configure(), 0,
                        msg="Running configure failed")

        self.assertEqual(self.project.run_make(), 0,
                        msg="Running make failed")

        self.assertEqual(self.project.run_install(), 0,
                        msg="Running make install failed")

    @classmethod
    def tearDownClass(self):
        # Remove build artifacts and restart the target with default memory.
        self.project.clean()
        self.restartTarget()
diff --git a/meta/lib/oeqa/runtime/buildiptables.py b/meta/lib/oeqa/runtime/buildiptables.py new file mode 100644 index 0000000000..88ece3bd8a --- /dev/null +++ b/meta/lib/oeqa/runtime/buildiptables.py | |||
@@ -0,0 +1,32 @@ | |||
# skipModule lives in oeqa.oetest; import it explicitly so setUpModule works.
from oeqa.oetest import oeRuntimeTest, skipModule
from oeqa.utils.decorators import *
from oeqa.utils.targetbuild import TargetBuildProject
4 | |||
def setUpModule():
    """Skip this module unless the image ships a toolchain (tools-sdk)."""
    has_toolchain = oeRuntimeTest.hasFeature("tools-sdk")
    if not has_toolchain:
        skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES")
8 | |||
class BuildIptablesTest(oeRuntimeTest):
    # Builds iptables from source on the target to exercise the on-image
    # toolchain (tools-sdk is checked in setUpModule above).

    @classmethod
    def setUpClass(self):
        # Restart qemu with extra memory for the build, then fetch the
        # source archive onto the target.
        self.restartTarget("-m 512")
        self.project = TargetBuildProject(oeRuntimeTest.tc.target,
                        "http://netfilter.org/projects/iptables/files/iptables-1.4.13.tar.bz2")
        self.project.download_archive()

    @skipUnlessPassed("test_ssh")
    def test_iptables(self):
        # configure / make / make install must all succeed on the target.
        self.assertEqual(self.project.run_configure(), 0,
                        msg="Running configure failed")

        self.assertEqual(self.project.run_make(), 0,
                        msg="Running make failed")

        self.assertEqual(self.project.run_install(), 0,
                        msg="Running make install failed")

    @classmethod
    def tearDownClass(self):
        # Remove build artifacts and restart the target with default memory.
        self.project.clean()
        self.restartTarget()
diff --git a/meta/lib/oeqa/runtime/buildsudoku.py b/meta/lib/oeqa/runtime/buildsudoku.py new file mode 100644 index 0000000000..0a7306ddc7 --- /dev/null +++ b/meta/lib/oeqa/runtime/buildsudoku.py | |||
@@ -0,0 +1,29 @@ | |||
# skipModule lives in oeqa.oetest; import it explicitly so setUpModule works.
from oeqa.oetest import oeRuntimeTest, skipModule
from oeqa.utils.decorators import *
from oeqa.utils.targetbuild import TargetBuildProject
4 | |||
def setUpModule():
    """Skip this module unless the image ships a toolchain (tools-sdk)."""
    has_toolchain = oeRuntimeTest.hasFeature("tools-sdk")
    if not has_toolchain:
        skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES")
8 | |||
class SudokuTest(oeRuntimeTest):
    # Builds sudoku-savant from source on the target (configure + make
    # only, no install) to exercise the on-image toolchain.

    @classmethod
    def setUpClass(self):
        # Restart qemu with extra memory for the build, then fetch the
        # source archive onto the target.
        self.restartTarget("-m 512")
        self.project = TargetBuildProject(oeRuntimeTest.tc.target,
                        "http://downloads.sourceforge.net/project/sudoku-savant/sudoku-savant/sudoku-savant-1.3/sudoku-savant-1.3.tar.bz2")
        self.project.download_archive()

    @skipUnlessPassed("test_ssh")
    def test_sudoku(self):
        self.assertEqual(self.project.run_configure(), 0,
                        msg="Running configure failed")

        self.assertEqual(self.project.run_make(), 0,
                        msg="Running make failed")

    @classmethod
    def tearDownClass(self):
        # Remove build artifacts and restart the target with default memory.
        self.project.clean()
        self.restartTarget()
diff --git a/meta/lib/oeqa/runtime/connman.py b/meta/lib/oeqa/runtime/connman.py new file mode 100644 index 0000000000..5ef96f6b06 --- /dev/null +++ b/meta/lib/oeqa/runtime/connman.py | |||
@@ -0,0 +1,29 @@ | |||
1 | import unittest | ||
2 | from oeqa.oetest import oeRuntimeTest, skipModule | ||
3 | from oeqa.utils.decorators import * | ||
4 | |||
def setUpModule():
    """Skip this module when the image has no connman package installed."""
    if oeRuntimeTest.hasPackage("connman"):
        return
    skipModule("No connman package in image")
8 | |||
9 | |||
class ConnmanTest(oeRuntimeTest):
    # Checks that the connman network daemon is installed, running, and
    # refuses to start a second instance.

    @skipUnlessPassed('test_ssh')
    def test_connmand_help(self):
        # Sanity: the daemon binary exists and is runnable.
        (status, output) = self.target.run('/usr/sbin/connmand --help')
        self.assertEqual(status, 0, msg="status and output: %s and %s" % (status,output))


    @skipUnlessPassed('test_connmand_help')
    def test_connmand_running(self):
        # The daemon should already be running; the [c]onnmand pattern
        # keeps grep from matching its own process entry.
        (status, output) = self.target.run(oeRuntimeTest.pscmd + ' | grep [c]onnmand')
        self.assertEqual(status, 0, msg="no connmand process, ps output: %s" % self.target.run(oeRuntimeTest.pscmd)[1])

    @skipUnlessPassed('test_connmand_running')
    def test_connmand_unique(self):
        # Starting connmand again must not produce a second instance.
        self.target.run('/usr/sbin/connmand')
        output = self.target.run(oeRuntimeTest.pscmd + ' | grep -c [c]onnmand')[1]
        self.assertEqual(output, "1", msg="more than one connmand running in background, ps output: %s\n%s" % (output, self.target.run(oeRuntimeTest.pscmd)[1]))
28 | |||
29 | |||
diff --git a/meta/lib/oeqa/runtime/date.py b/meta/lib/oeqa/runtime/date.py new file mode 100644 index 0000000000..a208e29ada --- /dev/null +++ b/meta/lib/oeqa/runtime/date.py | |||
@@ -0,0 +1,22 @@ | |||
1 | from oeqa.oetest import oeRuntimeTest | ||
2 | from oeqa.utils.decorators import * | ||
3 | import re | ||
4 | |||
class DateTest(oeRuntimeTest):
    # Verifies the target's date can be set and read back correctly.

    @skipUnlessPassed("test_ssh")
    def test_date(self):
        # Save the current date so it can be restored afterwards.
        (status, output) = self.target.run('date +"%Y-%m-%d %T"')
        self.assertEqual(status, 0, msg="Failed to get initial date, output: %s" % output)
        oldDate = output

        # Set a known date, then verify via 'date -R' (RFC-2822 format);
        # the seconds are wildcarded to allow for command latency.
        sampleDate = '"2016-08-09 10:00:00"'
        (status, output) = self.target.run("date -s %s" % sampleDate)
        self.assertEqual(status, 0, msg="Date set failed, output: %s" % output)

        (status, output) = self.target.run("date -R")
        p = re.match('Tue, 09 Aug 2016 10:00:.. \+0000', output)
        self.assertTrue(p, msg="The date was not set correctly, output: %s" % output)

        # Restore the original date.
        (status, output) = self.target.run('date -s "%s"' % oldDate)
        self.assertEqual(status, 0, msg="Failed to reset date, output: %s" % output)
diff --git a/meta/lib/oeqa/runtime/df.py b/meta/lib/oeqa/runtime/df.py new file mode 100644 index 0000000000..b6da35027c --- /dev/null +++ b/meta/lib/oeqa/runtime/df.py | |||
@@ -0,0 +1,11 @@ | |||
1 | import unittest | ||
2 | from oeqa.oetest import oeRuntimeTest | ||
3 | from oeqa.utils.decorators import * | ||
4 | |||
5 | |||
class DfTest(oeRuntimeTest):
    # Checks that the root filesystem has free space left after install.

    @skipUnlessPassed("test_ssh")
    def test_df(self):
        # Column 4 of df's second line is the available space (in 1K
        # blocks); require more than 5120 KiB free on /.
        (status,output) = self.target.run("df / | sed -n '2p' | awk '{print $4}'")
        self.assertTrue(int(output)>5120, msg="Not enough space on image. Current size is %s" % output)
diff --git a/meta/lib/oeqa/runtime/dmesg.py b/meta/lib/oeqa/runtime/dmesg.py new file mode 100644 index 0000000000..a53d1f0bf3 --- /dev/null +++ b/meta/lib/oeqa/runtime/dmesg.py | |||
@@ -0,0 +1,11 @@ | |||
1 | import unittest | ||
2 | from oeqa.oetest import oeRuntimeTest | ||
3 | from oeqa.utils.decorators import * | ||
4 | |||
5 | |||
class DmesgTest(oeRuntimeTest):
    # Scans the kernel log for error messages.

    @skipUnlessPassed('test_ssh')
    def test_dmesg(self):
        # Fail if dmesg contains "error" lines, ignoring two known-benign
        # sources (mmci-pl18x probe noise and interface-rename warnings).
        # grep exits 1 when nothing matched, which is the pass condition.
        (status, output) = self.target.run('dmesg | grep -v mmci-pl18x | grep -v "error changing net interface name" | grep -i error')
        self.assertEqual(status, 1, msg = "Error messages in dmesg log: %s" % output)
diff --git a/meta/lib/oeqa/runtime/files/test.c b/meta/lib/oeqa/runtime/files/test.c new file mode 100644 index 0000000000..2d8389c92e --- /dev/null +++ b/meta/lib/oeqa/runtime/files/test.c | |||
@@ -0,0 +1,26 @@ | |||
/* Fixture compiled on the target by the gcc/g++ runtime tests.
 * Exercises long long -> double conversion and libm linkage (floorf,
 * hence the -lm in the test's compile command); exits 1 on any
 * mismatch, 0 on success. */
#include <stdio.h>
#include <math.h>
#include <stdlib.h>

/* Force an actual function call for the integer-to-double conversion. */
double convert(long long l)
{
	return (double)l;
}

int main(int argc, char * argv[]) {

	long long l = 10;
	double f;
	double check = 10.0;

	f = convert(l);
	printf("convert: %lld => %f\n", l, f);
	if ( f != check ) exit(1);

	/* floorf(1234.67) must give exactly 1234.0 */
	f = 1234.67;
	check = 1234.0;
	printf("floorf(%f) = %f\n", f, floorf(f));
	if ( floorf(f) != check) exit(1);

	return 0;
}
diff --git a/meta/lib/oeqa/runtime/files/test.pl b/meta/lib/oeqa/runtime/files/test.pl new file mode 100644 index 0000000000..689c8f1635 --- /dev/null +++ b/meta/lib/oeqa/runtime/files/test.pl | |||
@@ -0,0 +1,2 @@ | |||
# Fixture run on the target by the perl runtime test: exercises floating
# point arithmetic/round-off and prints the result for the test to check.
$a = 9.01e+21 - 9.01e+21 + 0.01;
print ("the value of a is ", $a, "\n");
diff --git a/meta/lib/oeqa/runtime/files/testmakefile b/meta/lib/oeqa/runtime/files/testmakefile new file mode 100644 index 0000000000..ca1844e930 --- /dev/null +++ b/meta/lib/oeqa/runtime/files/testmakefile | |||
@@ -0,0 +1,5 @@ | |||
# Fixture makefile used by the gcc runtime test ("make -f testmakefile");
# builds the test.c fixture, linking against libm for floorf.
test: test.o
	gcc -o test test.o -lm
test.o: test.c
	gcc -c test.c

diff --git a/meta/lib/oeqa/runtime/gcc.py b/meta/lib/oeqa/runtime/gcc.py new file mode 100644 index 0000000000..b63badd3e4 --- /dev/null +++ b/meta/lib/oeqa/runtime/gcc.py | |||
@@ -0,0 +1,36 @@ | |||
1 | import unittest | ||
2 | import os | ||
3 | from oeqa.oetest import oeRuntimeTest, skipModule | ||
4 | from oeqa.utils.decorators import * | ||
5 | |||
def setUpModule():
    """Skip every test here unless the image ships a native toolchain."""
    if oeRuntimeTest.hasFeature("tools-sdk"):
        return
    skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES")
9 | |||
10 | |||
class GccCompileTest(oeRuntimeTest):
    """Compile and run a small C program on the target with gcc, g++ and make."""

    @classmethod
    def setUpClass(cls):
        # PEP 8: the first argument of a classmethod is 'cls', not 'self'.
        # Copy the sources used by every test; removed again in tearDownClass.
        oeRuntimeTest.tc.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "test.c"), "/tmp/test.c")
        oeRuntimeTest.tc.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "testmakefile"), "/tmp/testmakefile")

    def test_gcc_compile(self):
        """gcc must compile the sample and the binary must exit 0."""
        (status, output) = self.target.run('gcc /tmp/test.c -o /tmp/test -lm')
        self.assertEqual(status, 0, msg="gcc compile failed, output: %s" % output)
        (status, output) = self.target.run('/tmp/test')
        self.assertEqual(status, 0, msg="running compiled file failed, output %s" % output)

    def test_gpp_compile(self):
        """g++ must also be able to build the (C-compatible) sample."""
        (status, output) = self.target.run('g++ /tmp/test.c -o /tmp/test -lm')
        self.assertEqual(status, 0, msg="g++ compile failed, output: %s" % output)
        (status, output) = self.target.run('/tmp/test')
        self.assertEqual(status, 0, msg="running compiled file failed, output %s" % output)

    def test_make(self):
        """make must drive the copied testmakefile to completion."""
        (status, output) = self.target.run('cd /tmp; make -f testmakefile')
        self.assertEqual(status, 0, msg="running make failed, output %s" % output)

    @classmethod
    def tearDownClass(cls):
        oeRuntimeTest.tc.target.run("rm /tmp/test.c /tmp/test.o /tmp/test /tmp/testmakefile")
diff --git a/meta/lib/oeqa/runtime/ldd.py b/meta/lib/oeqa/runtime/ldd.py new file mode 100644 index 0000000000..4374530fc4 --- /dev/null +++ b/meta/lib/oeqa/runtime/ldd.py | |||
@@ -0,0 +1,19 @@ | |||
1 | import unittest | ||
2 | from oeqa.oetest import oeRuntimeTest | ||
3 | from oeqa.utils.decorators import * | ||
4 | |||
def setUpModule():
    """Skip the module unless the image ships a toolchain (and hence ldd)."""
    # This module only imports oeRuntimeTest from oeqa.oetest; skipModule is
    # otherwise available solely through star-import leakage from the
    # decorators module. Import it explicitly so the skip cannot NameError.
    from oeqa.oetest import skipModule
    if not oeRuntimeTest.hasFeature("tools-sdk"):
        skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES")
8 | |||
class LddTest(oeRuntimeTest):
    """Check that ldd is present and its RTLDLIST points at real loaders."""

    @skipUnlessPassed('test_ssh')
    def test_ldd_exists(self):
        (status, output) = self.target.run('which ldd')
        self.assertEqual(status, 0, msg = "ldd does not exist in PATH: which ldd: %s" % output)

    @skipUnlessPassed('test_ldd_exists')
    def test_ldd_rtldlist_check(self):
        # ldd is a shell script that defines RTLDLIST=<dynamic loaders>.
        # Extract that list and succeed as soon as one listed loader exists
        # on the filesystem ('break' exits the loop with status 0).
        (status, output) = self.target.run('for i in $(which ldd | xargs cat | grep "^RTLDLIST"|cut -d\'=\' -f2|tr -d \'"\'); do test -f $i && echo $i && break; done')
        self.assertEqual(status, 0, msg = "ldd path not correct or RTLDLIST files don't exist. ")
diff --git a/meta/lib/oeqa/runtime/logrotate.py b/meta/lib/oeqa/runtime/logrotate.py new file mode 100644 index 0000000000..80489a3267 --- /dev/null +++ b/meta/lib/oeqa/runtime/logrotate.py | |||
@@ -0,0 +1,27 @@ | |||
1 | # This test should cover https://bugzilla.yoctoproject.org/tr_show_case.cgi?case_id=289 testcase | ||
2 | # Note that the image under test must have logrotate installed | ||
3 | |||
4 | import unittest | ||
5 | from oeqa.oetest import oeRuntimeTest, skipModule | ||
6 | from oeqa.utils.decorators import * | ||
7 | |||
def setUpModule():
    """Skip the whole module when logrotate is not installed on the target."""
    if oeRuntimeTest.hasPackage("logrotate"):
        return
    skipModule("No logrotate package in image")
11 | |||
12 | |||
class LogrotateTest(oeRuntimeTest):
    """Configure an olddir for wtmp and check logrotate rotates into it.

    Covers https://bugzilla.yoctoproject.org/tr_show_case.cgi?case_id=289.
    The numeric prefixes on the test names enforce execution order.
    """

    @skipUnlessPassed("test_ssh")
    def test_1_logrotate_setup(self):
        (status, output) = self.target.run('mkdir /home/root/logrotate_dir')
        self.assertEqual(status, 0, msg = "Could not create logrotate_dir. Output: %s" % output)
        # Insert an 'olddir' directive right after the wtmp stanza opener so
        # rotated wtmp logs land in /home/root/logrotate_dir.
        (status, output) = self.target.run("sed -i 's#wtmp {#wtmp {\\n    olddir /home/root/logrotate_dir#' /etc/logrotate.conf")
        self.assertEqual(status, 0, msg = "Could not write to logrotate.conf file. Status and output: %s and %s)" % (status, output))

    @skipUnlessPassed("test_1_logrotate_setup")
    def test_2_logrotate(self):
        (status, output) = self.target.run('logrotate -f /etc/logrotate.conf')
        self.assertEqual(status, 0, msg = "logrotate service could not be reloaded. Status and output: %s and %s" % (status, output))
        # 'ls -la | wc -l' counts the total line plus '.' and '..', so >= 3
        # means at least one rotated file appeared in olddir.
        output = self.target.run('ls -la /home/root/logrotate_dir/ | wc -l')[1]
        self.assertTrue(int(output)>=3, msg = "new logfile could not be created. List of files within log directory: %s" %(self.target.run('ls -la /home/root/logrotate_dir')[1]))
diff --git a/meta/lib/oeqa/runtime/multilib.py b/meta/lib/oeqa/runtime/multilib.py new file mode 100644 index 0000000000..13a3b54b18 --- /dev/null +++ b/meta/lib/oeqa/runtime/multilib.py | |||
@@ -0,0 +1,17 @@ | |||
1 | import unittest | ||
2 | from oeqa.oetest import oeRuntimeTest, skipModule | ||
3 | from oeqa.utils.decorators import * | ||
4 | |||
def setUpModule():
    """Only run on images built with a lib32 multilib configuration."""
    # MULTILIBS is unset on non-multilib builds; default to "" so the
    # membership test below is always safe.
    configured = oeRuntimeTest.tc.d.getVar("MULTILIBS", True) or ""
    if "multilib:lib32" not in configured:
        skipModule("this isn't a multilib:lib32 image")
9 | |||
10 | |||
class MultilibTest(oeRuntimeTest):
    """Verify that a multilib image actually ships 32-bit binaries."""

    @skipUnlessPassed('test_ssh')
    def test_file_connman(self):
        self.assertTrue(oeRuntimeTest.hasPackage('connman-gnome'), msg="This test assumes connman-gnome is installed")
        # Line 3, field 2 of 'readelf -h' output is the ELF class.
        elf_class = self.target.run("readelf -h /usr/bin/connman-applet | sed -n '3p' | awk '{print $2}'")[1]
        self.assertEqual(elf_class, "ELF32", msg="connman-applet isn't an ELF32 binary. readelf says: %s" % self.target.run("readelf -h /usr/bin/connman-applet")[1])
diff --git a/meta/lib/oeqa/runtime/pam.py b/meta/lib/oeqa/runtime/pam.py new file mode 100644 index 0000000000..52e1eb88e6 --- /dev/null +++ b/meta/lib/oeqa/runtime/pam.py | |||
@@ -0,0 +1,24 @@ | |||
1 | # This test should cover https://bugzilla.yoctoproject.org/tr_show_case.cgi?case_id=287 testcase | ||
2 | # Note that the image under test must have "pam" in DISTRO_FEATURES | ||
3 | |||
4 | import unittest | ||
5 | from oeqa.oetest import oeRuntimeTest | ||
6 | from oeqa.utils.decorators import * | ||
7 | |||
def setUpModule():
    """Skip unless the image was built with PAM support."""
    # This module only imports oeRuntimeTest from oeqa.oetest; skipModule is
    # otherwise available solely via star-import leakage from the decorators
    # module. Import it explicitly so the skip cannot NameError.
    from oeqa.oetest import skipModule
    if not oeRuntimeTest.hasFeature("pam"):
        skipModule("target doesn't have 'pam' in DISTRO_FEATURES")
11 | |||
12 | |||
class PamBasicTest(oeRuntimeTest):
    """Probe the --help exit codes of the shadow/login utilities.

    Covers https://bugzilla.yoctoproject.org/tr_show_case.cgi?case_id=287.
    """

    @skipUnlessPassed('test_ssh')
    def test_pam(self):
        # NOTE(review): the expected statuses (1, 6, 2, 2) are the observed
        # --help exit codes of the PAM-enabled shadow utilities; confirm they
        # still hold if the shadow/util-linux versions change.
        (status, output) = self.target.run('login --help')
        self.assertEqual(status, 1, msg = "login command does not work as expected. Status and output:%s and %s" %(status, output))
        (status, output) = self.target.run('passwd --help')
        self.assertEqual(status, 6, msg = "passwd command does not work as expected. Status and output:%s and %s" %(status, output))
        (status, output) = self.target.run('su --help')
        self.assertEqual(status, 2, msg = "su command does not work as expected. Status and output:%s and %s" %(status, output))
        (status, output) = self.target.run('useradd --help')
        self.assertEqual(status, 2, msg = "useradd command does not work as expected. Status and output:%s and %s" %(status, output))
diff --git a/meta/lib/oeqa/runtime/perl.py b/meta/lib/oeqa/runtime/perl.py new file mode 100644 index 0000000000..c9bb684c11 --- /dev/null +++ b/meta/lib/oeqa/runtime/perl.py | |||
@@ -0,0 +1,28 @@ | |||
1 | import unittest | ||
2 | import os | ||
3 | from oeqa.oetest import oeRuntimeTest, skipModule | ||
4 | from oeqa.utils.decorators import * | ||
5 | |||
def setUpModule():
    """Skip the whole module when perl is not installed on the target."""
    if oeRuntimeTest.hasPackage("perl"):
        return
    skipModule("No perl package in the image")
9 | |||
10 | |||
class PerlTest(oeRuntimeTest):
    """Run a small perl script on the target and check its output."""

    @classmethod
    def setUpClass(cls):
        # PEP 8: the first argument of a classmethod is 'cls', not 'self'.
        oeRuntimeTest.tc.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "test.pl"), "/tmp/test.pl")

    def test_perl_exists(self):
        (status, output) = self.target.run('which perl')
        self.assertEqual(status, 0, msg="Perl binary not in PATH or not on target.")

    def test_perl_works(self):
        # test.pl exercises floating point cancellation; it must print 0.01.
        (status, output) = self.target.run('perl /tmp/test.pl')
        self.assertEqual(status, 0, msg="Exit status was not 0. Output: %s" % output)
        self.assertEqual(output, "the value of a is 0.01", msg="Incorrect output: %s" % output)

    @classmethod
    def tearDownClass(cls):
        oeRuntimeTest.tc.target.run("rm /tmp/test.pl")
diff --git a/meta/lib/oeqa/runtime/ping.py b/meta/lib/oeqa/runtime/ping.py new file mode 100644 index 0000000000..0d028f9b22 --- /dev/null +++ b/meta/lib/oeqa/runtime/ping.py | |||
@@ -0,0 +1,20 @@ | |||
1 | import subprocess | ||
2 | import unittest | ||
3 | import sys | ||
4 | import time | ||
5 | from oeqa.oetest import oeRuntimeTest | ||
6 | |||
class PingTest(oeRuntimeTest):
    """Ping the target until five replies in a row are received."""

    def test_ping(self):
        log = ''
        streak = 0
        # Try for at most 60 seconds; any lost reply resets the streak.
        deadline = time.time() + 60
        while streak < 5 and time.time() < deadline:
            proc = subprocess.Popen("ping -c 1 %s" % oeRuntimeTest.tc.qemu.ip, shell=True, stdout=subprocess.PIPE)
            log += proc.communicate()[0]
            if proc.poll() == 0:
                streak += 1
            else:
                streak = 0
        self.assertEqual(streak, 5, msg = "Expected 5 consecutive replies, got %d.\nping output is:\n%s" % (streak,log))
diff --git a/meta/lib/oeqa/runtime/rpm.py b/meta/lib/oeqa/runtime/rpm.py new file mode 100644 index 0000000000..154cad5014 --- /dev/null +++ b/meta/lib/oeqa/runtime/rpm.py | |||
@@ -0,0 +1,49 @@ | |||
1 | import unittest | ||
2 | import os | ||
3 | from oeqa.oetest import oeRuntimeTest, skipModule | ||
4 | from oeqa.utils.decorators import * | ||
5 | import oe.packagedata | ||
6 | |||
def setUpModule():
    """Only run when rpm is the image's primary package manager."""
    if not oeRuntimeTest.hasFeature("package-management"):
        skipModule("rpm module skipped: target doesn't have package-management in IMAGE_FEATURES")
    # The first entry in PACKAGE_CLASSES decides the package backend.
    primary_pm = oeRuntimeTest.tc.d.getVar("PACKAGE_CLASSES", True).split()[0]
    if primary_pm != "package_rpm":
        skipModule("rpm module skipped: target doesn't have rpm as primary package manager")
12 | |||
13 | |||
class RpmBasicTest(oeRuntimeTest):
    """Smoke-test the rpm binary itself."""

    @skipUnlessPassed('test_ssh')
    def test_rpm_help(self):
        status, output = self.target.run('rpm --help')
        self.assertEqual(status, 0, msg="status and output: %s and %s" % (status,output))

    @skipUnlessPassed('test_rpm_help')
    def test_rpm_query(self):
        # rpm should be able to find its own package in the rpmdb.
        status, output = self.target.run('rpm -q rpm')
        self.assertEqual(status, 0, msg="status and output: %s and %s" % (status,output))
25 | |||
class RpmInstallRemoveTest(oeRuntimeTest):
    """Install and remove a package (rpm-doc) with rpm on the target."""

    @classmethod
    def setUpClass(cls):
        # PEP 8: the first argument of a classmethod is 'cls', not 'self'.
        # Query TUNE_PKGARCH once instead of twice.
        pkgarch = oeRuntimeTest.tc.d.getVar('TUNE_PKGARCH', True)
        deploydir = os.path.join(oeRuntimeTest.tc.d.getVar('DEPLOY_DIR', True), "rpm", pkgarch)
        pkgdata = oe.packagedata.read_subpkgdata("rpm-doc", oeRuntimeTest.tc.d)
        # pick rpm-doc as a test file to get installed, because it's small and it will always be built for standard targets
        testrpmfile = "rpm-doc-%s-%s.%s.rpm" % (pkgdata["PKGV"], pkgdata["PKGR"], pkgarch)
        oeRuntimeTest.tc.target.copy_to(os.path.join(deploydir,testrpmfile), "/tmp/rpm-doc.rpm")

    @skipUnlessPassed('test_rpm_help')
    def test_rpm_install(self):
        (status, output) = self.target.run('rpm -ivh /tmp/rpm-doc.rpm')
        self.assertEqual(status, 0, msg="Failed to install rpm-doc package: %s" % output)

    @skipUnlessPassed('test_rpm_install')
    def test_rpm_remove(self):
        (status,output) = self.target.run('rpm -e rpm-doc')
        self.assertEqual(status, 0, msg="Failed to remove rpm-doc package: %s" % output)

    @classmethod
    def tearDownClass(cls):
        oeRuntimeTest.tc.target.run('rm -f /tmp/rpm-doc.rpm')
49 | |||
diff --git a/meta/lib/oeqa/runtime/scanelf.py b/meta/lib/oeqa/runtime/scanelf.py new file mode 100644 index 0000000000..b9abf24640 --- /dev/null +++ b/meta/lib/oeqa/runtime/scanelf.py | |||
@@ -0,0 +1,26 @@ | |||
1 | import unittest | ||
2 | from oeqa.oetest import oeRuntimeTest, skipModule | ||
3 | from oeqa.utils.decorators import * | ||
4 | |||
def setUpModule():
    """scanelf lives in pax-utils; skip everything when it is absent."""
    if oeRuntimeTest.hasPackage("pax-utils"):
        return
    skipModule("pax-utils package not installed")
8 | |||
class ScanelfTest(oeRuntimeTest):
    """Use pax-utils' scanelf to assert the image is free of TEXTREL/RPATH issues."""

    def setUp(self):
        # Base command shared by both tests; each appends the flag it checks.
        # setUp resets it for every test, so the appends do not accumulate.
        self.scancmd = 'scanelf --quiet --recursive --mount --ldpath --path'

    @skipUnlessPassed('test_ssh')
    def test_scanelf_textrel(self):
        # Any output means some binary carries text relocations.
        self.scancmd += " --textrel"
        status, findings = self.target.run(self.scancmd)
        self.assertEqual(findings.strip(), "", "\n".join([self.scancmd, findings]))

    @skipUnlessPassed('test_ssh')
    def test_scanelf_rpath(self):
        # Any output means some binary has an embedded RPATH.
        self.scancmd += " --rpath"
        status, findings = self.target.run(self.scancmd)
        self.assertEqual(findings.strip(), "", "\n".join([self.scancmd, findings]))
diff --git a/meta/lib/oeqa/runtime/scp.py b/meta/lib/oeqa/runtime/scp.py new file mode 100644 index 0000000000..03095bf966 --- /dev/null +++ b/meta/lib/oeqa/runtime/scp.py | |||
@@ -0,0 +1,21 @@ | |||
1 | import os | ||
2 | from oeqa.oetest import oeRuntimeTest, skipModule | ||
3 | from oeqa.utils.decorators import skipUnlessPassed | ||
4 | |||
def setUpModule():
    """An ssh server (dropbear or openssh) is required for scp."""
    have_sshd = oeRuntimeTest.hasPackage("dropbear") or oeRuntimeTest.hasPackage("openssh-sshd")
    if not have_sshd:
        skipModule("No ssh package in image")
8 | |||
class ScpTest(oeRuntimeTest):
    """Copy a 4 MB sparse file to the target over scp."""

    @skipUnlessPassed('test_ssh')
    def test_scp_file(self):
        log_dir = oeRuntimeTest.tc.d.getVar("TEST_LOG_DIR", True)
        local_path = os.path.join(log_dir, 'test_scp_file')
        # Seek to 2**22 - 1 and write one line separator, producing a sparse
        # ~4 MB file without actually allocating that much data.
        with open(local_path, 'w') as sparse_file:
            sparse_file.seek(2 ** 22 - 1)
            sparse_file.write(os.linesep)
        status, output = self.target.copy_to(local_path, '/tmp/test_scp_file')
        self.assertEqual(status, 0, msg = "File could not be copied. Output: %s" % output)
        status, output = self.target.run("ls -la /tmp/test_scp_file")
        self.assertEqual(status, 0, msg = "SCP test failed")
diff --git a/meta/lib/oeqa/runtime/skeletoninit.py b/meta/lib/oeqa/runtime/skeletoninit.py new file mode 100644 index 0000000000..557e715a3e --- /dev/null +++ b/meta/lib/oeqa/runtime/skeletoninit.py | |||
@@ -0,0 +1,28 @@ | |||
1 | # This test should cover https://bugzilla.yoctoproject.org/tr_show_case.cgi?case_id=284 testcase | ||
2 | # Note that the image under test must have meta-skeleton layer in bblayers and IMAGE_INSTALL_append = " service" in local.conf | ||
3 | |||
4 | import unittest | ||
5 | from oeqa.oetest import oeRuntimeTest | ||
6 | from oeqa.utils.decorators import * | ||
7 | |||
def setUpModule():
    """Skip unless the meta-skeleton 'service' package is installed."""
    # This module only imports oeRuntimeTest from oeqa.oetest; skipModule is
    # otherwise available solely via star-import leakage from the decorators
    # module. Import it explicitly so the skip cannot NameError.
    from oeqa.oetest import skipModule
    if not oeRuntimeTest.hasPackage("service"):
        skipModule("No service package in image")
11 | |||
12 | |||
class SkeletonBasicTest(oeRuntimeTest):
    """Check the meta-skeleton sysvinit 'skeleton' service is installed and starts.

    Covers https://bugzilla.yoctoproject.org/tr_show_case.cgi?case_id=284.
    The skipIf decorators are evaluated at class-creation time, so the init
    manager lookup happens once, at import.
    """

    @skipUnlessPassed('test_ssh')
    @unittest.skipIf("systemd" == oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager"), "Not appropiate for systemd image")
    def test_skeleton_availability(self):
        # NOTE(review): getVar is called without the expand flag here, unlike
        # the rest of the suite which uses getVar(..., True) — confirm the
        # unexpanded value is still comparable to "systemd".
        (status, output) = self.target.run('ls /etc/init.d/skeleton')
        self.assertEqual(status, 0, msg = "skeleton init script not found. Output:\n%s " % output)
        (status, output) = self.target.run('ls /usr/sbin/skeleton-test')
        self.assertEqual(status, 0, msg = "skeleton-test not found. Output:\n%s" % output)

    @skipUnlessPassed('test_skeleton_availability')
    @unittest.skipIf("systemd" == oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager"), "Not appropiate for systemd image")
    def test_skeleton_script(self):
        # Start the service, then confirm the daemon shows up in ps output.
        output1 = self.target.run("/etc/init.d/skeleton start")[1]
        (status, output2) = self.target.run(oeRuntimeTest.pscmd + ' | grep [s]keleton-test')
        self.assertEqual(status, 0, msg = "Skeleton script could not be started:\n%s\n%s" % (output1, output2))
diff --git a/meta/lib/oeqa/runtime/smart.py b/meta/lib/oeqa/runtime/smart.py new file mode 100644 index 0000000000..c3fdf7d499 --- /dev/null +++ b/meta/lib/oeqa/runtime/smart.py | |||
@@ -0,0 +1,108 @@ | |||
1 | import unittest | ||
2 | import re | ||
3 | from oeqa.oetest import oeRuntimeTest | ||
4 | from oeqa.utils.decorators import * | ||
5 | from oeqa.utils.httpserver import HTTPService | ||
6 | |||
def setUpModule():
    """Skip unless the image has package management and smart installed."""
    # This module only imports oeRuntimeTest from oeqa.oetest; skipModule is
    # otherwise available solely via star-import leakage from the decorators
    # module. Import it explicitly so the skip cannot NameError.
    from oeqa.oetest import skipModule
    if not oeRuntimeTest.hasFeature("package-management"):
        skipModule("Image doesn't have package management feature")
    if not oeRuntimeTest.hasPackage("smart"):
        skipModule("Image doesn't have smart installed")
12 | |||
class SmartTest(oeRuntimeTest):
    """Base class: run a 'smart' command on the target and assert its status."""

    @skipUnlessPassed('test_smart_help')
    def smart(self, command, expected = 0):
        """Run 'smart <command>' and assert it exits with `expected` status.

        Returns the command's output so callers can inspect it.
        """
        # 'os' is never imported at module level in this file and is only
        # reachable via star-import leakage; import it explicitly here.
        import os
        command = 'smart %s' % command
        status, output = self.target.run(command, 1500)
        message = os.linesep.join([command, output])
        self.assertEqual(status, expected, message)
        # An OOM while running smart is a failure even if the status matches.
        self.assertFalse("Cannot allocate memory" in output, message)
        return output
23 | |||
24 | class SmartBasicTest(SmartTest): | ||
25 | |||
26 | @skipUnlessPassed('test_ssh') | ||
27 | def test_smart_help(self): | ||
28 | self.smart('--help') | ||
29 | |||
30 | def test_smart_version(self): | ||
31 | self.smart('--version') | ||
32 | |||
33 | def test_smart_info(self): | ||
34 | self.smart('info python-smartpm') | ||
35 | |||
36 | def test_smart_query(self): | ||
37 | self.smart('query python-smartpm') | ||
38 | |||
39 | def test_smart_search(self): | ||
40 | self.smart('search python-smartpm') | ||
41 | |||
42 | def test_smart_stats(self): | ||
43 | self.smart('stats') | ||
44 | |||
class SmartRepoTest(SmartTest):
    """Channel management and package install tests against an rpm-md repo
    served over HTTP from the build host's deploy directory."""

    @classmethod
    def setUpClass(cls):
        # PEP 8: the first argument of a classmethod is 'cls', not 'self'.
        cls.repo_server = HTTPService(oeRuntimeTest.tc.d.getVar('DEPLOY_DIR', True), oeRuntimeTest.tc.qemu.host_ip)
        cls.repo_server.start()

    @classmethod
    def tearDownClass(cls):
        cls.repo_server.stop()

    def test_smart_channel(self):
        # With no channels configured 'smart channel' exits non-zero.
        self.smart('channel', 1)

    def test_smart_channel_add(self):
        # 'os' is never imported at module level in this file; import it
        # explicitly rather than relying on star-import leakage.
        import os
        image_pkgtype = self.tc.d.getVar('IMAGE_PKGTYPE', True)
        deploy_url = 'http://%s:%s/%s' %(self.tc.qemu.host_ip, self.repo_server.port, image_pkgtype)
        pkgarchs = self.tc.d.getVar('PACKAGE_ARCHS', True)
        # Add one channel per deployed package arch the target supports.
        for arch in os.listdir('%s/%s' % (self.repo_server.root_dir, image_pkgtype)):
            if arch in pkgarchs:
                self.smart('channel -y --add {a} type=rpm-md baseurl={u}/{a}'.format(a=arch, u=deploy_url))
        self.smart('update')

    def test_smart_channel_help(self):
        self.smart('channel --help')

    def test_smart_channel_list(self):
        self.smart('channel --list')

    def test_smart_channel_show(self):
        self.smart('channel --show')

    def test_smart_channel_rpmsys(self):
        self.smart('channel --show rpmsys')
        self.smart('channel --disable rpmsys')
        self.smart('channel --enable rpmsys')

    @skipUnlessPassed('test_smart_channel_add')
    def test_smart_install(self):
        self.smart('remove -y psplash-default')
        self.smart('install -y psplash-default')

    @skipUnlessPassed('test_smart_install')
    def test_smart_install_dependency(self):
        # Removing the dependency and reinstalling the dependent package
        # must pull the dependency back in.
        self.smart('remove -y psplash')
        self.smart('install -y psplash-default')

    @skipUnlessPassed('test_smart_channel_add')
    def test_smart_install_from_disk(self):
        self.smart('remove -y psplash-default')
        self.smart('download psplash-default')
        self.smart('install -y ./psplash-default*')

    @skipUnlessPassed('test_smart_channel_add')
    def test_smart_install_from_http(self):
        output = self.smart('download --urls psplash-default')
        url = re.search('(http://.*/psplash-default.*\.rpm)', output)
        self.assertTrue(url, msg="Couln't find download url in %s" % output)
        self.smart('remove -y psplash-default')
        self.smart('install -y %s' % url.group(0))

    @skipUnlessPassed('test_smart_install')
    def test_smart_reinstall(self):
        self.smart('reinstall -y psplash-default')
diff --git a/meta/lib/oeqa/runtime/ssh.py b/meta/lib/oeqa/runtime/ssh.py new file mode 100644 index 0000000000..8c96020e54 --- /dev/null +++ b/meta/lib/oeqa/runtime/ssh.py | |||
@@ -0,0 +1,16 @@ | |||
1 | import subprocess | ||
2 | import unittest | ||
3 | import sys | ||
4 | from oeqa.oetest import oeRuntimeTest, skipModule | ||
5 | from oeqa.utils.decorators import * | ||
6 | |||
def setUpModule():
    """An ssh server (dropbear or openssh) must be installed."""
    have_ssh = oeRuntimeTest.hasPackage("dropbear") or oeRuntimeTest.hasPackage("openssh")
    if not have_ssh:
        skipModule("No ssh package in image")
10 | |||
class SshTest(oeRuntimeTest):
    """Confirm a command can be executed on the target over ssh."""

    @skipUnlessPassed('test_ping')
    def test_ssh(self):
        status, uname_out = self.target.run('uname -a')
        self.assertEqual(status, 0, msg="SSH Test failed: %s" % uname_out)
diff --git a/meta/lib/oeqa/runtime/syslog.py b/meta/lib/oeqa/runtime/syslog.py new file mode 100644 index 0000000000..91d79635b7 --- /dev/null +++ b/meta/lib/oeqa/runtime/syslog.py | |||
@@ -0,0 +1,46 @@ | |||
1 | import unittest | ||
2 | from oeqa.oetest import oeRuntimeTest, skipModule | ||
3 | from oeqa.utils.decorators import * | ||
4 | |||
def setUpModule():
    """Skip the whole module when no syslog implementation is installed."""
    if oeRuntimeTest.hasPackage("syslog"):
        return
    skipModule("No syslog package in image")
8 | |||
class SyslogTest(oeRuntimeTest):
    """Basic syslogd presence checks."""

    @skipUnlessPassed("test_ssh")
    def test_syslog_help(self):
        # busybox syslogd exits 1 on --help.
        rc, out = self.target.run('/sbin/syslogd --help')
        self.assertEqual(rc, 1, msg="status and output: %s and %s" % (rc,out))

    @skipUnlessPassed("test_syslog_help")
    def test_syslog_running(self):
        # The [s]yslogd bracket trick keeps grep from matching itself.
        rc, out = self.target.run(oeRuntimeTest.pscmd + ' | grep -i [s]yslogd')
        self.assertEqual(rc, 0, msg="no syslogd process, ps output: %s" % self.target.run(oeRuntimeTest.pscmd)[1])
20 | |||
21 | |||
class SyslogTestConfig(oeRuntimeTest):
    """Configuration-level syslog tests: logger, restart, custom LOGFILE."""

    @skipUnlessPassed("test_syslog_running")
    def test_syslog_logger(self):
        # /var/log/messages on sysklogd-style setups, logread on busybox.
        (status,output) = self.target.run('logger foobar && test -e /var/log/messages && grep foobar /var/log/messages || logread | grep foobar')
        self.assertEqual(status, 0, msg="Test log string not found in /var/log/messages. Output: %s " % output)

    @skipUnlessPassed("test_syslog_running")
    def test_syslog_restart(self):
        # Pass the expand flag to getVar for consistency with the rest of
        # the suite (every other module calls getVar(..., True)).
        if "systemd" != oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager", True):
            (status,output) = self.target.run('/etc/init.d/syslog restart')
        else:
            (status,output) = self.target.run('systemctl restart syslog.service')
        # Previously the status was captured but never checked, so a failed
        # restart went unnoticed and the dependent test below ran anyway.
        self.assertEqual(status, 0, msg="Could not restart syslog service. Status and output: %s and %s" % (status,output))

    @skipUnlessPassed("test_syslog_restart")
    @skipUnlessPassed("test_syslog_logger")
    @unittest.skipIf("systemd" == oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager", True), "Not appropiate for systemd image")
    def test_syslog_startup_config(self):
        # Point busybox syslogd at a fresh logfile, restart, and confirm a
        # logged string lands there; then restore the original config.
        self.target.run('echo "LOGFILE=/var/log/test" >> /etc/syslog-startup.conf')
        (status,output) = self.target.run('/etc/init.d/syslog restart')
        self.assertEqual(status, 0, msg="Could not restart syslog service. Status and output: %s and %s" % (status,output))
        (status,output) = self.target.run('logger foobar && grep foobar /var/log/test')
        self.assertEqual(status, 0, msg="Test log string not found. Output: %s " % output)
        self.target.run("sed -i 's#LOGFILE=/var/log/test##' /etc/syslog-startup.conf")
        self.target.run('/etc/init.d/syslog restart')
diff --git a/meta/lib/oeqa/runtime/systemd.py b/meta/lib/oeqa/runtime/systemd.py new file mode 100644 index 0000000000..e4f433632f --- /dev/null +++ b/meta/lib/oeqa/runtime/systemd.py | |||
@@ -0,0 +1,59 @@ | |||
1 | import unittest | ||
2 | from oeqa.oetest import oeRuntimeTest, skipModule | ||
3 | from oeqa.utils.decorators import * | ||
4 | |||
def setUpModule():
    """Only run when systemd is both a distro feature and the init manager."""
    if not oeRuntimeTest.hasFeature("systemd"):
        skipModule("target doesn't have systemd in DISTRO_FEATURES")
    init_manager = oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager", True)
    if init_manager != "systemd":
        skipModule("systemd is not the init manager for this image")
10 | |||
11 | |||
class SystemdBasicTest(oeRuntimeTest):
    """systemctl must at least report its version."""

    @skipUnlessPassed('test_ssh')
    def test_systemd_version(self):
        rc, out = self.target.run('systemctl --version')
        self.assertEqual(rc, 0, msg="status and output: %s and %s" % (rc,out))
18 | |||
class SystemdTests(oeRuntimeTest):
    """Exercise basic systemctl operations: failed units, stop/start, enable/disable."""

    @skipUnlessPassed('test_systemd_version')
    def test_systemd_failed(self):
        # "0 loaded units listed" in --failed output means nothing failed.
        status = self.target.run('systemctl --failed | grep "0 loaded units listed"')[0]
        self.assertEqual(status, 0, msg="Failed systemd services: %s" % self.target.run('systemctl --failed')[1])

    @skipUnlessPassed('test_systemd_version')
    def test_systemd_service(self):
        status = self.target.run('systemctl list-unit-files | grep "systemd-hostnamed.service"')[0]
        self.assertEqual(status, 0, msg="systemd-hostnamed.service service is not available.")

    @skipUnlessPassed('test_systemd_service')
    def test_systemd_stop(self):
        self.target.run('systemctl stop systemd-hostnamed.service')
        status, output = self.target.run('systemctl show systemd-hostnamed.service | grep "ActiveState" | grep "=inactive"')
        self.assertEqual(status, 0, msg="systemd-hostnamed.service service could not be stopped.Status and output: %s and %s" % (status, output))

    @skipUnlessPassed('test_systemd_stop')
    @skipUnlessPassed('test_systemd_version')
    def test_systemd_start(self):
        self.target.run('systemctl start systemd-hostnamed.service')
        status, output = self.target.run('systemctl show systemd-hostnamed.service | grep "ActiveState" | grep "=active"')
        self.assertEqual(status, 0, msg="systemd-hostnamed.service service could not be started. Status and output: %s and %s" % (status, output))

    @skipUnlessPassed('test_systemd_version')
    def test_systemd_enable(self):
        self.target.run('systemctl enable machineid.service')
        status, output = self.target.run('systemctl is-enabled machineid.service')
        self.assertEqual(output, 'enabled', msg="machineid.service service could not be enabled. Status and output: %s and %s" % (status, output))

    @skipUnlessPassed('test_systemd_enable')
    def test_systemd_disable(self):
        self.target.run('systemctl disable machineid.service')
        status, output = self.target.run('systemctl is-enabled machineid.service')
        self.assertEqual(output, 'disabled', msg="machineid.service service could not be disabled. Status and output: %s and %s" % (status, output))

    @skipUnlessPassed('test_systemd_version')
    def test_systemd_list(self):
        status = self.target.run('systemctl list-unit-files')[0]
        self.assertEqual(status, 0, msg="systemctl list-unit-files command failed. Status: %s" % status)
diff --git a/meta/lib/oeqa/runtime/vnc.py b/meta/lib/oeqa/runtime/vnc.py new file mode 100644 index 0000000000..5ed10727bc --- /dev/null +++ b/meta/lib/oeqa/runtime/vnc.py | |||
@@ -0,0 +1,19 @@ | |||
1 | from oeqa.oetest import oeRuntimeTest | ||
2 | from oeqa.utils.decorators import * | ||
3 | import re | ||
4 | |||
def setUpModule():
    # Skip the whole module unless x11vnc is installed on the target.
    skipModuleUnless(oeRuntimeTest.hasPackage('x11vnc'), "No x11vnc package in image")
7 | |||
class VNCTest(oeRuntimeTest):
    """Start x11vnc and confirm it is listening on the port it advertises."""

    @skipUnlessPassed('test_ssh')
    def test_vnc(self):
        status, output = self.target.run('x11vnc -display :0 -bg -o x11vnc.log')
        self.assertEqual(status, 0, msg="x11vnc server failed to start: %s" % output)
        # x11vnc prints its chosen port as PORT=<n> on stdout.
        port_match = re.search('PORT=[0-9]*', output)
        self.assertTrue(port_match, msg="Listening port not specified in command output: %s" %output)

        vncport = port_match.group(0).split('=')[1]
        status, output = self.target.run('netstat -ntl | grep ":%s"' % vncport)
        self.assertEqual(status, 0, msg="x11vnc server not running on port %s\n\n%s" % (vncport, self.target.run('netstat -ntl; cat x11vnc.log')[1]))
diff --git a/meta/lib/oeqa/runtime/x32lib.py b/meta/lib/oeqa/runtime/x32lib.py new file mode 100644 index 0000000000..6bad201b12 --- /dev/null +++ b/meta/lib/oeqa/runtime/x32lib.py | |||
@@ -0,0 +1,17 @@ | |||
1 | import unittest | ||
2 | from oeqa.oetest import oeRuntimeTest, skipModule | ||
3 | from oeqa.utils.decorators import * | ||
4 | |||
def setUpModule():
    """Only run on images built with the x86-64-x32 tune."""
    # getVar returns None when DEFAULTTUNE is unset, and `in None` raises
    # TypeError; default to "" so the membership test below is always safe.
    defaulttune = oeRuntimeTest.tc.d.getVar("DEFAULTTUNE", True) or ""
    if "x86-64-x32" not in defaulttune:
        skipModule("DEFAULTTUNE is not set to x86-64-x32")
10 | |||
class X32libTest(oeRuntimeTest):
    """Check that binaries are x32: ELF32 class with an X86-64 machine type."""

    @skipUnlessPassed("test_ssh")
    def test_x32_file(self):
        class_ok = self.target.run("readelf -h /bin/ls | grep Class | grep ELF32")[0] == 0
        machine_ok = self.target.run("readelf -h /bin/ls | grep Machine | grep X86-64")[0] == 0
        self.assertTrue(class_ok and machine_ok, msg="/bin/ls isn't an X86-64 ELF32 binary. readelf says: %s" % self.target.run("readelf -h /bin/ls")[1])
diff --git a/meta/lib/oeqa/runtime/xorg.py b/meta/lib/oeqa/runtime/xorg.py new file mode 100644 index 0000000000..12dccd8198 --- /dev/null +++ b/meta/lib/oeqa/runtime/xorg.py | |||
@@ -0,0 +1,21 @@ | |||
1 | import unittest | ||
2 | from oeqa.oetest import oeRuntimeTest, skipModule | ||
3 | from oeqa.utils.decorators import * | ||
4 | |||
def setUpModule():
    """Only meaningful on images that include an X server."""
    if oeRuntimeTest.hasFeature("x11-base"):
        return
    skipModule("target doesn't have x11 in IMAGE_FEATURES")
8 | |||
9 | |||
class XorgTest(oeRuntimeTest):
    """Check that Xorg is running and its log is free of real errors."""

    @skipUnlessPassed('test_ssh')
    def test_xorg_running(self):
        # [X]org keeps grep from matching its own process entry.
        rc, out = self.target.run(oeRuntimeTest.pscmd + ' | grep -v xinit | grep [X]org')
        self.assertEqual(rc, 0, msg="Xorg does not appear to be running %s" % self.target.run(oeRuntimeTest.pscmd)[1])

    @skipUnlessPassed('test_ssh')
    def test_xorg_error(self):
        # Filter out known-benign (EE) lines; grep exits 1 when none remain.
        rc, out = self.target.run('cat /var/log/Xorg.0.log | grep -v "(EE) error," | grep -v "PreInit" | grep -v "evdev:" | grep -v "glx" | grep "(EE)"')
        self.assertEqual(rc, 1, msg="Errors in Xorg log: %s" % out)
21 | |||
diff --git a/meta/lib/oeqa/utils/__init__.py b/meta/lib/oeqa/utils/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/meta/lib/oeqa/utils/__init__.py | |||
diff --git a/meta/lib/oeqa/utils/decorators.py b/meta/lib/oeqa/utils/decorators.py new file mode 100644 index 0000000000..33fed5a10b --- /dev/null +++ b/meta/lib/oeqa/utils/decorators.py | |||
@@ -0,0 +1,50 @@ | |||
1 | # Copyright (C) 2013 Intel Corporation | ||
2 | # | ||
3 | # Released under the MIT license (see COPYING.MIT) | ||
4 | |||
5 | # Some custom decorators that can be used by unittests | ||
6 | # Most useful is skipUnlessPassed which can be used for | ||
7 | # creating dependencies between two test methods. | ||
8 | |||
9 | from oeqa.oetest import * | ||
10 | |||
class skipIfFailure(object):
    """Decorator: skip the wrapped test if the named testcase failed or errored.

    Takes the *name* of a previously-run test method (e.g. 'test_ssh').
    """

    def __init__(self, testcase):
        self.testcase = testcase

    def __call__(self, f):
        def wrapped_f(*args):
            # Bug fix: the original checked membership in
            # (testFailures or testErrors), which evaluates to testFailures
            # alone whenever it is non-empty, silently ignoring testErrors.
            if self.testcase in oeRuntimeTest.testFailures or \
                    self.testcase in oeRuntimeTest.testErrors:
                raise unittest.SkipTest("Testcase dependency not met: %s" % self.testcase)
            return f(*args)
        wrapped_f.__name__ = f.__name__
        return wrapped_f
23 | |||
class skipIfSkipped(object):
    """Decorator: skip the wrapped test if the named testcase was skipped."""

    def __init__(self, testcase):
        self.testcase = testcase

    def __call__(self, f):
        dependency = self.testcase

        def wrapped_f(*args):
            if dependency in oeRuntimeTest.testSkipped:
                raise unittest.SkipTest("Testcase dependency not met: %s" % dependency)
            return f(*args)

        wrapped_f.__name__ = f.__name__
        return wrapped_f
36 | |||
class skipUnlessPassed(object):
    """Decorator: run the wrapped test only if the named testcase passed.

    Skips when the dependency was itself skipped, failed, or errored.
    """

    def __init__(self, testcase):
        self.testcase = testcase

    def __call__(self, f):
        def wrapped_f(*args):
            unmet = (self.testcase in oeRuntimeTest.testSkipped
                     or self.testcase in oeRuntimeTest.testFailures
                     or self.testcase in oeRuntimeTest.testErrors)
            if unmet:
                raise unittest.SkipTest("Testcase dependency not met: %s" % self.testcase)
            return f(*args)
        wrapped_f.__name__ = f.__name__
        return wrapped_f
diff --git a/meta/lib/oeqa/utils/httpserver.py b/meta/lib/oeqa/utils/httpserver.py new file mode 100644 index 0000000000..f161a1bddd --- /dev/null +++ b/meta/lib/oeqa/utils/httpserver.py | |||
@@ -0,0 +1,33 @@ | |||
1 | import SimpleHTTPServer | ||
2 | import multiprocessing | ||
3 | import os | ||
4 | |||
class HTTPServer(SimpleHTTPServer.BaseHTTPServer.HTTPServer):
    # Python 2 stdlib HTTP server that serves files from a given directory.

    def server_start(self, root_dir):
        # SimpleHTTPRequestHandler serves from the process CWD, so chdir first.
        os.chdir(root_dir)
        # Blocks forever; intended to run in a child process (see HTTPService).
        self.serve_forever()
10 | |||
class HTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):

    def log_message(self, format_str, *args):
        # Silence per-request logging so test output stays clean.
        pass
15 | |||
class HTTPService(object):
    """Serve the files under root_dir over HTTP from a separate process."""

    def __init__(self, root_dir, host=''):
        self.root_dir = root_dir
        self.host = host
        # 0 asks the OS for a free port; replaced with the real one in start().
        self.port = 0

    def start(self):
        address = (self.host, self.port)
        self.server = HTTPServer(address, HTTPRequestHandler)
        if self.port == 0:
            # Record the ephemeral port the OS actually bound.
            self.port = self.server.server_port
        self.process = multiprocessing.Process(target=self.server.server_start,
                                               args=[self.root_dir])
        self.process.start()

    def stop(self):
        self.server.server_close()
        self.process.terminate()
        self.process.join()
diff --git a/meta/lib/oeqa/utils/qemurunner.py b/meta/lib/oeqa/utils/qemurunner.py new file mode 100644 index 0000000000..256cf3c6a8 --- /dev/null +++ b/meta/lib/oeqa/utils/qemurunner.py | |||
@@ -0,0 +1,228 @@ | |||
1 | # Copyright (C) 2013 Intel Corporation | ||
2 | # | ||
3 | # Released under the MIT license (see COPYING.MIT) | ||
4 | |||
5 | # This module provides a class for starting qemu images using runqemu. | ||
6 | # It's used by testimage.bbclass. | ||
7 | |||
8 | import subprocess | ||
9 | import os | ||
10 | import time | ||
11 | import signal | ||
12 | import re | ||
13 | import socket | ||
14 | import select | ||
15 | import bb | ||
16 | |||
class QemuRunner:
    # Starts a qemu image via the runqemu script and monitors it:
    # - watches the qemu serial console (redirected to a local TCP socket)
    #   until a login prompt appears,
    # - extracts the target IP address from the qemu kernel command line,
    # - can kill and restart the qemu process.
    # Used by testimage.bbclass.  Note: Python 2 syntax (except ..., msg).

    def __init__(self, machine, rootfs, display = None, tmpdir = None, deploy_dir_image = None, logfile = None, boottime = 400, runqemutime = 60):
        # Popen object
        self.runqemu = None

        self.machine = machine
        self.rootfs = rootfs

        # pid of the actual qemu-system-* child process (not runqemu itself)
        self.qemupid = None
        # target IP address, parsed from the kernel command line in launch()
        self.ip = None

        self.display = display
        self.tmpdir = tmpdir
        self.deploy_dir_image = deploy_dir_image
        self.logfile = logfile
        # max seconds to wait for the login banner
        self.boottime = boottime
        # max seconds to wait for the qemu process to appear (and to die in kill())
        self.runqemutime = runqemutime

        self.create_socket()

    def create_socket(self):
        # Create the listening socket that qemu's serial console is redirected
        # to (see the -serial tcp:... parameter built in launch()).

        self.bootlog = ''
        self.qemusock = None

        try:
            self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.server_socket.setblocking(0)
            # Port 0 lets the kernel pick a free port; remembered for launch().
            self.server_socket.bind(("127.0.0.1",0))
            self.server_socket.listen(2)
            self.serverport = self.server_socket.getsockname()[1]
            bb.note("Created listening socket for qemu serial console on: 127.0.0.1:%s" % self.serverport)
        except socket.error, msg:
            self.server_socket.close()
            bb.fatal("Failed to create listening socket: %s" %msg[1])


    def log(self, msg):
        # Append msg to the logfile, if one was configured.
        if self.logfile:
            with open(self.logfile, "a") as f:
                f.write("%s" % msg)

    def launch(self, qemuparams = None):
        # Start runqemu and wait (up to self.boottime seconds) for the target
        # to reach its login prompt.  Returns True on success, False otherwise.

        if self.display:
            os.environ["DISPLAY"] = self.display
        else:
            bb.error("To start qemu I need a X desktop, please set DISPLAY correctly (e.g. DISPLAY=:1)")
            return False
        if not os.path.exists(self.rootfs):
            bb.error("Invalid rootfs %s" % self.rootfs)
            return False
        if not os.path.exists(self.tmpdir):
            bb.error("Invalid TMPDIR path %s" % self.tmpdir)
            return False
        else:
            os.environ["OE_TMPDIR"] = self.tmpdir
        if not os.path.exists(self.deploy_dir_image):
            bb.error("Invalid DEPLOY_DIR_IMAGE path %s" % self.deploy_dir_image)
            return False
        else:
            os.environ["DEPLOY_DIR_IMAGE"] = self.deploy_dir_image

        # Set this flag so that Qemu doesn't do any grabs as SDL grabs interact
        # badly with screensavers.
        os.environ["QEMU_DONT_GRAB"] = "1"
        # Redirect the target's serial console to our listening socket.
        self.qemuparams = 'bootparams="console=tty1 console=ttyS0,115200n8" qemuparams="-serial tcp:127.0.0.1:%s"' % self.serverport
        if qemuparams:
            # Splice extra qemu parameters inside the trailing double quote.
            self.qemuparams = self.qemuparams[:-1] + " " + qemuparams + " " + '\"'

        launch_cmd = 'runqemu %s %s %s' % (self.machine, self.rootfs, self.qemuparams)
        # setpgrp puts runqemu in its own process group so kill() can signal
        # the whole group (runqemu plus the qemu process it spawns).
        self.runqemu = subprocess.Popen(launch_cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT,preexec_fn=os.setpgrp)

        bb.note("runqemu started, pid is %s" % self.runqemu.pid)
        bb.note("waiting at most %s seconds for qemu pid" % self.runqemutime)
        endtime = time.time() + self.runqemutime
        while not self.is_alive() and time.time() < endtime:
            time.sleep(1)

        if self.is_alive():
            bb.note("qemu started - qemu procces pid is %s" % self.qemupid)
            # Parse the target IP out of the ip=... kernel argument of qemu.
            cmdline = open('/proc/%s/cmdline' % self.qemupid).read()
            self.ip, _, self.host_ip = cmdline.split('ip=')[1].split(' ')[0].split(':')[0:3]
            if not re.search("^((?:[0-9]{1,3}\.){3}[0-9]{1,3})$", self.ip):
                bb.note("Couldn't get ip from qemu process arguments, I got '%s'" % self.ip)
                bb.note("Here is the ps output:\n%s" % cmdline)
                self.kill()
                return False
            bb.note("IP found: %s" % self.ip)
            bb.note("Waiting at most %d seconds for login banner" % self.boottime )
            endtime = time.time() + self.boottime
            socklist = [self.server_socket]
            reachedlogin = False
            stopread = False
            # Accept the serial-console connection from qemu, then keep reading
            # until "login:" shows up in the last two lines of output.
            while time.time() < endtime and not stopread:
                sread, swrite, serror = select.select(socklist, [], [], 5)
                for sock in sread:
                    if sock is self.server_socket:
                        self.qemusock, addr = self.server_socket.accept()
                        self.qemusock.setblocking(0)
                        socklist.append(self.qemusock)
                        socklist.remove(self.server_socket)
                        bb.note("Connection from %s:%s" % addr)
                    else:
                        data = sock.recv(1024)
                        if data:
                            self.log(data)
                            self.bootlog += data
                            lastlines = "\n".join(self.bootlog.splitlines()[-2:])
                            if re.search("login:", lastlines):
                                stopread = True
                                reachedlogin = True
                                bb.note("Reached login banner")
                        else:
                            # Empty read: the console connection was closed.
                            socklist.remove(sock)
                            sock.close()
                            stopread = True

            if not reachedlogin:
                bb.note("Target didn't reached login boot in %d seconds" % self.boottime)
                lines = "\n".join(self.bootlog.splitlines()[-5:])
                bb.note("Last 5 lines of text:\n%s" % lines)
                bb.note("Check full boot log: %s" % self.logfile)
                self.kill()
                return False
        else:
            bb.note("Qemu pid didn't appeared in %s seconds" % self.runqemutime)
            output = self.runqemu.stdout
            self.kill()
            bb.note("Output from runqemu:\n%s" % output.read())
            return False

        return self.is_alive()

    def kill(self):
        # Terminate the whole runqemu process group (SIGTERM first, SIGKILL
        # after self.runqemutime seconds) and close the console socket.

        if self.runqemu:
            bb.note("Sending SIGTERM to runqemu")
            # Negative pid signals the entire process group (see os.setpgrp above).
            os.kill(-self.runqemu.pid,signal.SIGTERM)
            endtime = time.time() + self.runqemutime
            while self.runqemu.poll() is None and time.time() < endtime:
                time.sleep(1)
            if self.runqemu.poll() is None:
                bb.note("Sending SIGKILL to runqemu")
                os.kill(-self.runqemu.pid,signal.SIGKILL)
            self.runqemu = None
        if self.server_socket:
            self.server_socket.close()
            self.server_socket = None
        self.qemupid = None
        self.ip = None

    def restart(self, qemuparams = None):
        # Kill a running qemu (if any) and launch it again with a fresh
        # console socket.  Returns True if the relaunch succeeded.
        bb.note("Restarting qemu process")
        if self.runqemu.poll() is None:
            self.kill()
        self.create_socket()
        if self.launch(qemuparams):
            return True
        return False

    def is_alive(self):
        # True if a qemu-system child of runqemu exists and is still running.
        # Side effect: caches the child's pid in self.qemupid.
        qemu_child = self.find_child(str(self.runqemu.pid))
        if qemu_child:
            self.qemupid = qemu_child[0]
            if os.path.exists("/proc/" + str(self.qemupid)):
                return True
        return False

    def find_child(self,parent_pid):
        #
        # Walk the process tree from the process specified looking for a qemu-system. Return its [pid, cmd]
        #
        ps = subprocess.Popen(['ps', 'axww', '-o', 'pid,ppid,command'], stdout=subprocess.PIPE).communicate()[0]
        processes = ps.split('\n')
        nfields = len(processes[0].split()) - 1
        pids = {}
        commands = {}
        # Build ppid -> [child pids] and pid -> command maps from the ps output.
        for row in processes[1:]:
            data = row.split(None, nfields)
            if len(data) != 3:
                continue
            if data[1] not in pids:
                pids[data[1]] = []

            pids[data[1]].append(data[0])
            commands[data[0]] = data[2]

        if parent_pid not in pids:
            return []

        # Breadth-first walk collecting all descendants of parent_pid.
        parents = []
        newparents = pids[parent_pid]
        while newparents:
            next = []
            for p in newparents:
                if p in pids:
                    for n in pids[p]:
                        if n not in parents and n not in next:
                            next.append(n)
                if p not in parents:
                    parents.append(p)
            newparents = next
        #print "Children matching %s:" % str(parents)
        for p in parents:
            # Need to be careful here since runqemu-internal runs "ldd qemu-system-xxxx"
            # Also, old versions of ldd (2.11) run "LD_XXXX qemu-system-xxxx"
            basecmd = commands[p].split()[0]
            basecmd = os.path.basename(basecmd)
            if "qemu-system" in basecmd and "-serial tcp" in commands[p]:
                return [int(p),commands[p]]
diff --git a/meta/lib/oeqa/utils/sshcontrol.py b/meta/lib/oeqa/utils/sshcontrol.py new file mode 100644 index 0000000000..1539ff2a37 --- /dev/null +++ b/meta/lib/oeqa/utils/sshcontrol.py | |||
@@ -0,0 +1,109 @@ | |||
1 | # Copyright (C) 2013 Intel Corporation | ||
2 | # | ||
3 | # Released under the MIT license (see COPYING.MIT) | ||
4 | |||
5 | # Provides a class for setting up ssh connections, | ||
6 | # running commands and copying files to/from a target. | ||
7 | # It's used by testimage.bbclass and tests in lib/oeqa/runtime. | ||
8 | |||
9 | import subprocess | ||
10 | import time | ||
11 | import os | ||
12 | |||
class SSHControl(object):
    """Set up ssh connections to a target, run commands and copy files.

    Used by testimage.bbclass and tests in lib/oeqa/runtime.  Connects as
    root and disables host-key checking, since test images are throwaway.
    """

    def __init__(self, host=None, timeout=300, logfile=None):
        self.host = host            # target IP/hostname; must be set before run()
        self.timeout = timeout      # default per-command timeout in seconds
        self._starttime = None      # wall-clock start of the last command
        self._out = ''              # output of the last command
        self._ret = 126             # return code of the last command
        self.logfile = logfile
        self.ssh_options = [
                '-o', 'UserKnownHostsFile=/dev/null',
                '-o', 'StrictHostKeyChecking=no',
                '-o', 'LogLevel=ERROR'
                ]
        self.ssh = ['ssh', '-l', 'root'] + self.ssh_options

    def log(self, msg):
        # Append msg to the logfile, if one was configured.
        if self.logfile:
            with open(self.logfile, "a") as f:
                f.write("%s\n" % msg)

    def _internal_run(self, cmd):
        """Start cmd on the target via ssh and return the Popen object."""
        # We need this for a proper PATH
        cmd = ". /etc/profile; " + cmd
        command = self.ssh + [self.host, cmd]
        self.log("[Running]$ %s" % " ".join(command))
        self._starttime = time.time()
        # ssh hangs without os.setsid
        proc = subprocess.Popen(command, shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, preexec_fn=os.setsid)
        return proc

    def run(self, cmd, timeout=None):
        """Run cmd and get its return code and output.
        Let it run for timeout seconds and then terminate/kill it,
        if time is 0 will let cmd run until it finishes.
        Time can be passed to here or can be set per class instance."""

        if self.host:
            sshconn = self._internal_run(cmd)
        else:
            # Bug fix: this previously interpolated the undefined name
            # 'actualcmd', turning the intended error into a NameError.
            raise Exception("Remote IP hasn't been set: '%s'" % cmd)

        if timeout == 0:
            # No timeout requested: block until the command finishes.
            self._out = sshconn.communicate()[0]
            self._ret = sshconn.poll()
        else:
            tdelta = self.timeout if timeout is None else timeout
            endtime = self._starttime + tdelta
            while sshconn.poll() is None and time.time() < endtime:
                time.sleep(1)
            # process hasn't returned yet
            if sshconn.poll() is None:
                self._ret = 255
                sshconn.terminate()
                sshconn.kill()
                self._out = sshconn.stdout.read()
                sshconn.stdout.close()
                self._out += "\n[!!! SSH command timed out after %d seconds and it was killed]" % tdelta
            else:
                self._out = sshconn.stdout.read()
                self._ret = sshconn.poll()
        # strip the last LF so we can test the output
        self._out = self._out.rstrip()
        self.log("%s" % self._out)
        self.log("[SSH command returned after %d seconds]: %s" % (time.time() - self._starttime, self._ret))
        return (self._ret, self._out)

    def _internal_scp(self, cmd):
        """Run scp with the standard options; raise Exception on failure."""
        cmd = ['scp'] + self.ssh_options + cmd
        self.log("[Running SCP]$ %s" % " ".join(cmd))
        self._starttime = time.time()
        scpconn = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, preexec_fn=os.setsid)
        out = scpconn.communicate()[0]
        ret = scpconn.poll()
        self.log("%s" % out)
        self.log("[SCP command returned after %d seconds]: %s" % (time.time() - self._starttime, ret))
        if ret != 0:
            # we raise an exception so that tests fail in setUp and setUpClass without a need for an assert
            raise Exception("Error running %s, output: %s" % ( " ".join(cmd), out))
        return (ret, out)

    def copy_to(self, localpath, remotepath):
        """Copy a local file to remotepath on the target (as root)."""
        actualcmd = [localpath, 'root@%s:%s' % (self.host, remotepath)]
        return self._internal_scp(actualcmd)

    def copy_from(self, remotepath, localpath):
        """Copy a file from the target to localpath on the host."""
        actualcmd = ['root@%s:%s' % (self.host, remotepath), localpath]
        return self._internal_scp(actualcmd)

    def get_status(self):
        # Return code of the last run() command.
        return self._ret

    def get_output(self):
        # Output of the last run() command.
        return self._out
diff --git a/meta/lib/oeqa/utils/targetbuild.py b/meta/lib/oeqa/utils/targetbuild.py new file mode 100644 index 0000000000..9b2cf53773 --- /dev/null +++ b/meta/lib/oeqa/utils/targetbuild.py | |||
@@ -0,0 +1,63 @@ | |||
1 | # Copyright (C) 2013 Intel Corporation | ||
2 | # | ||
3 | # Released under the MIT license (see COPYING.MIT) | ||
4 | |||
5 | # Provides a class for automating build tests for projects | ||
6 | |||
7 | from oeqa.oetest import oeRuntimeTest | ||
8 | import bb.fetch2 | ||
9 | import bb.data | ||
10 | import os | ||
11 | import re | ||
12 | |||
13 | |||
class TargetBuildProject():
    """Download, unpack and build a source archive on the target device.

    Drives configure/make/make install over the target connection; used for
    toolchain sanity tests on a running image.
    """

    def __init__(self, target, uri, foldername=None):
        self.target = target      # SSHControl-like connection to the device
        self.uri = uri            # where to fetch the source archive from
        self.targetdir = "/home/root/"

        # Private copy of the datastore so fetcher settings can be tweaked.
        self.localdata = bb.data.createCopy(oeRuntimeTest.tc.d)
        bb.data.update_data(self.localdata)

        # Bug fix: self.archive was previously only set when foldername was
        # absent, making download_archive() fail with AttributeError otherwise.
        self.archive = os.path.basename(uri)
        if not foldername:
            self.fname = self._strip_archive_ext(self.archive)
        else:
            self.fname = foldername

    @staticmethod
    def _strip_archive_ext(archive):
        """Return archive with a trailing .tar.bz2/.tar.gz extension removed.

        Bug fix: the previous pattern r'.tar.bz2|tar.gz$' left the dots
        unescaped, the first alternative unanchored and the second missing
        its leading dot, so e.g. 'foo.tar.gz' became 'foo.' instead of 'foo'.
        """
        return re.sub(r'\.tar\.bz2$|\.tar\.gz$', '', archive)

    def download_archive(self):
        """Fetch the archive, copy it to the target and unpack it there."""
        try:
            # Checksums are unknown for ad-hoc test URIs.
            self.localdata.delVar("BB_STRICT_CHECKSUM")
            fetcher = bb.fetch2.Fetch([self.uri], self.localdata)
            fetcher.download()
            self.localarchive = fetcher.localpath(self.uri)
        except bb.fetch2.BBFetchException:
            raise Exception("Failed to download archive: %s" % self.uri)

        (status, output) = self.target.copy_to(self.localarchive, self.targetdir)
        if status != 0:
            raise Exception("Failed to copy archive to target, output: %s" % output)

        (status, output) = self.target.run('tar xf %s%s -C %s' % (self.targetdir, self.archive, self.targetdir))
        if status != 0:
            raise Exception("Failed to extract archive, output: %s" % output)

        # Change targetdir to project folder
        self.targetdir = self.targetdir + self.fname

    # The timeout parameter of target.run is set to 0 to make the ssh command
    # run with no timeout.
    def run_configure(self):
        return self.target.run('cd %s; ./configure' % self.targetdir, 0)[0]

    def run_make(self):
        return self.target.run('cd %s; make' % self.targetdir, 0)[0]

    def run_install(self):
        return self.target.run('cd %s; make install' % self.targetdir, 0)[0]

    def clean(self):
        """Remove the project directory from the target."""
        self.target.run('rm -rf %s' % self.targetdir)