author     Tudor Florea <tudor.florea@enea.com>  2015-10-09 22:59:03 +0200
committer  Tudor Florea <tudor.florea@enea.com>  2015-10-09 22:59:03 +0200
commit     972dcfcdbfe75dcfeb777150c136576cf1a71e99 (patch)
tree       97a61cd7e293d7ae9d56ef7ed0f81253365bb026 /meta/lib
download   poky-972dcfcdbfe75dcfeb777150c136576cf1a71e99.tar.gz
initial commit for Enea Linux 5.0 arm
Signed-off-by: Tudor Florea <tudor.florea@enea.com>
Diffstat (limited to 'meta/lib')
-rw-r--r--  meta/lib/oe/__init__.py  2
-rw-r--r--  meta/lib/oe/buildhistory_analysis.py  456
-rw-r--r--  meta/lib/oe/cachedpath.py  233
-rw-r--r--  meta/lib/oe/classextend.py  118
-rw-r--r--  meta/lib/oe/classutils.py  43
-rw-r--r--  meta/lib/oe/data.py  17
-rw-r--r--  meta/lib/oe/distro_check.py  383
-rw-r--r--  meta/lib/oe/image.py  345
-rw-r--r--  meta/lib/oe/license.py  116
-rw-r--r--  meta/lib/oe/lsb.py  81
-rw-r--r--  meta/lib/oe/maketype.py  99
-rw-r--r--  meta/lib/oe/manifest.py  345
-rw-r--r--  meta/lib/oe/package.py  99
-rw-r--r--  meta/lib/oe/package_manager.py  1797
-rw-r--r--  meta/lib/oe/packagedata.py  94
-rw-r--r--  meta/lib/oe/packagegroup.py  36
-rw-r--r--  meta/lib/oe/patch.py  447
-rw-r--r--  meta/lib/oe/path.py  243
-rw-r--r--  meta/lib/oe/prservice.py  126
-rw-r--r--  meta/lib/oe/qa.py  111
-rw-r--r--  meta/lib/oe/rootfs.py  800
-rw-r--r--  meta/lib/oe/sdk.py  326
-rw-r--r--  meta/lib/oe/sstatesig.py  276
-rw-r--r--  meta/lib/oe/terminal.py  208
-rw-r--r--  meta/lib/oe/tests/__init__.py  0
-rw-r--r--  meta/lib/oe/tests/test_license.py  68
-rw-r--r--  meta/lib/oe/tests/test_path.py  89
-rw-r--r--  meta/lib/oe/tests/test_types.py  62
-rw-r--r--  meta/lib/oe/tests/test_utils.py  51
-rw-r--r--  meta/lib/oe/types.py  153
-rw-r--r--  meta/lib/oe/utils.py  182
-rw-r--r--  meta/lib/oeqa/__init__.py  0
-rw-r--r--  meta/lib/oeqa/controllers/__init__.py  3
-rw-r--r--  meta/lib/oeqa/controllers/masterimage.py  201
-rw-r--r--  meta/lib/oeqa/controllers/testtargetloader.py  70
-rw-r--r--  meta/lib/oeqa/oetest.py  106
-rwxr-xr-x  meta/lib/oeqa/runexported.py  140
-rw-r--r--  meta/lib/oeqa/runtime/__init__.py  3
-rw-r--r--  meta/lib/oeqa/runtime/_ptest.py  124
-rw-r--r--  meta/lib/oeqa/runtime/buildcvs.py  31
-rw-r--r--  meta/lib/oeqa/runtime/buildiptables.py  31
-rw-r--r--  meta/lib/oeqa/runtime/buildsudoku.py  28
-rw-r--r--  meta/lib/oeqa/runtime/connman.py  30
-rw-r--r--  meta/lib/oeqa/runtime/date.py  23
-rw-r--r--  meta/lib/oeqa/runtime/df.py  12
-rw-r--r--  meta/lib/oeqa/runtime/dmesg.py  12
-rw-r--r--  meta/lib/oeqa/runtime/files/hellomod.c  19
-rw-r--r--  meta/lib/oeqa/runtime/files/hellomod_makefile  8
-rw-r--r--  meta/lib/oeqa/runtime/files/test.c  26
-rw-r--r--  meta/lib/oeqa/runtime/files/test.cpp  3
-rw-r--r--  meta/lib/oeqa/runtime/files/test.pl  2
-rw-r--r--  meta/lib/oeqa/runtime/files/test.py  6
-rw-r--r--  meta/lib/oeqa/runtime/files/testmakefile  5
-rw-r--r--  meta/lib/oeqa/runtime/gcc.py  46
-rw-r--r--  meta/lib/oeqa/runtime/kernelmodule.py  34
-rw-r--r--  meta/lib/oeqa/runtime/ldd.py  20
-rw-r--r--  meta/lib/oeqa/runtime/logrotate.py  28
-rw-r--r--  meta/lib/oeqa/runtime/multilib.py  18
-rw-r--r--  meta/lib/oeqa/runtime/pam.py  25
-rw-r--r--  meta/lib/oeqa/runtime/parselogs.py  178
-rw-r--r--  meta/lib/oeqa/runtime/perl.py  29
-rw-r--r--  meta/lib/oeqa/runtime/ping.py  20
-rw-r--r--  meta/lib/oeqa/runtime/python.py  34
-rw-r--r--  meta/lib/oeqa/runtime/rpm.py  53
-rw-r--r--  meta/lib/oeqa/runtime/scanelf.py  28
-rw-r--r--  meta/lib/oeqa/runtime/scp.py  22
-rw-r--r--  meta/lib/oeqa/runtime/skeletoninit.py  29
-rw-r--r--  meta/lib/oeqa/runtime/smart.py  121
-rw-r--r--  meta/lib/oeqa/runtime/ssh.py  19
-rw-r--r--  meta/lib/oeqa/runtime/syslog.py  48
-rw-r--r--  meta/lib/oeqa/runtime/systemd.py  88
-rw-r--r--  meta/lib/oeqa/runtime/vnc.py  20
-rw-r--r--  meta/lib/oeqa/runtime/x32lib.py  18
-rw-r--r--  meta/lib/oeqa/runtime/xorg.py  17
-rw-r--r--  meta/lib/oeqa/sdk/__init__.py  3
-rw-r--r--  meta/lib/oeqa/sdk/buildcvs.py  25
-rw-r--r--  meta/lib/oeqa/sdk/buildiptables.py  26
-rw-r--r--  meta/lib/oeqa/sdk/buildsudoku.py  26
-rw-r--r--  meta/lib/oeqa/selftest/__init__.py  2
-rw-r--r--  meta/lib/oeqa/selftest/_sstatetests_noauto.py  95
-rw-r--r--  meta/lib/oeqa/selftest/_toaster.py  445
-rw-r--r--  meta/lib/oeqa/selftest/base.py  131
-rw-r--r--  meta/lib/oeqa/selftest/bblayers.py  43
-rw-r--r--  meta/lib/oeqa/selftest/bbtests.py  178
-rw-r--r--  meta/lib/oeqa/selftest/buildhistory.py  45
-rw-r--r--  meta/lib/oeqa/selftest/buildoptions.py  120
-rw-r--r--  meta/lib/oeqa/selftest/oescripts.py  54
-rw-r--r--  meta/lib/oeqa/selftest/prservice.py  121
-rw-r--r--  meta/lib/oeqa/selftest/sstate.py  53
-rw-r--r--  meta/lib/oeqa/selftest/sstatetests.py  204
-rw-r--r--  meta/lib/oeqa/targetcontrol.py  199
-rw-r--r--  meta/lib/oeqa/utils/__init__.py  15
-rw-r--r--  meta/lib/oeqa/utils/commands.py  154
-rw-r--r--  meta/lib/oeqa/utils/decorators.py  158
-rw-r--r--  meta/lib/oeqa/utils/ftools.py  27
-rw-r--r--  meta/lib/oeqa/utils/httpserver.py  35
-rw-r--r--  meta/lib/oeqa/utils/logparser.py  125
-rw-r--r--  meta/lib/oeqa/utils/qemurunner.py  237
-rw-r--r--  meta/lib/oeqa/utils/sshcontrol.py  138
-rw-r--r--  meta/lib/oeqa/utils/targetbuild.py  132
100 files changed, 11975 insertions, 0 deletions
diff --git a/meta/lib/oe/__init__.py b/meta/lib/oe/__init__.py
new file mode 100644
index 0000000000..3ad9513f40
--- /dev/null
+++ b/meta/lib/oe/__init__.py
@@ -0,0 +1,2 @@
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
diff --git a/meta/lib/oe/buildhistory_analysis.py b/meta/lib/oe/buildhistory_analysis.py
new file mode 100644
index 0000000000..5395c768a3
--- /dev/null
+++ b/meta/lib/oe/buildhistory_analysis.py
@@ -0,0 +1,456 @@
# Report significant differences in the buildhistory repository since a specific revision
#
# Copyright (C) 2012 Intel Corporation
# Author: Paul Eggleton <paul.eggleton@linux.intel.com>
#
# Note: requires GitPython 0.3.1+
#
# You can use this from the command line by running scripts/buildhistory-diff
#

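# A minimal usage sketch (added for illustration, not part of the original
# module): the same report that scripts/buildhistory-diff produces can be
# generated programmatically via process_changes() below. The repository
# path here is an assumption; point it at your buildhistory directory.
#
#   import oe.buildhistory_analysis
#   changes = oe.buildhistory_analysis.process_changes('buildhistory', 'HEAD~1', 'HEAD')
#   for chg in changes:
#       print(str(chg))
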
import sys
import os.path
import difflib
import git
import re
import bb.utils


# How to display fields
list_fields = ['DEPENDS', 'RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS', 'FILES', 'FILELIST', 'USER_CLASSES', 'IMAGE_CLASSES', 'IMAGE_FEATURES', 'IMAGE_LINGUAS', 'IMAGE_INSTALL', 'BAD_RECOMMENDATIONS', 'PACKAGE_EXCLUDE']
list_order_fields = ['PACKAGES']
defaultval_map = {'PKG': 'PKG', 'PKGE': 'PE', 'PKGV': 'PV', 'PKGR': 'PR'}
numeric_fields = ['PKGSIZE', 'IMAGESIZE']
# Fields to monitor
monitor_fields = ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RREPLACES', 'RCONFLICTS', 'PACKAGES', 'FILELIST', 'PKGSIZE', 'IMAGESIZE', 'PKG']
ver_monitor_fields = ['PKGE', 'PKGV', 'PKGR']
# Percentage change to alert for numeric fields
monitor_numeric_threshold = 10
# Image files to monitor (note that image-info.txt is handled separately)
img_monitor_files = ['installed-package-names.txt', 'files-in-image.txt']
# Related context fields for reporting (note: PE, PV & PR are always reported for monitored package fields)
related_fields = {}
related_fields['RDEPENDS'] = ['DEPENDS']
related_fields['RRECOMMENDS'] = ['DEPENDS']
related_fields['FILELIST'] = ['FILES']
related_fields['PKGSIZE'] = ['FILELIST']
related_fields['files-in-image.txt'] = ['installed-package-names.txt', 'USER_CLASSES', 'IMAGE_CLASSES', 'ROOTFS_POSTPROCESS_COMMAND', 'IMAGE_POSTPROCESS_COMMAND']
related_fields['installed-package-names.txt'] = ['IMAGE_FEATURES', 'IMAGE_LINGUAS', 'IMAGE_INSTALL', 'BAD_RECOMMENDATIONS', 'NO_RECOMMENDATIONS', 'PACKAGE_EXCLUDE']


class ChangeRecord:
    def __init__(self, path, fieldname, oldvalue, newvalue, monitored):
        self.path = path
        self.fieldname = fieldname
        self.oldvalue = oldvalue
        self.newvalue = newvalue
        self.monitored = monitored
        self.related = []
        self.filechanges = None

    def __str__(self):
        return self._str_internal(True)

    def _str_internal(self, outer):
        if outer:
            if '/image-files/' in self.path:
                prefix = '%s: ' % self.path.split('/image-files/')[0]
            else:
                prefix = '%s: ' % self.path
        else:
            prefix = ''

        def pkglist_combine(depver):
            pkglist = []
            for k, v in depver.iteritems():
                if v:
                    pkglist.append("%s (%s)" % (k, v))
                else:
                    pkglist.append(k)
            return pkglist

        if self.fieldname in list_fields or self.fieldname in list_order_fields:
            if self.fieldname in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
                (depvera, depverb) = compare_pkg_lists(self.oldvalue, self.newvalue)
                aitems = pkglist_combine(depvera)
                bitems = pkglist_combine(depverb)
            else:
                aitems = self.oldvalue.split()
                bitems = self.newvalue.split()
            removed = list(set(aitems) - set(bitems))
            added = list(set(bitems) - set(aitems))

            if removed or added:
                if removed and not bitems:
                    out = '%s: removed all items "%s"' % (self.fieldname, ' '.join(removed))
                else:
                    out = '%s:%s%s' % (self.fieldname, ' removed "%s"' % ' '.join(removed) if removed else '', ' added "%s"' % ' '.join(added) if added else '')
            else:
                out = '%s changed order' % self.fieldname
        elif self.fieldname in numeric_fields:
            aval = int(self.oldvalue or 0)
            bval = int(self.newvalue or 0)
            if aval != 0:
                percentchg = ((bval - aval) / float(aval)) * 100
            else:
                percentchg = 100
            out = '%s changed from %s to %s (%s%d%%)' % (self.fieldname, self.oldvalue or "''", self.newvalue or "''", '+' if percentchg > 0 else '', percentchg)
        elif self.fieldname in defaultval_map:
            out = '%s changed from %s to %s' % (self.fieldname, self.oldvalue, self.newvalue)
            if self.fieldname == 'PKG' and '[default]' in self.newvalue:
                out += ' - may indicate debian renaming failure'
        elif self.fieldname in ['pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm']:
            if self.oldvalue and self.newvalue:
                out = '%s changed:\n  ' % self.fieldname
            elif self.newvalue:
                out = '%s added:\n  ' % self.fieldname
            elif self.oldvalue:
                out = '%s cleared:\n  ' % self.fieldname
            alines = self.oldvalue.splitlines()
            blines = self.newvalue.splitlines()
            diff = difflib.unified_diff(alines, blines, self.fieldname, self.fieldname, lineterm='')
            out += '\n  '.join(list(diff)[2:])
            out += '\n  --'
        elif self.fieldname in img_monitor_files or '/image-files/' in self.path:
            fieldname = self.fieldname
            if '/image-files/' in self.path:
                fieldname = os.path.join('/' + self.path.split('/image-files/')[1], self.fieldname)
                out = 'Changes to %s:\n  ' % fieldname
            else:
                if outer:
                    prefix = 'Changes to %s ' % self.path
                out = '(%s):\n  ' % self.fieldname
            if self.filechanges:
                out += '\n  '.join(['%s' % i for i in self.filechanges])
            else:
                alines = self.oldvalue.splitlines()
                blines = self.newvalue.splitlines()
                diff = difflib.unified_diff(alines, blines, fieldname, fieldname, lineterm='')
                out += '\n  '.join(list(diff))
                out += '\n  --'
        else:
            out = '%s changed from "%s" to "%s"' % (self.fieldname, self.oldvalue, self.newvalue)

        if self.related:
            for chg in self.related:
                if not outer and chg.fieldname in ['PE', 'PV', 'PR']:
                    continue
                for line in chg._str_internal(False).splitlines():
                    out += '\n  * %s' % line

        return '%s%s' % (prefix, out)

class FileChange:
    changetype_add = 'A'
    changetype_remove = 'R'
    changetype_type = 'T'
    changetype_perms = 'P'
    changetype_ownergroup = 'O'
    changetype_link = 'L'

    def __init__(self, path, changetype, oldvalue = None, newvalue = None):
        self.path = path
        self.changetype = changetype
        self.oldvalue = oldvalue
        self.newvalue = newvalue

    def _ftype_str(self, ftype):
        if ftype == '-':
            return 'file'
        elif ftype == 'd':
            return 'directory'
        elif ftype == 'l':
            return 'symlink'
        elif ftype == 'c':
            return 'char device'
        elif ftype == 'b':
            return 'block device'
        elif ftype == 'p':
            return 'fifo'
        elif ftype == 's':
            return 'socket'
        else:
            return 'unknown (%s)' % ftype

    def __str__(self):
        if self.changetype == self.changetype_add:
            return '%s was added' % self.path
        elif self.changetype == self.changetype_remove:
            return '%s was removed' % self.path
        elif self.changetype == self.changetype_type:
            return '%s changed type from %s to %s' % (self.path, self._ftype_str(self.oldvalue), self._ftype_str(self.newvalue))
        elif self.changetype == self.changetype_perms:
            return '%s changed permissions from %s to %s' % (self.path, self.oldvalue, self.newvalue)
        elif self.changetype == self.changetype_ownergroup:
            return '%s changed owner/group from %s to %s' % (self.path, self.oldvalue, self.newvalue)
        elif self.changetype == self.changetype_link:
            return '%s changed symlink target from %s to %s' % (self.path, self.oldvalue, self.newvalue)
        else:
            return '%s changed (unknown)' % self.path


def blob_to_dict(blob):
    alines = blob.data_stream.read().splitlines()
    adict = {}
    for line in alines:
        splitv = [i.strip() for i in line.split('=', 1)]
        if len(splitv) > 1:
            adict[splitv[0]] = splitv[1]
    return adict


def file_list_to_dict(lines):
    adict = {}
    for line in lines:
        # Leave the last few fields intact so we handle file names containing spaces
        splitv = line.split(None, 4)
        # Grab the path and remove the leading .
        path = splitv[4][1:].strip()
        # Handle symlinks
        if ' -> ' in path:
            target = path.split(' -> ')[1]
            path = path.split(' -> ')[0]
            adict[path] = splitv[0:3] + [target]
        else:
            adict[path] = splitv[0:3]
    return adict


def compare_file_lists(alines, blines):
    adict = file_list_to_dict(alines)
    bdict = file_list_to_dict(blines)
    filechanges = []
    for path, splitv in adict.iteritems():
        newsplitv = bdict.pop(path, None)
        if newsplitv:
            # Check type
            oldvalue = splitv[0][0]
            newvalue = newsplitv[0][0]
            if oldvalue != newvalue:
                filechanges.append(FileChange(path, FileChange.changetype_type, oldvalue, newvalue))
            # Check permissions
            oldvalue = splitv[0][1:]
            newvalue = newsplitv[0][1:]
            if oldvalue != newvalue:
                filechanges.append(FileChange(path, FileChange.changetype_perms, oldvalue, newvalue))
            # Check owner/group
            oldvalue = '%s/%s' % (splitv[1], splitv[2])
            newvalue = '%s/%s' % (newsplitv[1], newsplitv[2])
            if oldvalue != newvalue:
                filechanges.append(FileChange(path, FileChange.changetype_ownergroup, oldvalue, newvalue))
            # Check symlink target
            if newsplitv[0][0] == 'l':
                if len(splitv) > 3:
                    oldvalue = splitv[3]
                else:
                    oldvalue = None
                newvalue = newsplitv[3]
                if oldvalue != newvalue:
                    filechanges.append(FileChange(path, FileChange.changetype_link, oldvalue, newvalue))
        else:
            filechanges.append(FileChange(path, FileChange.changetype_remove))

    # Whatever is left over has been added
    for path in bdict:
        filechanges.append(FileChange(path, FileChange.changetype_add))

    return filechanges


def compare_lists(alines, blines):
    removed = list(set(alines) - set(blines))
    added = list(set(blines) - set(alines))

    filechanges = []
    for pkg in removed:
        filechanges.append(FileChange(pkg, FileChange.changetype_remove))
    for pkg in added:
        filechanges.append(FileChange(pkg, FileChange.changetype_add))

    return filechanges


def compare_pkg_lists(astr, bstr):
    depvera = bb.utils.explode_dep_versions2(astr)
    depverb = bb.utils.explode_dep_versions2(bstr)

    # Strip out changes where the version has increased
    remove = []
    for k in depvera:
        if k in depverb:
            dva = depvera[k]
            dvb = depverb[k]
            if dva and dvb and len(dva) == len(dvb):
                # Since length is the same, sort so that prefixes (e.g. >=) will line up
                dva.sort()
                dvb.sort()
                removeit = True
                for dvai, dvbi in zip(dva, dvb):
                    if dvai != dvbi:
                        aiprefix = dvai.split(' ')[0]
                        biprefix = dvbi.split(' ')[0]
                        if aiprefix == biprefix and aiprefix in ['>=', '=']:
                            if bb.utils.vercmp(bb.utils.split_version(dvai), bb.utils.split_version(dvbi)) > 0:
                                removeit = False
                                break
                        else:
                            removeit = False
                            break
                if removeit:
                    remove.append(k)

    for k in remove:
        depvera.pop(k)
        depverb.pop(k)

    return (depvera, depverb)


def compare_dict_blobs(path, ablob, bblob, report_all, report_ver):
    adict = blob_to_dict(ablob)
    bdict = blob_to_dict(bblob)

    pkgname = os.path.basename(path)

    defaultvals = {}
    defaultvals['PKG'] = pkgname
    defaultvals['PKGE'] = '0'

    changes = []
    keys = list(set(adict.keys()) | set(bdict.keys()) | set(defaultval_map.keys()))
    for key in keys:
        astr = adict.get(key, '')
        bstr = bdict.get(key, '')
        if key in ver_monitor_fields:
            monitored = report_ver or astr or bstr
        else:
            monitored = key in monitor_fields
        mapped_key = defaultval_map.get(key, '')
        if mapped_key:
            if not astr:
                astr = '%s [default]' % adict.get(mapped_key, defaultvals.get(key, ''))
            if not bstr:
                bstr = '%s [default]' % bdict.get(mapped_key, defaultvals.get(key, ''))

        if astr != bstr:
            if (not report_all) and key in numeric_fields:
                aval = int(astr or 0)
                bval = int(bstr or 0)
                if aval != 0:
                    percentchg = ((bval - aval) / float(aval)) * 100
                else:
                    percentchg = 100
                if abs(percentchg) < monitor_numeric_threshold:
                    continue
            elif (not report_all) and key in list_fields:
                if key == "FILELIST" and path.endswith("-dbg") and bstr.strip() != '':
                    continue
                if key in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
                    (depvera, depverb) = compare_pkg_lists(astr, bstr)
                    if depvera == depverb:
                        continue
                alist = astr.split()
                alist.sort()
                blist = bstr.split()
                blist.sort()
                # We don't care about the removal of self-dependencies
                if pkgname in alist and not pkgname in blist:
                    alist.remove(pkgname)
                if ' '.join(alist) == ' '.join(blist):
                    continue

            chg = ChangeRecord(path, key, astr, bstr, monitored)
            changes.append(chg)
    return changes


def process_changes(repopath, revision1, revision2='HEAD', report_all=False, report_ver=False):
    repo = git.Repo(repopath)
    assert repo.bare == False
    commit = repo.commit(revision1)
    diff = commit.diff(revision2)

    changes = []
    for d in diff.iter_change_type('M'):
        path = os.path.dirname(d.a_blob.path)
        if path.startswith('packages/'):
            filename = os.path.basename(d.a_blob.path)
            if filename == 'latest':
                changes.extend(compare_dict_blobs(path, d.a_blob, d.b_blob, report_all, report_ver))
            elif filename.startswith('latest.'):
                chg = ChangeRecord(path, filename, d.a_blob.data_stream.read(), d.b_blob.data_stream.read(), True)
                changes.append(chg)
        elif path.startswith('images/'):
            filename = os.path.basename(d.a_blob.path)
            if filename in img_monitor_files:
                if filename == 'files-in-image.txt':
                    alines = d.a_blob.data_stream.read().splitlines()
                    blines = d.b_blob.data_stream.read().splitlines()
                    filechanges = compare_file_lists(alines, blines)
                    if filechanges:
                        chg = ChangeRecord(path, filename, None, None, True)
                        chg.filechanges = filechanges
                        changes.append(chg)
                elif filename == 'installed-package-names.txt':
                    alines = d.a_blob.data_stream.read().splitlines()
                    blines = d.b_blob.data_stream.read().splitlines()
                    filechanges = compare_lists(alines, blines)
                    if filechanges:
                        chg = ChangeRecord(path, filename, None, None, True)
                        chg.filechanges = filechanges
                        changes.append(chg)
                else:
                    chg = ChangeRecord(path, filename, d.a_blob.data_stream.read(), d.b_blob.data_stream.read(), True)
                    changes.append(chg)
            elif filename == 'image-info.txt':
                changes.extend(compare_dict_blobs(path, d.a_blob, d.b_blob, report_all, report_ver))
            elif '/image-files/' in path:
                chg = ChangeRecord(path, filename, d.a_blob.data_stream.read(), d.b_blob.data_stream.read(), True)
                changes.append(chg)

    # Look for added preinst/postinst/prerm/postrm
    # (without reporting newly added recipes)
    addedpkgs = []
    addedchanges = []
    for d in diff.iter_change_type('A'):
        path = os.path.dirname(d.b_blob.path)
        if path.startswith('packages/'):
            filename = os.path.basename(d.b_blob.path)
            if filename == 'latest':
                addedpkgs.append(path)
            elif filename.startswith('latest.'):
                chg = ChangeRecord(path, filename[7:], '', d.b_blob.data_stream.read(), True)
                addedchanges.append(chg)
    for chg in addedchanges:
        found = False
        for pkg in addedpkgs:
            if chg.path.startswith(pkg):
                found = True
                break
        if not found:
            changes.append(chg)

    # Look for cleared preinst/postinst/prerm/postrm
    for d in diff.iter_change_type('D'):
        path = os.path.dirname(d.a_blob.path)
        if path.startswith('packages/'):
            filename = os.path.basename(d.a_blob.path)
            if filename != 'latest' and filename.startswith('latest.'):
                chg = ChangeRecord(path, filename[7:], d.a_blob.data_stream.read(), '', True)
                changes.append(chg)

    # Link related changes
    for chg in changes:
        if chg.monitored:
            for chg2 in changes:
                # (Check dirname in the case of fields from recipe info files)
                if chg.path == chg2.path or os.path.dirname(chg.path) == chg2.path:
                    if chg2.fieldname in related_fields.get(chg.fieldname, []):
                        chg.related.append(chg2)
                elif chg.path == chg2.path and chg.path.startswith('packages/') and chg2.fieldname in ['PE', 'PV', 'PR']:
                    chg.related.append(chg2)

    if report_all:
        return changes
    else:
        return [chg for chg in changes if chg.monitored]
diff --git a/meta/lib/oe/cachedpath.py b/meta/lib/oe/cachedpath.py
new file mode 100644
index 0000000000..0840cc4c3f
--- /dev/null
+++ b/meta/lib/oe/cachedpath.py
@@ -0,0 +1,233 @@
#
# Based on standard python library functions but avoid
# repeated stat calls. It's assumed the files will not change from under us
# so we can cache stat calls.
#

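# A minimal usage sketch (added for illustration, not part of the original
# module): a CachedPath instance mirrors the os.path queries while caching
# the underlying stat() results, so repeated queries on the same path avoid
# extra system calls. The '/tmp' path is just an example.
#
#   cpath = CachedPath()
#   if cpath.isdir('/tmp'):
#       st = cpath.stat('/tmp')   # served from the cache, no second stat()
#   cpath.updatecache('/tmp')     # drop cached entries if the path changed
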
import os
import errno
import stat as statmod

class CachedPath(object):
    def __init__(self):
        self.statcache = {}
        self.lstatcache = {}
        self.normpathcache = {}
        return

    def updatecache(self, x):
        x = self.normpath(x)
        if x in self.statcache:
            del self.statcache[x]
        if x in self.lstatcache:
            del self.lstatcache[x]

    def normpath(self, path):
        if path in self.normpathcache:
            return self.normpathcache[path]
        newpath = os.path.normpath(path)
        self.normpathcache[path] = newpath
        return newpath

    def _callstat(self, path):
        if path in self.statcache:
            return self.statcache[path]
        try:
            st = os.stat(path)
            self.statcache[path] = st
            return st
        except os.error:
            self.statcache[path] = False
            return False

    # We might as well call lstat and then only
    # call stat as well in the symbolic link case
    # since this turns out to be much more optimal
    # in real world usage of this cache
    def callstat(self, path):
        path = self.normpath(path)
        self.calllstat(path)
        return self.statcache[path]

    def calllstat(self, path):
        path = self.normpath(path)
        if path in self.lstatcache:
            return self.lstatcache[path]
        #bb.error("LStatpath:" + path)
        try:
            lst = os.lstat(path)
            self.lstatcache[path] = lst
            if not statmod.S_ISLNK(lst.st_mode):
                self.statcache[path] = lst
            else:
                self._callstat(path)
            return lst
        except (os.error, AttributeError):
            self.lstatcache[path] = False
            self.statcache[path] = False
            return False

    # This follows symbolic links, so both islink() and isdir() can be true
    # for the same path on systems that support symlinks
    def isfile(self, path):
        """Test whether a path is a regular file"""
        st = self.callstat(path)
        if not st:
            return False
        return statmod.S_ISREG(st.st_mode)

    # Is a path a directory?
    # This follows symbolic links, so both islink() and isdir()
    # can be true for the same path on systems that support symlinks
    def isdir(self, s):
        """Return true if the pathname refers to an existing directory."""
        st = self.callstat(s)
        if not st:
            return False
        return statmod.S_ISDIR(st.st_mode)

    def islink(self, path):
        """Test whether a path is a symbolic link"""
        st = self.calllstat(path)
        if not st:
            return False
        return statmod.S_ISLNK(st.st_mode)

    # Does a path exist?
    # This is false for dangling symbolic links on systems that support them.
    def exists(self, path):
        """Test whether a path exists. Returns False for broken symbolic links"""
        if self.callstat(path):
            return True
        return False

    def lexists(self, path):
        """Test whether a path exists. Returns True for broken symbolic links"""
        if self.calllstat(path):
            return True
        return False

    def stat(self, path):
        return self.callstat(path)

    def lstat(self, path):
        return self.calllstat(path)

    def walk(self, top, topdown=True, onerror=None, followlinks=False):
        # Matches os.walk, not os.path.walk()

        # We may not have read permission for top, in which case we can't
        # get a list of the files the directory contains. os.path.walk
        # always suppressed the exception then, rather than blow up for a
        # minor reason when (say) a thousand readable directories are still
        # left to visit. That logic is copied here.
        try:
            names = os.listdir(top)
        except os.error as err:
            if onerror is not None:
                onerror(err)
            return

        dirs, nondirs = [], []
        for name in names:
            if self.isdir(os.path.join(top, name)):
                dirs.append(name)
            else:
                nondirs.append(name)

        if topdown:
            yield top, dirs, nondirs
        for name in dirs:
            new_path = os.path.join(top, name)
            if followlinks or not self.islink(new_path):
                for x in self.walk(new_path, topdown, onerror, followlinks):
                    yield x
        if not topdown:
            yield top, dirs, nondirs

    ## realpath() related functions
    def __is_path_below(self, file, root):
        return (file + os.path.sep).startswith(root)

    def __realpath_rel(self, start, rel_path, root, loop_cnt, assume_dir):
        """Calculates real path of symlink 'start' + 'rel_path' below
        'root'; no part of 'start' below 'root' must contain symlinks. """
        have_dir = True

        for d in rel_path.split(os.path.sep):
            if not have_dir and not assume_dir:
                raise OSError(errno.ENOENT, "no such directory %s" % start)

            if d == os.path.pardir: # '..'
                if len(start) >= len(root):
                    # do not follow '..' before root
                    start = os.path.dirname(start)
                else:
                    # emit warning?
                    pass
            else:
                (start, have_dir) = self.__realpath(os.path.join(start, d),
                                                    root, loop_cnt, assume_dir)

            assert(self.__is_path_below(start, root))

        return start

    def __realpath(self, file, root, loop_cnt, assume_dir):
        while self.islink(file) and len(file) >= len(root):
            if loop_cnt == 0:
                raise OSError(errno.ELOOP, file)

            loop_cnt -= 1
            target = os.path.normpath(os.readlink(file))

            if not os.path.isabs(target):
                tdir = os.path.dirname(file)
                assert(self.__is_path_below(tdir, root))
            else:
                tdir = root

            file = self.__realpath_rel(tdir, target, root, loop_cnt, assume_dir)

        try:
            is_dir = self.isdir(file)
        except:
            is_dir = False

        return (file, is_dir)

    def realpath(self, file, root, use_physdir = True, loop_cnt = 100, assume_dir = False):
        """ Returns the canonical path of 'file' with assuming a
        toplevel 'root' directory. When 'use_physdir' is set, all
        preceding path components of 'file' will be resolved first;
        this flag should be set unless it is guaranteed that there is
        no symlink in the path. When 'assume_dir' is not set, missing
        path components will raise an ENOENT error"""

        root = os.path.normpath(root)
        file = os.path.normpath(file)

        if not root.endswith(os.path.sep):
            # letting root end with '/' makes some things easier
            root = root + os.path.sep

        if not self.__is_path_below(file, root):
            raise OSError(errno.EINVAL, "file '%s' is not below root" % file)

        try:
            if use_physdir:
                file = self.__realpath_rel(root, file[(len(root) - 1):], root, loop_cnt, assume_dir)
            else:
                file = self.__realpath(file, root, loop_cnt, assume_dir)[0]
        except OSError as e:
            if e.errno == errno.ELOOP:
                # make ELOOP more readable; without catching it, a backtrace
                # with 100s of OSError exceptions would be printed
                raise OSError(errno.ELOOP,
                              "too many recursions while resolving '%s'; loop in '%s'" %
                              (file, e.strerror))

            raise

        return file
diff --git a/meta/lib/oe/classextend.py b/meta/lib/oe/classextend.py
new file mode 100644
index 0000000000..8da87b771a
--- /dev/null
+++ b/meta/lib/oe/classextend.py
@@ -0,0 +1,118 @@
class ClassExtender(object):
    def __init__(self, extname, d):
        self.extname = extname
        self.d = d
        self.pkgs_mapping = []

    def extend_name(self, name):
        if name.startswith("kernel-") or name == "virtual/kernel":
            return name
        if name.startswith("rtld"):
            return name
        if name.endswith("-crosssdk"):
            return name
        if name.endswith("-" + self.extname):
            name = name.replace("-" + self.extname, "")
        if name.startswith("virtual/"):
            subs = name.split("/", 1)[1]
            if not subs.startswith(self.extname):
                return "virtual/" + self.extname + "-" + subs
            return name
        if not name.startswith(self.extname):
            return self.extname + "-" + name
        return name

    def map_variable(self, varname, setvar = True):
        var = self.d.getVar(varname, True)
        if not var:
            return ""
        var = var.split()
        newvar = []
        for v in var:
            newvar.append(self.extend_name(v))
        newdata = " ".join(newvar)
        if setvar:
            self.d.setVar(varname, newdata)
        return newdata

    def map_regexp_variable(self, varname, setvar = True):
        var = self.d.getVar(varname, True)
        if not var:
            return ""
        var = var.split()
        newvar = []
        for v in var:
            if v.startswith("^" + self.extname):
                newvar.append(v)
            elif v.startswith("^"):
                newvar.append("^" + self.extname + "-" + v[1:])
            else:
                newvar.append(self.extend_name(v))
        newdata = " ".join(newvar)
        if setvar:
            self.d.setVar(varname, newdata)
        return newdata

    def map_depends(self, dep):
        if dep.endswith(("-native", "-native-runtime")) or ('nativesdk-' in dep) or ('cross-canadian' in dep) or ('-crosssdk-' in dep):
            return dep
        else:
            # Do not extend names that already have a multilib prefix
            var = self.d.getVar("MULTILIB_VARIANTS", True)
            if var:
                var = var.split()
                for v in var:
                    if dep.startswith(v):
                        return dep
            return self.extend_name(dep)

    def map_depends_variable(self, varname, suffix = ""):
        # We need to preserve EXTENDPKGV so it can be expanded correctly later
        if suffix:
            varname = varname + "_" + suffix
        orig = self.d.getVar("EXTENDPKGV", False)
        self.d.setVar("EXTENDPKGV", "EXTENDPKGV")
        deps = self.d.getVar(varname, True)
        if not deps:
            self.d.setVar("EXTENDPKGV", orig)
            return
        deps = bb.utils.explode_dep_versions2(deps)
        newdeps = {}
        for dep in deps:
            newdeps[self.map_depends(dep)] = deps[dep]

        self.d.setVar(varname, bb.utils.join_deps(newdeps, False).replace("EXTENDPKGV", "${EXTENDPKGV}"))
        self.d.setVar("EXTENDPKGV", orig)

    def map_packagevars(self):
        for pkg in (self.d.getVar("PACKAGES", True).split() + [""]):
            self.map_depends_variable("RDEPENDS", pkg)
            self.map_depends_variable("RRECOMMENDS", pkg)
            self.map_depends_variable("RSUGGESTS", pkg)
            self.map_depends_variable("RPROVIDES", pkg)
            self.map_depends_variable("RREPLACES", pkg)
            self.map_depends_variable("RCONFLICTS", pkg)
            self.map_depends_variable("PKG", pkg)

    def rename_packages(self):
        for pkg in (self.d.getVar("PACKAGES", True) or "").split():
            if pkg.startswith(self.extname):
                self.pkgs_mapping.append([pkg.split(self.extname + "-")[1], pkg])
                continue
            self.pkgs_mapping.append([pkg, self.extend_name(pkg)])

        self.d.setVar("PACKAGES", " ".join([row[1] for row in self.pkgs_mapping]))

    def rename_package_variables(self, variables):
        for pkg_mapping in self.pkgs_mapping:
            for subs in variables:
                self.d.renameVar("%s_%s" % (subs, pkg_mapping[0]), "%s_%s" % (subs, pkg_mapping[1]))

class NativesdkClassExtender(ClassExtender):
    def map_depends(self, dep):
        if dep.endswith(("-gcc-initial", "-gcc", "-g++")):
            return dep + "-crosssdk"
        elif dep.endswith(("-native", "-native-runtime")) or ('nativesdk-' in dep) or ('-cross-' in dep) or ('-crosssdk-' in dep):
            return dep
        else:
            return self.extend_name(dep)
diff --git a/meta/lib/oe/classutils.py b/meta/lib/oe/classutils.py
new file mode 100644
index 0000000000..58188fdd6e
--- /dev/null
+++ b/meta/lib/oe/classutils.py
@@ -0,0 +1,43 @@
class ClassRegistry(type):
    """Maintain a registry of classes, indexed by name.

Note that this implementation requires that the names be unique, as it uses
a dictionary to hold the classes by name.

The name in the registry can be overridden via the 'name' attribute of the
class, and the 'priority' attribute controls priority. The prioritized()
method returns the registered classes in priority order.

Subclasses of ClassRegistry may define an 'implemented' property to exert
control over whether the class will be added to the registry (e.g. to keep
abstract base classes out of the registry)."""
    priority = 0
    class __metaclass__(type):
        """Give each ClassRegistry their own registry"""
        def __init__(cls, name, bases, attrs):
            cls.registry = {}
            type.__init__(cls, name, bases, attrs)

    def __init__(cls, name, bases, attrs):
        super(ClassRegistry, cls).__init__(name, bases, attrs)
        try:
            if not cls.implemented:
                return
        except AttributeError:
            pass

        try:
            cls.name
        except AttributeError:
            cls.name = name
        cls.registry[cls.name] = cls

    @classmethod
    def prioritized(tcls):
        return sorted(tcls.registry.values(),
                      key=lambda v: v.priority, reverse=True)

    def unregister(cls):
        for key in cls.registry.keys():
            if cls.registry[key] is cls:
                del cls.registry[key]
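
# A minimal usage sketch (added for illustration; the class and attribute
# names below are hypothetical, though terminal.py in this same commit uses
# ClassRegistry in roughly this way). A ClassRegistry subclass acts as a
# Python 2 metaclass; every class created with it is recorded in its own
# 'registry' dict, gated by the 'implemented' property:
#
#   class Registry(ClassRegistry):
#       command = None
#
#       @property
#       def implemented(cls):
#           return bool(cls.command)
#
#   class Base(object):
#       __metaclass__ = Registry   # command is None, so Base is not registered
#
#   class Foo(Base):
#       command = 'foo'
#       priority = 2               # Foo is registered under the name 'Foo'
#
#   # Registry.prioritized() now returns [Foo] (highest priority first).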
diff --git a/meta/lib/oe/data.py b/meta/lib/oe/data.py
new file mode 100644
index 0000000000..4cc0e02968
--- /dev/null
+++ b/meta/lib/oe/data.py
@@ -0,0 +1,17 @@
import oe.maketype

def typed_value(key, d):
    """Construct a value for the specified metadata variable, using its flags
    to determine the type and parameters for construction."""
    var_type = d.getVarFlag(key, 'type')
    flags = d.getVarFlags(key)
    if flags is not None:
        flags = dict((flag, d.expand(value))
                     for flag, value in flags.iteritems())
    else:
        flags = {}

    try:
        return oe.maketype.create(d.getVar(key, True) or '', var_type, **flags)
    except (TypeError, ValueError), exc:
        bb.msg.fatal("Data", "%s: %s" % (key, str(exc)))
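
# A minimal usage sketch (added for illustration, not part of the original
# module). It assumes a datastore variable whose 'type' flag names one of
# the types provided by oe.maketype/oe.types in this same commit; 'boolean'
# here is such an assumption, and 'FOO' is a hypothetical variable:
#
#   d.setVar('FOO', '1')
#   d.setVarFlag('FOO', 'type', 'boolean')
#   if oe.data.typed_value('FOO', d):
#       bb.note("FOO is enabled")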
diff --git a/meta/lib/oe/distro_check.py b/meta/lib/oe/distro_check.py
new file mode 100644
index 0000000000..8ed5b0ec80
--- /dev/null
+++ b/meta/lib/oe/distro_check.py
@@ -0,0 +1,383 @@
def get_links_from_url(url):
    "Return all the href links found on the web location"

    import urllib, sgmllib

    class LinksParser(sgmllib.SGMLParser):
        def parse(self, s):
            "Parse the given string 's'."
            self.feed(s)
            self.close()

        def __init__(self, verbose=0):
            "Initialise an object passing 'verbose' to the superclass."
            sgmllib.SGMLParser.__init__(self, verbose)
            self.hyperlinks = []

        def start_a(self, attributes):
            "Process a hyperlink and its 'attributes'."
            for name, value in attributes:
                if name == "href":
                    self.hyperlinks.append(value.strip('/'))

        def get_hyperlinks(self):
            "Return the list of hyperlinks."
            return self.hyperlinks

    sock = urllib.urlopen(url)
    webpage = sock.read()
    sock.close()

    linksparser = LinksParser()
    linksparser.parse(webpage)
    return linksparser.get_hyperlinks()

def find_latest_numeric_release(url):
    "Find the latest listed numeric release on the given url"
    max = 0
    maxstr = ""
    for link in get_links_from_url(url):
        try:
            release = float(link)
        except:
            release = 0
        if release > max:
            max = release
            maxstr = link
    return maxstr

def is_src_rpm(name):
    "Check if the link is pointing to a src.rpm file"
    if name[-8:] == ".src.rpm":
        return True
    else:
        return False

def package_name_from_srpm(srpm):
    "Strip out the package name from the src.rpm filename"
    strings = srpm.split('-')
    package_name = strings[0]
    for i in range(1, len(strings) - 1):
        str = strings[i]
        if not str[0].isdigit():
            package_name += '-' + str
    return package_name

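# A worked example (added for illustration; the filename below is
# hypothetical): the package name is rebuilt from the '-'-separated fields
# of the srpm filename, skipping fields whose first character is a digit
# and ignoring the final field, so
#
#   package_name_from_srpm("perl-File-Copy-1.0-2.fc20.src.rpm")
#
# keeps "perl", "File" and "Copy", skips "1.0", and returns "perl-File-Copy".
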
def clean_package_list(package_list):
    "Removes duplicate entries from the package list"
    return list(set(package_list))


def get_latest_released_meego_source_package_list():
    "Returns a list of all the names of packages in the latest meego distro"

    package_names = []
    try:
        f = open("/tmp/Meego-1.1", "r")
        for line in f:
            package_names.append(line[:-1] + ":" + "main") # Also strip the '\n' at the end
    except IOError:
        pass
    package_list = clean_package_list(package_names)
    return "1.0", package_list

def get_source_package_list_from_url(url, section):
    "Return a sectioned list of package names from a URL list"

    bb.note("Reading %s: %s" % (url, section))
    links = get_links_from_url(url)
    srpms = filter(is_src_rpm, links)
    names_list = map(package_name_from_srpm, srpms)

    new_pkgs = []
    for pkgs in names_list:
        new_pkgs.append(pkgs + ":" + section)

    return new_pkgs

def get_latest_released_fedora_source_package_list():
    "Returns a list of all the names of packages in the latest fedora distro"
    latest = find_latest_numeric_release("http://archive.fedoraproject.org/pub/fedora/linux/releases/")

    package_names = get_source_package_list_from_url("http://archive.fedoraproject.org/pub/fedora/linux/releases/%s/Fedora/source/SRPMS/" % latest, "main")

#    package_names += get_source_package_list_from_url("http://download.fedora.redhat.com/pub/fedora/linux/releases/%s/Everything/source/SPRMS/" % latest, "everything")
    package_names += get_source_package_list_from_url("http://archive.fedoraproject.org/pub/fedora/linux/updates/%s/SRPMS/" % latest, "updates")

    package_list = clean_package_list(package_names)

    return latest, package_list

def get_latest_released_opensuse_source_package_list():
    "Returns a list of all the names of packages in the latest opensuse distro"
    latest = find_latest_numeric_release("http://download.opensuse.org/source/distribution/")

    package_names = get_source_package_list_from_url("http://download.opensuse.org/source/distribution/%s/repo/oss/suse/src/" % latest, "main")
    package_names += get_source_package_list_from_url("http://download.opensuse.org/update/%s/rpm/src/" % latest, "updates")

    package_list = clean_package_list(package_names)
    return latest, package_list

def get_latest_released_mandriva_source_package_list():
    "Returns a list of all the names of packages in the latest mandriva distro"
    latest = find_latest_numeric_release("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/")
    package_names = get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/main/release/" % latest, "main")
#    package_names += get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/contrib/release/" % latest, "contrib")
    package_names += get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/main/updates/" % latest, "updates")

    package_list = clean_package_list(package_names)
    return latest, package_list

def find_latest_debian_release(url):
    "Find the latest listed debian release on the given url"

    releases = []
    for link in get_links_from_url(url):
        if link[:6] == "Debian":
            if ';' not in link:
                releases.append(link)
    releases.sort()
    try:
        return releases.pop()[6:]
    except:
        return "_NotFound_"

def get_debian_style_source_package_list(url, section):
    "Return the list of package-names stored in the debian style Sources.gz file"
    import urllib
    sock = urllib.urlopen(url)
    import tempfile
    tmpfile = tempfile.NamedTemporaryFile(mode='wb', prefix='oecore.', suffix='.tmp', delete=False)
    tmpfilename = tmpfile.name
    tmpfile.write(sock.read())
    sock.close()
    tmpfile.close()
    import gzip
    bb.note("Reading %s: %s" % (url, section))

    f = gzip.open(tmpfilename)
    package_names = []
    for line in f:
        if line[:9] == "Package: ":
            package_names.append(line[9:-1] + ":" + section) # Also strip the '\n' at the end
    os.unlink(tmpfilename)

    return package_names

def get_latest_released_debian_source_package_list():
    "Returns a list of all the names of packages in the latest debian distro"
    latest = find_latest_debian_release("http://ftp.debian.org/debian/dists/")
    url = "http://ftp.debian.org/debian/dists/stable/main/source/Sources.gz"
    package_names = get_debian_style_source_package_list(url, "main")
#    url = "http://ftp.debian.org/debian/dists/stable/contrib/source/Sources.gz"
#    package_names += get_debian_style_source_package_list(url, "contrib")
    url = "http://ftp.debian.org/debian/dists/stable-proposed-updates/main/source/Sources.gz"
    package_names += get_debian_style_source_package_list(url, "updates")
    package_list = clean_package_list(package_names)
    return latest, package_list

def find_latest_ubuntu_release(url):
    "Find the latest listed ubuntu release on the given url"
    url += "?C=M;O=D" # Descending sort by last modified
    for link in get_links_from_url(url):
        if link[-8:] == "-updates":
            return link[:-8]
    return "_NotFound_"

def get_latest_released_ubuntu_source_package_list():
    "Returns a list of all the names of packages in the latest ubuntu distro"
    latest = find_latest_ubuntu_release("http://archive.ubuntu.com/ubuntu/dists/")
    url = "http://archive.ubuntu.com/ubuntu/dists/%s/main/source/Sources.gz" % latest
    package_names = get_debian_style_source_package_list(url, "main")
#    url = "http://archive.ubuntu.com/ubuntu/dists/%s/multiverse/source/Sources.gz" % latest
#    package_names += get_debian_style_source_package_list(url, "multiverse")
#    url = "http://archive.ubuntu.com/ubuntu/dists/%s/universe/source/Sources.gz" % latest
#    package_names += get_debian_style_source_package_list(url, "universe")
    url = "http://archive.ubuntu.com/ubuntu/dists/%s-updates/main/source/Sources.gz" % latest
    package_names += get_debian_style_source_package_list(url, "updates")
    package_list = clean_package_list(package_names)
    return latest, package_list

def create_distro_packages_list(distro_check_dir):
    pkglst_dir = os.path.join(distro_check_dir, "package_lists")
    if not os.path.isdir(pkglst_dir):
        os.makedirs(pkglst_dir)
    # first clear old stuff
    for file in os.listdir(pkglst_dir):
        os.unlink(os.path.join(pkglst_dir, file))

    per_distro_functions = [
        ["Debian", get_latest_released_debian_source_package_list],
        ["Ubuntu", get_latest_released_ubuntu_source_package_list],
        ["Fedora", get_latest_released_fedora_source_package_list],
        ["OpenSuSE", get_latest_released_opensuse_source_package_list],
        ["Mandriva", get_latest_released_mandriva_source_package_list],
        ["Meego", get_latest_released_meego_source_package_list]
    ]

    from datetime import datetime
    begin = datetime.now()
    for distro in per_distro_functions:
        name = distro[0]
        release, package_list = distro[1]()
        bb.note("Distro: %s, Latest Release: %s, # src packages: %d" % (name, release, len(package_list)))
        package_list_file = os.path.join(pkglst_dir, name + "-" + release)
        f = open(package_list_file, "w+b")
        for pkg in package_list:
            f.write(pkg + "\n")
        f.close()
    end = datetime.now()
    delta = end - begin
    bb.note("package_list generation took this much time: %d seconds" % delta.seconds)

def update_distro_data(distro_check_dir, datetime):
    """
    If the distro package list data is old then rebuild it.
    The operation has to be protected by a lock so that
    only one thread performs it at a time.
    """
    if not os.path.isdir(distro_check_dir):
        try:
            bb.note("Making new directory: %s" % distro_check_dir)
            os.makedirs(distro_check_dir)
        except OSError:
            raise Exception('Unable to create directory %s' % (distro_check_dir))


    datetime_file = os.path.join(distro_check_dir, "build_datetime")
    saved_datetime = "_invalid_"
    import fcntl
    try:
        if not os.path.exists(datetime_file):
            open(datetime_file, 'w+b').close() # touch the file so that the next open won't fail

        f = open(datetime_file, "r+b")
        fcntl.lockf(f, fcntl.LOCK_EX)
        saved_datetime = f.read()
        if saved_datetime[0:8] != datetime[0:8]:
            bb.note("The build datetime did not match: saved:%s current:%s" % (saved_datetime, datetime))
            bb.note("Regenerating distro package lists")
            create_distro_packages_list(distro_check_dir)
            f.seek(0)
            f.write(datetime)

    except OSError:
        raise Exception('Unable to read/write this file: %s' % (datetime_file))
    finally:
        fcntl.lockf(f, fcntl.LOCK_UN)
        f.close()

def compare_in_distro_packages_list(distro_check_dir, d):
    if not os.path.isdir(distro_check_dir):
        raise Exception("compare_in_distro_packages_list: invalid distro_check_dir passed")

    localdata = bb.data.createCopy(d)
    pkglst_dir = os.path.join(distro_check_dir, "package_lists")
    matching_distros = []
    pn = d.getVar('PN', True)
    recipe_name = d.getVar('PN', True)
    bb.note("Checking: %s" % pn)

    trim_dict = dict({"-native": "-native", "-cross": "-cross", "-initial": "-initial"})

    if pn.find("-native") != -1:
        pnstripped = pn.split("-native")
        localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
        bb.data.update_data(localdata)
        recipe_name = pnstripped[0]

    if pn.startswith("nativesdk-"):
        pnstripped = pn.split("nativesdk-")
        localdata.setVar('OVERRIDES', "pn-" + pnstripped[1] + ":" + d.getVar('OVERRIDES', True))
        bb.data.update_data(localdata)
        recipe_name = pnstripped[1]

    if pn.find("-cross") != -1:
        pnstripped = pn.split("-cross")
        localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
        bb.data.update_data(localdata)
        recipe_name = pnstripped[0]

    if pn.find("-initial") != -1:
        pnstripped = pn.split("-initial")
        localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
        bb.data.update_data(localdata)
        recipe_name = pnstripped[0]

    bb.note("Recipe: %s" % recipe_name)
    tmp = localdata.getVar('DISTRO_PN_ALIAS', True)

    distro_exceptions = dict({"OE-Core": 'OE-Core', "OpenedHand": 'OpenedHand', "Intel": 'Intel', "Upstream": 'Upstream', "Windriver": 'Windriver', "OSPDT": 'OSPDT Approved', "Poky": 'poky'})

    if tmp:
        list = tmp.split(' ')
        for str in list:
            if str and str.find("=") == -1 and distro_exceptions[str]:
                matching_distros.append(str)

    distro_pn_aliases = {}
    if tmp:
        list = tmp.split(' ')
        for str in list:
            if str.find("=") != -1:
                (dist, pn_alias) = str.split('=')
                distro_pn_aliases[dist.strip().lower()] = pn_alias.strip()

    for file in os.listdir(pkglst_dir):
        (distro, distro_release) = file.split("-")
        f = open(os.path.join(pkglst_dir, file), "rb")
        for line in f:
            (pkg, section) = line.split(":")
            if distro.lower() in distro_pn_aliases:
                pn = distro_pn_aliases[distro.lower()]
            else:
                pn = recipe_name
            if pn == pkg:
                matching_distros.append(distro + "-" + section[:-1]) # strip the \n at the end
                f.close()
                break
        f.close()


    if tmp != None:
        list = tmp.split(' ')
        for item in list:
            matching_distros.append(item)
    bb.note("Matching: %s" % matching_distros)
    return matching_distros

def create_log_file(d, logname):
    import subprocess
    logpath = d.getVar('LOG_DIR', True)
    bb.utils.mkdirhier(logpath)
    logfn, logsuffix = os.path.splitext(logname)
    logfile = os.path.join(logpath, "%s.%s%s" % (logfn, d.getVar('DATETIME', True), logsuffix))
    if not os.path.exists(logfile):
        slogfile = os.path.join(logpath, logname)
        if os.path.exists(slogfile):
            os.remove(slogfile)
        subprocess.call("touch %s" % logfile, shell=True)
        os.symlink(logfile, slogfile)
        d.setVar('LOG_FILE', logfile)
    return logfile


def save_distro_check_result(result, datetime, result_file, d):
    pn = d.getVar('PN', True)
    logdir = d.getVar('LOG_DIR', True)
    if not logdir:
        bb.error("LOG_DIR variable is not defined, can't write the distro_check results")
        return
    if not os.path.isdir(logdir):
        os.makedirs(logdir)
    line = pn
    for i in result:
        line = line + "," + i
    f = open(result_file, "a")
    import fcntl
    fcntl.lockf(f, fcntl.LOCK_EX)
    f.seek(0, os.SEEK_END) # seek to the end of file
    f.write(line + "\n")
    fcntl.lockf(f, fcntl.LOCK_UN)
    f.close()
diff --git a/meta/lib/oe/image.py b/meta/lib/oe/image.py
new file mode 100644
index 0000000000..7e080b00dd
--- /dev/null
+++ b/meta/lib/oe/image.py
@@ -0,0 +1,345 @@
from oe.utils import execute_pre_post_process
import os
import subprocess
import multiprocessing


def generate_image(arg):
    (type, subimages, create_img_cmd) = arg

    bb.note("Running image creation script for %s: %s ..." %
            (type, create_img_cmd))

    try:
        subprocess.check_output(create_img_cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        return("Error: The image creation script '%s' returned %d:\n%s" %
               (e.cmd, e.returncode, e.output))

    return None


"""
This class will help compute IMAGE_FSTYPE dependencies and group them in batches
that can be executed in parallel.

The next example is for illustration purposes and highly unlikely to happen
in real life. It's just one of the test cases I used to test the algorithm:

For:
IMAGE_FSTYPES = "i1 i2 i3 i4 i5"
IMAGE_TYPEDEP_i4 = "i2"
IMAGE_TYPEDEP_i5 = "i6 i4"
IMAGE_TYPEDEP_i6 = "i7"
IMAGE_TYPEDEP_i7 = "i2"

We get the following list of batches that can be executed in parallel, having the
dependencies satisfied:

[['i1', 'i3', 'i2'], ['i4', 'i7'], ['i6'], ['i5']]
"""
class ImageDepGraph(object):
    def __init__(self, d):
        self.d = d
        self.graph = dict()
        self.deps_array = dict()

    def _construct_dep_graph(self, image_fstypes):
        graph = dict()

        def add_node(node):
            deps = (self.d.getVar('IMAGE_TYPEDEP_' + node, True) or "")
            if deps != "":
                graph[node] = deps

                for dep in deps.split():
                    if not dep in graph:
                        add_node(dep)
            else:
                graph[node] = ""

        for fstype in image_fstypes:
            add_node(fstype)

        return graph

    def _clean_graph(self):
        # Live and VMDK images are processed via an inherited bbclass and do
        # not get processed here. Remove them from the fstypes graph. Their
        # dependencies are already added, so no worries here.
        remove_list = (self.d.getVar('IMAGE_TYPES_MASKED', True) or "").split()

        for item in remove_list:
            self.graph.pop(item, None)

    def _compute_dependencies(self):
        """
        returns dict object of nodes with [no_of_depends_on, no_of_depended_by]
        for each node
        """
        deps_array = dict()
        for node in self.graph:
            deps_array[node] = [0, 0]

        for node in self.graph:
            deps = self.graph[node].split()
            deps_array[node][0] += len(deps)
            for dep in deps:
                deps_array[dep][1] += 1

        return deps_array

    def _sort_graph(self):
        sorted_list = []
        group = []
        for node in self.graph:
            if node not in self.deps_array:
                continue

            depends_on = self.deps_array[node][0]

            if depends_on == 0:
                group.append(node)

        if len(group) == 0 and len(self.deps_array) != 0:
            bb.fatal("possible fstype circular dependency...")

        sorted_list.append(group)

        # remove added nodes from deps_array
        for item in group:
            for node in self.graph:
                if item in self.graph[node].split():
                    self.deps_array[node][0] -= 1

            self.deps_array.pop(item, None)

        if len(self.deps_array):
            # recursive call, to find the next group
            sorted_list += self._sort_graph()

        return sorted_list

    def group_fstypes(self, image_fstypes):
        self.graph = self._construct_dep_graph(image_fstypes)

        self._clean_graph()

        self.deps_array = self._compute_dependencies()

        alltypes = [node for node in self.graph]

        return (alltypes, self._sort_graph())


class Image(ImageDepGraph):
    def __init__(self, d):
        self.d = d

        super(Image, self).__init__(d)

    def _get_rootfs_size(self):
        """compute the rootfs size"""
        rootfs_alignment = int(self.d.getVar('IMAGE_ROOTFS_ALIGNMENT', True))
        overhead_factor = float(self.d.getVar('IMAGE_OVERHEAD_FACTOR', True))
        rootfs_req_size = int(self.d.getVar('IMAGE_ROOTFS_SIZE', True))
        rootfs_extra_space = eval(self.d.getVar('IMAGE_ROOTFS_EXTRA_SPACE', True))
        rootfs_maxsize = self.d.getVar('IMAGE_ROOTFS_MAXSIZE', True)

        output = subprocess.check_output(['du', '-ks',
                                          self.d.getVar('IMAGE_ROOTFS', True)])
        size_kb = int(output.split()[0])
        base_size = size_kb * overhead_factor
        base_size = (base_size, rootfs_req_size)[base_size < rootfs_req_size] + \
            rootfs_extra_space

        if base_size != int(base_size):
            base_size = int(base_size + 1)

        base_size += rootfs_alignment - 1
        base_size -= base_size % rootfs_alignment

        # Check the rootfs size against IMAGE_ROOTFS_MAXSIZE (if set)
        if rootfs_maxsize:
            rootfs_maxsize_int = int(rootfs_maxsize)
            if base_size > rootfs_maxsize_int:
                bb.fatal("The rootfs size %d(K) exceeds the max size %d(K)" % \
                    (base_size, rootfs_maxsize_int))

        return base_size

    def _create_symlinks(self, subimages):
        """create symlinks to the newly created image"""
        deploy_dir = self.d.getVar('DEPLOY_DIR_IMAGE', True)
        img_name = self.d.getVar('IMAGE_NAME', True)
        link_name = self.d.getVar('IMAGE_LINK_NAME', True)
        manifest_name = self.d.getVar('IMAGE_MANIFEST', True)

        os.chdir(deploy_dir)

        if link_name is not None:
            for type in subimages:
                if os.path.exists(img_name + ".rootfs." + type):
                    dst = link_name + "." + type
                    src = img_name + ".rootfs." + type
                    bb.note("Creating symlink: %s -> %s" % (dst, src))
                    os.symlink(src, dst)

            if manifest_name is not None and \
                    os.path.exists(manifest_name) and \
                    not os.path.exists(link_name + ".manifest"):
                os.symlink(os.path.basename(manifest_name),
                           link_name + ".manifest")

    def _remove_old_symlinks(self):
        """remove the symlinks to old binaries"""

        if self.d.getVar('IMAGE_LINK_NAME', True):
            deploy_dir = self.d.getVar('DEPLOY_DIR_IMAGE', True)
            for img in os.listdir(deploy_dir):
                if img.find(self.d.getVar('IMAGE_LINK_NAME', True)) == 0:
                    img = os.path.join(deploy_dir, img)
                    if os.path.islink(img):
                        if self.d.getVar('RM_OLD_IMAGE', True) == "1" and \
                                os.path.exists(os.path.realpath(img)):
                            os.remove(os.path.realpath(img))

                        os.remove(img)

    """
    This function will just filter out the compressed image types from the
    fstype groups returning a (filtered_fstype_groups, cimages) tuple.
    """
    def _filter_out_compressed(self, fstype_groups):
        ctypes = self.d.getVar('COMPRESSIONTYPES', True).split()
        cimages = {}

        filtered_groups = []
        for group in fstype_groups:
            filtered_group = []
            for type in group:
                basetype = None
                for ctype in ctypes:
                    if type.endswith("." + ctype):
                        basetype = type[:-len("." + ctype)]
                        if basetype not in filtered_group:
                            filtered_group.append(basetype)
                        if basetype not in cimages:
                            cimages[basetype] = []
                        if ctype not in cimages[basetype]:
                            cimages[basetype].append(ctype)
                        break
                if not basetype and type not in filtered_group:
                    filtered_group.append(type)

            filtered_groups.append(filtered_group)

        return (filtered_groups, cimages)
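
    # A worked example (added for illustration, values assumed): with
    # COMPRESSIONTYPES = "gz bz2", the input [['ext4', 'ext4.gz']] yields
    # ([['ext4']], {'ext4': ['gz']}) - 'ext4.gz' collapses onto its 'ext4'
    # base type, and the 'gz' compression is recorded in cimages so the
    # matching COMPRESS_CMD can be appended later.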
238
239 def _get_image_types(self):
240 """returns a (types, cimages) tuple"""
241
242 alltypes, fstype_groups = self.group_fstypes(self.d.getVar('IMAGE_FSTYPES', True).split())
243
244 filtered_groups, cimages = self._filter_out_compressed(fstype_groups)
245
246 return (alltypes, filtered_groups, cimages)
247
248 def _write_script(self, type, cmds):
249 tempdir = self.d.getVar('T', True)
250 script_name = os.path.join(tempdir, "create_image." + type)
251
252 self.d.setVar('img_creation_func', '\n'.join(cmds))
253 self.d.setVarFlag('img_creation_func', 'func', 1)
254 self.d.setVarFlag('img_creation_func', 'fakeroot', 1)
255
256 with open(script_name, "w+") as script:
257 script.write("%s" % bb.build.shell_trap_code())
258 script.write("export ROOTFS_SIZE=%d\n" % self._get_rootfs_size())
259 bb.data.emit_func('img_creation_func', script, self.d)
260 script.write("img_creation_func\n")
261
262 os.chmod(script_name, 0775)
263
264 return script_name
265
266 def _get_imagecmds(self):
267 old_overrides = self.d.getVar('OVERRIDES', 0)
268
269 alltypes, fstype_groups, cimages = self._get_image_types()
270
271 image_cmd_groups = []
272
273 bb.note("The image creation groups are: %s" % str(fstype_groups))
274 for fstype_group in fstype_groups:
275 image_cmds = []
276 for type in fstype_group:
277 cmds = []
278 subimages = []
279
280 localdata = bb.data.createCopy(self.d)
281 localdata.setVar('OVERRIDES', '%s:%s' % (type, old_overrides))
282 bb.data.update_data(localdata)
283 localdata.setVar('type', type)
284
285 cmds.append("\t" + localdata.getVar("IMAGE_CMD", True))
286 cmds.append(localdata.expand("\tcd ${DEPLOY_DIR_IMAGE}"))
287
288 if type in cimages:
289 for ctype in cimages[type]:
290 cmds.append("\t" + localdata.getVar("COMPRESS_CMD_" + ctype, True))
291 subimages.append(type + "." + ctype)
292
293 if type not in alltypes:
294 cmds.append(localdata.expand("\trm ${IMAGE_NAME}.rootfs.${type}"))
295 else:
296 subimages.append(type)
297
298 script_name = self._write_script(type, cmds)
299
300 image_cmds.append((type, subimages, script_name))
301
302 image_cmd_groups.append(image_cmds)
303
304 return image_cmd_groups
305
306 def create(self):
307 bb.note("###### Generate images #######")
308 pre_process_cmds = self.d.getVar("IMAGE_PREPROCESS_COMMAND", True)
309 post_process_cmds = self.d.getVar("IMAGE_POSTPROCESS_COMMAND", True)
310
311 execute_pre_post_process(self.d, pre_process_cmds)
312
313 self._remove_old_symlinks()
314
315 image_cmd_groups = self._get_imagecmds()
316
317 for image_cmds in image_cmd_groups:
318 # create the images in parallel
319 nproc = multiprocessing.cpu_count()
320 pool = bb.utils.multiprocessingpool(nproc)
321 results = list(pool.imap(generate_image, image_cmds))
322 pool.close()
323 pool.join()
324
325 for result in results:
326 if result is not None:
327 bb.fatal(result)
328
329 for image_type, subimages, script in image_cmds:
330 bb.note("Creating symlinks for %s image ..." % image_type)
331 self._create_symlinks(subimages)
332
333 execute_pre_post_process(self.d, post_process_cmds)
334
335
336def create_image(d):
337 Image(d).create()
338
339if __name__ == "__main__":
340 """
341 Image creation can be called independent from bitbake environment.
342 """
343 """
344 TBD
345 """
diff --git a/meta/lib/oe/license.py b/meta/lib/oe/license.py
new file mode 100644
index 0000000000..340da61102
--- /dev/null
+++ b/meta/lib/oe/license.py
@@ -0,0 +1,116 @@
1# vi:sts=4:sw=4:et
2"""Code for parsing OpenEmbedded license strings"""
3
4import ast
5import re
6from fnmatch import fnmatchcase as fnmatch
7
8class LicenseError(Exception):
9 pass
10
11class LicenseSyntaxError(LicenseError):
12 def __init__(self, licensestr, exc):
13 self.licensestr = licensestr
14 self.exc = exc
15 LicenseError.__init__(self)
16
17 def __str__(self):
18 return "error in '%s': %s" % (self.licensestr, self.exc)
19
20class InvalidLicense(LicenseError):
21 def __init__(self, license):
22 self.license = license
23 LicenseError.__init__(self)
24
25 def __str__(self):
26 return "invalid characters in license '%s'" % self.license
27
28license_operator = re.compile('([&|() ])')
29license_pattern = re.compile('[a-zA-Z0-9.+_\-]+$')
30
31class LicenseVisitor(ast.NodeVisitor):
32 """Syntax tree visitor which can accept OpenEmbedded license strings"""
33 def visit_string(self, licensestr):
34 new_elements = []
35 elements = filter(lambda x: x.strip(), license_operator.split(licensestr))
36 for pos, element in enumerate(elements):
37 if license_pattern.match(element):
38 if pos > 0 and license_pattern.match(elements[pos-1]):
39 new_elements.append('&')
40 element = '"' + element + '"'
41 elif not license_operator.match(element):
42 raise InvalidLicense(element)
43 new_elements.append(element)
44
45 self.visit(ast.parse(' '.join(new_elements)))
46
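
For instance (an assumed sample input), the normalization performed above behaves as follows:

    # visit_string('GPLv2 GPLv2+ | MIT') feeds ast.parse the expression:
    #   '"GPLv2" & "GPLv2+" | "MIT"'
    # ('&' is implied between adjacent license names; each name is quoted
    # so the result parses as a valid Python expression)
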
47class FlattenVisitor(LicenseVisitor):
48 """Flatten a license tree (parsed from a string) by selecting one of each
49 set of OR options, in the way the user specifies"""
50 def __init__(self, choose_licenses):
51 self.choose_licenses = choose_licenses
52 self.licenses = []
53 LicenseVisitor.__init__(self)
54
55 def visit_Str(self, node):
56 self.licenses.append(node.s)
57
58 def visit_BinOp(self, node):
59 if isinstance(node.op, ast.BitOr):
60 left = FlattenVisitor(self.choose_licenses)
61 left.visit(node.left)
62
63 right = FlattenVisitor(self.choose_licenses)
64 right.visit(node.right)
65
66 selected = self.choose_licenses(left.licenses, right.licenses)
67 self.licenses.extend(selected)
68 else:
69 self.generic_visit(node)
70
71def flattened_licenses(licensestr, choose_licenses):
72 """Given a license string and choose_licenses function, return a flat list of licenses"""
73 flatten = FlattenVisitor(choose_licenses)
74 try:
75 flatten.visit_string(licensestr)
76 except SyntaxError as exc:
77 raise LicenseSyntaxError(licensestr, exc)
78 return flatten.licenses
79
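
A short usage sketch for flattened_licenses (assuming oe.license is importable; the chooser here simply keeps the left-hand option of every OR):

    from oe.license import flattened_licenses

    def pick_left(left, right):
        # trivial chooser: always keep the left-hand OR alternative
        return left

    print(flattened_licenses("GPLv2 GPLv2+ | MIT", pick_left))
    # -> ['GPLv2', 'GPLv2+']
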
80def is_included(licensestr, whitelist=None, blacklist=None):
81 """Given a license string and whitelist and blacklist, determine if the
82 license string matches the whitelist and does not match the blacklist.
83
84 Returns a tuple holding the boolean state and a list of the applicable
85 licenses which were excluded (or None, if the state is True)
86 """
87
88 def include_license(license):
89 return any(fnmatch(license, pattern) for pattern in whitelist)
90
91 def exclude_license(license):
92 return any(fnmatch(license, pattern) for pattern in blacklist)
93
94 def choose_licenses(alpha, beta):
95 """Select the option in an OR which is the 'best' (has the most
96 included licenses)."""
97 alpha_weight = len(filter(include_license, alpha))
98 beta_weight = len(filter(include_license, beta))
99 if alpha_weight > beta_weight:
100 return alpha
101 else:
102 return beta
103
104 if not whitelist:
105 whitelist = ['*']
106
107 if not blacklist:
108 blacklist = []
109
110 licenses = flattened_licenses(licensestr, choose_licenses)
111 excluded = filter(lambda lic: exclude_license(lic), licenses)
112 included = filter(lambda lic: include_license(lic), licenses)
113 if excluded:
114 return False, excluded
115 else:
116 return True, included
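
And a sketch of is_included in action (again assuming oe.license is importable): with the default whitelist, both sides of an '|' weigh the same and the right-hand side wins the tie, while a blacklisted license joined by '&' is reported as excluded.

    from oe.license import is_included

    print(is_included("GPLv3 | MIT", blacklist=["GPLv3"]))
    # -> (True, ['MIT'])
    print(is_included("GPLv3 & MIT", blacklist=["GPLv3"]))
    # -> (False, ['GPLv3'])
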
diff --git a/meta/lib/oe/lsb.py b/meta/lib/oe/lsb.py
new file mode 100644
index 0000000000..b53f361035
--- /dev/null
+++ b/meta/lib/oe/lsb.py
@@ -0,0 +1,81 @@
1def release_dict():
2 """Return the output of lsb_release -ir as a dictionary"""
3 from subprocess import PIPE
4
5 try:
6 output, err = bb.process.run(['lsb_release', '-ir'], stderr=PIPE)
7 except bb.process.CmdError as exc:
8 return None
9
10 data = {}
11 for line in output.splitlines():
12 try:
13 key, value = line.split(":\t", 1)
14 except ValueError:
15 continue
16 else:
17 data[key] = value
18 return data
19
20def release_dict_file():
21 """ Try to gather LSB release information manually when lsb_release tool is unavailable """
22 data = None
23 try:
24 if os.path.exists('/etc/lsb-release'):
25 data = {}
26 with open('/etc/lsb-release') as f:
27 for line in f:
28 key, value = line.split("=", 1)
29 data[key] = value.strip()
30 elif os.path.exists('/etc/redhat-release'):
31 data = {}
32 with open('/etc/redhat-release') as f:
33 distro = f.readline().strip()
34 import re
35 match = re.match(r'(.*) release (.*) \((.*)\)', distro)
36 if match:
37 data['DISTRIB_ID'] = match.group(1)
38 data['DISTRIB_RELEASE'] = match.group(2)
39 elif os.path.exists('/etc/SuSE-release'):
40 data = {}
41 data['DISTRIB_ID'] = 'SUSE LINUX'
42 with open('/etc/SuSE-release') as f:
43 for line in f:
44 if line.startswith('VERSION = '):
45 data['DISTRIB_RELEASE'] = line[10:].rstrip()
46 break
47 elif os.path.exists('/etc/os-release'):
48 data = {}
49 with open('/etc/os-release') as f:
50 for line in f:
51 if line.startswith('NAME='):
52 data['DISTRIB_ID'] = line[5:].rstrip().strip('"')
53 if line.startswith('VERSION_ID='):
54 data['DISTRIB_RELEASE'] = line[11:].rstrip().strip('"')
55 except IOError:
56 return None
57 return data
58
59def distro_identifier(adjust_hook=None):
60 """Return a distro identifier string based upon lsb_release -ri,
61 with optional adjustment via a hook"""
62
63 lsb_data = release_dict()
64 if lsb_data:
65 distro_id, release = lsb_data['Distributor ID'], lsb_data['Release']
66 else:
67 lsb_data_file = release_dict_file()
68 if lsb_data_file:
69 distro_id, release = lsb_data_file['DISTRIB_ID'], lsb_data_file.get('DISTRIB_RELEASE', None)
70 else:
71 distro_id, release = None, None
72
73 if adjust_hook:
74 distro_id, release = adjust_hook(distro_id, release)
75 if not distro_id:
76 return "Unknown"
77 if release:
78 id_str = '{0}-{1}'.format(distro_id, release)
79 else:
80 id_str = distro_id
81 return id_str.replace(' ','-').replace('/','-')
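
Example use (the output depends on the build host; the values shown are only plausible samples):

    import oe.lsb

    # e.g. 'Ubuntu-14.04' on an Ubuntu 14.04 host; spaces and '/' in the
    # detected strings are replaced with '-'
    print(oe.lsb.distro_identifier())

    # an adjust_hook may rewrite the detected values, here dropping the
    # release so only the distro name is used
    print(oe.lsb.distro_identifier(lambda distro, release: (distro, None)))
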
diff --git a/meta/lib/oe/maketype.py b/meta/lib/oe/maketype.py
new file mode 100644
index 0000000000..139f333691
--- /dev/null
+++ b/meta/lib/oe/maketype.py
@@ -0,0 +1,99 @@
1"""OpenEmbedded variable typing support
2
3Types are defined in the metadata by name, using the 'type' flag on a
4variable. Other flags may be utilized in the construction of the types. See
5the arguments of the type's factory for details.
6"""
7
8import inspect
9import types
10
11available_types = {}
12
13class MissingFlag(TypeError):
14 """A particular flag is required to construct the type, but has not been
15 provided."""
16 def __init__(self, flag, type):
17 self.flag = flag
18 self.type = type
19 TypeError.__init__(self)
20
21 def __str__(self):
22 return "Type '%s' requires flag '%s'" % (self.type, self.flag)
23
24def factory(var_type):
25 """Return the factory for a specified type."""
26 if var_type is None:
27 raise TypeError("No type specified. Valid types: %s" %
28 ', '.join(available_types))
29 try:
30 return available_types[var_type]
31 except KeyError:
32 raise TypeError("Invalid type '%s':\n Valid types: %s" %
33 (var_type, ', '.join(available_types)))
34
35def create(value, var_type, **flags):
36 """Create an object of the specified type, given the specified flags and
37 string value."""
38 obj = factory(var_type)
39 objflags = {}
40 for flag in obj.flags:
41 if flag not in flags:
42 if flag not in obj.optflags:
43 raise MissingFlag(flag, var_type)
44 else:
45 objflags[flag] = flags[flag]
46
47 return obj(value, **objflags)
48
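
A small sketch of the flag handling (the 'percent' factory below is hypothetical, not one of the registered oe.types): a factory's extra arguments become its flags, and arguments with defaults become optional flags.

    import oe.maketype as maketype

    def percent(value, max='100'):
        # hypothetical factory; 'max' is an optional flag (it has a default)
        return float(value) / float(max)

    maketype.register('percent', percent)
    print(maketype.create('50', 'percent', max='200'))  # -> 0.25
    print(maketype.create('50', 'percent'))             # -> 0.5
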
49def get_callable_args(obj):
50 """Grab all but the first argument of the specified callable, returning
51 the list, as well as the set of arguments which have default
52 values."""
53 if type(obj) is type:
54 obj = obj.__init__
55
56 args, varargs, keywords, defaults = inspect.getargspec(obj)
57 flaglist = []
58 if args:
59 if len(args) > 1 and args[0] == 'self':
60 args = args[1:]
61 flaglist.extend(args)
62
63 optional = set()
64 if defaults:
65 optional |= set(flaglist[-len(defaults):])
66 return flaglist, optional
67
68def factory_setup(name, obj):
69 """Prepare a factory for use."""
70 args, optional = get_callable_args(obj)
71 extra_args = args[1:]
72 if extra_args:
73 obj.flags, optional = extra_args, optional
74 obj.optflags = set(optional)
75 else:
76 obj.flags = obj.optflags = ()
77
78 if not hasattr(obj, 'name'):
79 obj.name = name
80
81def register(name, factory):
82 """Register a type, given its name and a factory callable.
83
84 Determines the required and optional flags from the factory's
85 arguments."""
86 factory_setup(name, factory)
87 available_types[factory.name] = factory
88
89
90# Register all our included types
91for name in dir(types):
92 if name.startswith('_'):
93 continue
94
95 obj = getattr(types, name)
96 if not callable(obj):
97 continue
98
99 register(name, obj)
diff --git a/meta/lib/oe/manifest.py b/meta/lib/oe/manifest.py
new file mode 100644
index 0000000000..42832f15d2
--- /dev/null
+++ b/meta/lib/oe/manifest.py
@@ -0,0 +1,345 @@
1from abc import ABCMeta, abstractmethod
2import os
3import re
4import bb
5
6
7class Manifest(object):
8 """
9 This is an abstract class. Do not instantiate this directly.
10 """
11 __metaclass__ = ABCMeta
12
13 PKG_TYPE_MUST_INSTALL = "mip"
14 PKG_TYPE_MULTILIB = "mlp"
15 PKG_TYPE_LANGUAGE = "lgp"
16 PKG_TYPE_ATTEMPT_ONLY = "aop"
17
18 MANIFEST_TYPE_IMAGE = "image"
19 MANIFEST_TYPE_SDK_HOST = "sdk_host"
20 MANIFEST_TYPE_SDK_TARGET = "sdk_target"
21
22 var_maps = {
23 MANIFEST_TYPE_IMAGE: {
24 "PACKAGE_INSTALL": PKG_TYPE_MUST_INSTALL,
25 "PACKAGE_INSTALL_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY,
26 "LINGUAS_INSTALL": PKG_TYPE_LANGUAGE
27 },
28 MANIFEST_TYPE_SDK_HOST: {
29 "TOOLCHAIN_HOST_TASK": PKG_TYPE_MUST_INSTALL,
30 "TOOLCHAIN_HOST_TASK_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY
31 },
32 MANIFEST_TYPE_SDK_TARGET: {
33 "TOOLCHAIN_TARGET_TASK": PKG_TYPE_MUST_INSTALL,
34 "TOOLCHAIN_TARGET_TASK_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY
35 }
36 }
37
38 INSTALL_ORDER = [
39 PKG_TYPE_LANGUAGE,
40 PKG_TYPE_MUST_INSTALL,
41 PKG_TYPE_ATTEMPT_ONLY,
42 PKG_TYPE_MULTILIB
43 ]
44
45 initial_manifest_file_header = \
46 "# This file was generated automatically and contains the packages\n" \
47 "# passed on to the package manager in order to create the rootfs.\n\n" \
48 "# Format:\n" \
49 "# <package_type>,<package_name>\n" \
50 "# where:\n" \
51 "# <package_type> can be:\n" \
52 "# 'mip' = must install package\n" \
53 "# 'aop' = attempt only package\n" \
54 "# 'mlp' = multilib package\n" \
55 "# 'lgp' = language package\n\n"
56
57 def __init__(self, d, manifest_dir=None, manifest_type=MANIFEST_TYPE_IMAGE):
58 self.d = d
59 self.manifest_type = manifest_type
60
61 if manifest_dir is None:
62 if manifest_type != self.MANIFEST_TYPE_IMAGE:
63 self.manifest_dir = self.d.getVar('SDK_DIR', True)
64 else:
65 self.manifest_dir = self.d.getVar('WORKDIR', True)
66 else:
67 self.manifest_dir = manifest_dir
68
69 bb.utils.mkdirhier(self.manifest_dir)
70
71 self.initial_manifest = os.path.join(self.manifest_dir, "%s_initial_manifest" % manifest_type)
72 self.final_manifest = os.path.join(self.manifest_dir, "%s_final_manifest" % manifest_type)
73 self.full_manifest = os.path.join(self.manifest_dir, "%s_full_manifest" % manifest_type)
74
75 # packages in the following vars will be split in 'must install' and
76 # 'multilib'
77 self.vars_to_split = ["PACKAGE_INSTALL",
78 "TOOLCHAIN_HOST_TASK",
79 "TOOLCHAIN_TARGET_TASK"]
80
81 """
82 This creates a standard initial manifest for core-image-(minimal|sato|sato-sdk).
83 This will be used for testing until the class is implemented properly!
84 """
85 def _create_dummy_initial(self):
86 image_rootfs = self.d.getVar('IMAGE_ROOTFS', True)
87 pkg_list = dict()
88 if image_rootfs.find("core-image-sato-sdk") > 0:
89 pkg_list[self.PKG_TYPE_MUST_INSTALL] = \
90 "packagegroup-core-x11-sato-games packagegroup-base-extended " \
91 "packagegroup-core-x11-sato packagegroup-core-x11-base " \
92 "packagegroup-core-sdk packagegroup-core-tools-debug " \
93 "packagegroup-core-boot packagegroup-core-tools-testapps " \
94 "packagegroup-core-eclipse-debug packagegroup-core-qt-demoapps " \
95 "apt packagegroup-core-tools-profile psplash " \
96 "packagegroup-core-standalone-sdk-target " \
97 "packagegroup-core-ssh-openssh dpkg kernel-dev"
98 pkg_list[self.PKG_TYPE_LANGUAGE] = \
99 "locale-base-en-us locale-base-en-gb"
100 elif image_rootfs.find("core-image-sato") > 0:
101 pkg_list[self.PKG_TYPE_MUST_INSTALL] = \
102 "packagegroup-core-ssh-dropbear packagegroup-core-x11-sato-games " \
103 "packagegroup-core-x11-base psplash apt dpkg packagegroup-base-extended " \
104 "packagegroup-core-x11-sato packagegroup-core-boot"
105 pkg_list['lgp'] = \
106 "locale-base-en-us locale-base-en-gb"
107 elif image_rootfs.find("core-image-minimal") > 0:
108 pkg_list[self.PKG_TYPE_MUST_INSTALL] = "run-postinsts packagegroup-core-boot"
109
110 with open(self.initial_manifest, "w+") as manifest:
111 manifest.write(self.initial_manifest_file_header)
112
113 for pkg_type in pkg_list:
114 for pkg in pkg_list[pkg_type].split():
115 manifest.write("%s,%s\n" % (pkg_type, pkg))
116
117 """
118 This will create the initial manifest which will be used by Rootfs class to
119 generate the rootfs
120 """
121 @abstractmethod
122 def create_initial(self):
123 pass
124
125 """
126 This creates the manifest after everything has been installed.
127 """
128 @abstractmethod
129 def create_final(self):
130 pass
131
132 """
133 This creates the manifest after the package in initial manifest has been
134 dummy installed. It lists all *to be installed* packages. There is no real
135 installation, just a test.
136 """
137 @abstractmethod
138 def create_full(self, pm):
139 pass
140
141 """
142 The following function parses an initial manifest and returns a dictionary
143 object with the must install, attempt only, multilib and language packages.
144 """
145 def parse_initial_manifest(self):
146 pkgs = dict()
147
148 with open(self.initial_manifest) as manifest:
149 for line in manifest.read().split('\n'):
150 comment = re.match("^#.*", line)
151 pattern = "^(%s|%s|%s|%s),(.*)$" % \
152 (self.PKG_TYPE_MUST_INSTALL,
153 self.PKG_TYPE_ATTEMPT_ONLY,
154 self.PKG_TYPE_MULTILIB,
155 self.PKG_TYPE_LANGUAGE)
156 pkg = re.match(pattern, line)
157
158 if comment is not None:
159 continue
160
161 if pkg is not None:
162 pkg_type = pkg.group(1)
163 pkg_name = pkg.group(2)
164
165 if not pkg_type in pkgs:
166 pkgs[pkg_type] = [pkg_name]
167 else:
168 pkgs[pkg_type].append(pkg_name)
169
170 return pkgs
171
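
For example, an initial manifest whose non-comment lines are the three entries below would parse into a dictionary keyed by package type (the result shown assumes exactly these lines):

    mip,packagegroup-core-boot
    mip,run-postinsts
    lgp,locale-base-en-us

parse_initial_manifest() then returns:

    {'mip': ['packagegroup-core-boot', 'run-postinsts'],
     'lgp': ['locale-base-en-us']}
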
172 '''
173 This function parses a full manifest and returns a list of the
174 packages it contains.
175 '''
176 def parse_full_manifest(self):
177 installed_pkgs = list()
178 if not os.path.exists(self.full_manifest):
179 bb.note('full manifest does not exist')
180 return installed_pkgs
181
182 with open(self.full_manifest, 'r') as manifest:
183 for pkg in manifest.read().split('\n'):
184 installed_pkgs.append(pkg.strip())
185
186 return installed_pkgs
187
188
189class RpmManifest(Manifest):
190 """
191 Returns a dictionary object with mip and mlp packages.
192 """
193 def _split_multilib(self, pkg_list):
194 pkgs = dict()
195
196 for pkg in pkg_list.split():
197 pkg_type = self.PKG_TYPE_MUST_INSTALL
198
199 ml_variants = self.d.getVar('MULTILIB_VARIANTS', True).split()
200
201 for ml_variant in ml_variants:
202 if pkg.startswith(ml_variant + '-'):
203 pkg_type = self.PKG_TYPE_MULTILIB
204
205 if not pkg_type in pkgs:
206 pkgs[pkg_type] = pkg
207 else:
208 pkgs[pkg_type] += " " + pkg
209
210 return pkgs
211
212 def create_initial(self):
213 pkgs = dict()
214
215 with open(self.initial_manifest, "w+") as manifest:
216 manifest.write(self.initial_manifest_file_header)
217
218 for var in self.var_maps[self.manifest_type]:
219 if var in self.vars_to_split:
220 split_pkgs = self._split_multilib(self.d.getVar(var, True))
221 if split_pkgs is not None:
222 pkgs = dict(pkgs.items() + split_pkgs.items())
223 else:
224 pkg_list = self.d.getVar(var, True)
225 if pkg_list is not None:
226 pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var, True)
227
228 for pkg_type in pkgs:
229 for pkg in pkgs[pkg_type].split():
230 manifest.write("%s,%s\n" % (pkg_type, pkg))
231
232 def create_final(self):
233 pass
234
235 def create_full(self, pm):
236 pass
237
238
239class OpkgManifest(Manifest):
240 """
241 Returns a dictionary object with mip and mlp packages.
242 """
243 def _split_multilib(self, pkg_list):
244 pkgs = dict()
245
246 for pkg in pkg_list.split():
247 pkg_type = self.PKG_TYPE_MUST_INSTALL
248
249 ml_variants = self.d.getVar('MULTILIB_VARIANTS', True).split()
250
251 for ml_variant in ml_variants:
252 if pkg.startswith(ml_variant + '-'):
253 pkg_type = self.PKG_TYPE_MULTILIB
254
255 if not pkg_type in pkgs:
256 pkgs[pkg_type] = pkg
257 else:
258 pkgs[pkg_type] += " " + pkg
259
260 return pkgs
261
262 def create_initial(self):
263 pkgs = dict()
264
265 with open(self.initial_manifest, "w+") as manifest:
266 manifest.write(self.initial_manifest_file_header)
267
268 for var in self.var_maps[self.manifest_type]:
269 if var in self.vars_to_split:
270 split_pkgs = self._split_multilib(self.d.getVar(var, True))
271 if split_pkgs is not None:
272 pkgs = dict(pkgs.items() + split_pkgs.items())
273 else:
274 pkg_list = self.d.getVar(var, True)
275 if pkg_list is not None:
276 pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var, True)
277
278 for pkg_type in pkgs:
279 for pkg in pkgs[pkg_type].split():
280 manifest.write("%s,%s\n" % (pkg_type, pkg))
281
282 def create_final(self):
283 pass
284
285 def create_full(self, pm):
286 if not os.path.exists(self.initial_manifest):
287 self.create_initial()
288
289 initial_manifest = self.parse_initial_manifest()
290 pkgs_to_install = list()
291 for pkg_type in initial_manifest:
292 pkgs_to_install += initial_manifest[pkg_type]
293 if len(pkgs_to_install) == 0:
294 return
295
296 output = pm.dummy_install(pkgs_to_install)
297
298 with open(self.full_manifest, 'w+') as manifest:
299 pkg_re = re.compile('^Installing ([^ ]+) [^ ].*')
300 for line in set(output.split('\n')):
301 m = pkg_re.match(line)
302 if m:
303 manifest.write(m.group(1) + '\n')
304
305 return
306
307
308class DpkgManifest(Manifest):
309 def create_initial(self):
310 with open(self.initial_manifest, "w+") as manifest:
311 manifest.write(self.initial_manifest_file_header)
312
313 for var in self.var_maps[self.manifest_type]:
314 pkg_list = self.d.getVar(var, True)
315
316 if pkg_list is None:
317 continue
318
319 for pkg in pkg_list.split():
320 manifest.write("%s,%s\n" %
321 (self.var_maps[self.manifest_type][var], pkg))
322
323 def create_final(self):
324 pass
325
326 def create_full(self, pm):
327 pass
328
329
330def create_manifest(d, final_manifest=False, manifest_dir=None,
331 manifest_type=Manifest.MANIFEST_TYPE_IMAGE):
332 manifest_map = {'rpm': RpmManifest,
333 'ipk': OpkgManifest,
334 'deb': DpkgManifest}
335
336 manifest = manifest_map[d.getVar('IMAGE_PKGTYPE', True)](d, manifest_dir, manifest_type)
337
338 if final_manifest:
339 manifest.create_final()
340 else:
341 manifest.create_initial()
342
343
344if __name__ == "__main__":
345 pass
diff --git a/meta/lib/oe/package.py b/meta/lib/oe/package.py
new file mode 100644
index 0000000000..f8b532220a
--- /dev/null
+++ b/meta/lib/oe/package.py
@@ -0,0 +1,99 @@
1def runstrip(arg):
2 # Function to strip a single file, called from split_and_strip_files below
3 # A working 'file' (one which works on the target architecture) is assumed.
4 #
5 # The elftype is a bit pattern (explained in split_and_strip_files) to tell
6 # us what type of file we're processing...
7 # 4 - executable
8 # 8 - shared library
9 # 16 - kernel module
10
11 import commands, stat, subprocess
12
13 (file, elftype, strip) = arg
14
15 newmode = None
16 if not os.access(file, os.W_OK) or not os.access(file, os.R_OK):
17 origmode = os.stat(file)[stat.ST_MODE]
18 newmode = origmode | stat.S_IWRITE | stat.S_IREAD
19 os.chmod(file, newmode)
20
21 extraflags = ""
22
23 # kernel module
24 if elftype & 16:
25 extraflags = "--strip-debug --remove-section=.comment --remove-section=.note --preserve-dates"
26 # .so and shared library
27 elif ".so" in file and elftype & 8:
28 extraflags = "--remove-section=.comment --remove-section=.note --strip-unneeded"
29 # shared or executable:
30 elif elftype & 8 or elftype & 4:
31 extraflags = "--remove-section=.comment --remove-section=.note"
32
33 stripcmd = "'%s' %s '%s'" % (strip, extraflags, file)
34 bb.debug(1, "runstrip: %s" % stripcmd)
35
36 ret = subprocess.call(stripcmd, shell=True)
37
38 if newmode:
39 os.chmod(file, origmode)
40
41 if ret:
42 bb.error("runstrip: '%s' strip command failed" % stripcmd)
43
44 return
45
46
47def file_translate(file):
48 ft = file.replace("@", "@at@")
49 ft = ft.replace(" ", "@space@")
50 ft = ft.replace("\t", "@tab@")
51 ft = ft.replace("[", "@openbrace@")
52 ft = ft.replace("]", "@closebrace@")
53 ft = ft.replace("_", "@underscore@")
54 return ft
55
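
For instance, file_translate() escapes deterministically, and because '@' is rewritten first the inserted markers are never themselves re-escaped:

    print(file_translate("/usr/lib/my file_[1].so"))
    # -> /usr/lib/my@space@file@underscore@@openbrace@1@closebrace@.so
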
56def filedeprunner(arg):
57 import re, subprocess, shlex
58
59 (pkg, pkgfiles, rpmdeps, pkgdest) = arg
60 provides = {}
61 requires = {}
62
63 r = re.compile(r'[<>=]+ +[^ ]*')
64
65 def process_deps(pipe, pkg, pkgdest, provides, requires):
66 for line in pipe:
67 f = line.split(" ", 1)[0].strip()
68 line = line.split(" ", 1)[1].strip()
69
70 if line.startswith("Requires:"):
71 i = requires
72 elif line.startswith("Provides:"):
73 i = provides
74 else:
75 continue
76
77 file = f.replace(pkgdest + "/" + pkg, "")
78 file = file_translate(file)
79 value = line.split(":", 1)[1].strip()
80 value = r.sub(r'(\g<0>)', value)
81
82 if value.startswith("rpmlib("):
83 continue
84 if value == "python":
85 continue
86 if file not in i:
87 i[file] = []
88 i[file].append(value)
89
90 return provides, requires
91
92 try:
93 dep_popen = subprocess.Popen(shlex.split(rpmdeps) + pkgfiles, stdout=subprocess.PIPE)
94 provides, requires = process_deps(dep_popen.stdout, pkg, pkgdest, provides, requires)
95 except OSError as e:
96 bb.error("rpmdeps: '%s' command failed, '%s'" % (shlex.split(rpmdeps) + pkgfiles, e))
97 raise e
98
99 return (pkg, provides, requires)
diff --git a/meta/lib/oe/package_manager.py b/meta/lib/oe/package_manager.py
new file mode 100644
index 0000000000..505509543d
--- /dev/null
+++ b/meta/lib/oe/package_manager.py
@@ -0,0 +1,1797 @@
1from abc import ABCMeta, abstractmethod
2import os
3import glob
4import subprocess
5import shutil
6import multiprocessing
7import re
8import bb
9import tempfile
10import oe.utils
11
12
13# this can be used by all PM backends to create the index files in parallel
14def create_index(arg):
15 index_cmd = arg
16
17 try:
18 bb.note("Executing '%s' ..." % index_cmd)
19 result = subprocess.check_output(index_cmd, stderr=subprocess.STDOUT, shell=True)
20 except subprocess.CalledProcessError as e:
21 return("Index creation command '%s' failed with return code %d:\n%s" %
22 (e.cmd, e.returncode, e.output))
23
24 if result:
25 bb.note(result)
26
27 return None
28
29
30class Indexer(object):
31 __metaclass__ = ABCMeta
32
33 def __init__(self, d, deploy_dir):
34 self.d = d
35 self.deploy_dir = deploy_dir
36
37 @abstractmethod
38 def write_index(self):
39 pass
40
41
42class RpmIndexer(Indexer):
43 def get_ml_prefix_and_os_list(self, arch_var=None, os_var=None):
44 package_archs = {
45 'default': [],
46 }
47
48 target_os = {
49 'default': "",
50 }
51
52 if arch_var is not None and os_var is not None:
53 package_archs['default'] = self.d.getVar(arch_var, True).split()
54 package_archs['default'].reverse()
55 target_os['default'] = self.d.getVar(os_var, True).strip()
56 else:
57 package_archs['default'] = self.d.getVar("PACKAGE_ARCHS", True).split()
58 # arch order is reversed. This ensures the -best- match is
59 # listed first!
60 package_archs['default'].reverse()
61 target_os['default'] = self.d.getVar("TARGET_OS", True).strip()
62 multilibs = self.d.getVar('MULTILIBS', True) or ""
63 for ext in multilibs.split():
64 eext = ext.split(':')
65 if len(eext) > 1 and eext[0] == 'multilib':
66 localdata = bb.data.createCopy(self.d)
67 default_tune_key = "DEFAULTTUNE_virtclass-multilib-" + eext[1]
68 default_tune = localdata.getVar(default_tune_key, False)
69 if default_tune is None:
70 default_tune_key = "DEFAULTTUNE_ML_" + eext[1]
71 default_tune = localdata.getVar(default_tune_key, False)
72 if default_tune:
73 localdata.setVar("DEFAULTTUNE", default_tune)
74 bb.data.update_data(localdata)
75 package_archs[eext[1]] = localdata.getVar('PACKAGE_ARCHS',
76 True).split()
77 package_archs[eext[1]].reverse()
78 target_os[eext[1]] = localdata.getVar("TARGET_OS",
79 True).strip()
80
81 ml_prefix_list = dict()
82 for mlib in package_archs:
83 if mlib == 'default':
84 ml_prefix_list[mlib] = package_archs[mlib]
85 else:
86 ml_prefix_list[mlib] = list()
87 for arch in package_archs[mlib]:
88 if arch in ['all', 'noarch', 'any']:
89 ml_prefix_list[mlib].append(arch)
90 else:
91 ml_prefix_list[mlib].append(mlib + "_" + arch)
92
93 return (ml_prefix_list, target_os)
94
95 def write_index(self):
96 sdk_pkg_archs = (self.d.getVar('SDK_PACKAGE_ARCHS', True) or "").replace('-', '_').split()
97 all_mlb_pkg_archs = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").replace('-', '_').split()
98
99 mlb_prefix_list = self.get_ml_prefix_and_os_list()[0]
100
101 archs = set()
102 for item in mlb_prefix_list:
103 archs = archs.union(set(i.replace('-', '_') for i in mlb_prefix_list[item]))
104
105 if len(archs) == 0:
106 archs = archs.union(set(all_mlb_pkg_archs))
107
108 archs = archs.union(set(sdk_pkg_archs))
109
110 rpm_createrepo = bb.utils.which(os.getenv('PATH'), "createrepo")
111 index_cmds = []
112 rpm_dirs_found = False
113 for arch in archs:
114 arch_dir = os.path.join(self.deploy_dir, arch)
115 if not os.path.isdir(arch_dir):
116 continue
117
118 index_cmds.append("%s --update -q %s" % (rpm_createrepo, arch_dir))
119
120 rpm_dirs_found = True
121
122 if not rpm_dirs_found:
123 bb.note("There are no packages in %s" % self.deploy_dir)
124 return
125
126 result = oe.utils.multiprocess_exec(index_cmds, create_index)
127 if result:
128 bb.fatal('%s' % ('\n'.join(result)))
129
130
131class OpkgIndexer(Indexer):
132 def write_index(self):
133 arch_vars = ["ALL_MULTILIB_PACKAGE_ARCHS",
134 "SDK_PACKAGE_ARCHS",
135 "MULTILIB_ARCHS"]
136
137 opkg_index_cmd = bb.utils.which(os.getenv('PATH'), "opkg-make-index")
138
139 if not os.path.exists(os.path.join(self.deploy_dir, "Packages")):
140 open(os.path.join(self.deploy_dir, "Packages"), "w").close()
141
142 index_cmds = []
143 for arch_var in arch_vars:
144 archs = self.d.getVar(arch_var, True)
145 if archs is None:
146 continue
147
148 for arch in archs.split():
149 pkgs_dir = os.path.join(self.deploy_dir, arch)
150 pkgs_file = os.path.join(pkgs_dir, "Packages")
151
152 if not os.path.isdir(pkgs_dir):
153 continue
154
155 if not os.path.exists(pkgs_file):
156 open(pkgs_file, "w").close()
157
158 index_cmds.append('%s -r %s -p %s -m %s' %
159 (opkg_index_cmd, pkgs_file, pkgs_file, pkgs_dir))
160
161 if len(index_cmds) == 0:
162 bb.note("There are no packages in %s!" % self.deploy_dir)
163 return
164
165 result = oe.utils.multiprocess_exec(index_cmds, create_index)
166 if result:
167 bb.fatal('%s' % ('\n'.join(result)))
168
169
170
171class DpkgIndexer(Indexer):
172 def write_index(self):
173 pkg_archs = self.d.getVar('PACKAGE_ARCHS', True)
174 if pkg_archs is not None:
175 arch_list = pkg_archs.split()
176 sdk_pkg_archs = self.d.getVar('SDK_PACKAGE_ARCHS', True)
177 if sdk_pkg_archs is not None:
178 for a in sdk_pkg_archs.split():
179 if a not in pkg_archs:
180 arch_list.append(a)
181
182 all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").replace('-', '_').split()
183 arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in arch_list)
184
185 apt_ftparchive = bb.utils.which(os.getenv('PATH'), "apt-ftparchive")
186 gzip = bb.utils.which(os.getenv('PATH'), "gzip")
187
188 index_cmds = []
189 deb_dirs_found = False
190 for arch in arch_list:
191 arch_dir = os.path.join(self.deploy_dir, arch)
192 if not os.path.isdir(arch_dir):
193 continue
194
195 cmd = "cd %s; PSEUDO_UNLOAD=1 %s packages . > Packages;" % (arch_dir, apt_ftparchive)
196
197 cmd += "%s -fc Packages > Packages.gz;" % gzip
198
199 with open(os.path.join(arch_dir, "Release"), "w+") as release:
200 release.write("Label: %s\n" % arch)
201
202 cmd += "PSEUDO_UNLOAD=1 %s release . >> Release" % apt_ftparchive
203
204 index_cmds.append(cmd)
205
206 deb_dirs_found = True
207
208 if not deb_dirs_found:
209 bb.note("There are no packages in %s" % self.deploy_dir)
210 return
211
212 result = oe.utils.multiprocess_exec(index_cmds, create_index)
213 if result:
214 bb.fatal('%s' % ('\n'.join(result)))
215
216
217
218class PkgsList(object):
219 __metaclass__ = ABCMeta
220
221 def __init__(self, d, rootfs_dir):
222 self.d = d
223 self.rootfs_dir = rootfs_dir
224
225 @abstractmethod
226 def list(self, format=None):
227 pass
228
229
230class RpmPkgsList(PkgsList):
231 def __init__(self, d, rootfs_dir, arch_var=None, os_var=None):
232 super(RpmPkgsList, self).__init__(d, rootfs_dir)
233
234 self.rpm_cmd = bb.utils.which(os.getenv('PATH'), "rpm")
235 self.image_rpmlib = os.path.join(self.rootfs_dir, 'var/lib/rpm')
236
237 self.ml_prefix_list, self.ml_os_list = \
238 RpmIndexer(d, rootfs_dir).get_ml_prefix_and_os_list(arch_var, os_var)
239
240 # Determine rpm version
241 cmd = "%s --version" % self.rpm_cmd
242 try:
243 output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
244 except subprocess.CalledProcessError as e:
245 bb.fatal("Getting rpm version failed. Command '%s' "
246 "returned %d:\n%s" % (cmd, e.returncode, e.output))
247 self.rpm_version = int(output.split()[-1].split('.')[0])
248
249 '''
250 Translate the RPM/Smart format names to the OE multilib format names
251 '''
252 def _pkg_translate_smart_to_oe(self, pkg, arch):
253 new_pkg = pkg
254 new_arch = arch
255 fixed_arch = arch.replace('_', '-')
256 found = 0
257 for mlib in self.ml_prefix_list:
258 for cmp_arch in self.ml_prefix_list[mlib]:
259 fixed_cmp_arch = cmp_arch.replace('_', '-')
260 if fixed_arch == fixed_cmp_arch:
261 if mlib == 'default':
262 new_pkg = pkg
263 new_arch = cmp_arch
264 else:
265 new_pkg = mlib + '-' + pkg
266 # We need to strip off the ${mlib}_ prefix on the arch
267 new_arch = cmp_arch.replace(mlib + '_', '')
268
269 # Workaround for bug 3565. Simply look to see if we
270 # know of a package with that name, if not try again!
271 filename = os.path.join(self.d.getVar('PKGDATA_DIR', True),
272 'runtime-reverse',
273 new_pkg)
274 if os.path.exists(filename):
275 found = 1
276 break
277
278 if found == 1 and fixed_arch == fixed_cmp_arch:
279 break
280 #bb.note('%s, %s -> %s, %s' % (pkg, arch, new_pkg, new_arch))
281 return new_pkg, new_arch
282
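
As a rough illustration of the translation above (assuming a 'lib32' multilib whose prefixed arch is lib32_armv7a, and that a matching runtime-reverse pkgdata entry exists so the result is accepted):

    # ('bash', 'lib32_armv7a')  ->  ('lib32-bash', 'armv7a')
    # ('bash', 'armv7a')        ->  ('bash', 'armv7a')    # default mlib
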
283 def _list_pkg_deps(self):
284 cmd = [bb.utils.which(os.getenv('PATH'), "rpmresolve"),
285 "-t", self.image_rpmlib]
286
287 try:
288 output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip()
289 except subprocess.CalledProcessError as e:
290 bb.fatal("Cannot get the package dependencies. Command '%s' "
291 "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output))
292
293 return output
294
295 def list(self, format=None):
296 if format == "deps":
297 if self.rpm_version == 4:
298 bb.fatal("'deps' format dependency listings are not supported with rpm 4 since rpmresolve does not work")
299 return self._list_pkg_deps()
300
301 cmd = self.rpm_cmd + ' --root ' + self.rootfs_dir
302 cmd += ' -D "_dbpath /var/lib/rpm" -qa'
303 if self.rpm_version == 4:
304 cmd += " --qf '[%{NAME} %{ARCH} %{VERSION}\n]'"
305 else:
306 cmd += " --qf '[%{NAME} %{ARCH} %{VERSION} %{PACKAGEORIGIN}\n]'"
307
308 try:
309 # bb.note(cmd)
310 tmp_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip()
311
312 except subprocess.CalledProcessError as e:
313 bb.fatal("Cannot get the installed packages list. Command '%s' "
314 "returned %d:\n%s" % (cmd, e.returncode, e.output))
315
316 output = list()
317 for line in tmp_output.split('\n'):
318 if len(line.strip()) == 0:
319 continue
320 pkg = line.split()[0]
321 arch = line.split()[1]
322 ver = line.split()[2]
323 if self.rpm_version == 4:
324 pkgorigin = "unknown"
325 else:
326 pkgorigin = line.split()[3]
327 new_pkg, new_arch = self._pkg_translate_smart_to_oe(pkg, arch)
328
329 if format == "arch":
330 output.append('%s %s' % (new_pkg, new_arch))
331 elif format == "file":
332 output.append('%s %s %s' % (new_pkg, pkgorigin, new_arch))
333 elif format == "ver":
334 output.append('%s %s %s' % (new_pkg, new_arch, ver))
335 else:
336 output.append('%s' % (new_pkg))
337
338 output.sort()
339
340 return '\n'.join(output)
341
342
343class OpkgPkgsList(PkgsList):
344 def __init__(self, d, rootfs_dir, config_file):
345 super(OpkgPkgsList, self).__init__(d, rootfs_dir)
346
347 self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg-cl")
348 self.opkg_args = "-f %s -o %s " % (config_file, rootfs_dir)
349 self.opkg_args += self.d.getVar("OPKG_ARGS", True)
350
351 def list(self, format=None):
352 opkg_query_cmd = bb.utils.which(os.getenv('PATH'), "opkg-query-helper.py")
353
354 if format == "arch":
355 cmd = "%s %s status | %s -a" % \
356 (self.opkg_cmd, self.opkg_args, opkg_query_cmd)
357 elif format == "file":
358 cmd = "%s %s status | %s -f" % \
359 (self.opkg_cmd, self.opkg_args, opkg_query_cmd)
360 elif format == "ver":
361 cmd = "%s %s status | %s -v" % \
362 (self.opkg_cmd, self.opkg_args, opkg_query_cmd)
363 elif format == "deps":
364 cmd = "%s %s status | %s" % \
365 (self.opkg_cmd, self.opkg_args, opkg_query_cmd)
366 else:
367 cmd = "%s %s list_installed | cut -d' ' -f1" % \
368 (self.opkg_cmd, self.opkg_args)
369
370 try:
371 output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip()
372 except subprocess.CalledProcessError as e:
373 bb.fatal("Cannot get the installed packages list. Command '%s' "
374 "returned %d:\n%s" % (cmd, e.returncode, e.output))
375
376 if output and format == "file":
377 tmp_output = ""
378 for line in output.split('\n'):
379 pkg, pkg_file, pkg_arch = line.split()
380 full_path = os.path.join(self.rootfs_dir, pkg_arch, pkg_file)
381 if os.path.exists(full_path):
382 tmp_output += "%s %s %s\n" % (pkg, full_path, pkg_arch)
383 else:
384 tmp_output += "%s %s %s\n" % (pkg, pkg_file, pkg_arch)
385
386 output = tmp_output
387
388 return output
389
390
391class DpkgPkgsList(PkgsList):
392 def list(self, format=None):
393 cmd = [bb.utils.which(os.getenv('PATH'), "dpkg-query"),
394 "--admindir=%s/var/lib/dpkg" % self.rootfs_dir,
395 "-W"]
396
397 if format == "arch":
398 cmd.append("-f=${Package} ${PackageArch}\n")
399 elif format == "file":
400 cmd.append("-f=${Package} ${Package}_${Version}_${Architecture}.deb ${PackageArch}\n")
401 elif format == "ver":
402 cmd.append("-f=${Package} ${PackageArch} ${Version}\n")
403 elif format == "deps":
404 cmd.append("-f=Package: ${Package}\nDepends: ${Depends}\nRecommends: ${Recommends}\n\n")
405 else:
406 cmd.append("-f=${Package}\n")
407
408 try:
409 output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip()
410 except subprocess.CalledProcessError as e:
411 bb.fatal("Cannot get the installed packages list. Command '%s' "
412 "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output))
413
414 if format == "file":
415 tmp_output = ""
416 for line in tuple(output.split('\n')):
417 pkg, pkg_file, pkg_arch = line.split()
418 full_path = os.path.join(self.rootfs_dir, pkg_arch, pkg_file)
419 if os.path.exists(full_path):
420 tmp_output += "%s %s %s\n" % (pkg, full_path, pkg_arch)
421 else:
422 tmp_output += "%s %s %s\n" % (pkg, pkg_file, pkg_arch)
423
424 output = tmp_output
425 elif format == "deps":
426 opkg_query_cmd = bb.utils.which(os.getenv('PATH'), "opkg-query-helper.py")
427 file_out = tempfile.NamedTemporaryFile()
428 file_out.write(output)
429 file_out.flush()
430
431 try:
432 output = subprocess.check_output("cat %s | %s" %
433 (file_out.name, opkg_query_cmd),
434 stderr=subprocess.STDOUT,
435 shell=True)
436 except subprocess.CalledProcessError as e:
437 file_out.close()
438 bb.fatal("Cannot compute packages dependencies. Command '%s' "
439 "returned %d:\n%s" % (e.cmd, e.returncode, e.output))
440
441 file_out.close()
442
443 return output
444
445
446class PackageManager(object):
447 """
448 This is an abstract class. Do not instantiate this directly.
449 """
450 __metaclass__ = ABCMeta
451
452 def __init__(self, d):
453 self.d = d
454 self.deploy_dir = None
455 self.deploy_lock = None
456 self.feed_uris = self.d.getVar('PACKAGE_FEED_URIS', True) or ""
457
458 """
459 Update the package manager package database.
460 """
461 @abstractmethod
462 def update(self):
463 pass
464
465 """
466 Install a list of packages. 'pkgs' is a list object. If 'attempt_only' is
467 True, installation failures are ignored.
468 """
469 @abstractmethod
470 def install(self, pkgs, attempt_only=False):
471 pass
472
473 """
474 Remove a list of packages. 'pkgs' is a list object. If 'with_dependencies'
475 is False, any dependencies are left in place.
476 """
477 @abstractmethod
478 def remove(self, pkgs, with_dependencies=True):
479 pass
480
481 """
482 This function creates the index files
483 """
484 @abstractmethod
485 def write_index(self):
486 pass
487
488 @abstractmethod
489 def remove_packaging_data(self):
490 pass
491
492 @abstractmethod
493 def list_installed(self, format=None):
494 pass
495
496 @abstractmethod
497 def insert_feeds_uris(self):
498 pass
499
500 """
501 Install complementary packages based upon the list of currently installed
502 packages, e.g. locales, *-dev, *-dbg, etc. This will only attempt to install
503 these packages; if they don't exist then no error will occur. Note: every
504 backend needs to call this function explicitly after the normal package
505 installation
506 """
507 def install_complementary(self, globs=None):
508 # we need to write the list of installed packages to a file because
509 # oe-pkgdata-util reads it from a file
510 installed_pkgs_file = os.path.join(self.d.getVar('WORKDIR', True),
511 "installed_pkgs.txt")
512 with open(installed_pkgs_file, "w+") as installed_pkgs:
513 installed_pkgs.write(self.list_installed("arch"))
514
515 if globs is None:
516 globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY', True)
517 split_linguas = set()
518
519 for translation in self.d.getVar('IMAGE_LINGUAS', True).split():
520 split_linguas.add(translation)
521 split_linguas.add(translation.split('-')[0])
522
523 split_linguas = sorted(split_linguas)
524
525 for lang in split_linguas:
526 globs += " *-locale-%s" % lang
527
528 if globs is None:
529 return
530
531 cmd = [bb.utils.which(os.getenv('PATH'), "oe-pkgdata-util"),
532 "glob", self.d.getVar('PKGDATA_DIR', True), installed_pkgs_file,
533 globs]
534 exclude = self.d.getVar('PACKAGE_EXCLUDE_COMPLEMENTARY', True)
535 if exclude:
536 cmd.extend(['-x', exclude])
537 try:
538 bb.note("Installing complementary packages ...")
539 complementary_pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
540 except subprocess.CalledProcessError as e:
541 bb.fatal("Could not compute complementary packages list. Command "
542 "'%s' returned %d:\n%s" %
543 (' '.join(cmd), e.returncode, e.output))
544
545 self.install(complementary_pkgs.split(), attempt_only=True)
546
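
For example, with IMAGE_INSTALL_COMPLEMENTARY = "*-dev *-dbg" and IMAGE_LINGUAS = "en-us pt-br", the loop above expands the glob list like this (a standalone sketch of the same logic):

    globs = "*-dev *-dbg"
    split_linguas = set()
    for translation in "en-us pt-br".split():
        split_linguas.add(translation)
        split_linguas.add(translation.split('-')[0])
    for lang in sorted(split_linguas):
        globs += " *-locale-%s" % lang
    print(globs)
    # -> *-dev *-dbg *-locale-en *-locale-en-us *-locale-pt *-locale-pt-br
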
547 def deploy_dir_lock(self):
548 if self.deploy_dir is None:
549 raise RuntimeError("deploy_dir is not set!")
550
551 lock_file_name = os.path.join(self.deploy_dir, "deploy.lock")
552
553 self.deploy_lock = bb.utils.lockfile(lock_file_name)
554
555 def deploy_dir_unlock(self):
556 if self.deploy_lock is None:
557 return
558
559 bb.utils.unlockfile(self.deploy_lock)
560
561 self.deploy_lock = None
562
563
564class RpmPM(PackageManager):
565 def __init__(self,
566 d,
567 target_rootfs,
568 target_vendor,
569 task_name='target',
570 providename=None,
571 arch_var=None,
572 os_var=None):
573 super(RpmPM, self).__init__(d)
574 self.target_rootfs = target_rootfs
575 self.target_vendor = target_vendor
576 self.task_name = task_name
577 self.providename = providename
578 self.fullpkglist = list()
579 self.deploy_dir = self.d.getVar('DEPLOY_DIR_RPM', True)
580 self.etcrpm_dir = os.path.join(self.target_rootfs, "etc/rpm")
581 self.install_dir = os.path.join(self.target_rootfs, "install")
582 self.rpm_cmd = bb.utils.which(os.getenv('PATH'), "rpm")
583 self.smart_cmd = bb.utils.which(os.getenv('PATH'), "smart")
584 self.smart_opt = "--quiet --data-dir=" + os.path.join(target_rootfs,
585 'var/lib/smart')
586 self.scriptlet_wrapper = self.d.expand('${WORKDIR}/scriptlet_wrapper')
587 self.solution_manifest = self.d.expand('${T}/saved/%s_solution' %
588 self.task_name)
589 self.saved_rpmlib = self.d.expand('${T}/saved/%s' % self.task_name)
590 self.image_rpmlib = os.path.join(self.target_rootfs, 'var/lib/rpm')
591
592 if not os.path.exists(self.d.expand('${T}/saved')):
593 bb.utils.mkdirhier(self.d.expand('${T}/saved'))
594
595 self.indexer = RpmIndexer(self.d, self.deploy_dir)
596 self.pkgs_list = RpmPkgsList(self.d, self.target_rootfs, arch_var, os_var)
597 self.rpm_version = self.pkgs_list.rpm_version
598
599 self.ml_prefix_list, self.ml_os_list = self.indexer.get_ml_prefix_and_os_list(arch_var, os_var)
600
601 def insert_feeds_uris(self):
602 if self.feed_uris == "":
603 return
604
605 # List must be in most-preferred to least-preferred order
606 default_platform_extra = set()
607 platform_extra = set()
608 bbextendvariant = self.d.getVar('BBEXTENDVARIANT', True) or ""
609 for mlib in self.ml_os_list:
610 for arch in self.ml_prefix_list[mlib]:
611 plt = arch.replace('-', '_') + '-.*-' + self.ml_os_list[mlib]
612 if mlib == bbextendvariant:
613 default_platform_extra.add(plt)
614 else:
615 platform_extra.add(plt)
616
617 platform_extra = platform_extra.union(default_platform_extra)
618
619 arch_list = []
620 for canonical_arch in platform_extra:
621 arch = canonical_arch.split('-')[0]
622 if not os.path.exists(os.path.join(self.deploy_dir, arch)):
623 continue
624 arch_list.append(arch)
625
626 uri_iterator = 0
627 channel_priority = 10 + 5 * len(self.feed_uris.split()) * len(arch_list)
628
629 for uri in self.feed_uris.split():
630 for arch in arch_list:
631 bb.note('Note: adding Smart channel url%d-%s (%s)' %
632 (uri_iterator, arch, channel_priority))
633 self._invoke_smart('channel --add url%d-%s type=rpm-md baseurl=%s/rpm/%s -y'
634 % (uri_iterator, arch, uri, arch))
635 self._invoke_smart('channel --set url%d-%s priority=%d' %
636 (uri_iterator, arch, channel_priority))
637 channel_priority -= 5
638 uri_iterator += 1
639
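
A quick worked example of the priority arithmetic above: with two feed URIs and three feed architectures there are six channels, the starting priority is 10 + 5*2*3 = 40, and each channel added drops the priority by 5, so the channels are assigned 40, 35, 30, 25, 20 and 15; earlier (more preferred) feeds therefore keep the higher priority.
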
640 '''
641 Create configs for rpm and smart; multilib is supported.
642 '''
643 def create_configs(self):
644 target_arch = self.d.getVar('TARGET_ARCH', True)
645 platform = '%s%s-%s' % (target_arch.replace('-', '_'),
646 self.target_vendor,
647 self.ml_os_list['default'])
648
649 # List must be in most-preferred to least-preferred order
650 default_platform_extra = list()
651 platform_extra = list()
652 bbextendvariant = self.d.getVar('BBEXTENDVARIANT', True) or ""
653 for mlib in self.ml_os_list:
654 for arch in self.ml_prefix_list[mlib]:
655 plt = arch.replace('-', '_') + '-.*-' + self.ml_os_list[mlib]
656 if mlib == bbextendvariant:
657 if plt not in default_platform_extra:
658 default_platform_extra.append(plt)
659 else:
660 if plt not in platform_extra:
661 platform_extra.append(plt)
662 platform_extra = default_platform_extra + platform_extra
663
664 self._create_configs(platform, platform_extra)
665
666 def _invoke_smart(self, args):
667 cmd = "%s %s %s" % (self.smart_cmd, self.smart_opt, args)
668 # bb.note(cmd)
669 try:
670 complementary_pkgs = subprocess.check_output(cmd,
671 stderr=subprocess.STDOUT,
672 shell=True)
673 # bb.note(complementary_pkgs)
674 return complementary_pkgs
675 except subprocess.CalledProcessError as e:
676 bb.fatal("Could not invoke smart. Command "
677 "'%s' returned %d:\n%s" % (cmd, e.returncode, e.output))
678
679 def _search_pkg_name_in_feeds(self, pkg, feed_archs):
680 for arch in feed_archs:
681 arch = arch.replace('-', '_')
682 for p in self.fullpkglist:
683 regex_match = r"^%s-[^-]*-[^-]*@%s$" % \
684 (re.escape(pkg), re.escape(arch))
685 if re.match(regex_match, p) is not None:
686 # First found is best match
687 # bb.note('%s -> %s' % (pkg, pkg + '@' + arch))
688 return pkg + '@' + arch
689
690 return ""
691
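
For example (assumed sample data), with pkg = 'bash' and feed_archs containing 'armv7a-vfp-neon', the arch is first rewritten to 'armv7a_vfp_neon' and the pattern '^bash-[^-]*-[^-]*@armv7a_vfp_neon$' then matches a fullpkglist entry such as 'bash-4.3-r0@armv7a_vfp_neon', so 'bash@armv7a_vfp_neon' is returned.
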
692 '''
693 Translate the OE multilib format names to the RPM/Smart format names.
694 It searches for the RPM/Smart format names in the probable multilib
695 feeds first, and then searches the default base feed.
696 '''
697 def _pkg_translate_oe_to_smart(self, pkgs, attempt_only=False):
698 new_pkgs = list()
699
700 for pkg in pkgs:
701 new_pkg = pkg
702 # Search new_pkg in probable multilibs first
703 for mlib in self.ml_prefix_list:
704 # Skip the default archs
705 if mlib == 'default':
706 continue
707
708 subst = pkg.replace(mlib + '-', '')
709 # if the pkg is in this multilib feed
710 if subst != pkg:
711 feed_archs = self.ml_prefix_list[mlib]
712 new_pkg = self._search_pkg_name_in_feeds(subst, feed_archs)
713 if not new_pkg:
714 # Failed to translate, package not found!
715 err_msg = '%s not found in the %s feeds (%s).\n' % \
716 (pkg, mlib, " ".join(feed_archs))
717 if not attempt_only:
718 err_msg += " ".join(self.fullpkglist)
719 bb.fatal(err_msg)
720 bb.warn(err_msg)
721 else:
722 new_pkgs.append(new_pkg)
723
724 break
725
726 # Apparently not a multilib package...
727 if pkg == new_pkg:
728 # Search new_pkg in default archs
729 default_archs = self.ml_prefix_list['default']
730 new_pkg = self._search_pkg_name_in_feeds(pkg, default_archs)
731 if not new_pkg:
732 err_msg = '%s not found in the base feeds (%s).\n' % \
733 (pkg, ' '.join(default_archs))
734 if not attempt_only:
735 err_msg += " ".join(self.fullpkglist)
736 bb.fatal(err_msg)
737 bb.warn(err_msg)
738 else:
739 new_pkgs.append(new_pkg)
740
741 return new_pkgs
742
743 def _create_configs(self, platform, platform_extra):
744 # Setup base system configuration
745 bb.note("configuring RPM platform settings")
746
747 # Configure internal RPM environment when using Smart
748 os.environ['RPM_ETCRPM'] = self.etcrpm_dir
749 bb.utils.mkdirhier(self.etcrpm_dir)
750
751 # Setup temporary directory -- install...
752 if os.path.exists(self.install_dir):
753 bb.utils.remove(self.install_dir, True)
754 bb.utils.mkdirhier(os.path.join(self.install_dir, 'tmp'))
755
756 channel_priority = 5
757 platform_dir = os.path.join(self.etcrpm_dir, "platform")
758 sdkos = self.d.getVar("SDK_OS", True)
759 with open(platform_dir, "w+") as platform_fd:
760 platform_fd.write(platform + '\n')
761 for pt in platform_extra:
762 channel_priority += 5
763 if sdkos:
764 tmp = re.sub("-%s$" % sdkos, "-%s\n" % sdkos, pt)
765 tmp = re.sub("-linux.*$", "-linux.*\n", tmp)
766 platform_fd.write(tmp)
767
768 # Tell RPM that the "/" directory exists and is available
769 bb.note("configuring RPM system provides")
770 sysinfo_dir = os.path.join(self.etcrpm_dir, "sysinfo")
771 bb.utils.mkdirhier(sysinfo_dir)
772 with open(os.path.join(sysinfo_dir, "Dirnames"), "w+") as dirnames:
773 dirnames.write("/\n")
774
775 if self.providename:
776 providename_dir = os.path.join(sysinfo_dir, "Providename")
777 if not os.path.exists(providename_dir):
778 providename_content = '\n'.join(self.providename)
779 providename_content += '\n'
780 open(providename_dir, "w+").write(providename_content)
781
782 # Configure RPM... we enforce these settings!
783 bb.note("configuring RPM DB settings")
784 # After changing the __db.* cache size, the log file will not be
785 # generated automatically, which raises some warnings,
786 # so touch a bare log for rpm to write into.
787 if self.rpm_version == 5:
788 rpmlib_log = os.path.join(self.image_rpmlib, 'log', 'log.0000000001')
789 if not os.path.exists(rpmlib_log):
790 bb.utils.mkdirhier(os.path.join(self.image_rpmlib, 'log'))
791 open(rpmlib_log, 'w+').close()
792
793 DB_CONFIG_CONTENT = "# ================ Environment\n" \
794 "set_data_dir .\n" \
795 "set_create_dir .\n" \
796 "set_lg_dir ./log\n" \
797 "set_tmp_dir ./tmp\n" \
798 "set_flags db_log_autoremove on\n" \
799 "\n" \
800 "# -- thread_count must be >= 8\n" \
801 "set_thread_count 64\n" \
802 "\n" \
803 "# ================ Logging\n" \
804 "\n" \
805 "# ================ Memory Pool\n" \
806 "set_cachesize 0 1048576 0\n" \
807 "set_mp_mmapsize 268435456\n" \
808 "\n" \
809 "# ================ Locking\n" \
810 "set_lk_max_locks 16384\n" \
811 "set_lk_max_lockers 16384\n" \
812 "set_lk_max_objects 16384\n" \
813 "mutex_set_max 163840\n" \
814 "\n" \
815 "# ================ Replication\n"
816
817 db_config_dir = os.path.join(self.image_rpmlib, 'DB_CONFIG')
818 if not os.path.exists(db_config_dir):
819 open(db_config_dir, 'w+').write(DB_CONFIG_CONTENT)
820
821 # Create database so that smart doesn't complain (lazy init)
822 opt = "-qa"
823 if self.rpm_version == 4:
824 opt = "--initdb"
825 cmd = "%s --root %s --dbpath /var/lib/rpm %s > /dev/null" % (
826 self.rpm_cmd, self.target_rootfs, opt)
827 try:
828 subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
829 except subprocess.CalledProcessError as e:
830 bb.fatal("Create rpm database failed. Command '%s' "
831 "returned %d:\n%s" % (cmd, e.returncode, e.output))
832
833 # Configure smart
834 bb.note("configuring Smart settings")
835 bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'),
836 True)
837 self._invoke_smart('config --set rpm-root=%s' % self.target_rootfs)
838 self._invoke_smart('config --set rpm-dbpath=/var/lib/rpm')
839 self._invoke_smart('config --set rpm-extra-macros._var=%s' %
840 self.d.getVar('localstatedir', True))
841 cmd = 'config --set rpm-extra-macros._tmppath=/install/tmp'
842
843 prefer_color = self.d.getVar('RPM_PREFER_ELF_ARCH', True)
844 if prefer_color:
845 if prefer_color not in ['0', '1', '2', '4']:
846 bb.fatal("Invalid RPM_PREFER_ELF_ARCH: %s, it should be one of:\n"
847 "\t1: ELF32 wins\n"
848 "\t2: ELF64 wins\n"
849 "\t4: ELF64 N32 wins (mips64 or mips64el only)" %
850 prefer_color)
851 if prefer_color == "4" and self.d.getVar("TUNE_ARCH", True) not in \
852 ['mips64', 'mips64el']:
853 bb.fatal("RPM_PREFER_ELF_ARCH = \"4\" is for mips64 or mips64el "
854 "only.")
855 self._invoke_smart('config --set rpm-extra-macros._prefer_color=%s'
856 % prefer_color)
857
858 self._invoke_smart(cmd)
859
860 # Write common configuration for host and target usage
861 self._invoke_smart('config --set rpm-nolinktos=1')
862 self._invoke_smart('config --set rpm-noparentdirs=1')
863 check_signature = self.d.getVar('RPM_CHECK_SIGNATURES', True)
864 if check_signature and check_signature.strip() == "0":
865 self._invoke_smart('config --set rpm-check-signatures=false')
866 for i in self.d.getVar('BAD_RECOMMENDATIONS', True).split():
867 self._invoke_smart('flag --set ignore-recommends %s' % i)
868
869 # Do the following configurations here, to avoid them being
870 # saved for field upgrade
871 if self.d.getVar('NO_RECOMMENDATIONS', True).strip() == "1":
872 self._invoke_smart('config --set ignore-all-recommends=1')
873 pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE', True) or ""
874 for i in pkg_exclude.split():
875 self._invoke_smart('flag --set exclude-packages %s' % i)
876
877 # Optional debugging
878 # self._invoke_smart('config --set rpm-log-level=debug')
879 # cmd = 'config --set rpm-log-file=/tmp/smart-debug-logfile'
880 # self._invoke_smart(cmd)
881 ch_already_added = []
882 for canonical_arch in platform_extra:
883 arch = canonical_arch.split('-')[0]
884 arch_channel = os.path.join(self.deploy_dir, arch)
885 if os.path.exists(arch_channel) and not arch in ch_already_added:
886 bb.note('Note: adding Smart channel %s (%s)' %
887 (arch, channel_priority))
888 self._invoke_smart('channel --add %s type=rpm-md baseurl=%s -y'
889 % (arch, arch_channel))
890 self._invoke_smart('channel --set %s priority=%d' %
891 (arch, channel_priority))
892 channel_priority -= 5
893
894 ch_already_added.append(arch)
895
896 bb.note('adding Smart RPM DB channel')
897 self._invoke_smart('channel --add rpmsys type=rpm-sys -y')
898
899 # Construct install scriptlet wrapper.
900 # Scripts need to be ordered when executed, this ensures numeric order.
901 # If we ever run into needing more than 899 scripts, we'll have to
902 # change num to start with 1000.
903 #
904 if self.rpm_version == 4:
905 scriptletcmd = "$2 $3 $4\n"
906 else:
907 scriptletcmd = "$2 $1/$3 $4\n"
908
909 SCRIPTLET_FORMAT = "#!/bin/bash\n" \
910 "\n" \
911 "export PATH=%s\n" \
912 "export D=%s\n" \
913 'export OFFLINE_ROOT="$D"\n' \
914 'export IPKG_OFFLINE_ROOT="$D"\n' \
915 'export OPKG_OFFLINE_ROOT="$D"\n' \
916 "export INTERCEPT_DIR=%s\n" \
917 "export NATIVE_ROOT=%s\n" \
918 "\n" \
919 + scriptletcmd + \
920 "if [ $? -ne 0 ]; then\n" \
921 " if [ $4 -eq 1 ]; then\n" \
922 " mkdir -p $1/etc/rpm-postinsts\n" \
923 " num=100\n" \
924 " while [ -e $1/etc/rpm-postinsts/${num}-* ]; do num=$((num + 1)); done\n" \
925 " name=`head -1 $1/$3 | cut -d\' \' -f 2`\n" \
926 ' echo "#!$2" > $1/etc/rpm-postinsts/${num}-${name}\n' \
927 ' echo "# Arg: $4" >> $1/etc/rpm-postinsts/${num}-${name}\n' \
928 " cat $1/$3 >> $1/etc/rpm-postinsts/${num}-${name}\n" \
929 " chmod +x $1/etc/rpm-postinsts/${num}-${name}\n" \
930 " else\n" \
931 ' echo "Error: pre/post remove scriptlet failed"\n' \
932 " fi\n" \
933 "fi\n"
934
935 intercept_dir = self.d.expand('${WORKDIR}/intercept_scripts')
936 native_root = self.d.getVar('STAGING_DIR_NATIVE', True)
937 scriptlet_content = SCRIPTLET_FORMAT % (os.environ['PATH'],
938 self.target_rootfs,
939 intercept_dir,
940 native_root)
941 open(self.scriptlet_wrapper, 'w+').write(scriptlet_content)
942
943 bb.note("Note: configuring RPM cross-install scriptlet_wrapper")
944 os.chmod(self.scriptlet_wrapper, 0755)
945 cmd = 'config --set rpm-extra-macros._cross_scriptlet_wrapper=%s' % \
946 self.scriptlet_wrapper
947 self._invoke_smart(cmd)
948
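# Illustrative sketch, not part of the original module: when a postinstall
# scriptlet fails at rootfs-creation time, the wrapper above saves it for
# first boot under a numeric prefix so execution order is preserved, e.g.
# (package names hypothetical):
#
#   /etc/rpm-postinsts/100-bash
#   /etc/rpm-postinsts/101-openssh
#
# The shell numbering loop corresponds roughly to this Python:
#
#   num = 100
#   while glob.glob('%s/etc/rpm-postinsts/%d-*' % (target_rootfs, num)):
#       num += 1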
949 # Debug to show smart config info
950 # bb.note(self._invoke_smart('config --show'))
951
952 def update(self):
953 self._invoke_smart('update rpmsys')
954
955 '''
956 Install pkgs with smart; the pkg names are in OE format
957 '''
958 def install(self, pkgs, attempt_only=False):
959
960 bb.note("Installing the following packages: %s" % ' '.join(pkgs))
961 if attempt_only and len(pkgs) == 0:
962 return
963 pkgs = self._pkg_translate_oe_to_smart(pkgs, attempt_only)
964
965 if not attempt_only:
966 bb.note('to be installed: %s' % ' '.join(pkgs))
967 cmd = "%s %s install -y %s" % \
968 (self.smart_cmd, self.smart_opt, ' '.join(pkgs))
969 bb.note(cmd)
970 else:
971 bb.note('installing attempt only packages...')
972 bb.note('Attempting %s' % ' '.join(pkgs))
973 cmd = "%s %s install --attempt -y %s" % \
974 (self.smart_cmd, self.smart_opt, ' '.join(pkgs))
975 try:
976 output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
977 bb.note(output)
978 except subprocess.CalledProcessError as e:
979 bb.fatal("Unable to install packages. Command '%s' "
980 "returned %d:\n%s" % (cmd, e.returncode, e.output))
981
982 '''
983 Remove pkgs with smart; the pkg names are in smart/rpm format
984 '''
985 def remove(self, pkgs, with_dependencies=True):
986 bb.note('to be removed: ' + ' '.join(pkgs))
987
988 if not with_dependencies:
989 cmd = "%s -e --nodeps " % self.rpm_cmd
990 cmd += "--root=%s " % self.target_rootfs
991 cmd += "--dbpath=/var/lib/rpm "
992 cmd += "--define='_cross_scriptlet_wrapper %s' " % \
993 self.scriptlet_wrapper
994 cmd += "--define='_tmppath /install/tmp' %s" % ' '.join(pkgs)
995 else:
996 # for pkg in pkgs:
997 # bb.note('Debug: What required: %s' % pkg)
998 # bb.note(self._invoke_smart('query %s --show-requiredby' % pkg))
999
1000 cmd = "%s %s remove -y %s" % (self.smart_cmd,
1001 self.smart_opt,
1002 ' '.join(pkgs))
1003
1004 try:
1005 bb.note(cmd)
1006 output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
1007 bb.note(output)
1008 except subprocess.CalledProcessError as e:
1009 bb.note("Unable to remove packages. Command '%s' "
1010 "returned %d:\n%s" % (cmd, e.returncode, e.output))
1011
1012 def upgrade(self):
1013 bb.note('smart upgrade')
1014 self._invoke_smart('upgrade')
1015
1016 def write_index(self):
1017 result = self.indexer.write_index()
1018
1019 if result is not None:
1020 bb.fatal(result)
1021
1022 def remove_packaging_data(self):
1023 bb.utils.remove(self.image_rpmlib, True)
1024 bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'),
1025 True)
1026 bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/opkg'), True)
1027
1028 # remove temp directory
1029 bb.utils.remove(self.d.expand('${IMAGE_ROOTFS}/install'), True)
1030
1031 def backup_packaging_data(self):
1032 # Save the rpmlib for incremental rpm image generation
1033 if os.path.exists(self.saved_rpmlib):
1034 bb.utils.remove(self.saved_rpmlib, True)
1035 shutil.copytree(self.image_rpmlib,
1036 self.saved_rpmlib,
1037 symlinks=True)
1038
1039 def recovery_packaging_data(self):
1040 # Move the rpmlib back
1041 if os.path.exists(self.saved_rpmlib):
1042 if os.path.exists(self.image_rpmlib):
1043 bb.utils.remove(self.image_rpmlib, True)
1044
1045 bb.note('Recovering packaging data')
1046 shutil.copytree(self.saved_rpmlib,
1047 self.image_rpmlib,
1048 symlinks=True)
1049
1050 def list_installed(self, format=None):
1051 return self.pkgs_list.list(format)
1052
1053 '''
1054 For an incremental install, we need to determine what we've got,
1055 what we need to add, and what to remove...
1056 dump_install_solution dumps and saves the new install
1057 solution.
1058 '''
1059 def dump_install_solution(self, pkgs):
1060 bb.note('creating new install solution for incremental install')
1061 if len(pkgs) == 0:
1062 return
1063
1064 pkgs = self._pkg_translate_oe_to_smart(pkgs, False)
1065 install_pkgs = list()
1066
1067 cmd = "%s %s install -y --dump %s 2>%s" % \
1068 (self.smart_cmd,
1069 self.smart_opt,
1070 ' '.join(pkgs),
1071 self.solution_manifest)
1072 try:
1073 # Disable rpmsys channel for the fake install
1074 self._invoke_smart('channel --disable rpmsys')
1075
1076 subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
1077 with open(self.solution_manifest, 'r') as manifest:
1078 for pkg in manifest.read().split('\n'):
1079 if '@' in pkg:
1080 install_pkgs.append(pkg)
1081 except subprocess.CalledProcessError as e:
1082 bb.note("Unable to dump install packages. Command '%s' "
1083 "returned %d:\n%s" % (cmd, e.returncode, e.output))
1084 # Re-enable the rpmsys channel
1085 self._invoke_smart('channel --enable rpmsys')
1086 return install_pkgs
1087
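# Illustrative sketch of assumed usage, mirroring the incremental-image
# logic in oe/rootfs.py: the new and previously saved solutions are diffed
# to decide what to add and what to drop:
#
#   new_solution = pm.dump_install_solution(pkgs)
#   old_solution = pm.load_old_install_solution()
#   pkgs_to_install = list(set(new_solution) - set(old_solution))
#   pkgs_to_remove = [p.split('@')[0]
#                     for p in set(old_solution) - set(new_solution)]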
1088 '''
1089 For an incremental install, we need to determine what we've got,
1090 what we need to add, and what to remove...
1091 load_old_install_solution loads the previous install
1092 solution.
1093 '''
1094 def load_old_install_solution(self):
1095 bb.note('load old install solution for incremental install')
1096 installed_pkgs = list()
1097 if not os.path.exists(self.solution_manifest):
1098 bb.note('old install solution does not exist')
1099 return installed_pkgs
1100
1101 with open(self.solution_manifest, 'r') as manifest:
1102 for pkg in manifest.read().split('\n'):
1103 if '@' in pkg:
1104 installed_pkgs.append(pkg.strip())
1105
1106 return installed_pkgs
1107
1108 '''
1109 Dump all available packages in the feeds; it should be invoked after the
1110 newest rpm index has been created
1111 '''
1112 def dump_all_available_pkgs(self):
1113 available_manifest = self.d.expand('${T}/saved/available_pkgs.txt')
1114 available_pkgs = list()
1115 cmd = "%s %s query --output %s" % \
1116 (self.smart_cmd, self.smart_opt, available_manifest)
1117 try:
1118 subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
1119 with open(available_manifest, 'r') as manifest:
1120 for pkg in manifest.read().split('\n'):
1121 if '@' in pkg:
1122 available_pkgs.append(pkg.strip())
1123 except subprocess.CalledProcessError as e:
1124 bb.note("Unable to list all available packages. Command '%s' "
1125 "returned %d:\n%s" % (cmd, e.returncode, e.output))
1126
1127 self.fullpkglist = available_pkgs
1128
1129 return
1130
1131 def save_rpmpostinst(self, pkg):
1132 mlibs = (self.d.getVar('MULTILIB_GLOBAL_VARIANTS') or "").split()
1133
1134 new_pkg = pkg
1135 # Remove any multilib prefix from the package name
1136 for mlib in mlibs:
1137 if mlib in pkg:
1138 new_pkg = pkg.replace(mlib + '-', '')
1139 break
1140
1141 bb.note(' * postponing %s' % new_pkg)
1142 saved_dir = self.target_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/') + new_pkg
1143
1144 cmd = self.rpm_cmd + ' -q --scripts --root ' + self.target_rootfs
1145 cmd += ' --dbpath=/var/lib/rpm ' + new_pkg
1146 cmd += ' | sed -n -e "/^postinstall scriptlet (using .*):$/,/^.* scriptlet (using .*):$/ {/.*/p}"'
1147 cmd += ' | sed -e "/postinstall scriptlet (using \(.*\)):$/d"'
1148 cmd += ' -e "/^.* scriptlet (using .*):$/d" > %s' % saved_dir
1149
1150 try:
1151 bb.note(cmd)
1152 output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip()
1153 bb.note(output)
1154 os.chmod(saved_dir, 0755)
1155 except subprocess.CalledProcessError as e:
1156 bb.fatal("Invoke save_rpmpostinst failed. Command '%s' "
1157 "returned %d:\n%s" % (cmd, e.returncode, e.output))
1158
1159 '''Write common configuration for target usage'''
1160 def rpm_setup_smart_target_config(self):
1161 bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'),
1162 True)
1163
1164 self._invoke_smart('config --set rpm-nolinktos=1')
1165 self._invoke_smart('config --set rpm-noparentdirs=1')
1166 for i in (self.d.getVar('BAD_RECOMMENDATIONS', True) or "").split():
1167 self._invoke_smart('flag --set ignore-recommends %s' % i)
1168 self._invoke_smart('channel --add rpmsys type=rpm-sys -y')
1169
1170 '''
1171 The rpm db lock files are produced after invoking rpm to query on
1172 the build system, and they cause rpm on the target to fail, so we
1173 need to unlock the rpm db by removing the lock files.
1174 '''
1175 def unlock_rpm_db(self):
1176 # Remove rpm db lock files
1177 rpm_db_locks = glob.glob('%s/var/lib/rpm/__db.*' % self.target_rootfs)
1178 for f in rpm_db_locks:
1179 bb.utils.remove(f, True)
1180
1181
1182class OpkgPM(PackageManager):
1183 def __init__(self, d, target_rootfs, config_file, archs, task_name='target'):
1184 super(OpkgPM, self).__init__(d)
1185
1186 self.target_rootfs = target_rootfs
1187 self.config_file = config_file
1188 self.pkg_archs = archs
1189 self.task_name = task_name
1190
1191 self.deploy_dir = self.d.getVar("DEPLOY_DIR_IPK", True)
1192 self.deploy_lock_file = os.path.join(self.deploy_dir, "deploy.lock")
1193 self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg-cl")
1194 self.opkg_args = "-f %s -o %s " % (self.config_file, target_rootfs)
1195 self.opkg_args += self.d.getVar("OPKG_ARGS", True)
1196
1197 opkg_lib_dir = self.d.getVar('OPKGLIBDIR', True)
1198 if opkg_lib_dir[0] == "/":
1199 opkg_lib_dir = opkg_lib_dir[1:]
1200
1201 self.opkg_dir = os.path.join(target_rootfs, opkg_lib_dir, "opkg")
1202
1203 bb.utils.mkdirhier(self.opkg_dir)
1204
1205 self.saved_opkg_dir = self.d.expand('${T}/saved/%s' % self.task_name)
1206 if not os.path.exists(self.d.expand('${T}/saved')):
1207 bb.utils.mkdirhier(self.d.expand('${T}/saved'))
1208
1209 if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") != "1":
1210 self._create_config()
1211 else:
1212 self._create_custom_config()
1213
1214 self.indexer = OpkgIndexer(self.d, self.deploy_dir)
1215
1216 """
1217 This function will change a package's status in the /var/lib/opkg/status file.
1218 If 'packages' is None then the status_tag will be applied to all
1219 packages.
1220 """
1221 def mark_packages(self, status_tag, packages=None):
1222 status_file = os.path.join(self.opkg_dir, "status")
1223
1224 with open(status_file, "r") as sf:
1225 with open(status_file + ".tmp", "w+") as tmp_sf:
1226 if packages is None:
1227 tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
1228 r"Package: \1\n\2Status: \3%s" % status_tag,
1229 sf.read()))
1230 else:
1231 if type(packages).__name__ != "list":
1232 raise TypeError("'packages' should be a list object")
1233
1234 status = sf.read()
1235 for pkg in packages:
1236 status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
1237 r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
1238 status)
1239
1240 tmp_sf.write(status)
1241
1242 os.rename(status_file + ".tmp", status_file)
1243
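# Illustrative example, not part of the original module. Given a status
# entry such as (values hypothetical):
#
#   Package: busybox
#   Architecture: armv5te
#   Status: install ok unpacked
#
# mark_packages("installed", ["busybox"]) rewrites the last line to
# "Status: install ok installed"; the regex only swaps the trailing
# unpacked/installed token and leaves the rest of the entry intact.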
1244 def _create_custom_config(self):
1245 bb.note("Building from feeds activated!")
1246
1247 with open(self.config_file, "w+") as config_file:
1248 priority = 1
1249 for arch in self.pkg_archs.split():
1250 config_file.write("arch %s %d\n" % (arch, priority))
1251 priority += 5
1252
1253 for line in (self.d.getVar('IPK_FEED_URIS', True) or "").split():
1254 feed_match = re.match("^[ \t]*(.*)##([^ \t]*)[ \t]*$", line)
1255
1256 if feed_match is not None:
1257 feed_name = feed_match.group(1)
1258 feed_uri = feed_match.group(2)
1259
1260 bb.note("Add %s feed with URL %s" % (feed_name, feed_uri))
1261
1262 config_file.write("src/gz %s %s\n" % (feed_name, feed_uri))
1263
1264 """
1265 Allow using the package deploy directory contents as a quick devel-testing
1266 feed. This creates individual feed configs for each arch subdir of those
1267 specified as compatible for the current machine.
1268 NOTE: Development-helper feature, NOT a full-fledged feed.
1269 """
1270 if (self.d.getVar('FEED_DEPLOYDIR_BASE_URI', True) or "") != "":
1271 for arch in self.pkg_archs.split():
1272 cfg_file_name = os.path.join(self.target_rootfs,
1273 self.d.getVar("sysconfdir", True),
1274 "opkg",
1275 "local-%s-feed.conf" % arch)
1276
1277 with open(cfg_file_name, "w+") as cfg_file:
1278 cfg_file.write("src/gz local-%s %s/%s" %
1279 (arch,
1280 self.d.getVar('FEED_DEPLOYDIR_BASE_URI', True),
1281 arch))
1282
1283 def _create_config(self):
1284 with open(self.config_file, "w+") as config_file:
1285 priority = 1
1286 for arch in self.pkg_archs.split():
1287 config_file.write("arch %s %d\n" % (arch, priority))
1288 priority += 5
1289
1290 config_file.write("src oe file:%s\n" % self.deploy_dir)
1291
1292 for arch in self.pkg_archs.split():
1293 pkgs_dir = os.path.join(self.deploy_dir, arch)
1294 if os.path.isdir(pkgs_dir):
1295 config_file.write("src oe-%s file:%s\n" %
1296 (arch, pkgs_dir))
1297
1298 def insert_feeds_uris(self):
1299 if self.feed_uris == "":
1300 return
1301
1302 rootfs_config = os.path.join('%s/etc/opkg/base-feeds.conf'
1303 % self.target_rootfs)
1304
1305 with open(rootfs_config, "w+") as config_file:
1306 uri_iterator = 0
1307 for uri in self.feed_uris.split():
1308 config_file.write("src/gz url-%d %s/ipk\n" %
1309 (uri_iterator, uri))
1310
1311 for arch in self.pkg_archs.split():
1312 if not os.path.exists(os.path.join(self.deploy_dir, arch)):
1313 continue
1314 bb.note('Note: adding opkg channel uri-%s-%d (%s)' %
1315 (arch, uri_iterator, uri))
1316
1317 config_file.write("src/gz uri-%s-%d %s/ipk/%s\n" %
1318 (arch, uri_iterator, uri, arch))
1319 uri_iterator += 1
1320
1321 def update(self):
1322 self.deploy_dir_lock()
1323
1324 cmd = "%s %s update" % (self.opkg_cmd, self.opkg_args)
1325
1326 try:
1327 subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
1328 except subprocess.CalledProcessError as e:
1329 self.deploy_dir_unlock()
1330 bb.fatal("Unable to update the package index files. Command '%s' "
1331 "returned %d:\n%s" % (cmd, e.returncode, e.output))
1332
1333 self.deploy_dir_unlock()
1334
1335 def install(self, pkgs, attempt_only=False):
1336 if attempt_only and len(pkgs) == 0:
1337 return
1338
1339 cmd = "%s %s install %s" % (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
1340
1341 os.environ['D'] = self.target_rootfs
1342 os.environ['OFFLINE_ROOT'] = self.target_rootfs
1343 os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
1344 os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
1345 os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR', True),
1346 "intercept_scripts")
1347 os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE', True)
1348
1349 try:
1350 bb.note("Installing the following packages: %s" % ' '.join(pkgs))
1351 bb.note(cmd)
1352 output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
1353 bb.note(output)
1354 except subprocess.CalledProcessError as e:
1355 (bb.fatal, bb.note)[attempt_only]("Unable to install packages. "
1356 "Command '%s' returned %d:\n%s" %
1357 (cmd, e.returncode, e.output))
1358
1359 def remove(self, pkgs, with_dependencies=True):
1360 if with_dependencies:
1361 cmd = "%s %s --force-depends --force-remove --force-removal-of-dependent-packages remove %s" % \
1362 (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
1363 else:
1364 cmd = "%s %s --force-depends remove %s" % \
1365 (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
1366
1367 try:
1368 bb.note(cmd)
1369 output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
1370 bb.note(output)
1371 except subprocess.CalledProcessError as e:
1372 bb.fatal("Unable to remove packages. Command '%s' "
1373 "returned %d:\n%s" % (e.cmd, e.returncode, e.output))
1374
1375 def write_index(self):
1376 self.deploy_dir_lock()
1377
1378 result = self.indexer.write_index()
1379
1380 self.deploy_dir_unlock()
1381
1382 if result is not None:
1383 bb.fatal(result)
1384
1385 def remove_packaging_data(self):
1386 bb.utils.remove(self.opkg_dir, True)
1387 # recreate the directory; it's needed by the PM lock
1388 bb.utils.mkdirhier(self.opkg_dir)
1389
1390 def list_installed(self, format=None):
1391 return OpkgPkgsList(self.d, self.target_rootfs, self.config_file).list(format)
1392
1393 def handle_bad_recommendations(self):
1394 bad_recommendations = self.d.getVar("BAD_RECOMMENDATIONS", True) or ""
1395 if bad_recommendations.strip() == "":
1396 return
1397
1398 status_file = os.path.join(self.opkg_dir, "status")
1399
1400 # If the status file exists, the bad recommendations have already
1401 # been handled
1402 if os.path.exists(status_file):
1403 return
1404
1405 cmd = "%s %s info " % (self.opkg_cmd, self.opkg_args)
1406
1407 with open(status_file, "w+") as status:
1408 for pkg in bad_recommendations.split():
1409 pkg_info = cmd + pkg
1410
1411 try:
1412 output = subprocess.check_output(pkg_info.split(), stderr=subprocess.STDOUT).strip()
1413 except subprocess.CalledProcessError as e:
1414 bb.fatal("Cannot get package info. Command '%s' "
1415 "returned %d:\n%s" % (pkg_info, e.returncode, e.output))
1416
1417 if output == "":
1418 bb.note("Ignored bad recommendation: '%s' is "
1419 "not a package" % pkg)
1420 continue
1421
1422 for line in output.split('\n'):
1423 if line.startswith("Status:"):
1424 status.write("Status: deinstall hold not-installed\n")
1425 else:
1426 status.write(line + "\n")
1427
1428 # Append a blank line after each package entry to ensure that it
1429 # is separated from the following entry
1430 status.write("\n")
1431
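# Illustrative example, not part of the original module. For
# BAD_RECOMMENDATIONS = "udev-cache" (hypothetical), the loop above copies
# the 'opkg info' fields but replaces the status line, e.g.:
#
#   Package: udev-cache
#   Version: 1.0-r0
#   Status: deinstall hold not-installed
#
# which pins the package so opkg never installs it as a recommendation.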
1432 '''
1433 The following function does a dummy (--noaction) install of pkgs and returns the output log.
1434 '''
1435 def dummy_install(self, pkgs):
1436 if len(pkgs) == 0:
1437 return
1438
1439 # Create a temp dir as the opkg root for the dummy installation
1440 temp_rootfs = self.d.expand('${T}/opkg')
1441 temp_opkg_dir = os.path.join(temp_rootfs, 'var/lib/opkg')
1442 bb.utils.mkdirhier(temp_opkg_dir)
1443
1444 opkg_args = "-f %s -o %s " % (self.config_file, temp_rootfs)
1445 opkg_args += self.d.getVar("OPKG_ARGS", True)
1446
1447 cmd = "%s %s update" % (self.opkg_cmd, opkg_args)
1448 try:
1449 subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
1450 except subprocess.CalledProcessError as e:
1451 bb.fatal("Unable to update. Command '%s' "
1452 "returned %d:\n%s" % (cmd, e.returncode, e.output))
1453
1454 # Dummy installation
1455 cmd = "%s %s --noaction install %s " % (self.opkg_cmd,
1456 opkg_args,
1457 ' '.join(pkgs))
1458 try:
1459 output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
1460 except subprocess.CalledProcessError as e:
1461 bb.fatal("Unable to dummy install packages. Command '%s' "
1462 "returned %d:\n%s" % (cmd, e.returncode, e.output))
1463
1464 bb.utils.remove(temp_rootfs, True)
1465
1466 return output
1467
1468 def backup_packaging_data(self):
1469 # Save the opkglib for incremental ipk image generation
1470 if os.path.exists(self.saved_opkg_dir):
1471 bb.utils.remove(self.saved_opkg_dir, True)
1472 shutil.copytree(self.opkg_dir,
1473 self.saved_opkg_dir,
1474 symlinks=True)
1475
1476 def recover_packaging_data(self):
1477 # Move the opkglib back
1478 if os.path.exists(self.saved_opkg_dir):
1479 if os.path.exists(self.opkg_dir):
1480 bb.utils.remove(self.opkg_dir, True)
1481
1482 bb.note('Recover packaging data')
1483 shutil.copytree(self.saved_opkg_dir,
1484 self.opkg_dir,
1485 symlinks=True)
1486
1487
1488class DpkgPM(PackageManager):
1489 def __init__(self, d, target_rootfs, archs, base_archs, apt_conf_dir=None):
1490 super(DpkgPM, self).__init__(d)
1491 self.target_rootfs = target_rootfs
1492 self.deploy_dir = self.d.getVar('DEPLOY_DIR_DEB', True)
1493 if apt_conf_dir is None:
1494 self.apt_conf_dir = self.d.expand("${APTCONF_TARGET}/apt")
1495 else:
1496 self.apt_conf_dir = apt_conf_dir
1497 self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf")
1498 self.apt_get_cmd = bb.utils.which(os.getenv('PATH'), "apt-get")
1499
1500 self.apt_args = d.getVar("APT_ARGS", True)
1501
1502 self.all_arch_list = archs.split()
1503 all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").replace('-', '_').split()
1504 self.all_arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in self.all_arch_list)
1505
1506 self._create_configs(archs, base_archs)
1507
1508 self.indexer = DpkgIndexer(self.d, self.deploy_dir)
1509
1510 """
1511 This function will change a package's status in the /var/lib/dpkg/status file.
1512 If 'packages' is None then the status_tag will be applied to all
1513 packages.
1514 """
1515 def mark_packages(self, status_tag, packages=None):
1516 status_file = self.target_rootfs + "/var/lib/dpkg/status"
1517
1518 with open(status_file, "r") as sf:
1519 with open(status_file + ".tmp", "w+") as tmp_sf:
1520 if packages is None:
1521 tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
1522 r"Package: \1\n\2Status: \3%s" % status_tag,
1523 sf.read()))
1524 else:
1525 if type(packages).__name__ != "list":
1526 raise TypeError("'packages' should be a list object")
1527
1528 status = sf.read()
1529 for pkg in packages:
1530 status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
1531 r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
1532 status)
1533
1534 tmp_sf.write(status)
1535
1536 os.rename(status_file + ".tmp", status_file)
1537
1538 """
1539 Run the pre/post installs for package "package_name". If package_name is
1540 None, then run all pre/post install scriptlets.
1541 """
1542 def run_pre_post_installs(self, package_name=None):
1543 info_dir = self.target_rootfs + "/var/lib/dpkg/info"
1544 suffixes = [(".preinst", "Preinstall"), (".postinst", "Postinstall")]
1545 status_file = self.target_rootfs + "/var/lib/dpkg/status"
1546 installed_pkgs = []
1547
1548 with open(status_file, "r") as status:
1549 for line in status.read().split('\n'):
1550 m = re.match("^Package: (.*)", line)
1551 if m is not None:
1552 installed_pkgs.append(m.group(1))
1553
1554 if package_name is not None and not package_name in installed_pkgs:
1555 return
1556
1557 os.environ['D'] = self.target_rootfs
1558 os.environ['OFFLINE_ROOT'] = self.target_rootfs
1559 os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
1560 os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
1561 os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR', True),
1562 "intercept_scripts")
1563 os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE', True)
1564
1565 failed_pkgs = []
1566 for pkg_name in installed_pkgs:
1567 for suffix in suffixes:
1568 p_full = os.path.join(info_dir, pkg_name + suffix[0])
1569 if os.path.exists(p_full):
1570 try:
1571 bb.note("Executing %s for package: %s ..." %
1572 (suffix[1].lower(), pkg_name))
1573 subprocess.check_output(p_full, stderr=subprocess.STDOUT)
1574 except subprocess.CalledProcessError as e:
1575 bb.note("%s for package %s failed with %d:\n%s" %
1576 (suffix[1], pkg_name, e.returncode, e.output))
1577 failed_pkgs.append(pkg_name)
1578 break
1579
1580 if len(failed_pkgs):
1581 self.mark_packages("unpacked", failed_pkgs)
1582
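# Illustrative sketch, not part of the original module. The scriptlets
# executed above live in dpkg's info directory, one file per package and
# phase, e.g. (package name hypothetical):
#
#   <target_rootfs>/var/lib/dpkg/info/busybox.preinst
#   <target_rootfs>/var/lib/dpkg/info/busybox.postinst
#
# Packages whose scriptlets fail are marked "unpacked" again so they can be
# retried later on the target.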
1583 def update(self):
1584 os.environ['APT_CONFIG'] = self.apt_conf_file
1585
1586 self.deploy_dir_lock()
1587
1588 cmd = "%s update" % self.apt_get_cmd
1589
1590 try:
1591 subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
1592 except subprocess.CalledProcessError as e:
1593 bb.fatal("Unable to update the package index files. Command '%s' "
1594 "returned %d:\n%s" % (e.cmd, e.returncode, e.output))
1595
1596 self.deploy_dir_unlock()
1597
1598 def install(self, pkgs, attempt_only=False):
1599 if attempt_only and len(pkgs) == 0:
1600 return
1601
1602 os.environ['APT_CONFIG'] = self.apt_conf_file
1603
1604 cmd = "%s %s install --force-yes --allow-unauthenticated %s" % \
1605 (self.apt_get_cmd, self.apt_args, ' '.join(pkgs))
1606
1607 try:
1608 bb.note("Installing the following packages: %s" % ' '.join(pkgs))
1609 subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
1610 except subprocess.CalledProcessError as e:
1611 (bb.fatal, bb.note)[attempt_only]("Unable to install packages. "
1612 "Command '%s' returned %d:\n%s" %
1613 (cmd, e.returncode, e.output))
1614
1615 # rename *.dpkg-new files/dirs
1616 for root, dirs, files in os.walk(self.target_rootfs):
1617 for dir in dirs:
1618 new_dir = re.sub("\.dpkg-new", "", dir)
1619 if dir != new_dir:
1620 os.rename(os.path.join(root, dir),
1621 os.path.join(root, new_dir))
1622
1623 for file in files:
1624 new_file = re.sub("\.dpkg-new", "", file)
1625 if file != new_file:
1626 os.rename(os.path.join(root, file),
1627 os.path.join(root, new_file))
1628
1629
1630 def remove(self, pkgs, with_dependencies=True):
1631 if with_dependencies:
1632 os.environ['APT_CONFIG'] = self.apt_conf_file
1633 cmd = "%s remove %s" % (self.apt_get_cmd, ' '.join(pkgs))
1634 else:
1635 cmd = "%s --admindir=%s/var/lib/dpkg --instdir=%s" \
1636 " -r --force-depends %s" % \
1637 (bb.utils.which(os.getenv('PATH'), "dpkg"),
1638 self.target_rootfs, self.target_rootfs, ' '.join(pkgs))
1639
1640 try:
1641 subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
1642 except subprocess.CalledProcessError as e:
1643 bb.fatal("Unable to remove packages. Command '%s' "
1644 "returned %d:\n%s" % (e.cmd, e.returncode, e.output))
1645
1646 def write_index(self):
1647 self.deploy_dir_lock()
1648
1649 result = self.indexer.write_index()
1650
1651 self.deploy_dir_unlock()
1652
1653 if result is not None:
1654 bb.fatal(result)
1655
1656 def insert_feeds_uris(self):
1657 if self.feed_uris == "":
1658 return
1659
1660 sources_conf = os.path.join("%s/etc/apt/sources.list"
1661 % self.target_rootfs)
1662 arch_list = []
1663
1664 for arch in self.all_arch_list:
1665 if not os.path.exists(os.path.join(self.deploy_dir, arch)):
1666 continue
1667 arch_list.append(arch)
1668
1669 with open(sources_conf, "w+") as sources_file:
1670 for uri in self.feed_uris.split():
1671 for arch in arch_list:
1672 bb.note('Note: adding dpkg channel at (%s)' % uri)
1673 sources_file.write("deb %s/deb/%s ./\n" %
1674 (uri, arch))
1675
1676 def _create_configs(self, archs, base_archs):
1677 base_archs = re.sub("_", "-", base_archs)
1678
1679 if os.path.exists(self.apt_conf_dir):
1680 bb.utils.remove(self.apt_conf_dir, True)
1681
1682 bb.utils.mkdirhier(self.apt_conf_dir)
1683 bb.utils.mkdirhier(self.apt_conf_dir + "/lists/partial/")
1684 bb.utils.mkdirhier(self.apt_conf_dir + "/apt.conf.d/")
1685
1686 arch_list = []
1687 for arch in self.all_arch_list:
1688 if not os.path.exists(os.path.join(self.deploy_dir, arch)):
1689 continue
1690 arch_list.append(arch)
1691
1692 with open(os.path.join(self.apt_conf_dir, "preferences"), "w+") as prefs_file:
1693 priority = 801
1694 for arch in arch_list:
1695 prefs_file.write(
1696 "Package: *\n"
1697 "Pin: release l=%s\n"
1698 "Pin-Priority: %d\n\n" % (arch, priority))
1699
1700 priority += 5
1701
1702 pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE', True) or ""
1703 for pkg in pkg_exclude.split():
1704 prefs_file.write(
1705 "Package: %s\n"
1706 "Pin: release *\n"
1707 "Pin-Priority: -1\n\n" % pkg)
1708
1709 arch_list.reverse()
1710
1711 with open(os.path.join(self.apt_conf_dir, "sources.list"), "w+") as sources_file:
1712 for arch in arch_list:
1713 sources_file.write("deb file:%s/ ./\n" %
1714 os.path.join(self.deploy_dir, arch))
1715
1716 base_arch_list = base_archs.split()
1717 multilib_variants = self.d.getVar("MULTILIB_VARIANTS", True)
1718 for variant in multilib_variants.split():
1719 if variant == "lib32":
1720 base_arch_list.append("i386")
1721 elif variant == "lib64":
1722 base_arch_list.append("amd64")
1723
1724 with open(self.apt_conf_file, "w+") as apt_conf:
1725 with open(self.d.expand("${STAGING_ETCDIR_NATIVE}/apt/apt.conf.sample")) as apt_conf_sample:
1726 for line in apt_conf_sample.read().split("\n"):
1727 match_arch = re.match(" Architecture \".*\";$", line)
1728 architectures = ""
1729 if match_arch:
1730 for base_arch in base_arch_list:
1731 architectures += "\"%s\";" % base_arch
1732 apt_conf.write(" Architectures {%s};\n" % architectures)
1733 apt_conf.write(" Architecture \"%s\";\n" % base_archs)
1734 else:
1735 line = re.sub("#ROOTFS#", self.target_rootfs, line)
1736 line = re.sub("#APTCONF#", self.apt_conf_dir, line)
1737 apt_conf.write(line + "\n")
1738
1739 target_dpkg_dir = "%s/var/lib/dpkg" % self.target_rootfs
1740 bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "info"))
1741
1742 bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "updates"))
1743
1744 if not os.path.exists(os.path.join(target_dpkg_dir, "status")):
1745 open(os.path.join(target_dpkg_dir, "status"), "w+").close()
1746 if not os.path.exists(os.path.join(target_dpkg_dir, "available")):
1747 open(os.path.join(target_dpkg_dir, "available"), "w+").close()
1748
1749 def remove_packaging_data(self):
1750 bb.utils.remove(os.path.join(self.target_rootfs,
1751 self.d.getVar('opkglibdir', True)), True)
1752 bb.utils.remove(self.target_rootfs + "/var/lib/dpkg/", True)
1753
1754 def fix_broken_dependencies(self):
1755 os.environ['APT_CONFIG'] = self.apt_conf_file
1756
1757 cmd = "%s %s -f install" % (self.apt_get_cmd, self.apt_args)
1758
1759 try:
1760 subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
1761 except subprocess.CalledProcessError as e:
1762 bb.fatal("Cannot fix broken dependencies. Command '%s' "
1763 "returned %d:\n%s" % (cmd, e.returncode, e.output))
1764
1765 def list_installed(self, format=None):
1766 return DpkgPkgsList(self.d, self.target_rootfs).list()
1767
1768
1769def generate_index_files(d):
1770 classes = d.getVar('PACKAGE_CLASSES', True).replace("package_", "").split()
1771
1772 indexer_map = {
1773 "rpm": (RpmIndexer, d.getVar('DEPLOY_DIR_RPM', True)),
1774 "ipk": (OpkgIndexer, d.getVar('DEPLOY_DIR_IPK', True)),
1775 "deb": (DpkgIndexer, d.getVar('DEPLOY_DIR_DEB', True))
1776 }
1777
1778 result = None
1779
1780 for pkg_class in classes:
1781 if not pkg_class in indexer_map:
1782 continue
1783
1784 if os.path.exists(indexer_map[pkg_class][1]):
1785 result = indexer_map[pkg_class][0](d, indexer_map[pkg_class][1]).write_index()
1786
1787 if result is not None:
1788 bb.fatal(result)
1789
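# Illustrative usage sketch; the task below is assumed, not taken from this
# file. From bitbake metadata the helper only needs the datastore:
#
#   python do_package_index () {
#       from oe.package_manager import generate_index_files
#       generate_index_files(d)
#   }
#
# Indexes are only written for backends listed in PACKAGE_CLASSES whose
# deploy directory already exists.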
1790if __name__ == "__main__":
1791 """
1792 We should be able to run this as a standalone script, from outside bitbake
1793 environment.
1794 """
1795 """
1796 TBD
1797 """
diff --git a/meta/lib/oe/packagedata.py b/meta/lib/oe/packagedata.py
new file mode 100644
index 0000000000..cd5f0445f5
--- /dev/null
+++ b/meta/lib/oe/packagedata.py
@@ -0,0 +1,94 @@
1import codecs
2
3def packaged(pkg, d):
4 return os.access(get_subpkgedata_fn(pkg, d) + '.packaged', os.R_OK)
5
6def read_pkgdatafile(fn):
7 pkgdata = {}
8
9 def decode(str):
10 c = codecs.getdecoder("string_escape")
11 return c(str)[0]
12
13 if os.access(fn, os.R_OK):
14 import re
15 f = open(fn, 'r')
16 lines = f.readlines()
17 f.close()
18 r = re.compile("([^:]+):\s*(.*)")
19 for l in lines:
20 m = r.match(l)
21 if m:
22 pkgdata[m.group(1)] = decode(m.group(2))
23
24 return pkgdata
25
26def get_subpkgedata_fn(pkg, d):
27 return d.expand('${PKGDATA_DIR}/runtime/%s' % pkg)
28
29def has_subpkgdata(pkg, d):
30 return os.access(get_subpkgedata_fn(pkg, d), os.R_OK)
31
32def read_subpkgdata(pkg, d):
33 return read_pkgdatafile(get_subpkgedata_fn(pkg, d))
34
35def has_pkgdata(pn, d):
36 fn = d.expand('${PKGDATA_DIR}/%s' % pn)
37 return os.access(fn, os.R_OK)
38
39def read_pkgdata(pn, d):
40 fn = d.expand('${PKGDATA_DIR}/%s' % pn)
41 return read_pkgdatafile(fn)
42
43#
44# Collapse FOO_pkg variables into FOO
45#
46def read_subpkgdata_dict(pkg, d):
47 ret = {}
48 subd = read_pkgdatafile(get_subpkgedata_fn(pkg, d))
49 for var in subd:
50 newvar = var.replace("_" + pkg, "")
51 if newvar == var and var + "_" + pkg in subd:
52 continue
53 ret[newvar] = subd[var]
54 return ret
55
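# Illustrative example, not part of the original module. With a runtime
# pkgdata file containing (values hypothetical)
#
#   PKG_busybox: busybox
#   FILES_busybox: /bin/busybox
#
# read_subpkgdata_dict("busybox", d) returns
# {'PKG': 'busybox', 'FILES': '/bin/busybox'}, i.e. the _busybox suffix is
# collapsed away.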
56def _pkgmap(d):
57 """Return a dictionary mapping package to recipe name."""
58
59 pkgdatadir = d.getVar("PKGDATA_DIR", True)
60
61 pkgmap = {}
62 try:
63 files = os.listdir(pkgdatadir)
64 except OSError:
65 bb.warn("No files in %s?" % pkgdatadir)
66 files = []
67
68 for pn in filter(lambda f: not os.path.isdir(os.path.join(pkgdatadir, f)), files):
69 try:
70 pkgdata = read_pkgdatafile(os.path.join(pkgdatadir, pn))
71 except OSError:
72 continue
73
74 packages = pkgdata.get("PACKAGES") or ""
75 for pkg in packages.split():
76 pkgmap[pkg] = pn
77
78 return pkgmap
79
80def pkgmap(d):
81 """Return a dictionary mapping package to recipe name.
82 Cache the mapping in the metadata"""
83
84 pkgmap_data = d.getVar("__pkgmap_data", False)
85 if pkgmap_data is None:
86 pkgmap_data = _pkgmap(d)
87 d.setVar("__pkgmap_data", pkgmap_data)
88
89 return pkgmap_data
90
91def recipename(pkg, d):
92 """Return the recipe name for the given binary package name."""
93
94 return pkgmap(d).get(pkg)
diff --git a/meta/lib/oe/packagegroup.py b/meta/lib/oe/packagegroup.py
new file mode 100644
index 0000000000..12eb4212ff
--- /dev/null
+++ b/meta/lib/oe/packagegroup.py
@@ -0,0 +1,36 @@
1import itertools
2
3def is_optional(feature, d):
4 packages = d.getVar("FEATURE_PACKAGES_%s" % feature, True)
5 if packages:
6 return bool(d.getVarFlag("FEATURE_PACKAGES_%s" % feature, "optional"))
7 else:
8 return bool(d.getVarFlag("PACKAGE_GROUP_%s" % feature, "optional"))
9
10def packages(features, d):
11 for feature in features:
12 packages = d.getVar("FEATURE_PACKAGES_%s" % feature, True)
13 if not packages:
14 packages = d.getVar("PACKAGE_GROUP_%s" % feature, True)
15 for pkg in (packages or "").split():
16 yield pkg
17
18def required_packages(features, d):
19 req = filter(lambda feature: not is_optional(feature, d), features)
20 return packages(req, d)
21
22def optional_packages(features, d):
23 opt = filter(lambda feature: is_optional(feature, d), features)
24 return packages(opt, d)
25
26def active_packages(features, d):
27 return itertools.chain(required_packages(features, d),
28 optional_packages(features, d))
29
30def active_recipes(features, d):
31 import oe.packagedata
32
33 for pkg in active_packages(features, d):
34 recipe = oe.packagedata.recipename(pkg, d)
35 if recipe:
36 yield recipe
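# Illustrative usage sketch; feature names are hypothetical. Resolving the
# package set for some image features while keeping required and optional
# packages separate:
#
#   features = ["ssh-server-openssh", "tools-debug"]
#   required = list(required_packages(features, d))
#   optional = list(optional_packages(features, d))
#   recipes = list(active_recipes(features, d))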
diff --git a/meta/lib/oe/patch.py b/meta/lib/oe/patch.py
new file mode 100644
index 0000000000..b085c9d6b5
--- /dev/null
+++ b/meta/lib/oe/patch.py
@@ -0,0 +1,447 @@
1import oe.path
2
3class NotFoundError(bb.BBHandledException):
4 def __init__(self, path):
5 self.path = path
6
7 def __str__(self):
8 return "Error: %s not found." % self.path
9
10class CmdError(bb.BBHandledException):
11 def __init__(self, exitstatus, output):
12 self.status = exitstatus
13 self.output = output
14
15 def __str__(self):
16 return "Command Error: exit status: %d Output:\n%s" % (self.status, self.output)
17
18
19def runcmd(args, dir = None):
20 import pipes
21
22 if dir:
23 olddir = os.path.abspath(os.curdir)
24 if not os.path.exists(dir):
25 raise NotFoundError(dir)
26 os.chdir(dir)
27 # print("cwd: %s -> %s" % (olddir, dir))
28
29 try:
30 args = [ pipes.quote(str(arg)) for arg in args ]
31 cmd = " ".join(args)
32 # print("cmd: %s" % cmd)
33 (exitstatus, output) = oe.utils.getstatusoutput(cmd)
34 if exitstatus != 0:
35 raise CmdError(exitstatus >> 8, output)
36 return output
37
38 finally:
39 if dir:
40 os.chdir(olddir)
41
42class PatchError(Exception):
43 def __init__(self, msg):
44 self.msg = msg
45
46 def __str__(self):
47 return "Patch Error: %s" % self.msg
48
49class PatchSet(object):
50 defaults = {
51 "strippath": 1
52 }
53
54 def __init__(self, dir, d):
55 self.dir = dir
56 self.d = d
57 self.patches = []
58 self._current = None
59
60 def current(self):
61 return self._current
62
63 def Clean(self):
64 """
65 Clean out the patch set. Generally includes unapplying all
66 patches and wiping out all associated metadata.
67 """
68 raise NotImplementedError()
69
70 def Import(self, patch, force):
71 if not patch.get("file"):
72 if not patch.get("remote"):
73 raise PatchError("Patch file must be specified in patch import.")
74 else:
75 patch["file"] = bb.fetch2.localpath(patch["remote"], self.d)
76
77 for param in PatchSet.defaults:
78 if not patch.get(param):
79 patch[param] = PatchSet.defaults[param]
80
81 if patch.get("remote"):
82 patch["file"] = bb.data.expand(bb.fetch2.localpath(patch["remote"], self.d), self.d)
83
84 patch["filemd5"] = bb.utils.md5_file(patch["file"])
85
86 def Push(self, force):
87 raise NotImplementedError()
88
89 def Pop(self, force):
90 raise NotImplementedError()
91
92 def Refresh(self, remote = None, all = None):
93 raise NotImplementedError()
94
95
96class PatchTree(PatchSet):
97 def __init__(self, dir, d):
98 PatchSet.__init__(self, dir, d)
99 self.patchdir = os.path.join(self.dir, 'patches')
100 self.seriespath = os.path.join(self.dir, 'patches', 'series')
101 bb.utils.mkdirhier(self.patchdir)
102
103 def _appendPatchFile(self, patch, strippath):
104 with open(self.seriespath, 'a') as f:
105 f.write(os.path.basename(patch) + "," + strippath + "\n")
106 shellcmd = ["cat", patch, ">" , self.patchdir + "/" + os.path.basename(patch)]
107 runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
108
109 def _removePatch(self, p):
110 patch = {}
111 patch['file'] = p.split(",")[0]
112 patch['strippath'] = p.split(",")[1]
113 self._applypatch(patch, False, True)
114
115 def _removePatchFile(self, all = False):
116 if not os.path.exists(self.seriespath):
117 return
118 patches = open(self.seriespath, 'r+').readlines()
119 if all:
120 for p in reversed(patches):
121 self._removePatch(os.path.join(self.patchdir, p.strip()))
122 patches = []
123 else:
124 self._removePatch(os.path.join(self.patchdir, patches[-1].strip()))
125 patches.pop()
126 with open(self.seriespath, 'w') as f:
127 for p in patches:
128 f.write(p)
129
130 def Import(self, patch, force = None):
131 """"""
132 PatchSet.Import(self, patch, force)
133
134 if self._current is not None:
135 i = self._current + 1
136 else:
137 i = 0
138 self.patches.insert(i, patch)
139
140 def _applypatch(self, patch, force = False, reverse = False, run = True):
141 shellcmd = ["cat", patch['file'], "|", "patch", "-p", patch['strippath']]
142 if reverse:
143 shellcmd.append('-R')
144
145 if not run:
146 return "sh -c " + " ".join(shellcmd)
147
148 if not force:
149 shellcmd.append('--dry-run')
150
151 output = runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
152
153 if force:
154 return
155
156 shellcmd.pop(len(shellcmd) - 1)
157 output = runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
158
159 if not reverse:
160 self._appendPatchFile(patch['file'], patch['strippath'])
161
162 return output
163
164 def Push(self, force = False, all = False, run = True):
165 bb.note("self._current is %s" % self._current)
166 bb.note("patches is %s" % self.patches)
167 if all:
168 for i in self.patches:
169 bb.note("applying patch %s" % i)
170 self._applypatch(i, force)
171 self._current = i
172 else:
173 if self._current is not None:
174 next = self._current + 1
175 else:
176 next = 0
177
178 bb.note("applying patch %s" % self.patches[next])
179 ret = self._applypatch(self.patches[next], force)
180
181 self._current = next
182 return ret
183
184 def Pop(self, force = None, all = None):
185 if all:
186 self._removePatchFile(True)
187 self._current = None
188 else:
189 self._removePatchFile(False)
190
191 if self._current == 0:
192 self._current = None
193
194 if self._current is not None:
195 self._current = self._current - 1
196
197 def Clean(self):
198 """"""
199 self.Pop(all=True)
200
201class GitApplyTree(PatchTree):
202 def __init__(self, dir, d):
203 PatchTree.__init__(self, dir, d)
204
205 def _applypatch(self, patch, force = False, reverse = False, run = True):
206 def _applypatchhelper(shellcmd, patch, force = False, reverse = False, run = True):
207 if reverse:
208 shellcmd.append('-R')
209
210 shellcmd.append(patch['file'])
211
212 if not run:
213 return "sh -c " + " ".join(shellcmd)
214
215 return runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
216
217 try:
218 shellcmd = ["git", "--work-tree=.", "am", "-3", "-p%s" % patch['strippath']]
219 return _applypatchhelper(shellcmd, patch, force, reverse, run)
220 except CmdError:
221 shellcmd = ["git", "--git-dir=.", "apply", "-p%s" % patch['strippath']]
222 return _applypatchhelper(shellcmd, patch, force, reverse, run)
223
224
225class QuiltTree(PatchSet):
226 def _runcmd(self, args, run = True):
227 quiltrc = self.d.getVar('QUILTRCFILE', True)
228 if not run:
229 return ["quilt"] + ["--quiltrc"] + [quiltrc] + args
230 runcmd(["quilt"] + ["--quiltrc"] + [quiltrc] + args, self.dir)
231
232 def _quiltpatchpath(self, file):
233 return os.path.join(self.dir, "patches", os.path.basename(file))
234
235
236 def __init__(self, dir, d):
237 PatchSet.__init__(self, dir, d)
238 self.initialized = False
239 p = os.path.join(self.dir, 'patches')
240 if not os.path.exists(p):
241 os.makedirs(p)
242
243 def Clean(self):
244 try:
245 self._runcmd(["pop", "-a", "-f"])
246 oe.path.remove(os.path.join(self.dir, "patches","series"))
247 except Exception:
248 pass
249 self.initialized = True
250
251 def InitFromDir(self):
252 # read series -> self.patches
253 seriespath = os.path.join(self.dir, 'patches', 'series')
254 if not os.path.exists(self.dir):
255 raise NotFoundError(self.dir)
256 if os.path.exists(seriespath):
257 series = file(seriespath, 'r')
258 for line in series.readlines():
259 patch = {}
260 parts = line.strip().split()
261 patch["quiltfile"] = self._quiltpatchpath(parts[0])
262 patch["quiltfilemd5"] = bb.utils.md5_file(patch["quiltfile"])
263 if len(parts) > 1:
264 patch["strippath"] = parts[1][2:]
265 self.patches.append(patch)
266 series.close()
267
268 # determine which patches are applied -> self._current
269 try:
270 output = runcmd(["quilt", "applied"], self.dir)
271 except CmdError:
272 import sys
273 if sys.exc_value.output.strip() == "No patches applied":
274 return
275 else:
276 raise
277 output = [val for val in output.split('\n') if not val.startswith('#')]
278 for patch in self.patches:
279 if os.path.basename(patch["quiltfile"]) == output[-1]:
280 self._current = self.patches.index(patch)
281 self.initialized = True
282
283 def Import(self, patch, force = None):
284 if not self.initialized:
285 self.InitFromDir()
286 PatchSet.Import(self, patch, force)
287 oe.path.symlink(patch["file"], self._quiltpatchpath(patch["file"]), force=True)
288 f = open(os.path.join(self.dir, "patches", "series"), "a")
289 f.write(os.path.basename(patch["file"]) + " -p" + patch["strippath"]+"\n")
290 f.close()
291 patch["quiltfile"] = self._quiltpatchpath(patch["file"])
292 patch["quiltfilemd5"] = bb.utils.md5_file(patch["quiltfile"])
293
294 # TODO: determine if the file being imported:
295 # 1) is already imported, and is the same
296 # 2) is already imported, but differs
297
298 self.patches.insert(self._current or 0, patch)
299
300
301 def Push(self, force = False, all = False, run = True):
302 # quilt push [-f]
303
304 args = ["push"]
305 if force:
306 args.append("-f")
307 if all:
308 args.append("-a")
309 if not run:
310 return self._runcmd(args, run)
311
312 self._runcmd(args)
313
314 if self._current is not None:
315 self._current = self._current + 1
316 else:
317 self._current = 0
318
319 def Pop(self, force = None, all = None):
320 # quilt pop [-f]
321 args = ["pop"]
322 if force:
323 args.append("-f")
324 if all:
325 args.append("-a")
326
327 self._runcmd(args)
328
329 if self._current == 0:
330 self._current = None
331
332 if self._current is not None:
333 self._current = self._current - 1
334
335 def Refresh(self, **kwargs):
336 if kwargs.get("remote"):
337 patch = self.patches[kwargs["patch"]]
338 if not patch:
339 raise PatchError("No patch found at index %s in patchset." % kwargs["patch"])
340 (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(patch["remote"])
341 if type == "file":
342 import shutil
343 if not patch.get("file") and patch.get("remote"):
344 patch["file"] = bb.fetch2.localpath(patch["remote"], self.d)
345
346 shutil.copyfile(patch["quiltfile"], patch["file"])
347 else:
348 raise PatchError("Unable to do a remote refresh of %s, unsupported remote url scheme %s." % (os.path.basename(patch["quiltfile"]), type))
349 else:
350 # quilt refresh
351 args = ["refresh"]
352 if kwargs.get("quiltfile"):
353 args.append(os.path.basename(kwargs["quiltfile"]))
354 elif kwargs.get("patch"):
355 args.append(os.path.basename(self.patches[kwargs["patch"]]["quiltfile"]))
356 self._runcmd(args)
357
358class Resolver(object):
359 def __init__(self, patchset, terminal):
360 raise NotImplementedError()
361
362 def Resolve(self):
363 raise NotImplementedError()
364
365 def Revert(self):
366 raise NotImplementedError()
367
368 def Finalize(self):
369 raise NotImplementedError()
370
371class NOOPResolver(Resolver):
372 def __init__(self, patchset, terminal):
373 self.patchset = patchset
374 self.terminal = terminal
375
376 def Resolve(self):
377 olddir = os.path.abspath(os.curdir)
378 os.chdir(self.patchset.dir)
379 try:
380 self.patchset.Push()
381 except Exception:
382 import sys
383 os.chdir(olddir)
384 raise
385
386# Patch resolver which relies on the user doing all the work involved in the
387# resolution, with the exception of refreshing the remote copy of the patch
388# files (the urls).
389class UserResolver(Resolver):
390 def __init__(self, patchset, terminal):
391 self.patchset = patchset
392 self.terminal = terminal
393
394 # Force a push in the patchset, then drop to a shell for the user to
395 # resolve any rejected hunks
396 def Resolve(self):
397 olddir = os.path.abspath(os.curdir)
398 os.chdir(self.patchset.dir)
399 try:
400 self.patchset.Push(False)
401 except CmdError as v:
402 # Patch application failed
403 patchcmd = self.patchset.Push(True, False, False)
404
405 t = self.patchset.d.getVar('T', True)
406 if not t:
407 bb.msg.fatal("Build", "T not set")
408 bb.utils.mkdirhier(t)
409 import random
410 rcfile = "%s/bashrc.%s.%s" % (t, str(os.getpid()), random.random())
411 f = open(rcfile, "w")
412 f.write("echo '*** Manual patch resolution mode ***'\n")
413 f.write("echo 'Dropping to a shell, so patch rejects can be fixed manually.'\n")
414 f.write("echo 'Run \"quilt refresh\" when patch is corrected, press CTRL+D to exit.'\n")
415 f.write("echo ''\n")
416 f.write(" ".join(patchcmd) + "\n")
417 f.close()
418 os.chmod(rcfile, 0775)
419
420 self.terminal("bash --rcfile " + rcfile, 'Patch Rejects: Please fix patch rejects manually', self.patchset.d)
421
422 # Construct a new PatchSet after the user's changes, compare the
423 # sets, checking patches for modifications, and doing a remote
424 # refresh on each.
425 oldpatchset = self.patchset
426 self.patchset = oldpatchset.__class__(self.patchset.dir, self.patchset.d)
427
428 for patch in self.patchset.patches:
429 oldpatch = None
430 for opatch in oldpatchset.patches:
431 if opatch["quiltfile"] == patch["quiltfile"]:
432 oldpatch = opatch
433
434 if oldpatch:
435 patch["remote"] = oldpatch["remote"]
436 if patch["quiltfile"] == oldpatch["quiltfile"]:
437 if patch["quiltfilemd5"] != oldpatch["quiltfilemd5"]:
438 bb.note("Patch %s has changed, updating remote url %s" % (os.path.basename(patch["quiltfile"]), patch["remote"]))
439 # user change? remote refresh
440 self.patchset.Refresh(remote=True, patch=self.patchset.patches.index(patch))
441 else:
442 # User did not fix the problem. Abort.
443 raise PatchError("Patch application failed, and user did not fix and refresh the patch.")
444 except Exception:
445 os.chdir(olddir)
446 raise
447 os.chdir(olddir)
diff --git a/meta/lib/oe/path.py b/meta/lib/oe/path.py
new file mode 100644
index 0000000000..413ebfb395
--- /dev/null
+++ b/meta/lib/oe/path.py
@@ -0,0 +1,243 @@
1import errno
2import glob
3import shutil
4import subprocess
5import os.path
6
7def join(*paths):
8 """Like os.path.join but doesn't treat absolute RHS specially"""
9 return os.path.normpath("/".join(paths))
10
11def relative(src, dest):
12 """ Return a relative path from src to dest.
13
14 >>> relative("/usr/bin", "/tmp/foo/bar")
15 ../../tmp/foo/bar
16
17 >>> relative("/usr/bin", "/usr/lib")
18 ../lib
19
20 >>> relative("/tmp", "/tmp/foo/bar")
21 foo/bar
22 """
23
24 return os.path.relpath(dest, src)
25
26def make_relative_symlink(path):
27 """ Convert an absolute symlink to a relative one """
28 if not os.path.islink(path):
29 return
30 link = os.readlink(path)
31 if not os.path.isabs(link):
32 return
33
34 # find the common ancestor directory
35 ancestor = path
36 depth = 0
37 while ancestor and not link.startswith(ancestor):
38 ancestor = ancestor.rpartition('/')[0]
39 depth += 1
40
41 if not ancestor:
42 print("make_relative_symlink() Error: unable to find the common ancestor of %s and its target" % path)
43 return
44
45 base = link.partition(ancestor)[2].strip('/')
46 while depth > 1:
47 base = "../" + base
48 depth -= 1
49
50 os.remove(path)
51 os.symlink(base, path)
52
53def format_display(path, metadata):
54 """ Prepare a path for display to the user. """
55 rel = relative(metadata.getVar("TOPDIR", True), path)
56 if len(rel) > len(path):
57 return path
58 else:
59 return rel
60
61def copytree(src, dst):
62 # We could use something like shutil.copytree here but it turns out
63 # to be slow. It takes twice as long copying to an empty directory.
64 # If dst already has contents, performance can be 15 times slower.
65 # This way we also preserve hardlinks between files in the tree.
66
67 bb.utils.mkdirhier(dst)
68 cmd = 'tar -cf - -C %s -p . | tar -xf - -C %s' % (src, dst)
69 check_output(cmd, shell=True, stderr=subprocess.STDOUT)
70
71def copyhardlinktree(src, dst):
72 """ Make the hard link when possible, otherwise copy. """
73 bb.utils.mkdirhier(dst)
74 if os.path.isdir(src) and not len(os.listdir(src)):
75 return
76
77 if (os.stat(src).st_dev == os.stat(dst).st_dev):
78 # Need to copy directories only with tar first since cp will error if two
79 # writers try and create a directory at the same time
80 cmd = 'cd %s; find . -type d -print | tar -cf - -C %s -p --files-from - --no-recursion | tar -xf - -C %s' % (src, src, dst)
81 check_output(cmd, shell=True, stderr=subprocess.STDOUT)
82 cmd = 'cd %s; find . -print0 | cpio --null -pdlu %s' % (src, dst)
83 check_output(cmd, shell=True, stderr=subprocess.STDOUT)
84 else:
85 copytree(src, dst)
86
87def remove(path, recurse=True):
88 """Equivalent to rm -f or rm -rf"""
89 for name in glob.glob(path):
90 try:
91 os.unlink(name)
92 except OSError as exc:
93 if recurse and exc.errno == errno.EISDIR:
94 shutil.rmtree(name)
95 elif exc.errno != errno.ENOENT:
96 raise
97
98def symlink(source, destination, force=False):
99 """Create a symbolic link"""
100 try:
101 if force:
102 remove(destination)
103 os.symlink(source, destination)
104 except OSError as e:
105 if e.errno != errno.EEXIST or os.readlink(destination) != source:
106 raise
107
108class CalledProcessError(Exception):
109 def __init__(self, retcode, cmd, output = None):
110 self.retcode = retcode
111 self.cmd = cmd
112 self.output = output
113 def __str__(self):
114 return "Command '%s' returned non-zero exit status %d with output %s" % (self.cmd, self.retcode, self.output)
115
116# Not needed when we move to python 2.7
117def check_output(*popenargs, **kwargs):
118 r"""Run command with arguments and return its output as a byte string.
119
120 If the exit code was non-zero it raises a CalledProcessError. The
121 CalledProcessError object will have the return code in the returncode
122 attribute and output in the output attribute.
123
124 The arguments are the same as for the Popen constructor. Example:
125
126 >>> check_output(["ls", "-l", "/dev/null"])
127 'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
128
129 The stdout argument is not allowed as it is used internally.
130 To capture standard error in the result, use stderr=STDOUT.
131
132 >>> check_output(["/bin/sh", "-c",
133 ... "ls -l non_existent_file ; exit 0"],
134 ... stderr=STDOUT)
135 'ls: non_existent_file: No such file or directory\n'
136 """
137 if 'stdout' in kwargs:
138 raise ValueError('stdout argument not allowed, it will be overridden.')
139 process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
140 output, unused_err = process.communicate()
141 retcode = process.poll()
142 if retcode:
143 cmd = kwargs.get("args")
144 if cmd is None:
145 cmd = popenargs[0]
146 raise CalledProcessError(retcode, cmd, output=output)
147 return output
148
149def find(dir, **walkoptions):
150 """ Given a directory, recurses into that directory,
151 returning all files as absolute paths. """
152
153 for root, dirs, files in os.walk(dir, **walkoptions):
154 for file in files:
155 yield os.path.join(root, file)
156
157
158## realpath() related functions
159def __is_path_below(file, root):
160 return (file + os.path.sep).startswith(root)
161
162def __realpath_rel(start, rel_path, root, loop_cnt, assume_dir):
163 """Calculates real path of symlink 'start' + 'rel_path' below
164 'root'; no part of 'start' below 'root' must contain symlinks. """
165 have_dir = True
166
167 for d in rel_path.split(os.path.sep):
168 if not have_dir and not assume_dir:
169 raise OSError(errno.ENOENT, "no such directory %s" % start)
170
171 if d == os.path.pardir: # '..'
172 if len(start) >= len(root):
173 # do not follow '..' before root
174 start = os.path.dirname(start)
175 else:
176 # emit warning?
177 pass
178 else:
179 (start, have_dir) = __realpath(os.path.join(start, d),
180 root, loop_cnt, assume_dir)
181
182 assert(__is_path_below(start, root))
183
184 return start
185
186def __realpath(file, root, loop_cnt, assume_dir):
187 while os.path.islink(file) and len(file) >= len(root):
188 if loop_cnt == 0:
189 raise OSError(errno.ELOOP, file)
190
191 loop_cnt -= 1
192 target = os.path.normpath(os.readlink(file))
193
194 if not os.path.isabs(target):
195 tdir = os.path.dirname(file)
196 assert(__is_path_below(tdir, root))
197 else:
198 tdir = root
199
200 file = __realpath_rel(tdir, target, root, loop_cnt, assume_dir)
201
202 try:
203 is_dir = os.path.isdir(file)
204 except:
205 is_dir = False
206
207 return (file, is_dir)
208
209def realpath(file, root, use_physdir = True, loop_cnt = 100, assume_dir = False):
210 """ Returns the canonical path of 'file' with assuming a
211 toplevel 'root' directory. When 'use_physdir' is set, all
212 preceding path components of 'file' will be resolved first;
213 this flag should be set unless it is guaranteed that there is
214 no symlink in the path. When 'assume_dir' is not set, missing
215 path components will raise an ENOENT error"""
216
217 root = os.path.normpath(root)
218 file = os.path.normpath(file)
219
220 if not root.endswith(os.path.sep):
221 # letting root end with '/' makes some things easier
222 root = root + os.path.sep
223
224 if not __is_path_below(file, root):
225 raise OSError(errno.EINVAL, "file '%s' is not below root" % file)
226
227 try:
228 if use_physdir:
229 file = __realpath_rel(root, file[(len(root) - 1):], root, loop_cnt, assume_dir)
230 else:
231 file = __realpath(file, root, loop_cnt, assume_dir)[0]
232 except OSError as e:
233 if e.errno == errno.ELOOP:
234 # make ELOOP more readable; without catching it here, a
235 # backtrace with 100s of OSError exceptions would be
236 # printed instead
237 raise OSError(errno.ELOOP,
238 "too much recursions while resolving '%s'; loop in '%s'" %
239 (file, e.strerror))
240
241 raise
242
243 return file
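# Illustrative usage sketch, not part of the original module. Resolving a
# symlink inside a target rootfs without escaping it: if rootfs + "/bin/sh"
# is a symlink to /bin/busybox (paths hypothetical), then
#
#   realpath(rootfs + "/bin/sh", rootfs)
#
# returns rootfs + "/bin/busybox" rather than the build host's /bin/busybox.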
diff --git a/meta/lib/oe/prservice.py b/meta/lib/oe/prservice.py
new file mode 100644
index 0000000000..b0cbcb1fbc
--- /dev/null
+++ b/meta/lib/oe/prservice.py
@@ -0,0 +1,126 @@
1
2def prserv_make_conn(d, check = False):
3 import prserv.serv
4 host_params = filter(None, (d.getVar("PRSERV_HOST", True) or '').split(':'))
5 try:
6 conn = None
7 conn = prserv.serv.PRServerConnection(host_params[0], int(host_params[1]))
8 if check:
9 if not conn.ping():
10 raise Exception('service not available')
11 d.setVar("__PRSERV_CONN",conn)
12 except Exception, exc:
13 bb.fatal("Connecting to PR service %s:%s failed: %s" % (host_params[0], host_params[1], str(exc)))
14
15 return conn
16
17def prserv_dump_db(d):
18 if not d.getVar('PRSERV_HOST', True):
19 bb.error("Not using network based PR service")
20 return None
21
22 conn = d.getVar("__PRSERV_CONN", True)
23 if conn is None:
24 conn = prserv_make_conn(d)
25 if conn is None:
26 bb.error("Failed to connect to the remote PR service")
27 return None
28
29 #dump db
30 opt_version = d.getVar('PRSERV_DUMPOPT_VERSION', True)
31 opt_pkgarch = d.getVar('PRSERV_DUMPOPT_PKGARCH', True)
32 opt_checksum = d.getVar('PRSERV_DUMPOPT_CHECKSUM', True)
33 opt_col = ("1" == d.getVar('PRSERV_DUMPOPT_COL', True))
34 return conn.export(opt_version, opt_pkgarch, opt_checksum, opt_col)
35
36def prserv_import_db(d, filter_version=None, filter_pkgarch=None, filter_checksum=None):
37 if not d.getVar('PRSERV_HOST', True):
38 bb.error("Not using network based PR service")
39 return None
40
41 conn = d.getVar("__PRSERV_CONN", True)
42 if conn is None:
43 conn = prserv_make_conn(d)
44 if conn is None:
45            bb.error("Failed to connect to remote PR service")
46 return None
47 #get the entry values
48 imported = []
49 prefix = "PRAUTO$"
50 for v in d.keys():
51 if v.startswith(prefix):
52 (remain, sep, checksum) = v.rpartition('$')
53 (remain, sep, pkgarch) = remain.rpartition('$')
54 (remain, sep, version) = remain.rpartition('$')
55 if (remain + '$' != prefix) or \
56 (filter_version and filter_version != version) or \
57 (filter_pkgarch and filter_pkgarch != pkgarch) or \
58 (filter_checksum and filter_checksum != checksum):
59 continue
60 try:
61 value = int(d.getVar(remain + '$' + version + '$' + pkgarch + '$' + checksum, True))
62 except BaseException as exc:
63                bb.debug(1, "Not a valid value of %s: %s" % (v, str(exc)))
64 continue
65 ret = conn.importone(version,pkgarch,checksum,value)
66 if ret != value:
67 bb.error("importing(%s,%s,%s,%d) failed. DB may have larger value %d" % (version,pkgarch,checksum,value,ret))
68 else:
69 imported.append((version,pkgarch,checksum,value))
70 return imported
71
72def prserv_export_tofile(d, metainfo, datainfo, lockdown, nomax=False):
73 import bb.utils
74    # initialize the output file
75 bb.utils.mkdirhier(d.getVar('PRSERV_DUMPDIR', True))
76 df = d.getVar('PRSERV_DUMPFILE', True)
77 #write data
78 lf = bb.utils.lockfile("%s.lock" % df)
79 f = open(df, "a")
80 if metainfo:
81 #dump column info
82        f.write("#PR_core_ver = \"%s\"\n\n" % metainfo['core_ver'])
83 f.write("#Table: %s\n" % metainfo['tbl_name'])
84 f.write("#Columns:\n")
85 f.write("#name \t type \t notn \t dflt \t pk\n")
86 f.write("#----------\t --------\t --------\t --------\t ----\n")
87 for i in range(len(metainfo['col_info'])):
88 f.write("#%10s\t %8s\t %8s\t %8s\t %4s\n" %
89 (metainfo['col_info'][i]['name'],
90 metainfo['col_info'][i]['type'],
91 metainfo['col_info'][i]['notnull'],
92 metainfo['col_info'][i]['dflt_value'],
93 metainfo['col_info'][i]['pk']))
94 f.write("\n")
95
96 if lockdown:
97 f.write("PRSERV_LOCKDOWN = \"1\"\n\n")
98
99 if datainfo:
100 idx = {}
101 for i in range(len(datainfo)):
102 pkgarch = datainfo[i]['pkgarch']
103 value = datainfo[i]['value']
104 if pkgarch not in idx:
105 idx[pkgarch] = i
106 elif value > datainfo[idx[pkgarch]]['value']:
107 idx[pkgarch] = i
108 f.write("PRAUTO$%s$%s$%s = \"%s\"\n" %
109 (str(datainfo[i]['version']), pkgarch, str(datainfo[i]['checksum']), str(value)))
110 if not nomax:
111 for i in idx:
112 f.write("PRAUTO_%s_%s = \"%s\"\n" % (str(datainfo[idx[i]]['version']),str(datainfo[idx[i]]['pkgarch']),str(datainfo[idx[i]]['value'])))
113 f.close()
114 bb.utils.unlockfile(lf)
115
116def prserv_check_avail(d):
117 host_params = filter(None, (d.getVar("PRSERV_HOST", True) or '').split(':'))
118 try:
119 if len(host_params) != 2:
120 raise TypeError
121 else:
122 int(host_params[1])
123 except TypeError:
124 bb.fatal('Undefined/incorrect PRSERV_HOST value. Format: "host:port"')
125 else:
126 prserv_make_conn(d, True)
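As an aside, the datastore keys consumed by prserv_import_db() above follow the layout PRAUTO$<version>$<pkgarch>$<checksum>. A small self-contained sketch of the rpartition-based parse, with invented example values:

    # Parse a PRAUTO key the same way prserv_import_db() does.
    key = "PRAUTO$2.0-r3$cortexa9hf-vfp-neon$0123456789abcdef"

    (remain, sep, checksum) = key.rpartition('$')
    (remain, sep, pkgarch) = remain.rpartition('$')
    (remain, sep, version) = remain.rpartition('$')

    assert remain + '$' == "PRAUTO$"
    assert (version, pkgarch, checksum) == \
        ("2.0-r3", "cortexa9hf-vfp-neon", "0123456789abcdef")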
diff --git a/meta/lib/oe/qa.py b/meta/lib/oe/qa.py
new file mode 100644
index 0000000000..d5cdaa0fcd
--- /dev/null
+++ b/meta/lib/oe/qa.py
@@ -0,0 +1,111 @@
1class ELFFile:
2 EI_NIDENT = 16
3
4 EI_CLASS = 4
5 EI_DATA = 5
6 EI_VERSION = 6
7 EI_OSABI = 7
8 EI_ABIVERSION = 8
9
10 # possible values for EI_CLASS
11 ELFCLASSNONE = 0
12 ELFCLASS32 = 1
13 ELFCLASS64 = 2
14
15 # possible value for EI_VERSION
16 EV_CURRENT = 1
17
18 # possible values for EI_DATA
19 ELFDATANONE = 0
20 ELFDATA2LSB = 1
21 ELFDATA2MSB = 2
22
23 def my_assert(self, expectation, result):
24 if not expectation == result:
25 #print "'%x','%x' %s" % (ord(expectation), ord(result), self.name)
26            raise Exception("%s does not have the expected ELF header" % self.name)
27
28 def __init__(self, name, bits = 0):
29 self.name = name
30 self.bits = bits
31 self.objdump_output = {}
32
33 def open(self):
34 self.file = file(self.name, "r")
35 self.data = self.file.read(ELFFile.EI_NIDENT+4)
36
37 self.my_assert(len(self.data), ELFFile.EI_NIDENT+4)
38 self.my_assert(self.data[0], chr(0x7f) )
39 self.my_assert(self.data[1], 'E')
40 self.my_assert(self.data[2], 'L')
41 self.my_assert(self.data[3], 'F')
42 if self.bits == 0:
43 if self.data[ELFFile.EI_CLASS] == chr(ELFFile.ELFCLASS32):
44 self.bits = 32
45 elif self.data[ELFFile.EI_CLASS] == chr(ELFFile.ELFCLASS64):
46 self.bits = 64
47 else:
48                # neither 32-bit nor 64-bit; raise
49 raise Exception("ELF but not 32 or 64 bit.")
50 elif self.bits == 32:
51 self.my_assert(self.data[ELFFile.EI_CLASS], chr(ELFFile.ELFCLASS32))
52 elif self.bits == 64:
53 self.my_assert(self.data[ELFFile.EI_CLASS], chr(ELFFile.ELFCLASS64))
54 else:
55 raise Exception("Must specify unknown, 32 or 64 bit size.")
56 self.my_assert(self.data[ELFFile.EI_VERSION], chr(ELFFile.EV_CURRENT) )
57
58 self.sex = self.data[ELFFile.EI_DATA]
59 if self.sex == chr(ELFFile.ELFDATANONE):
60 raise Exception("self.sex == ELFDATANONE")
61 elif self.sex == chr(ELFFile.ELFDATA2LSB):
62 self.sex = "<"
63 elif self.sex == chr(ELFFile.ELFDATA2MSB):
64 self.sex = ">"
65 else:
66 raise Exception("Unknown self.sex")
67
68 def osAbi(self):
69 return ord(self.data[ELFFile.EI_OSABI])
70
71 def abiVersion(self):
72 return ord(self.data[ELFFile.EI_ABIVERSION])
73
74 def abiSize(self):
75 return self.bits
76
77 def isLittleEndian(self):
78 return self.sex == "<"
79
80 def isBigEngian(self):
81 return self.sex == ">"
82
83 def machine(self):
84        """
85        Return the e_machine field, decoded using the byte order
86        stored in self.sex.
87        """
88 import struct
89 (a,) = struct.unpack(self.sex+"H", self.data[18:20])
90 return a
91
92 def run_objdump(self, cmd, d):
93 import bb.process
94        import os
95
96 if cmd in self.objdump_output:
97 return self.objdump_output[cmd]
98
99 objdump = d.getVar('OBJDUMP', True)
100
101 env = os.environ.copy()
102 env["LC_ALL"] = "C"
103 env["PATH"] = d.getVar('PATH', True)
104
105 try:
106 bb.note("%s %s %s" % (objdump, cmd, self.name))
107 self.objdump_output[cmd] = bb.process.run([objdump, cmd, self.name], env=env, shell=False)[0]
108 return self.objdump_output[cmd]
109 except Exception as e:
110 bb.note("%s %s %s failed: %s" % (objdump, cmd, self.name, e))
111 return ""
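A short, hypothetical usage sketch for the ELFFile class above (Python 2, since the class relies on the file() builtin and str-indexed header bytes; the path is invented):

    # Inspect the ident header of a binary; open() raises if it is not ELF.
    elf = ELFFile("/path/to/some/binary")
    elf.open()

    bits = elf.abiSize()           # 32 or 64, from EI_CLASS
    little = elf.isLittleEndian()  # from EI_DATA
    machine = elf.machine()        # e_machine, e.g. 62 for x86-64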
diff --git a/meta/lib/oe/rootfs.py b/meta/lib/oe/rootfs.py
new file mode 100644
index 0000000000..67ed9ef03d
--- /dev/null
+++ b/meta/lib/oe/rootfs.py
@@ -0,0 +1,800 @@
1from abc import ABCMeta, abstractmethod
2from oe.utils import execute_pre_post_process
3from oe.package_manager import *
4from oe.manifest import *
5import oe.path
6import filecmp
7import shutil
8import os
9import subprocess
10import re
11
12
13class Rootfs(object):
14 """
15 This is an abstract class. Do not instantiate this directly.
16 """
17 __metaclass__ = ABCMeta
18
19 def __init__(self, d):
20 self.d = d
21 self.pm = None
22 self.image_rootfs = self.d.getVar('IMAGE_ROOTFS', True)
23 self.deploy_dir_image = self.d.getVar('DEPLOY_DIR_IMAGE', True)
24
25 self.install_order = Manifest.INSTALL_ORDER
26
27 @abstractmethod
28 def _create(self):
29 pass
30
31 @abstractmethod
32 def _get_delayed_postinsts(self):
33 pass
34
35 @abstractmethod
36 def _save_postinsts(self):
37 pass
38
39 @abstractmethod
40 def _log_check(self):
41 pass
42
43 def _insert_feed_uris(self):
44 if bb.utils.contains("IMAGE_FEATURES", "package-management",
45 True, False, self.d):
46 self.pm.insert_feeds_uris()
47
48 @abstractmethod
49 def _handle_intercept_failure(self, failed_script):
50 pass
51
52    """
53    The _cleanup() method should be used to clean up anything that we don't
54    really want to end up on the target. For example, in the case of RPM,
55    the DB locks. The method is called once, at the end of the create() method.
56    """
57 @abstractmethod
58 def _cleanup(self):
59 pass
60
61 def _exec_shell_cmd(self, cmd):
62 fakerootcmd = self.d.getVar('FAKEROOT', True)
63 if fakerootcmd is not None:
64 exec_cmd = [fakerootcmd, cmd]
65 else:
66 exec_cmd = cmd
67
68 try:
69 subprocess.check_output(exec_cmd, stderr=subprocess.STDOUT)
70 except subprocess.CalledProcessError as e:
71            return "Command '%s' returned %d:\n%s" % (e.cmd, e.returncode, e.output)
72
73 return None
74
75 def create(self):
76 bb.note("###### Generate rootfs #######")
77 pre_process_cmds = self.d.getVar("ROOTFS_PREPROCESS_COMMAND", True)
78 post_process_cmds = self.d.getVar("ROOTFS_POSTPROCESS_COMMAND", True)
79
80 intercepts_dir = os.path.join(self.d.getVar('WORKDIR', True),
81 "intercept_scripts")
82
83 bb.utils.remove(intercepts_dir, True)
84
85 bb.utils.mkdirhier(self.image_rootfs)
86
87 bb.utils.mkdirhier(self.deploy_dir_image)
88
89 shutil.copytree(self.d.expand("${COREBASE}/scripts/postinst-intercepts"),
90 intercepts_dir)
91
92 shutil.copy(self.d.expand("${COREBASE}/meta/files/deploydir_readme.txt"),
93 self.deploy_dir_image +
94 "/README_-_DO_NOT_DELETE_FILES_IN_THIS_DIRECTORY.txt")
95
96 execute_pre_post_process(self.d, pre_process_cmds)
97
98 # call the package manager dependent create method
99 self._create()
100
101 sysconfdir = self.image_rootfs + self.d.getVar('sysconfdir', True)
102 bb.utils.mkdirhier(sysconfdir)
103 with open(sysconfdir + "/version", "w+") as ver:
104 ver.write(self.d.getVar('BUILDNAME', True) + "\n")
105
106 self._run_intercepts()
107
108 execute_pre_post_process(self.d, post_process_cmds)
109
110 if bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
111 True, False, self.d):
112 delayed_postinsts = self._get_delayed_postinsts()
113 if delayed_postinsts is not None:
114 bb.fatal("The following packages could not be configured "
115 "offline and rootfs is read-only: %s" %
116 delayed_postinsts)
117
118 if self.d.getVar('USE_DEVFS', True) != "1":
119 self._create_devfs()
120
121        self._uninstall_unneeded()
122
123 self._insert_feed_uris()
124
125 self._run_ldconfig()
126
127 self._generate_kernel_module_deps()
128
129 self._cleanup()
130
131    def _uninstall_unneeded(self):
132 # Remove unneeded init script symlinks
133 delayed_postinsts = self._get_delayed_postinsts()
134 if delayed_postinsts is None:
135 if os.path.exists(self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/init.d/run-postinsts")):
136 self._exec_shell_cmd(["update-rc.d", "-f", "-r",
137 self.d.getVar('IMAGE_ROOTFS', True),
138 "run-postinsts", "remove"])
139
140 # Remove unneeded package-management related components
141 if bb.utils.contains("IMAGE_FEATURES", "package-management",
142 True, False, self.d):
143 return
144
145 if delayed_postinsts is None:
146 installed_pkgs_dir = self.d.expand('${WORKDIR}/installed_pkgs.txt')
147 pkgs_to_remove = list()
148 with open(installed_pkgs_dir, "r+") as installed_pkgs:
149 pkgs_installed = installed_pkgs.read().split('\n')
150 for pkg_installed in pkgs_installed[:]:
151 pkg = pkg_installed.split()[0]
152 if pkg in ["update-rc.d",
153 "base-passwd",
154 self.d.getVar("ROOTFS_BOOTSTRAP_INSTALL", True)
155 ]:
156 pkgs_to_remove.append(pkg)
157 pkgs_installed.remove(pkg_installed)
158
159 if len(pkgs_to_remove) > 0:
160 self.pm.remove(pkgs_to_remove, False)
161 # Update installed_pkgs.txt
162 open(installed_pkgs_dir, "w+").write('\n'.join(pkgs_installed))
163
164 else:
165 self._save_postinsts()
166
167 self.pm.remove_packaging_data()
168
169 def _run_intercepts(self):
170 intercepts_dir = os.path.join(self.d.getVar('WORKDIR', True),
171 "intercept_scripts")
172
173 bb.note("Running intercept scripts:")
174 os.environ['D'] = self.image_rootfs
175 for script in os.listdir(intercepts_dir):
176 script_full = os.path.join(intercepts_dir, script)
177
178 if script == "postinst_intercept" or not os.access(script_full, os.X_OK):
179 continue
180
181 bb.note("> Executing %s intercept ..." % script)
182
183 try:
184 subprocess.check_output(script_full)
185 except subprocess.CalledProcessError as e:
186 bb.warn("The postinstall intercept hook '%s' failed (exit code: %d)! See log for details!" %
187 (script, e.returncode))
188
189 with open(script_full) as intercept:
190 registered_pkgs = None
191 for line in intercept.read().split("\n"):
192 m = re.match("^##PKGS:(.*)", line)
193 if m is not None:
194 registered_pkgs = m.group(1).strip()
195 break
196
197 if registered_pkgs is not None:
198 bb.warn("The postinstalls for the following packages "
199 "will be postponed for first boot: %s" %
200 registered_pkgs)
201
202 # call the backend dependent handler
203 self._handle_intercept_failure(registered_pkgs)
204
205 def _run_ldconfig(self):
206 if self.d.getVar('LDCONFIGDEPEND', True):
207            bb.note("Executing: ldconfig -r " + self.image_rootfs + " -c new -v")
208 self._exec_shell_cmd(['ldconfig', '-r', self.image_rootfs, '-c',
209 'new', '-v'])
210
211 def _generate_kernel_module_deps(self):
212 kernel_abi_ver_file = os.path.join(self.d.getVar('STAGING_KERNEL_DIR', True),
213 'kernel-abiversion')
214 if os.path.exists(kernel_abi_ver_file):
215 kernel_ver = open(kernel_abi_ver_file).read().strip(' \n')
216 modules_dir = os.path.join(self.image_rootfs, 'lib', 'modules', kernel_ver)
217
218 bb.utils.mkdirhier(modules_dir)
219
220 self._exec_shell_cmd(['depmodwrapper', '-a', '-b', self.image_rootfs,
221 kernel_ver])
222
223    """
224    Create devfs:
225    * IMAGE_DEVICE_TABLE is the old name for an absolute path to a device table file
226    * IMAGE_DEVICE_TABLES is the new name for a file, or list of files, searched
227      for in the BBPATH
228    If neither is specified then the default name of files/device_table-minimal.txt
229    is searched for in the BBPATH (same as the old version).
230    """
231 def _create_devfs(self):
232 devtable_list = []
233 devtable = self.d.getVar('IMAGE_DEVICE_TABLE', True)
234 if devtable is not None:
235 devtable_list.append(devtable)
236 else:
237 devtables = self.d.getVar('IMAGE_DEVICE_TABLES', True)
238 if devtables is None:
239 devtables = 'files/device_table-minimal.txt'
240 for devtable in devtables.split():
241 devtable_list.append("%s" % bb.utils.which(self.d.getVar('BBPATH', True), devtable))
242
243 for devtable in devtable_list:
244 self._exec_shell_cmd(["makedevs", "-r",
245 self.image_rootfs, "-D", devtable])
246
247
248class RpmRootfs(Rootfs):
249 def __init__(self, d, manifest_dir):
250 super(RpmRootfs, self).__init__(d)
251
252 self.manifest = RpmManifest(d, manifest_dir)
253
254 self.pm = RpmPM(d,
255 d.getVar('IMAGE_ROOTFS', True),
256 self.d.getVar('TARGET_VENDOR', True)
257 )
258
259 self.inc_rpm_image_gen = self.d.getVar('INC_RPM_IMAGE_GEN', True)
260 if self.inc_rpm_image_gen != "1":
261 bb.utils.remove(self.image_rootfs, True)
262 else:
263 self.pm.recovery_packaging_data()
264 bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True)
265
266 self.pm.create_configs()
267
268    '''
269    When rpm incremental image generation is enabled, this removes the
270    unneeded packages by comparing the new install solution manifest
271    with the old installed manifest.
272    '''
273 def _create_incremental(self, pkgs_initial_install):
274 if self.inc_rpm_image_gen == "1":
275
276 pkgs_to_install = list()
277 for pkg_type in pkgs_initial_install:
278 pkgs_to_install += pkgs_initial_install[pkg_type]
279
280 installed_manifest = self.pm.load_old_install_solution()
281 solution_manifest = self.pm.dump_install_solution(pkgs_to_install)
282
283 pkg_to_remove = list()
284 for pkg in installed_manifest:
285 if pkg not in solution_manifest:
286 pkg_to_remove.append(pkg)
287
288 self.pm.update()
289
290 bb.note('incremental update -- upgrade packages in place ')
291 self.pm.upgrade()
292 if pkg_to_remove != []:
293 bb.note('incremental removed: %s' % ' '.join(pkg_to_remove))
294 self.pm.remove(pkg_to_remove)
295
296 def _create(self):
297 pkgs_to_install = self.manifest.parse_initial_manifest()
298
299 # update PM index files
300 self.pm.write_index()
301
302 self.pm.dump_all_available_pkgs()
303
304 if self.inc_rpm_image_gen == "1":
305 self._create_incremental(pkgs_to_install)
306
307 self.pm.update()
308
309 pkgs = []
310 pkgs_attempt = []
311 for pkg_type in pkgs_to_install:
312 if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY:
313 pkgs_attempt += pkgs_to_install[pkg_type]
314 else:
315 pkgs += pkgs_to_install[pkg_type]
316
317 self.pm.install(pkgs)
318
319 self.pm.install(pkgs_attempt, True)
320
321 self.pm.install_complementary()
322
323 self._log_check()
324
325 if self.inc_rpm_image_gen == "1":
326 self.pm.backup_packaging_data()
327
328 self.pm.rpm_setup_smart_target_config()
329
330 @staticmethod
331 def _depends_list():
332 return ['DEPLOY_DIR_RPM', 'INC_RPM_IMAGE_GEN', 'RPM_PREPROCESS_COMMANDS',
333 'RPM_POSTPROCESS_COMMANDS', 'RPM_PREFER_ELF_ARCH']
334
335 def _get_delayed_postinsts(self):
336 postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/rpm-postinsts")
337 if os.path.isdir(postinst_dir):
338 files = os.listdir(postinst_dir)
339 for f in files:
340 bb.note('Delayed package scriptlet: %s' % f)
341 return files
342
343 return None
344
345 def _save_postinsts(self):
346 # this is just a stub. For RPM, the failed postinstalls are
347 # already saved in /etc/rpm-postinsts
348 pass
349
350 def _log_check_warn(self):
351 r = re.compile('(warn|Warn)')
352 log_path = self.d.expand("${T}/log.do_rootfs")
353 with open(log_path, 'r') as log:
354 for line in log:
355 if 'log_check' in line:
356 continue
357
358 m = r.search(line)
359 if m:
360 bb.warn('[log_check] %s: found a warning message in the logfile (keyword \'%s\'):\n[log_check] %s'
361 % (self.d.getVar('PN', True), m.group(), line))
362
363 def _log_check_error(self):
364 r = re.compile('(unpacking of archive failed|Cannot find package|exit 1|ERR|Fail)')
365 log_path = self.d.expand("${T}/log.do_rootfs")
366 with open(log_path, 'r') as log:
367 found_error = 0
368 message = "\n"
369 for line in log:
370 if 'log_check' in line:
371 continue
372
373 m = r.search(line)
374 if m:
375 found_error = 1
376 bb.warn('[log_check] %s: found an error message in the logfile (keyword \'%s\'):\n[log_check] %s'
377 % (self.d.getVar('PN', True), m.group(), line))
378
379 if found_error >= 1 and found_error <= 5:
380 message += line + '\n'
381 found_error += 1
382
383 if found_error == 6:
384 bb.fatal(message)
385
386 def _log_check(self):
387 self._log_check_warn()
388 self._log_check_error()
389
390 def _handle_intercept_failure(self, registered_pkgs):
391 rpm_postinsts_dir = self.image_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/')
392 bb.utils.mkdirhier(rpm_postinsts_dir)
393
394 # Save the package postinstalls in /etc/rpm-postinsts
395 for pkg in registered_pkgs.split():
396 self.pm.save_rpmpostinst(pkg)
397
398 def _cleanup(self):
399 # during the execution of postprocess commands, rpm is called several
400 # times to get the files installed, dependencies, etc. This creates the
401 # __db.00* (Berkeley DB files that hold locks, rpm specific environment
402 # settings, etc.), that should not get into the final rootfs
403 self.pm.unlock_rpm_db()
404 bb.utils.remove(self.image_rootfs + "/install", True)
405
406
407class DpkgRootfs(Rootfs):
408 def __init__(self, d, manifest_dir):
409 super(DpkgRootfs, self).__init__(d)
410
411 bb.utils.remove(self.image_rootfs, True)
412 bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True)
413 self.manifest = DpkgManifest(d, manifest_dir)
414 self.pm = DpkgPM(d, d.getVar('IMAGE_ROOTFS', True),
415 d.getVar('PACKAGE_ARCHS', True),
416 d.getVar('DPKG_ARCH', True))
417
418
419 def _create(self):
420 pkgs_to_install = self.manifest.parse_initial_manifest()
421
422 alt_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/alternatives")
423 bb.utils.mkdirhier(alt_dir)
424
425 # update PM index files
426 self.pm.write_index()
427
428 self.pm.update()
429
430 for pkg_type in self.install_order:
431 if pkg_type in pkgs_to_install:
432 self.pm.install(pkgs_to_install[pkg_type],
433 [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
434
435 self.pm.install_complementary()
436
437 self.pm.fix_broken_dependencies()
438
439 self.pm.mark_packages("installed")
440
441 self.pm.run_pre_post_installs()
442
443 @staticmethod
444 def _depends_list():
445 return ['DEPLOY_DIR_DEB', 'DEB_SDK_ARCH', 'APTCONF_TARGET', 'APT_ARGS', 'DPKG_ARCH', 'DEB_PREPROCESS_COMMANDS', 'DEB_POSTPROCESS_COMMAND']
446
447 def _get_delayed_postinsts(self):
448 pkg_list = []
449 with open(self.image_rootfs + "/var/lib/dpkg/status") as status:
450 for line in status:
451 m_pkg = re.match("^Package: (.*)", line)
452 m_status = re.match("^Status:.*unpacked", line)
453 if m_pkg is not None:
454 pkg_name = m_pkg.group(1)
455 elif m_status is not None:
456 pkg_list.append(pkg_name)
457
458 if len(pkg_list) == 0:
459 return None
460
461 return pkg_list
462
463 def _save_postinsts(self):
464 num = 0
465 for p in self._get_delayed_postinsts():
466 dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/deb-postinsts")
467 src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/info")
468
469 bb.utils.mkdirhier(dst_postinst_dir)
470
471 if os.path.exists(os.path.join(src_postinst_dir, p + ".postinst")):
472 shutil.copy(os.path.join(src_postinst_dir, p + ".postinst"),
473 os.path.join(dst_postinst_dir, "%03d-%s" % (num, p)))
474
475 num += 1
476
477 def _handle_intercept_failure(self, registered_pkgs):
478 self.pm.mark_packages("unpacked", registered_pkgs.split())
479
480 def _log_check(self):
481 pass
482
483 def _cleanup(self):
484 pass
485
486
487class OpkgRootfs(Rootfs):
488 def __init__(self, d, manifest_dir):
489 super(OpkgRootfs, self).__init__(d)
490
491 self.manifest = OpkgManifest(d, manifest_dir)
492 self.opkg_conf = self.d.getVar("IPKGCONF_TARGET", True)
493 self.pkg_archs = self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS", True)
494
495 self.inc_opkg_image_gen = self.d.getVar('INC_IPK_IMAGE_GEN', True) or ""
496 if self._remove_old_rootfs():
497 bb.utils.remove(self.image_rootfs, True)
498 self.pm = OpkgPM(d,
499 self.image_rootfs,
500 self.opkg_conf,
501 self.pkg_archs)
502 else:
503 self.pm = OpkgPM(d,
504 self.image_rootfs,
505 self.opkg_conf,
506 self.pkg_archs)
507 self.pm.recover_packaging_data()
508
509 bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True)
510
511 def _prelink_file(self, root_dir, filename):
512 bb.note('prelink %s in %s' % (filename, root_dir))
513 prelink_cfg = oe.path.join(root_dir,
514 self.d.expand('${sysconfdir}/prelink.conf'))
515 if not os.path.exists(prelink_cfg):
516 shutil.copy(self.d.expand('${STAGING_DIR_NATIVE}${sysconfdir_native}/prelink.conf'),
517 prelink_cfg)
518
519 cmd_prelink = self.d.expand('${STAGING_DIR_NATIVE}${sbindir_native}/prelink')
520 self._exec_shell_cmd([cmd_prelink,
521 '--root',
522 root_dir,
523 '-amR',
524 '-N',
525 '-c',
526 self.d.expand('${sysconfdir}/prelink.conf')])
527
528    '''
529    Compare two files with the same key twice to see if they are equal.
530    If they are not equal, they are duplicates that come from
531    different packages.
532    1st: Compare them directly.
533    2nd: When incremental image creation is enabled, one of the
534         files may have been prelinked during a previous image
535         creation and therefore changed, so we need to prelink
536         the other one and compare again.
537    '''
538 def _file_equal(self, key, f1, f2):
539
540 # Both of them are not prelinked
541 if filecmp.cmp(f1, f2):
542 return True
543
544 if self.image_rootfs not in f1:
545 self._prelink_file(f1.replace(key, ''), f1)
546
547 if self.image_rootfs not in f2:
548 self._prelink_file(f2.replace(key, ''), f2)
549
550 # Both of them are prelinked
551 if filecmp.cmp(f1, f2):
552 return True
553
554 # Not equal
555 return False
556
557 """
558 This function was reused from the old implementation.
559 See commit: "image.bbclass: Added variables for multilib support." by
560 Lianhao Lu.
561 """
562 def _multilib_sanity_test(self, dirs):
563
564 allow_replace = self.d.getVar("MULTILIBRE_ALLOW_REP", True)
565 if allow_replace is None:
566 allow_replace = ""
567
568 allow_rep = re.compile(re.sub("\|$", "", allow_replace))
569 error_prompt = "Multilib check error:"
570
571 files = {}
572 for dir in dirs:
573 for root, subfolders, subfiles in os.walk(dir):
574 for file in subfiles:
575 item = os.path.join(root, file)
576 key = str(os.path.join("/", os.path.relpath(item, dir)))
577
578 valid = True
579 if key in files:
580                        # check whether the file is allowed to be replaced
581 if allow_rep.match(key):
582 valid = True
583 else:
584 if os.path.exists(files[key]) and \
585 os.path.exists(item) and \
586 not self._file_equal(key, files[key], item):
587 valid = False
588                            bb.fatal("%s duplicate files %s %s are not the same\n" %
589 (error_prompt, item, files[key]))
590
591                    # passed the check, add to the list
592 if valid:
593 files[key] = item
594
595 def _multilib_test_install(self, pkgs):
596 ml_temp = self.d.getVar("MULTILIB_TEMP_ROOTFS", True)
597 bb.utils.mkdirhier(ml_temp)
598
599 dirs = [self.image_rootfs]
600
601 for variant in self.d.getVar("MULTILIB_VARIANTS", True).split():
602 ml_target_rootfs = os.path.join(ml_temp, variant)
603
604 bb.utils.remove(ml_target_rootfs, True)
605
606 ml_opkg_conf = os.path.join(ml_temp,
607 variant + "-" + os.path.basename(self.opkg_conf))
608
609 ml_pm = OpkgPM(self.d, ml_target_rootfs, ml_opkg_conf, self.pkg_archs)
610
611 ml_pm.update()
612 ml_pm.install(pkgs)
613
614 dirs.append(ml_target_rootfs)
615
616 self._multilib_sanity_test(dirs)
617
618    '''
619    When ipk incremental image generation is enabled, this removes the
620    unneeded packages by comparing the old full manifest from the previous
621    image with the new full manifest of the current image.
622    '''
623 def _remove_extra_packages(self, pkgs_initial_install):
624 if self.inc_opkg_image_gen == "1":
625 # Parse full manifest in previous existing image creation session
626 old_full_manifest = self.manifest.parse_full_manifest()
627
628 # Create full manifest for the current image session, the old one
629 # will be replaced by the new one.
630 self.manifest.create_full(self.pm)
631
632 # Parse full manifest in current image creation session
633 new_full_manifest = self.manifest.parse_full_manifest()
634
635 pkg_to_remove = list()
636 for pkg in old_full_manifest:
637 if pkg not in new_full_manifest:
638 pkg_to_remove.append(pkg)
639
640 if pkg_to_remove != []:
641 bb.note('decremental removed: %s' % ' '.join(pkg_to_remove))
642 self.pm.remove(pkg_to_remove)
643
644    '''
645    Compare with the previous image creation session; if certain
646    conditions are triggered, the previous image should be removed.
647    The conditions: any of PACKAGE_EXCLUDE, NO_RECOMMENDATIONS
648    and BAD_RECOMMENDATIONS has been changed.
649    '''
650 def _remove_old_rootfs(self):
651 if self.inc_opkg_image_gen != "1":
652 return True
653
654 vars_list_file = self.d.expand('${T}/vars_list')
655
656 old_vars_list = ""
657 if os.path.exists(vars_list_file):
658 old_vars_list = open(vars_list_file, 'r+').read()
659
660 new_vars_list = '%s:%s:%s\n' % \
661 ((self.d.getVar('BAD_RECOMMENDATIONS', True) or '').strip(),
662 (self.d.getVar('NO_RECOMMENDATIONS', True) or '').strip(),
663 (self.d.getVar('PACKAGE_EXCLUDE', True) or '').strip())
664 open(vars_list_file, 'w+').write(new_vars_list)
665
666 if old_vars_list != new_vars_list:
667 return True
668
669 return False
670
671 def _create(self):
672 pkgs_to_install = self.manifest.parse_initial_manifest()
673 opkg_pre_process_cmds = self.d.getVar('OPKG_PREPROCESS_COMMANDS', True)
674 opkg_post_process_cmds = self.d.getVar('OPKG_POSTPROCESS_COMMANDS', True)
675 rootfs_post_install_cmds = self.d.getVar('ROOTFS_POSTINSTALL_COMMAND', True)
676
677 # update PM index files, unless users provide their own feeds
678 if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") != "1":
679 self.pm.write_index()
680
681 execute_pre_post_process(self.d, opkg_pre_process_cmds)
682
683 self.pm.update()
684
685 self.pm.handle_bad_recommendations()
686
687 if self.inc_opkg_image_gen == "1":
688 self._remove_extra_packages(pkgs_to_install)
689
690 for pkg_type in self.install_order:
691 if pkg_type in pkgs_to_install:
692                # For multilib, we perform a sanity test before the final
693                # install. If the sanity test fails, it calls bb.fatal()
694                # and the installation stops.
695 if pkg_type == Manifest.PKG_TYPE_MULTILIB:
696 self._multilib_test_install(pkgs_to_install[pkg_type])
697
698 self.pm.install(pkgs_to_install[pkg_type],
699 [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
700
701 self.pm.install_complementary()
702
703 execute_pre_post_process(self.d, opkg_post_process_cmds)
704 execute_pre_post_process(self.d, rootfs_post_install_cmds)
705
706 if self.inc_opkg_image_gen == "1":
707 self.pm.backup_packaging_data()
708
709 @staticmethod
710 def _depends_list():
711        return ['IPKGCONF_SDK', 'IPK_FEED_URIS', 'DEPLOY_DIR_IPK', 'IPKGCONF_TARGET', 'INC_IPK_IMAGE_GEN', 'OPKG_ARGS', 'OPKGLIBDIR', 'OPKG_PREPROCESS_COMMANDS', 'OPKG_POSTPROCESS_COMMANDS']
712
713 def _get_delayed_postinsts(self):
714 pkg_list = []
715 status_file = os.path.join(self.image_rootfs,
716 self.d.getVar('OPKGLIBDIR', True).strip('/'),
717 "opkg", "status")
718
719 with open(status_file) as status:
720 for line in status:
721 m_pkg = re.match("^Package: (.*)", line)
722 m_status = re.match("^Status:.*unpacked", line)
723 if m_pkg is not None:
724 pkg_name = m_pkg.group(1)
725 elif m_status is not None:
726 pkg_list.append(pkg_name)
727
728 if len(pkg_list) == 0:
729 return None
730
731 return pkg_list
732
733 def _save_postinsts(self):
734 num = 0
735 for p in self._get_delayed_postinsts():
736 dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/ipk-postinsts")
737 src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${OPKGLIBDIR}/opkg/info")
738
739 bb.utils.mkdirhier(dst_postinst_dir)
740
741 if os.path.exists(os.path.join(src_postinst_dir, p + ".postinst")):
742 shutil.copy(os.path.join(src_postinst_dir, p + ".postinst"),
743 os.path.join(dst_postinst_dir, "%03d-%s" % (num, p)))
744
745 num += 1
746
747 def _handle_intercept_failure(self, registered_pkgs):
748 self.pm.mark_packages("unpacked", registered_pkgs.split())
749
750 def _log_check(self):
751 pass
752
753 def _cleanup(self):
754 pass
755
756def get_class_for_type(imgtype):
757 return {"rpm": RpmRootfs,
758 "ipk": OpkgRootfs,
759 "deb": DpkgRootfs}[imgtype]
760
761def variable_depends(d, manifest_dir=None):
762 img_type = d.getVar('IMAGE_PKGTYPE', True)
763 cls = get_class_for_type(img_type)
764 return cls._depends_list()
765
766def create_rootfs(d, manifest_dir=None):
767 env_bkp = os.environ.copy()
768
769 img_type = d.getVar('IMAGE_PKGTYPE', True)
770 if img_type == "rpm":
771 RpmRootfs(d, manifest_dir).create()
772 elif img_type == "ipk":
773 OpkgRootfs(d, manifest_dir).create()
774 elif img_type == "deb":
775 DpkgRootfs(d, manifest_dir).create()
776
777 os.environ.clear()
778 os.environ.update(env_bkp)
779
780
781def image_list_installed_packages(d, format=None, rootfs_dir=None):
782 if not rootfs_dir:
783 rootfs_dir = d.getVar('IMAGE_ROOTFS', True)
784
785 img_type = d.getVar('IMAGE_PKGTYPE', True)
786 if img_type == "rpm":
787 return RpmPkgsList(d, rootfs_dir).list(format)
788 elif img_type == "ipk":
789 return OpkgPkgsList(d, rootfs_dir, d.getVar("IPKGCONF_TARGET", True)).list(format)
790 elif img_type == "deb":
791 return DpkgPkgsList(d, rootfs_dir).list(format)
792
793if __name__ == "__main__":
794 """
795 We should be able to run this as a standalone script, from outside bitbake
796 environment.
797 """
798 """
799 TBD
800 """
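One detail worth calling out from the intercepts handling above: when an intercept script fails, Rootfs._run_intercepts() re-reads the script looking for a "##PKGS:" marker line to decide which packages get their postinstalls deferred to first boot. A self-contained sketch of that parse, with invented script contents:

    import re

    # A postinst intercept script carries the affected packages in a
    # marker line; the lines below are made up for illustration.
    script_lines = ["#!/bin/sh", "##PKGS: pkg-a pkg-b", "exit 1"]

    registered_pkgs = None
    for line in script_lines:
        m = re.match("^##PKGS:(.*)", line)
        if m is not None:
            registered_pkgs = m.group(1).strip()
            break

    assert registered_pkgs == "pkg-a pkg-b"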
diff --git a/meta/lib/oe/sdk.py b/meta/lib/oe/sdk.py
new file mode 100644
index 0000000000..c57a441941
--- /dev/null
+++ b/meta/lib/oe/sdk.py
@@ -0,0 +1,326 @@
1from abc import ABCMeta, abstractmethod
2from oe.utils import execute_pre_post_process
3from oe.manifest import *
4from oe.package_manager import *
5import os
6import shutil
7import glob
8
9
10class Sdk(object):
11 __metaclass__ = ABCMeta
12
13 def __init__(self, d, manifest_dir):
14 self.d = d
15 self.sdk_output = self.d.getVar('SDK_OUTPUT', True)
16 self.sdk_native_path = self.d.getVar('SDKPATHNATIVE', True).strip('/')
17 self.target_path = self.d.getVar('SDKTARGETSYSROOT', True).strip('/')
18 self.sysconfdir = self.d.getVar('sysconfdir', True).strip('/')
19
20 self.sdk_target_sysroot = os.path.join(self.sdk_output, self.target_path)
21 self.sdk_host_sysroot = self.sdk_output
22
23 if manifest_dir is None:
24 self.manifest_dir = self.d.getVar("SDK_DIR", True)
25 else:
26 self.manifest_dir = manifest_dir
27
28 bb.utils.remove(self.sdk_output, True)
29
30 self.install_order = Manifest.INSTALL_ORDER
31
32 @abstractmethod
33 def _populate(self):
34 pass
35
36 def populate(self):
37 bb.utils.mkdirhier(self.sdk_output)
38
39 # call backend dependent implementation
40 self._populate()
41
42 # Don't ship any libGL in the SDK
43 bb.utils.remove(os.path.join(self.sdk_output, self.sdk_native_path,
44 self.d.getVar('libdir_nativesdk', True).strip('/'),
45 "libGL*"))
46
47 # Fix or remove broken .la files
48 bb.utils.remove(os.path.join(self.sdk_output, self.sdk_native_path,
49 self.d.getVar('libdir_nativesdk', True).strip('/'),
50 "*.la"))
51
52        # Link the ld.so.cache file into the host's filesystem
53 link_name = os.path.join(self.sdk_output, self.sdk_native_path,
54 self.sysconfdir, "ld.so.cache")
55 bb.utils.mkdirhier(os.path.dirname(link_name))
56 os.symlink("/etc/ld.so.cache", link_name)
57
58 execute_pre_post_process(self.d, self.d.getVar('SDK_POSTPROCESS_COMMAND', True))
59
60
61class RpmSdk(Sdk):
62 def __init__(self, d, manifest_dir=None):
63 super(RpmSdk, self).__init__(d, manifest_dir)
64
65 self.target_manifest = RpmManifest(d, self.manifest_dir,
66 Manifest.MANIFEST_TYPE_SDK_TARGET)
67 self.host_manifest = RpmManifest(d, self.manifest_dir,
68 Manifest.MANIFEST_TYPE_SDK_HOST)
69
70 target_providename = ['/bin/sh',
71 '/bin/bash',
72 '/usr/bin/env',
73 '/usr/bin/perl',
74 'pkgconfig'
75 ]
76
77 self.target_pm = RpmPM(d,
78 self.sdk_target_sysroot,
79 self.d.getVar('TARGET_VENDOR', True),
80 'target',
81 target_providename
82 )
83
84 sdk_providename = ['/bin/sh',
85 '/bin/bash',
86 '/usr/bin/env',
87 '/usr/bin/perl',
88 'pkgconfig',
89 'libGL.so()(64bit)',
90 'libGL.so'
91 ]
92
93 self.host_pm = RpmPM(d,
94 self.sdk_host_sysroot,
95 self.d.getVar('SDK_VENDOR', True),
96 'host',
97 sdk_providename,
98 "SDK_PACKAGE_ARCHS",
99 "SDK_OS"
100 )
101
102 def _populate_sysroot(self, pm, manifest):
103 pkgs_to_install = manifest.parse_initial_manifest()
104
105 pm.create_configs()
106 pm.write_index()
107 pm.dump_all_available_pkgs()
108 pm.update()
109
110 for pkg_type in self.install_order:
111 if pkg_type in pkgs_to_install:
112 pm.install(pkgs_to_install[pkg_type],
113 [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
114
115 def _populate(self):
116 bb.note("Installing TARGET packages")
117 self._populate_sysroot(self.target_pm, self.target_manifest)
118
119 self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY', True))
120
121 execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND", True))
122
123 self.target_pm.remove_packaging_data()
124
125 bb.note("Installing NATIVESDK packages")
126 self._populate_sysroot(self.host_pm, self.host_manifest)
127
128 execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND", True))
129
130 self.host_pm.remove_packaging_data()
131
132 # Move host RPM library data
133 native_rpm_state_dir = os.path.join(self.sdk_output,
134 self.sdk_native_path,
135 self.d.getVar('localstatedir_nativesdk', True).strip('/'),
136 "lib",
137 "rpm"
138 )
139 bb.utils.mkdirhier(native_rpm_state_dir)
140 for f in glob.glob(os.path.join(self.sdk_output,
141 "var",
142 "lib",
143 "rpm",
144 "*")):
145 bb.utils.movefile(f, native_rpm_state_dir)
146
147 bb.utils.remove(os.path.join(self.sdk_output, "var"), True)
148
149 # Move host sysconfig data
150 native_sysconf_dir = os.path.join(self.sdk_output,
151 self.sdk_native_path,
152 self.d.getVar('sysconfdir',
153 True).strip('/'),
154 )
155 bb.utils.mkdirhier(native_sysconf_dir)
156 for f in glob.glob(os.path.join(self.sdk_output, "etc", "*")):
157 bb.utils.movefile(f, native_sysconf_dir)
158 bb.utils.remove(os.path.join(self.sdk_output, "etc"), True)
159
160
161class OpkgSdk(Sdk):
162 def __init__(self, d, manifest_dir=None):
163 super(OpkgSdk, self).__init__(d, manifest_dir)
164
165 self.target_conf = self.d.getVar("IPKGCONF_TARGET", True)
166 self.host_conf = self.d.getVar("IPKGCONF_SDK", True)
167
168 self.target_manifest = OpkgManifest(d, self.manifest_dir,
169 Manifest.MANIFEST_TYPE_SDK_TARGET)
170 self.host_manifest = OpkgManifest(d, self.manifest_dir,
171 Manifest.MANIFEST_TYPE_SDK_HOST)
172
173 self.target_pm = OpkgPM(d, self.sdk_target_sysroot, self.target_conf,
174 self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS", True))
175
176 self.host_pm = OpkgPM(d, self.sdk_host_sysroot, self.host_conf,
177 self.d.getVar("SDK_PACKAGE_ARCHS", True))
178
179 def _populate_sysroot(self, pm, manifest):
180 pkgs_to_install = manifest.parse_initial_manifest()
181
182 if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") != "1":
183 pm.write_index()
184
185 pm.update()
186
187 for pkg_type in self.install_order:
188 if pkg_type in pkgs_to_install:
189 pm.install(pkgs_to_install[pkg_type],
190 [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
191
192 def _populate(self):
193 bb.note("Installing TARGET packages")
194 self._populate_sysroot(self.target_pm, self.target_manifest)
195
196 self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY', True))
197
198 execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND", True))
199
200 bb.note("Installing NATIVESDK packages")
201 self._populate_sysroot(self.host_pm, self.host_manifest)
202
203 execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND", True))
204
205 target_sysconfdir = os.path.join(self.sdk_target_sysroot, self.sysconfdir)
206 host_sysconfdir = os.path.join(self.sdk_host_sysroot, self.sysconfdir)
207
208 bb.utils.mkdirhier(target_sysconfdir)
209 shutil.copy(self.target_conf, target_sysconfdir)
210 os.chmod(os.path.join(target_sysconfdir,
211 os.path.basename(self.target_conf)), 0644)
212
213 bb.utils.mkdirhier(host_sysconfdir)
214 shutil.copy(self.host_conf, host_sysconfdir)
215 os.chmod(os.path.join(host_sysconfdir,
216 os.path.basename(self.host_conf)), 0644)
217
218 native_opkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
219 self.d.getVar('localstatedir_nativesdk', True).strip('/'),
220 "lib", "opkg")
221 bb.utils.mkdirhier(native_opkg_state_dir)
222 for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "opkg", "*")):
223 bb.utils.movefile(f, native_opkg_state_dir)
224
225 bb.utils.remove(os.path.join(self.sdk_output, "var"), True)
226
227
228class DpkgSdk(Sdk):
229 def __init__(self, d, manifest_dir=None):
230 super(DpkgSdk, self).__init__(d, manifest_dir)
231
232 self.target_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET", True), "apt")
233 self.host_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET", True), "apt-sdk")
234
235 self.target_manifest = DpkgManifest(d, self.manifest_dir,
236 Manifest.MANIFEST_TYPE_SDK_TARGET)
237 self.host_manifest = DpkgManifest(d, self.manifest_dir,
238 Manifest.MANIFEST_TYPE_SDK_HOST)
239
240 self.target_pm = DpkgPM(d, self.sdk_target_sysroot,
241 self.d.getVar("PACKAGE_ARCHS", True),
242 self.d.getVar("DPKG_ARCH", True),
243 self.target_conf_dir)
244
245 self.host_pm = DpkgPM(d, self.sdk_host_sysroot,
246 self.d.getVar("SDK_PACKAGE_ARCHS", True),
247 self.d.getVar("DEB_SDK_ARCH", True),
248 self.host_conf_dir)
249
250 def _copy_apt_dir_to(self, dst_dir):
251 staging_etcdir_native = self.d.getVar("STAGING_ETCDIR_NATIVE", True)
252
253 bb.utils.remove(dst_dir, True)
254
255 shutil.copytree(os.path.join(staging_etcdir_native, "apt"), dst_dir)
256
257 def _populate_sysroot(self, pm, manifest):
258 pkgs_to_install = manifest.parse_initial_manifest()
259
260 pm.write_index()
261 pm.update()
262
263 for pkg_type in self.install_order:
264 if pkg_type in pkgs_to_install:
265 pm.install(pkgs_to_install[pkg_type],
266 [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
267
268 def _populate(self):
269 bb.note("Installing TARGET packages")
270 self._populate_sysroot(self.target_pm, self.target_manifest)
271
272 execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND", True))
273
274 self._copy_apt_dir_to(os.path.join(self.sdk_target_sysroot, "etc", "apt"))
275
276 bb.note("Installing NATIVESDK packages")
277 self._populate_sysroot(self.host_pm, self.host_manifest)
278
279 execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND", True))
280
281 self._copy_apt_dir_to(os.path.join(self.sdk_output, self.sdk_native_path,
282 "etc", "apt"))
283
284 native_dpkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
285 "var", "lib", "dpkg")
286 bb.utils.mkdirhier(native_dpkg_state_dir)
287 for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "dpkg", "*")):
288 bb.utils.movefile(f, native_dpkg_state_dir)
289
290 bb.utils.remove(os.path.join(self.sdk_output, "var"), True)
291
292
293def sdk_list_installed_packages(d, target, format=None, rootfs_dir=None):
294 if rootfs_dir is None:
295 sdk_output = d.getVar('SDK_OUTPUT', True)
296 target_path = d.getVar('SDKTARGETSYSROOT', True).strip('/')
297
298 rootfs_dir = [sdk_output, os.path.join(sdk_output, target_path)][target is True]
299
300 img_type = d.getVar('IMAGE_PKGTYPE', True)
301 if img_type == "rpm":
302 arch_var = ["SDK_PACKAGE_ARCHS", None][target is True]
303 os_var = ["SDK_OS", None][target is True]
304 return RpmPkgsList(d, rootfs_dir, arch_var, os_var).list(format)
305 elif img_type == "ipk":
306 conf_file_var = ["IPKGCONF_SDK", "IPKGCONF_TARGET"][target is True]
307 return OpkgPkgsList(d, rootfs_dir, d.getVar(conf_file_var, True)).list(format)
308 elif img_type == "deb":
309 return DpkgPkgsList(d, rootfs_dir).list(format)
310
311def populate_sdk(d, manifest_dir=None):
312 env_bkp = os.environ.copy()
313
314 img_type = d.getVar('IMAGE_PKGTYPE', True)
315 if img_type == "rpm":
316 RpmSdk(d, manifest_dir).populate()
317 elif img_type == "ipk":
318 OpkgSdk(d, manifest_dir).populate()
319 elif img_type == "deb":
320 DpkgSdk(d, manifest_dir).populate()
321
322 os.environ.clear()
323 os.environ.update(env_bkp)
324
325if __name__ == "__main__":
326 pass
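A note on the recurring [x, y][...] selections in this file and in rootfs.py: they are an old list-indexing substitute for a conditional expression. A tiny self-contained sketch (values invented):

    target = True
    rootfs_dir = ["host-root", "target-root"][target is True]  # "target-root"

    # equivalent, and clearer, with a conditional expression:
    rootfs_dir = "target-root" if target else "host-root"

    # and when the choices are literally False/True, the selection is just
    # the condition itself:
    attempt_only = [False, True][True]  # == True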
diff --git a/meta/lib/oe/sstatesig.py b/meta/lib/oe/sstatesig.py
new file mode 100644
index 0000000000..af7617ee61
--- /dev/null
+++ b/meta/lib/oe/sstatesig.py
@@ -0,0 +1,276 @@
1import bb.siggen
2
3def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache):
4 # Return True if we should keep the dependency, False to drop it
5 def isNative(x):
6 return x.endswith("-native")
7 def isCross(x):
8 return "-cross-" in x
9 def isNativeSDK(x):
10 return x.startswith("nativesdk-")
11 def isKernel(fn):
12 inherits = " ".join(dataCache.inherits[fn])
13 return inherits.find("/module-base.bbclass") != -1 or inherits.find("/linux-kernel-base.bbclass") != -1
14 def isPackageGroup(fn):
15 inherits = " ".join(dataCache.inherits[fn])
16 return "/packagegroup.bbclass" in inherits
17 def isAllArch(fn):
18 inherits = " ".join(dataCache.inherits[fn])
19 return "/allarch.bbclass" in inherits
20 def isImage(fn):
21 return "/image.bbclass" in " ".join(dataCache.inherits[fn])
22
23 # Always include our own inter-task dependencies
24 if recipename == depname:
25 return True
26
27 # Quilt (patch application) changing isn't likely to affect anything
28 excludelist = ['quilt-native', 'subversion-native', 'git-native']
29 if depname in excludelist and recipename != depname:
30 return False
31
32 # Exclude well defined recipe->dependency
33 if "%s->%s" % (recipename, depname) in siggen.saferecipedeps:
34 return False
35
36 # Don't change native/cross/nativesdk recipe dependencies any further
37 if isNative(recipename) or isCross(recipename) or isNativeSDK(recipename):
38 return True
39
40 # Only target packages beyond here
41
42    # allarch packagegroups are assumed to have well behaved names which don't change between architectures/tunes
43 if isPackageGroup(fn) and isAllArch(fn):
44 return False
45
46 # Exclude well defined machine specific configurations which don't change ABI
47 if depname in siggen.abisaferecipes and not isImage(fn):
48 return False
49
50 # Kernel modules are well namespaced. We don't want to depend on the kernel's checksum
51 # if we're just doing an RRECOMMENDS_xxx = "kernel-module-*", not least because the checksum
52 # is machine specific.
53 # Therefore if we're not a kernel or a module recipe (inheriting the kernel classes)
54    # and we recommend a kernel-module, we exclude the dependency.
55 depfn = dep.rsplit(".", 1)[0]
56 if dataCache and isKernel(depfn) and not isKernel(fn):
57 for pkg in dataCache.runrecs[fn]:
58 if " ".join(dataCache.runrecs[fn][pkg]).find("kernel-module-") != -1:
59 return False
60
61 # Default to keep dependencies
62 return True
63
64def sstate_lockedsigs(d):
65 sigs = {}
66 types = (d.getVar("SIGGEN_LOCKEDSIGS_TYPES", True) or "").split()
67 for t in types:
68 lockedsigs = (d.getVar("SIGGEN_LOCKEDSIGS_%s" % t, True) or "").split()
69 for ls in lockedsigs:
70 pn, task, h = ls.split(":", 2)
71 if pn not in sigs:
72 sigs[pn] = {}
73 sigs[pn][task] = h
74 return sigs
75
76class SignatureGeneratorOEBasic(bb.siggen.SignatureGeneratorBasic):
77 name = "OEBasic"
78 def init_rundepcheck(self, data):
79 self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE", True) or "").split()
80 self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", True) or "").split()
81 pass
82 def rundep_check(self, fn, recipename, task, dep, depname, dataCache = None):
83 return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache)
84
85class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
86 name = "OEBasicHash"
87 def init_rundepcheck(self, data):
88 self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE", True) or "").split()
89 self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", True) or "").split()
90 self.lockedsigs = sstate_lockedsigs(data)
91 self.lockedhashes = {}
92 self.lockedpnmap = {}
93 self.lockedhashfn = {}
94 self.machine = data.getVar("MACHINE", True)
95 self.mismatch_msgs = []
96 pass
97 def rundep_check(self, fn, recipename, task, dep, depname, dataCache = None):
98 return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache)
99
100 def get_taskdata(self):
101 data = super(bb.siggen.SignatureGeneratorBasicHash, self).get_taskdata()
102 return (data, self.lockedpnmap, self.lockedhashfn)
103
104 def set_taskdata(self, data):
105 coredata, self.lockedpnmap, self.lockedhashfn = data
106 super(bb.siggen.SignatureGeneratorBasicHash, self).set_taskdata(coredata)
107
108 def dump_sigs(self, dataCache, options):
109 self.dump_lockedsigs()
110 return super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigs(dataCache, options)
111
112 def get_taskhash(self, fn, task, deps, dataCache):
113 h = super(bb.siggen.SignatureGeneratorBasicHash, self).get_taskhash(fn, task, deps, dataCache)
114
115 recipename = dataCache.pkg_fn[fn]
116 self.lockedpnmap[fn] = recipename
117 self.lockedhashfn[fn] = dataCache.hashfn[fn]
118 if recipename in self.lockedsigs:
119 if task in self.lockedsigs[recipename]:
120 k = fn + "." + task
121 h_locked = self.lockedsigs[recipename][task]
122 self.lockedhashes[k] = h_locked
123 self.taskhash[k] = h_locked
124 #bb.warn("Using %s %s %s" % (recipename, task, h))
125
126 if h != h_locked:
127                    self.mismatch_msgs.append('The %s:%s sig (%s) changed; using locked sig %s instead'
128 % (recipename, task, h, h_locked))
129
130 return h_locked
131 #bb.warn("%s %s %s" % (recipename, task, h))
132 return h
133
134 def dump_sigtask(self, fn, task, stampbase, runtime):
135 k = fn + "." + task
136 if k in self.lockedhashes:
137 return
138 super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigtask(fn, task, stampbase, runtime)
139
140 def dump_lockedsigs(self, sigfile=None):
141 if not sigfile:
142 sigfile = os.getcwd() + "/locked-sigs.inc"
143
144 bb.plain("Writing locked sigs to %s" % sigfile)
145 types = {}
146 for k in self.runtaskdeps:
147 fn = k.rsplit(".",1)[0]
148 t = self.lockedhashfn[fn].split(" ")[1].split(":")[5]
149 t = 't-' + t.replace('_', '-')
150 if t not in types:
151 types[t] = []
152 types[t].append(k)
153
154 with open(sigfile, "w") as f:
155 for t in types:
156 f.write('SIGGEN_LOCKEDSIGS_%s = "\\\n' % t)
157 types[t].sort()
158 sortedk = sorted(types[t], key=lambda k: self.lockedpnmap[k.rsplit(".",1)[0]])
159 for k in sortedk:
160 fn = k.rsplit(".",1)[0]
161 task = k.rsplit(".",1)[1]
162 if k not in self.taskhash:
163 continue
164 f.write(" " + self.lockedpnmap[fn] + ":" + task + ":" + self.taskhash[k] + " \\\n")
165 f.write(' "\n')
166 f.write('SIGGEN_LOCKEDSIGS_TYPES_%s = "%s"' % (self.machine, " ".join(types.keys())))
167
168 def checkhashes(self, missed, ret, sq_fn, sq_task, sq_hash, sq_hashfn, d):
169 checklevel = d.getVar("SIGGEN_LOCKEDSIGS_CHECK_LEVEL", True)
170 for task in range(len(sq_fn)):
171 if task not in ret:
172 for pn in self.lockedsigs:
173 if sq_hash[task] in self.lockedsigs[pn].itervalues():
174 self.mismatch_msgs.append("Locked sig is set for %s:%s (%s) yet not in sstate cache?"
175 % (pn, sq_task[task], sq_hash[task]))
176
177 if self.mismatch_msgs and checklevel == 'warn':
178 bb.warn("\n".join(self.mismatch_msgs))
179 elif self.mismatch_msgs and checklevel == 'error':
180 bb.fatal("\n".join(self.mismatch_msgs))
181
182
183# Insert these classes into siggen's namespace so it can see and select them
184bb.siggen.SignatureGeneratorOEBasic = SignatureGeneratorOEBasic
185bb.siggen.SignatureGeneratorOEBasicHash = SignatureGeneratorOEBasicHash
186
187
188def find_siginfo(pn, taskname, taskhashlist, d):
189 """ Find signature data files for comparison purposes """
190
191 import fnmatch
192 import glob
193
194 if taskhashlist:
195 hashfiles = {}
196
197 if not taskname:
198 # We have to derive pn and taskname
199 key = pn
200 splitit = key.split('.bb.')
201 taskname = splitit[1]
202 pn = os.path.basename(splitit[0]).split('_')[0]
203 if key.startswith('virtual:native:'):
204 pn = pn + '-native'
205
206 if taskname in ['do_fetch', 'do_unpack', 'do_patch', 'do_populate_lic']:
207            pn = pn.replace("-native", "")
208
209 filedates = {}
210
211 # First search in stamps dir
212 localdata = d.createCopy()
213 localdata.setVar('MULTIMACH_TARGET_SYS', '*')
214 localdata.setVar('PN', pn)
215 localdata.setVar('PV', '*')
216 localdata.setVar('PR', '*')
217 localdata.setVar('EXTENDPE', '')
218 stamp = localdata.getVar('STAMP', True)
219 filespec = '%s.%s.sigdata.*' % (stamp, taskname)
220 foundall = False
221 import glob
222 for fullpath in glob.glob(filespec):
223 match = False
224 if taskhashlist:
225 for taskhash in taskhashlist:
226 if fullpath.endswith('.%s' % taskhash):
227 hashfiles[taskhash] = fullpath
228 if len(hashfiles) == len(taskhashlist):
229 foundall = True
230 break
231 else:
232 try:
233 filedates[fullpath] = os.stat(fullpath).st_mtime
234 except OSError:
235 continue
236
237 if not taskhashlist or (len(filedates) < 2 and not foundall):
238 # That didn't work, look in sstate-cache
239 hashes = taskhashlist or ['*']
240 localdata = bb.data.createCopy(d)
241 for hashval in hashes:
242 localdata.setVar('PACKAGE_ARCH', '*')
243 localdata.setVar('TARGET_VENDOR', '*')
244 localdata.setVar('TARGET_OS', '*')
245 localdata.setVar('PN', pn)
246 localdata.setVar('PV', '*')
247 localdata.setVar('PR', '*')
248 localdata.setVar('BB_TASKHASH', hashval)
249 if pn.endswith('-native') or "-cross-" in pn or "-crosssdk-" in pn:
250 localdata.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
251 sstatename = taskname[3:]
252 filespec = '%s_%s.*.siginfo' % (localdata.getVar('SSTATE_PKG', True), sstatename)
253
254 if hashval != '*':
255 sstatedir = "%s/%s" % (d.getVar('SSTATE_DIR', True), hashval[:2])
256 else:
257 sstatedir = d.getVar('SSTATE_DIR', True)
258
259 for root, dirs, files in os.walk(sstatedir):
260 for fn in files:
261 fullpath = os.path.join(root, fn)
262 if fnmatch.fnmatch(fullpath, filespec):
263 if taskhashlist:
264 hashfiles[hashval] = fullpath
265 else:
266 try:
267 filedates[fullpath] = os.stat(fullpath).st_mtime
268                            except OSError:
269 continue
270
271 if taskhashlist:
272 return hashfiles
273 else:
274 return filedates
275
276bb.siggen.find_siginfo = find_siginfo
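For reference, the SIGGEN_LOCKEDSIGS_<type> entries parsed by sstate_lockedsigs() above use a pn:task:hash layout, matching what dump_lockedsigs() writes out. A self-contained sketch of the parse (recipe, task and hash invented):

    ls = "zlib:do_configure:4c0e093cd78a53b1e9a5d9a336fd963c"
    pn, task, h = ls.split(":", 2)

    sigs = {}
    if pn not in sigs:
        sigs[pn] = {}
    sigs[pn][task] = h

    assert sigs == {"zlib": {"do_configure": "4c0e093cd78a53b1e9a5d9a336fd963c"}}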
diff --git a/meta/lib/oe/terminal.py b/meta/lib/oe/terminal.py
new file mode 100644
index 0000000000..0a623c75b1
--- /dev/null
+++ b/meta/lib/oe/terminal.py
@@ -0,0 +1,208 @@
1import logging
2import oe.classutils
3import shlex
4from bb.process import Popen, ExecutionError
5
6logger = logging.getLogger('BitBake.OE.Terminal')
7
8
9class UnsupportedTerminal(Exception):
10 pass
11
12class NoSupportedTerminals(Exception):
13 pass
14
15
16class Registry(oe.classutils.ClassRegistry):
17 command = None
18
19 def __init__(cls, name, bases, attrs):
20 super(Registry, cls).__init__(name.lower(), bases, attrs)
21
22 @property
23 def implemented(cls):
24 return bool(cls.command)
25
26
27class Terminal(Popen):
28 __metaclass__ = Registry
29
30 def __init__(self, sh_cmd, title=None, env=None, d=None):
31 fmt_sh_cmd = self.format_command(sh_cmd, title)
32 try:
33 Popen.__init__(self, fmt_sh_cmd, env=env)
34 except OSError as exc:
35 import errno
36 if exc.errno == errno.ENOENT:
37 raise UnsupportedTerminal(self.name)
38 else:
39 raise
40
41 def format_command(self, sh_cmd, title):
42 fmt = {'title': title or 'Terminal', 'command': sh_cmd}
43 if isinstance(self.command, basestring):
44 return shlex.split(self.command.format(**fmt))
45 else:
46 return [element.format(**fmt) for element in self.command]
47
48class XTerminal(Terminal):
49 def __init__(self, sh_cmd, title=None, env=None, d=None):
50 Terminal.__init__(self, sh_cmd, title, env, d)
51 if not os.environ.get('DISPLAY'):
52 raise UnsupportedTerminal(self.name)
53
54class Gnome(XTerminal):
55 command = 'gnome-terminal -t "{title}" -x {command}'
56 priority = 2
57
58class Mate(XTerminal):
59 command = 'mate-terminal -t "{title}" -x {command}'
60 priority = 2
61
62class Xfce(XTerminal):
63 command = 'xfce4-terminal -T "{title}" -e "{command}"'
64 priority = 2
65
66class Konsole(XTerminal):
67 command = 'konsole -T "{title}" -e {command}'
68 priority = 2
69
70 def __init__(self, sh_cmd, title=None, env=None, d=None):
71 # Check version
72 vernum = check_konsole_version("konsole")
73 if vernum:
74 if vernum.split('.')[0] == "2":
75 logger.debug(1, 'Konsole from KDE 4.x will not work as devshell, skipping')
76 raise UnsupportedTerminal(self.name)
77 XTerminal.__init__(self, sh_cmd, title, env, d)
78
79class XTerm(XTerminal):
80 command = 'xterm -T "{title}" -e {command}'
81 priority = 1
82
83class Rxvt(XTerminal):
84 command = 'rxvt -T "{title}" -e {command}'
85 priority = 1
86
87class Screen(Terminal):
88 command = 'screen -D -m -t "{title}" -S devshell {command}'
89
90 def __init__(self, sh_cmd, title=None, env=None, d=None):
91 s_id = "devshell_%i" % os.getpid()
92 self.command = "screen -D -m -t \"{title}\" -S %s {command}" % s_id
93 Terminal.__init__(self, sh_cmd, title, env, d)
94 msg = 'Screen started. Please connect in another terminal with ' \
95 '"screen -r %s"' % s_id
96 if (d):
97 bb.event.fire(bb.event.LogExecTTY(msg, "screen -r %s" % s_id,
98 0.5, 10), d)
99 else:
100 logger.warn(msg)
101
102class TmuxRunning(Terminal):
103 """Open a new pane in the current running tmux window"""
104 name = 'tmux-running'
105 command = 'tmux split-window "{command}"'
106 priority = 2.75
107
108 def __init__(self, sh_cmd, title=None, env=None, d=None):
109 if not bb.utils.which(os.getenv('PATH'), 'tmux'):
110 raise UnsupportedTerminal('tmux is not installed')
111
112 if not os.getenv('TMUX'):
113 raise UnsupportedTerminal('tmux is not running')
114
115 Terminal.__init__(self, sh_cmd, title, env, d)
116
117class Tmux(Terminal):
118 """Start a new tmux session and window"""
119 command = 'tmux new -d -s devshell -n devshell "{command}"'
120 priority = 0.75
121
122 def __init__(self, sh_cmd, title=None, env=None, d=None):
123 if not bb.utils.which(os.getenv('PATH'), 'tmux'):
124 raise UnsupportedTerminal('tmux is not installed')
125
126        # TODO: consider using a 'devshell' session shared amongst all
127        # devshells; if it's already there, add a new window to it.
128 window_name = 'devshell-%i' % os.getpid()
129
130 self.command = 'tmux new -d -s {0} -n {0} "{{command}}"'.format(window_name)
131 Terminal.__init__(self, sh_cmd, title, env, d)
132
133 attach_cmd = 'tmux att -t {0}'.format(window_name)
134 msg = 'Tmux started. Please connect in another terminal with `tmux att -t {0}`'.format(window_name)
135 if d:
136 bb.event.fire(bb.event.LogExecTTY(msg, attach_cmd, 0.5, 10), d)
137 else:
138 logger.warn(msg)
139
140class Custom(Terminal):
141 command = 'false' # This is a placeholder
142 priority = 3
143
144 def __init__(self, sh_cmd, title=None, env=None, d=None):
145 self.command = d and d.getVar('OE_TERMINAL_CUSTOMCMD', True)
146 if self.command:
147            if '{command}' not in self.command:
148 self.command += ' {command}'
149 Terminal.__init__(self, sh_cmd, title, env, d)
150 logger.warn('Custom terminal was started.')
151 else:
152 logger.debug(1, 'No custom terminal (OE_TERMINAL_CUSTOMCMD) set')
153 raise UnsupportedTerminal('OE_TERMINAL_CUSTOMCMD not set')
154
155
156def prioritized():
157 return Registry.prioritized()
158
159def spawn_preferred(sh_cmd, title=None, env=None, d=None):
160 """Spawn the first supported terminal, by priority"""
161 for terminal in prioritized():
162 try:
163 spawn(terminal.name, sh_cmd, title, env, d)
164 break
165 except UnsupportedTerminal:
166 continue
167 else:
168 raise NoSupportedTerminals()
169
170def spawn(name, sh_cmd, title=None, env=None, d=None):
171 """Spawn the specified terminal, by name"""
172 logger.debug(1, 'Attempting to spawn terminal "%s"', name)
173 try:
174 terminal = Registry.registry[name]
175 except KeyError:
176 raise UnsupportedTerminal(name)
177
178 pipe = terminal(sh_cmd, title, env, d)
179 output = pipe.communicate()[0]
180 if pipe.returncode != 0:
181 raise ExecutionError(sh_cmd, pipe.returncode, output)
182
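# Typical usage (illustrative; 'd' is the usual bitbake datastore):
#   spawn('xterm', 'bash', title='devshell', d=d)   # a specific terminal, by name
#   spawn_preferred('bash', title='devshell', d=d)  # first supported one, by priority
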
183def check_konsole_version(konsole):
184 import subprocess as sub
185 try:
186        p = sub.Popen(['sh', '-c', '%s --version' % konsole], stdout=sub.PIPE, stderr=sub.PIPE)
187 out, err = p.communicate()
188 ver_info = out.rstrip().split('\n')
189 except OSError as exc:
190 import errno
191 if exc.errno == errno.ENOENT:
192 return None
193 else:
194 raise
195 vernum = None
196 for ver in ver_info:
197 if ver.startswith('Konsole'):
198 vernum = ver.split(' ')[-1]
199 return vernum
200
201def distro_name():
202 try:
203 p = Popen(['lsb_release', '-i'])
204 out, err = p.communicate()
205 distro = out.split(':')[1].strip().lower()
206 except:
207 distro = "unknown"
208 return distro
diff --git a/meta/lib/oe/tests/__init__.py b/meta/lib/oe/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/meta/lib/oe/tests/__init__.py
diff --git a/meta/lib/oe/tests/test_license.py b/meta/lib/oe/tests/test_license.py
new file mode 100644
index 0000000000..c388886184
--- /dev/null
+++ b/meta/lib/oe/tests/test_license.py
@@ -0,0 +1,68 @@
1import unittest
2import oe.license
3
4class SeenVisitor(oe.license.LicenseVisitor):
5 def __init__(self):
6 self.seen = []
7 oe.license.LicenseVisitor.__init__(self)
8
9 def visit_Str(self, node):
10 self.seen.append(node.s)
11
12class TestSingleLicense(unittest.TestCase):
13 licenses = [
14 "GPLv2",
15 "LGPL-2.0",
16 "Artistic",
17 "MIT",
18 "GPLv3+",
19 "FOO_BAR",
20 ]
21 invalid_licenses = ["GPL/BSD"]
22
23 @staticmethod
24 def parse(licensestr):
25 visitor = SeenVisitor()
26 visitor.visit_string(licensestr)
27 return visitor.seen
28
29 def test_single_licenses(self):
30 for license in self.licenses:
31 licenses = self.parse(license)
32 self.assertListEqual(licenses, [license])
33
34 def test_invalid_licenses(self):
35 for license in self.invalid_licenses:
36 with self.assertRaises(oe.license.InvalidLicense) as cm:
37 self.parse(license)
38 self.assertEqual(cm.exception.license, license)
39
40class TestSimpleCombinations(unittest.TestCase):
41 tests = {
42 "FOO&BAR": ["FOO", "BAR"],
43 "BAZ & MOO": ["BAZ", "MOO"],
44 "ALPHA|BETA": ["ALPHA"],
45 "BAZ&MOO|FOO": ["FOO"],
46 "FOO&BAR|BAZ": ["FOO", "BAR"],
47 }
48 preferred = ["ALPHA", "FOO", "BAR"]
49
50 def test_tests(self):
51 def choose(a, b):
52 if all(lic in self.preferred for lic in b):
53 return b
54 else:
55 return a
56
57 for license, expected in self.tests.items():
58 licenses = oe.license.flattened_licenses(license, choose)
59 self.assertListEqual(licenses, expected)
60
61class TestComplexCombinations(TestSimpleCombinations):
62 tests = {
63 "FOO & (BAR | BAZ)&MOO": ["FOO", "BAR", "MOO"],
64 "(ALPHA|(BETA&THETA)|OMEGA)&DELTA": ["OMEGA", "DELTA"],
65 "((ALPHA|BETA)&FOO)|BAZ": ["BETA", "FOO"],
66 "(GPL-2.0|Proprietary)&BSD-4-clause&MIT": ["GPL-2.0", "BSD-4-clause", "MIT"],
67 }
68 preferred = ["BAR", "OMEGA", "BETA", "GPL-2.0"]
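
# Worked example: with preferred = ["BAR", "OMEGA", "BETA", "GPL-2.0"], the
# choose() callback from TestSimpleCombinations picks the alternative whose
# licenses are all preferred, so "(GPL-2.0|Proprietary)&BSD-4-clause&MIT"
# flattens to ["GPL-2.0", "BSD-4-clause", "MIT"].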
diff --git a/meta/lib/oe/tests/test_path.py b/meta/lib/oe/tests/test_path.py
new file mode 100644
index 0000000000..3d41ce157a
--- /dev/null
+++ b/meta/lib/oe/tests/test_path.py
@@ -0,0 +1,89 @@
1import unittest
2import oe, oe.path
3import tempfile
4import os
5import errno
6import shutil
7
8class TestRealPath(unittest.TestCase):
9 DIRS = [ "a", "b", "etc", "sbin", "usr", "usr/bin", "usr/binX", "usr/sbin", "usr/include", "usr/include/gdbm" ]
10 FILES = [ "etc/passwd", "b/file" ]
11 LINKS = [
12 ( "bin", "/usr/bin", "/usr/bin" ),
13 ( "binX", "usr/binX", "/usr/binX" ),
14 ( "c", "broken", "/broken" ),
15 ( "etc/passwd-1", "passwd", "/etc/passwd" ),
16 ( "etc/passwd-2", "passwd-1", "/etc/passwd" ),
17 ( "etc/passwd-3", "/etc/passwd-1", "/etc/passwd" ),
18 ( "etc/shadow-1", "/etc/shadow", "/etc/shadow" ),
19 ( "etc/shadow-2", "/etc/shadow-1", "/etc/shadow" ),
20 ( "prog-A", "bin/prog-A", "/usr/bin/prog-A" ),
21 ( "prog-B", "/bin/prog-B", "/usr/bin/prog-B" ),
22 ( "usr/bin/prog-C", "../../sbin/prog-C", "/sbin/prog-C" ),
23 ( "usr/bin/prog-D", "/sbin/prog-D", "/sbin/prog-D" ),
24 ( "usr/binX/prog-E", "../sbin/prog-E", None ),
25 ( "usr/bin/prog-F", "../../../sbin/prog-F", "/sbin/prog-F" ),
26 ( "loop", "a/loop", None ),
27 ( "a/loop", "../loop", None ),
28 ( "b/test", "file/foo", "/b/file/foo" ),
29 ]
30
31 LINKS_PHYS = [
32 ( "./", "/", "" ),
33 ( "binX/prog-E", "/usr/sbin/prog-E", "/sbin/prog-E" ),
34 ]
35
36 EXCEPTIONS = [
37 ( "loop", errno.ELOOP ),
38 ( "b/test", errno.ENOENT ),
39 ]
40
41 def __del__(self):
42 try:
43 #os.system("tree -F %s" % self.tmpdir)
44 shutil.rmtree(self.tmpdir)
45 except:
46 pass
47
48 def setUp(self):
49 self.tmpdir = tempfile.mkdtemp(prefix = "oe-test_path")
50 self.root = os.path.join(self.tmpdir, "R")
51
52 os.mkdir(os.path.join(self.tmpdir, "_real"))
53 os.symlink("_real", self.root)
54
55 for d in self.DIRS:
56 os.mkdir(os.path.join(self.root, d))
57 for f in self.FILES:
58            open(os.path.join(self.root, f), "w").close()
59 for l in self.LINKS:
60 os.symlink(l[1], os.path.join(self.root, l[0]))
61
62 def __realpath(self, file, use_physdir, assume_dir = True):
63 return oe.path.realpath(os.path.join(self.root, file), self.root,
64 use_physdir, assume_dir = assume_dir)
65
66 def test_norm(self):
67 for l in self.LINKS:
68            if l[2] is None:
69 continue
70
71 target_p = self.__realpath(l[0], True)
72 target_l = self.__realpath(l[0], False)
73
74 if l[2] != False:
75 self.assertEqual(target_p, target_l)
76 self.assertEqual(l[2], target_p[len(self.root):])
77
78 def test_phys(self):
79 for l in self.LINKS_PHYS:
80 target_p = self.__realpath(l[0], True)
81 target_l = self.__realpath(l[0], False)
82
83 self.assertEqual(l[1], target_p[len(self.root):])
84 self.assertEqual(l[2], target_l[len(self.root):])
85
86 def test_loop(self):
87 for e in self.EXCEPTIONS:
88 self.assertRaisesRegexp(OSError, r'\[Errno %u\]' % e[1],
89 self.__realpath, e[0], False, False)
diff --git a/meta/lib/oe/tests/test_types.py b/meta/lib/oe/tests/test_types.py
new file mode 100644
index 0000000000..367cc30e45
--- /dev/null
+++ b/meta/lib/oe/tests/test_types.py
@@ -0,0 +1,62 @@
1import unittest
2from oe.maketype import create, factory
3
4class TestTypes(unittest.TestCase):
5 def assertIsInstance(self, obj, cls):
6 return self.assertTrue(isinstance(obj, cls))
7
8 def assertIsNot(self, obj, other):
9 return self.assertFalse(obj is other)
10
11 def assertFactoryCreated(self, value, type, **flags):
12 cls = factory(type)
13 self.assertIsNot(cls, None)
14 self.assertIsInstance(create(value, type, **flags), cls)
15
16class TestBooleanType(TestTypes):
17 def test_invalid(self):
18 self.assertRaises(ValueError, create, '', 'boolean')
19 self.assertRaises(ValueError, create, 'foo', 'boolean')
20 self.assertRaises(TypeError, create, object(), 'boolean')
21
22 def test_true(self):
23 self.assertTrue(create('y', 'boolean'))
24 self.assertTrue(create('yes', 'boolean'))
25 self.assertTrue(create('1', 'boolean'))
26 self.assertTrue(create('t', 'boolean'))
27 self.assertTrue(create('true', 'boolean'))
28 self.assertTrue(create('TRUE', 'boolean'))
29 self.assertTrue(create('truE', 'boolean'))
30
31 def test_false(self):
32 self.assertFalse(create('n', 'boolean'))
33 self.assertFalse(create('no', 'boolean'))
34 self.assertFalse(create('0', 'boolean'))
35 self.assertFalse(create('f', 'boolean'))
36 self.assertFalse(create('false', 'boolean'))
37 self.assertFalse(create('FALSE', 'boolean'))
38 self.assertFalse(create('faLse', 'boolean'))
39
40 def test_bool_equality(self):
41 self.assertEqual(create('n', 'boolean'), False)
42 self.assertNotEqual(create('n', 'boolean'), True)
43 self.assertEqual(create('y', 'boolean'), True)
44 self.assertNotEqual(create('y', 'boolean'), False)
45
46class TestList(TestTypes):
47 def assertListEqual(self, value, valid, sep=None):
48 obj = create(value, 'list', separator=sep)
49 self.assertEqual(obj, valid)
50 if sep is not None:
51 self.assertEqual(obj.separator, sep)
52 self.assertEqual(str(obj), obj.separator.join(obj))
53
54 def test_list_nosep(self):
55 testlist = ['alpha', 'beta', 'theta']
56 self.assertListEqual('alpha beta theta', testlist)
57 self.assertListEqual('alpha beta\ttheta', testlist)
58 self.assertListEqual('alpha', ['alpha'])
59
60 def test_list_usersep(self):
61 self.assertListEqual('foo:bar', ['foo', 'bar'], ':')
62 self.assertListEqual('foo:bar:baz', ['foo', 'bar', 'baz'], ':')
diff --git a/meta/lib/oe/tests/test_utils.py b/meta/lib/oe/tests/test_utils.py
new file mode 100644
index 0000000000..5d9ac52e7d
--- /dev/null
+++ b/meta/lib/oe/tests/test_utils.py
@@ -0,0 +1,51 @@
1import unittest
2from oe.utils import packages_filter_out_system, trim_version
3
4class TestPackagesFilterOutSystem(unittest.TestCase):
5 def test_filter(self):
6 """
7 Test that oe.utils.packages_filter_out_system works.
8 """
9 try:
10 import bb
11 except ImportError:
12 self.skipTest("Cannot import bb")
13
14 d = bb.data_smart.DataSmart()
15 d.setVar("PN", "foo")
16
17 d.setVar("PACKAGES", "foo foo-doc foo-dev")
18 pkgs = packages_filter_out_system(d)
19 self.assertEqual(pkgs, [])
20
21 d.setVar("PACKAGES", "foo foo-doc foo-data foo-dev")
22 pkgs = packages_filter_out_system(d)
23 self.assertEqual(pkgs, ["foo-data"])
24
25 d.setVar("PACKAGES", "foo foo-locale-en-gb")
26 pkgs = packages_filter_out_system(d)
27 self.assertEqual(pkgs, [])
28
29 d.setVar("PACKAGES", "foo foo-data foo-locale-en-gb")
30 pkgs = packages_filter_out_system(d)
31 self.assertEqual(pkgs, ["foo-data"])
32
33
34class TestTrimVersion(unittest.TestCase):
35 def test_version_exception(self):
36 with self.assertRaises(TypeError):
37 trim_version(None, 2)
38 with self.assertRaises(TypeError):
39 trim_version((1, 2, 3), 2)
40
41 def test_num_exception(self):
42 with self.assertRaises(ValueError):
43 trim_version("1.2.3", 0)
44 with self.assertRaises(ValueError):
45 trim_version("1.2.3", -1)
46
47 def test_valid(self):
48 self.assertEqual(trim_version("1.2.3", 1), "1")
49 self.assertEqual(trim_version("1.2.3", 2), "1.2")
50 self.assertEqual(trim_version("1.2.3", 3), "1.2.3")
51 self.assertEqual(trim_version("1.2.3", 4), "1.2.3")
diff --git a/meta/lib/oe/types.py b/meta/lib/oe/types.py
new file mode 100644
index 0000000000..7f47c17d0e
--- /dev/null
+++ b/meta/lib/oe/types.py
@@ -0,0 +1,153 @@
1import errno
2import re
3import os
4
5
6class OEList(list):
7 """OpenEmbedded 'list' type
8
9 Acts as an ordinary list, but is constructed from a string value and a
10 separator (optional), and re-joins itself when converted to a string with
11 str(). Set the variable type flag to 'list' to use this type, and the
12 'separator' flag may be specified (defaulting to whitespace)."""
13
14 name = "list"
15
16 def __init__(self, value, separator = None):
17 if value is not None:
18 list.__init__(self, value.split(separator))
19 else:
20 list.__init__(self)
21
22 if separator is None:
23 self.separator = " "
24 else:
25 self.separator = separator
26
27 def __str__(self):
28 return self.separator.join(self)
29
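# A minimal usage sketch (values illustrative):
#   >>> OEList("alpha beta")
#   ['alpha', 'beta']
#   >>> str(OEList("foo:bar", separator=":"))
#   'foo:bar'
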
30def choice(value, choices):
31 """OpenEmbedded 'choice' type
32
33 Acts as a multiple choice for the user. To use this, set the variable
34 type flag to 'choice', and set the 'choices' flag to a space separated
35 list of valid values."""
36 if not isinstance(value, basestring):
37 raise TypeError("choice accepts a string, not '%s'" % type(value))
38
39 value = value.lower()
40 choices = choices.lower()
41 if value not in choices.split():
42 raise ValueError("Invalid choice '%s'. Valid choices: %s" %
43 (value, choices))
44 return value
45
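# For example (illustrative values):
#   >>> choice("Ext4", "ext2 ext3 ext4")
#   'ext4'
# while choice("btrfs", "ext2 ext3 ext4") raises ValueError.
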
46class NoMatch(object):
47 """Stub python regex pattern object which never matches anything"""
48 def findall(self, string, flags=0):
49 return None
50
51 def finditer(self, string, flags=0):
52 return None
53
54 def match(self, flags=0):
55 return None
56
57 def search(self, string, flags=0):
58 return None
59
60 def split(self, string, maxsplit=0):
61 return None
62
63    def sub(self, repl, string, count=0):
64        return None
65
66    def subn(self, repl, string, count=0):
67        return None
68
69NoMatch = NoMatch()
70
71def regex(value, regexflags=None):
72 """OpenEmbedded 'regex' type
73
74 Acts as a regular expression, returning the pre-compiled regular
75 expression pattern object. To use this type, set the variable type flag
76 to 'regex', and optionally, set the 'regexflags' type to a space separated
77 list of the flags to control the regular expression matching (e.g.
78 FOO[regexflags] += 'ignorecase'). See the python documentation on the
79 're' module for a list of valid flags."""
80
81 flagval = 0
82 if regexflags:
83 for flag in regexflags.split():
84 flag = flag.upper()
85 try:
86 flagval |= getattr(re, flag)
87 except AttributeError:
88 raise ValueError("Invalid regex flag '%s'" % flag)
89
90 if not value:
91 # Let's ensure that the default behavior for an undefined or empty
92 # variable is to match nothing. If the user explicitly wants to match
93 # anything, they can match '.*' instead.
94 return NoMatch
95
96 try:
97 return re.compile(value, flagval)
98 except re.error as exc:
99 raise ValueError("Invalid regex value '%s': %s" %
100 (value, exc.args[0]))
101
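# Behaviour sketch (illustrative values):
#   >>> regex("^foo", "ignorecase").match("FOObar") is not None
#   True
#   >>> regex("") is NoMatch    # empty/unset values match nothing
#   True
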
102def boolean(value):
103 """OpenEmbedded 'boolean' type
104
105 Valid values for true: 'yes', 'y', 'true', 't', '1'
106 Valid values for false: 'no', 'n', 'false', 'f', '0'
107 """
108
109 if not isinstance(value, basestring):
110 raise TypeError("boolean accepts a string, not '%s'" % type(value))
111
112 value = value.lower()
113 if value in ('yes', 'y', 'true', 't', '1'):
114 return True
115 elif value in ('no', 'n', 'false', 'f', '0'):
116 return False
117 raise ValueError("Invalid boolean value '%s'" % value)
118
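# For example:
#   >>> boolean("Yes"), boolean("0")
#   (True, False)
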
119def integer(value, numberbase=10):
120 """OpenEmbedded 'integer' type
121
122 Defaults to base 10, but this can be specified using the optional
123 'numberbase' flag."""
124
125 return int(value, int(numberbase))
126
127_float = float
128def float(value, fromhex='false'):
129 """OpenEmbedded floating point type
130
131 To use this type, set the type flag to 'float', and optionally set the
132 'fromhex' flag to a true value (obeying the same rules as for the
133 'boolean' type) if the value is in base 16 rather than base 10."""
134
135 if boolean(fromhex):
136 return _float.fromhex(value)
137 else:
138 return _float(value)
139
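# For example (illustrative values):
#   >>> float("1.25")
#   1.25
#   >>> float("0x1.8p1", fromhex="true")    # 1.5 * 2**1
#   3.0
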
140def path(value, relativeto='', normalize='true', mustexist='false'):
141 value = os.path.join(relativeto, value)
142
143 if boolean(normalize):
144 value = os.path.normpath(value)
145
146 if boolean(mustexist):
147 try:
148 open(value, 'r')
149 except IOError as exc:
150 if exc.errno == errno.ENOENT:
151 raise ValueError("{0}: {1}".format(value, os.strerror(errno.ENOENT)))
152
153 return value
diff --git a/meta/lib/oe/utils.py b/meta/lib/oe/utils.py
new file mode 100644
index 0000000000..35442568e2
--- /dev/null
+++ b/meta/lib/oe/utils.py
@@ -0,0 +1,182 @@
1try:
2 # Python 2
3 import commands as cmdstatus
4except ImportError:
5 # Python 3
6 import subprocess as cmdstatus
7
8def read_file(filename):
9    try:
10        f = open(filename, "r")
11    except IOError as reason:
12        return "" # WARNING: can't raise an error here because of the new RDEPENDS handling; this is a bit ugly. :M:
13    else:
14        data = f.read().strip()
15        f.close()
16        return data
18
19def ifelse(condition, iftrue = True, iffalse = False):
20 if condition:
21 return iftrue
22 else:
23 return iffalse
24
25def conditional(variable, checkvalue, truevalue, falsevalue, d):
26 if d.getVar(variable,1) == checkvalue:
27 return truevalue
28 else:
29 return falsevalue
30
31def less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
32 if float(d.getVar(variable,1)) <= float(checkvalue):
33 return truevalue
34 else:
35 return falsevalue
36
37def version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
38 result = bb.utils.vercmp_string(d.getVar(variable,True), checkvalue)
39 if result <= 0:
40 return truevalue
41 else:
42 return falsevalue
43
44def both_contain(variable1, variable2, checkvalue, d):
45 if d.getVar(variable1,1).find(checkvalue) != -1 and d.getVar(variable2,1).find(checkvalue) != -1:
46 return checkvalue
47 else:
48 return ""
49
50def prune_suffix(var, suffixes, d):
51 # See if var ends with any of the suffixes listed and
52 # remove it if found
53 for suffix in suffixes:
54 if var.endswith(suffix):
55 var = var.replace(suffix, "")
56
57 prefix = d.getVar("MLPREFIX", True)
58 if prefix and var.startswith(prefix):
59 var = var.replace(prefix, "")
60
61 return var
62
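# Illustrative example (hypothetical values): with suffixes = ("-native",)
# and MLPREFIX = "lib32-", prune_suffix("lib32-zlib-native", ...) returns
# "zlib".
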
63def str_filter(f, str, d):
64 from re import match
65 return " ".join(filter(lambda x: match(f, x, 0), str.split()))
66
67def str_filter_out(f, str, d):
68 from re import match
69 return " ".join(filter(lambda x: not match(f, x, 0), str.split()))
70
71def param_bool(cfg, field, dflt = None):
72 """Lookup <field> in <cfg> map and convert it to a boolean; take
73 <dflt> when this <field> does not exist"""
74 value = cfg.get(field, dflt)
75 strvalue = str(value).lower()
76 if strvalue in ('yes', 'y', 'true', 't', '1'):
77 return True
78 elif strvalue in ('no', 'n', 'false', 'f', '0'):
79 return False
80 raise ValueError("invalid value for boolean parameter '%s': '%s'" % (field, value))
81
82def inherits(d, *classes):
83 """Return True if the metadata inherits any of the specified classes"""
84 return any(bb.data.inherits_class(cls, d) for cls in classes)
85
86def features_backfill(var, d):
87    # This construct allows the addition of new features to the variable
88    # specified as var.
89    # Example for var = "DISTRO_FEATURES":
90    # it allows the addition of new features to DISTRO_FEATURES that, if not
91    # present, would disable existing functionality, without disturbing
92    # distributions that have already set DISTRO_FEATURES.
93    # Distributions wanting to elide a value in DISTRO_FEATURES_BACKFILL should
94    # add the feature to DISTRO_FEATURES_BACKFILL_CONSIDERED.
95 features = (d.getVar(var, True) or "").split()
96 backfill = (d.getVar(var+"_BACKFILL", True) or "").split()
97 considered = (d.getVar(var+"_BACKFILL_CONSIDERED", True) or "").split()
98
99 addfeatures = []
100 for feature in backfill:
101 if feature not in features and feature not in considered:
102 addfeatures.append(feature)
103
104 if addfeatures:
105 d.appendVar(var, " " + " ".join(addfeatures))
106
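# Worked example (illustrative values):
#   DISTRO_FEATURES                     = "alsa ipv4"
#   DISTRO_FEATURES_BACKFILL            = "pulseaudio sysvinit"
#   DISTRO_FEATURES_BACKFILL_CONSIDERED = "sysvinit"
# Only "pulseaudio" is appended, giving DISTRO_FEATURES = "alsa ipv4 pulseaudio".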
107
108def packages_filter_out_system(d):
109 """
110 Return a list of packages from PACKAGES with the "system" packages such as
111    PN-dbg, PN-doc and PN-locale-en-gb removed.
112 """
113 pn = d.getVar('PN', True)
114 blacklist = map(lambda suffix: pn + suffix, ('', '-dbg', '-dev', '-doc', '-locale', '-staticdev'))
115 localepkg = pn + "-locale-"
116 pkgs = []
117
118 for pkg in d.getVar('PACKAGES', True).split():
119 if pkg not in blacklist and localepkg not in pkg:
120 pkgs.append(pkg)
121 return pkgs
122
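# For example, with PN = "foo" and PACKAGES = "foo foo-doc foo-data foo-dev",
# this returns ["foo-data"] (see oe/tests/test_utils.py).
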
123def getstatusoutput(cmd):
124 return cmdstatus.getstatusoutput(cmd)
125
126
127def trim_version(version, num_parts=2):
128 """
129 Return just the first <num_parts> of <version>, split by periods. For
130 example, trim_version("1.2.3", 2) will return "1.2".
131 """
132 if type(version) is not str:
133 raise TypeError("Version should be a string")
134 if num_parts < 1:
135 raise ValueError("Cannot split to parts < 1")
136
137 parts = version.split(".")
138 trimmed = ".".join(parts[:num_parts])
139 return trimmed
140
141def cpu_count():
142 import multiprocessing
143 return multiprocessing.cpu_count()
144
145def execute_pre_post_process(d, cmds):
146 if cmds is None:
147 return
148
149 for cmd in cmds.strip().split(';'):
150 cmd = cmd.strip()
151 if cmd != '':
152 bb.note("Executing %s ..." % cmd)
153 bb.build.exec_func(cmd, d)
154
155def multiprocess_exec(commands, function):
156 import signal
157 import multiprocessing
158
159 if not commands:
160 return []
161
162 def init_worker():
163 signal.signal(signal.SIGINT, signal.SIG_IGN)
164
165 nproc = min(multiprocessing.cpu_count(), len(commands))
166 pool = bb.utils.multiprocessingpool(nproc, init_worker)
167 imap = pool.imap(function, commands)
168
169 try:
170 res = list(imap)
171 pool.close()
172 pool.join()
173 results = []
174 for result in res:
175 if result is not None:
176 results.append(result)
177 return results
178
179 except KeyboardInterrupt:
180 pool.terminate()
181 pool.join()
182 raise
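
# A minimal usage sketch (hypothetical worker function, not part of this file):
#   import subprocess
#   def compress(path):
#       return subprocess.call(["gzip", path])
#   multiprocess_exec(["/tmp/a.log", "/tmp/b.log"], compress)
# runs the worker over the list on up to cpu_count() processes and returns
# the non-None results.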
diff --git a/meta/lib/oeqa/__init__.py b/meta/lib/oeqa/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/meta/lib/oeqa/__init__.py
diff --git a/meta/lib/oeqa/controllers/__init__.py b/meta/lib/oeqa/controllers/__init__.py
new file mode 100644
index 0000000000..8eda92763c
--- /dev/null
+++ b/meta/lib/oeqa/controllers/__init__.py
@@ -0,0 +1,3 @@
1# Enable other layers to have modules in the same named directory
2from pkgutil import extend_path
3__path__ = extend_path(__path__, __name__)
diff --git a/meta/lib/oeqa/controllers/masterimage.py b/meta/lib/oeqa/controllers/masterimage.py
new file mode 100644
index 0000000000..311f0cf68c
--- /dev/null
+++ b/meta/lib/oeqa/controllers/masterimage.py
@@ -0,0 +1,201 @@
1# Copyright (C) 2014 Intel Corporation
2#
3# Released under the MIT license (see COPYING.MIT)
4
5# This module adds support to testimage.bbclass to deploy images and run
6# tests using a "master image" - this is a "known good" image that is
7# installed onto the device as part of initial setup and will be booted into
8# with no interaction; we can then use it to deploy the image to be tested
9# to a second partition before running the tests.
10#
11# For an example master image, see core-image-testmaster
12# (meta/recipes-extended/images/core-image-testmaster.bb)
13
14import os
15import bb
16import traceback
17import time
18import subprocess
19
20import oeqa.targetcontrol
21import oeqa.utils.sshcontrol as sshcontrol
22import oeqa.utils.commands as commands
23from oeqa.utils import CommandError
24
25from abc import ABCMeta, abstractmethod
26
27class MasterImageHardwareTarget(oeqa.targetcontrol.BaseTarget):
28
29 __metaclass__ = ABCMeta
30
31 supported_image_fstypes = ['tar.gz', 'tar.bz2']
32
33 def __init__(self, d):
34 super(MasterImageHardwareTarget, self).__init__(d)
35
36 # target ip
37 addr = d.getVar("TEST_TARGET_IP", True) or bb.fatal('Please set TEST_TARGET_IP with the IP address of the machine you want to run the tests on.')
38 self.ip = addr.split(":")[0]
39 try:
40 self.port = addr.split(":")[1]
41 except IndexError:
42 self.port = None
43 bb.note("Target IP: %s" % self.ip)
44 self.server_ip = d.getVar("TEST_SERVER_IP", True)
45 if not self.server_ip:
46 try:
47 self.server_ip = subprocess.check_output(['ip', 'route', 'get', self.ip ]).split("\n")[0].split()[-1]
48 except Exception as e:
49 bb.fatal("Failed to determine the host IP address (alternatively you can set TEST_SERVER_IP with the IP address of this machine): %s" % e)
50 bb.note("Server IP: %s" % self.server_ip)
51
52 # test rootfs + kernel
53 self.image_fstype = self.get_image_fstype(d)
54 self.rootfs = os.path.join(d.getVar("DEPLOY_DIR_IMAGE", True), d.getVar("IMAGE_LINK_NAME", True) + '.' + self.image_fstype)
55        self.kernel = os.path.join(d.getVar("DEPLOY_DIR_IMAGE", True), d.getVar("KERNEL_IMAGETYPE", True) + '-' + d.getVar('MACHINE', True) + '.bin')
56 if not os.path.isfile(self.rootfs):
57            # we could've checked that IMAGE_FSTYPES contains tar.gz but the config for running testimage might not be
58            # the same as the config with which the image was built, i.e.
59            # you bitbake core-image-sato with IMAGE_FSTYPES += "tar.gz"
60            # and your autobuilder overwrites the config, adds the test bits and runs bitbake core-image-sato -c testimage
61            bb.fatal("No rootfs found. Did you build the image?\nIf yes, did you build it with IMAGE_FSTYPES += \"tar.gz\"? \
62                \nExpected path: %s" % self.rootfs)
63 if not os.path.isfile(self.kernel):
64 bb.fatal("No kernel found. Expected path: %s" % self.kernel)
65
66 # master ssh connection
67 self.master = None
68 # if the user knows what they are doing, then by all means...
69 self.user_cmds = d.getVar("TEST_DEPLOY_CMDS", True)
70 self.deploy_cmds = None
71
72        # this is the name of the command that controls the power for a board
73        # e.g.: TEST_POWERCONTROL_CMD = "/home/user/myscripts/powercontrol.py ${MACHINE} what-ever-other-args-the-script-wants"
74        # the command should take "off", "on" or "cycle" (off, then on) as its last argument
75 self.powercontrol_cmd = d.getVar("TEST_POWERCONTROL_CMD", True) or None
76 self.powercontrol_args = d.getVar("TEST_POWERCONTROL_EXTRA_ARGS") or ""
77
78 self.serialcontrol_cmd = d.getVar("TEST_SERIALCONTROL_CMD", True) or None
79 self.serialcontrol_args = d.getVar("TEST_SERIALCONTROL_EXTRA_ARGS") or ""
80
81 self.origenv = os.environ
82 if self.powercontrol_cmd or self.serialcontrol_cmd:
83 # the external script for controlling power might use ssh
84 # ssh + keys means we need the original user env
85 bborigenv = d.getVar("BB_ORIGENV", False) or {}
86 for key in bborigenv:
87 val = bborigenv.getVar(key, True)
88 if val is not None:
89 self.origenv[key] = str(val)
90
91 if self.powercontrol_cmd:
92 if self.powercontrol_args:
93 self.powercontrol_cmd = "%s %s" % (self.powercontrol_cmd, self.powercontrol_args)
94 if self.serialcontrol_cmd:
95 if self.serialcontrol_args:
96 self.serialcontrol_cmd = "%s %s" % (self.serialcontrol_cmd, self.serialcontrol_args)
97
98 def power_ctl(self, msg):
99 if self.powercontrol_cmd:
100 cmd = "%s %s" % (self.powercontrol_cmd, msg)
101 try:
102 commands.runCmd(cmd, assert_error=False, preexec_fn=os.setsid, env=self.origenv)
103 except CommandError as e:
104 bb.fatal(str(e))
105
106 def power_cycle(self, conn):
107 if self.powercontrol_cmd:
108 # be nice, don't just cut power
109 conn.run("shutdown -h now")
110 time.sleep(10)
111 self.power_ctl("cycle")
112 else:
113 status, output = conn.run("reboot")
114 if status != 0:
115 bb.error("Failed rebooting target and no power control command defined. You need to manually reset the device.\n%s" % output)
116
117 def _wait_until_booted(self):
118 ''' Waits until the target device has booted (if we have just power cycled it) '''
119 # Subclasses with better methods of determining boot can override this
120 time.sleep(120)
121
122 def deploy(self):
123 # base class just sets the ssh log file for us
124 super(MasterImageHardwareTarget, self).deploy()
125 self.master = sshcontrol.SSHControl(ip=self.ip, logfile=self.sshlog, timeout=600, port=self.port)
126 status, output = self.master.run("cat /etc/masterimage")
127 if status != 0:
128 # We're not booted into the master image, so try rebooting
129 bb.plain("%s - booting into the master image" % self.pn)
130 self.power_ctl("cycle")
131 self._wait_until_booted()
132
133 bb.plain("%s - deploying image on target" % self.pn)
134 status, output = self.master.run("cat /etc/masterimage")
135 if status != 0:
136 bb.fatal("No ssh connectivity or target isn't running a master image.\n%s" % output)
137 if self.user_cmds:
138 self.deploy_cmds = self.user_cmds.split("\n")
139 try:
140 self._deploy()
141 except Exception as e:
142 bb.fatal("Failed deploying test image: %s" % e)
143
144 @abstractmethod
145 def _deploy(self):
146 pass
147
148 def start(self, params=None):
149 bb.plain("%s - boot test image on target" % self.pn)
150 self._start()
151 # set the ssh object for the target/test image
152 self.connection = sshcontrol.SSHControl(self.ip, logfile=self.sshlog, port=self.port)
153 bb.plain("%s - start running tests" % self.pn)
154
155 @abstractmethod
156 def _start(self):
157 pass
158
159 def stop(self):
160 bb.plain("%s - reboot/powercycle target" % self.pn)
161 self.power_cycle(self.connection)
162
163
164class GummibootTarget(MasterImageHardwareTarget):
165
166 def __init__(self, d):
167 super(GummibootTarget, self).__init__(d)
168        # this is the value we need to set in the LoaderEntryOneShot EFI variable
169        # so the system boots the 'test' bootloader label and not the default.
170        # The first four bytes are EFI attribute bits and the rest is a utf-16le
171        # string (EFI variable values need to be utf-16); see the sketch after deploy_cmds below.
172 # $ echo -en "test\0" | iconv -f ascii -t utf-16le | hexdump -C
173 # 00000000 74 00 65 00 73 00 74 00 00 00 |t.e.s.t...|
174 self.efivarvalue = r'\x07\x00\x00\x00\x74\x00\x65\x00\x73\x00\x74\x00\x00\x00'
175 self.deploy_cmds = [
176 'mount -L boot /boot',
177 'mkdir -p /mnt/testrootfs',
178 'mount -L testrootfs /mnt/testrootfs',
179 'modprobe efivarfs',
180 'mount -t efivarfs efivarfs /sys/firmware/efi/efivars',
181 'cp ~/test-kernel /boot',
182 'rm -rf /mnt/testrootfs/*',
183 'tar xvf ~/test-rootfs.%s -C /mnt/testrootfs' % self.image_fstype,
184 'printf "%s" > /sys/firmware/efi/efivars/LoaderEntryOneShot-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f' % self.efivarvalue
185 ]
186
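    # A sketch of how the value above can be reproduced (Python 2; 0x07 is
    # assumed to be the NON_VOLATILE|BOOTSERVICE_ACCESS|RUNTIME_ACCESS
    # attribute mask, matching the hexdump in the comment above):
    #   >>> '\x07\x00\x00\x00' + u'test\0'.encode('utf-16le')
    #   '\x07\x00\x00\x00t\x00e\x00s\x00t\x00\x00\x00'
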
187 def _deploy(self):
188 # make sure these aren't mounted
189 self.master.run("umount /boot; umount /mnt/testrootfs; umount /sys/firmware/efi/efivars;")
190 # from now on, every deploy cmd should return 0
191 # else an exception will be thrown by sshcontrol
192 self.master.ignore_status = False
193 self.master.copy_to(self.rootfs, "~/test-rootfs." + self.image_fstype)
194 self.master.copy_to(self.kernel, "~/test-kernel")
195 for cmd in self.deploy_cmds:
196 self.master.run(cmd)
197
198 def _start(self, params=None):
199 self.power_cycle(self.master)
200 # there are better ways than a timeout but this should work for now
201 time.sleep(120)
diff --git a/meta/lib/oeqa/controllers/testtargetloader.py b/meta/lib/oeqa/controllers/testtargetloader.py
new file mode 100644
index 0000000000..a1b7b1d92b
--- /dev/null
+++ b/meta/lib/oeqa/controllers/testtargetloader.py
@@ -0,0 +1,70 @@
1import types
2import bb
3import os
4
5# This class is responsible for loading a test target controller
6class TestTargetLoader:
7
8 # Search oeqa.controllers module directory for and return a controller
9 # corresponding to the given target name.
10 # AttributeError raised if not found.
11 # ImportError raised if a provided module can not be imported.
12 def get_controller_module(self, target, bbpath):
13 controllerslist = self.get_controller_modulenames(bbpath)
14 bb.note("Available controller modules: %s" % str(controllerslist))
15 controller = self.load_controller_from_name(target, controllerslist)
16 return controller
17
18 # Return a list of all python modules in lib/oeqa/controllers for each
19 # layer in bbpath
20 def get_controller_modulenames(self, bbpath):
21
22 controllerslist = []
23
24 def add_controller_list(path):
25 if not os.path.exists(os.path.join(path, '__init__.py')):
26 bb.fatal('Controllers directory %s exists but is missing __init__.py' % path)
27 files = sorted([f for f in os.listdir(path) if f.endswith('.py') and not f.startswith('_')])
28 for f in files:
29 module = 'oeqa.controllers.' + f[:-3]
30 if module not in controllerslist:
31 controllerslist.append(module)
32 else:
33 bb.warn("Duplicate controller module found for %s, only one added. Layers should create unique controller module names" % module)
34
35 for p in bbpath:
36 controllerpath = os.path.join(p, 'lib', 'oeqa', 'controllers')
37 bb.debug(2, 'Searching for target controllers in %s' % controllerpath)
38 if os.path.exists(controllerpath):
39 add_controller_list(controllerpath)
40 return controllerslist
41
42 # Search for and return a controller from given target name and
43 # set of module names.
44 # Raise AttributeError if not found.
45 # Raise ImportError if a provided module can not be imported
46 def load_controller_from_name(self, target, modulenames):
47 for name in modulenames:
48 obj = self.load_controller_from_module(target, name)
49 if obj:
50 return obj
51 raise AttributeError("Unable to load {0} from available modules: {1}".format(target, str(modulenames)))
52
53 # Search for and return a controller or None from given module name
54 def load_controller_from_module(self, target, modulename):
55 obj = None
56 # import module, allowing it to raise import exception
57 module = __import__(modulename, globals(), locals(), [target])
58 # look for target class in the module, catching any exceptions as it
59 # is valid that a module may not have the target class.
60 try:
61 obj = getattr(module, target)
62 if obj:
63 from oeqa.targetcontrol import BaseTarget
64                if not isinstance(obj, (type, types.ClassType)):
65                    bb.warn("Target {0} found, but not of type Class".format(target))
66                if not issubclass(obj, BaseTarget):
67                    bb.warn("Target {0} found, but it is not a subclass of BaseTarget".format(target))
68 except:
69 obj = None
70 return obj
diff --git a/meta/lib/oeqa/oetest.py b/meta/lib/oeqa/oetest.py
new file mode 100644
index 0000000000..0b7e7dc42d
--- /dev/null
+++ b/meta/lib/oeqa/oetest.py
@@ -0,0 +1,106 @@
1# Copyright (C) 2013 Intel Corporation
2#
3# Released under the MIT license (see COPYING.MIT)
4
5# Main unittest module used by testimage.bbclass
6# This provides the oeRuntimeTest base class which is inherited by all tests in meta/lib/oeqa/runtime.
7
8# It also has some helper functions and it's responsible for actually starting the tests
9
10import os, re, mmap
11import unittest
12import inspect
13import subprocess
14from oeqa.utils.decorators import LogResults
15
16def loadTests(tc, type="runtime"):
17 if type == "runtime":
18 # set the context object passed from the test class
19 setattr(oeTest, "tc", tc)
20 # set ps command to use
21 setattr(oeRuntimeTest, "pscmd", "ps -ef" if oeTest.hasPackage("procps") else "ps")
22 # prepare test suite, loader and runner
23 suite = unittest.TestSuite()
24 elif type == "sdk":
25 # set the context object passed from the test class
26 setattr(oeTest, "tc", tc)
27 testloader = unittest.TestLoader()
28 testloader.sortTestMethodsUsing = None
29 suite = testloader.loadTestsFromNames(tc.testslist)
30
31 return suite
32
33def runTests(tc, type="runtime"):
34
35 suite = loadTests(tc, type)
36 print("Test modules %s" % tc.testslist)
37 print("Found %s tests" % suite.countTestCases())
38 runner = unittest.TextTestRunner(verbosity=2)
39 result = runner.run(suite)
40
41 return result
42
43@LogResults
44class oeTest(unittest.TestCase):
45
46 longMessage = True
47
48 @classmethod
49 def hasPackage(self, pkg):
50
51 if re.search(pkg, oeTest.tc.pkgmanifest):
52 return True
53 return False
54
55 @classmethod
56 def hasFeature(self,feature):
57
58 if feature in oeTest.tc.imagefeatures or \
59 feature in oeTest.tc.distrofeatures:
60 return True
61 else:
62 return False
63
64class oeRuntimeTest(oeTest):
65 def __init__(self, methodName='runTest'):
66 self.target = oeRuntimeTest.tc.target
67 super(oeRuntimeTest, self).__init__(methodName)
68
69 #TODO: use package_manager.py to install packages on any type of image
70 def install_packages(self, packagelist):
71 for package in packagelist:
72 (status, result) = self.target.run("smart install -y "+package)
73 if status != 0:
74 return status
75
76class oeSDKTest(oeTest):
77 def __init__(self, methodName='runTest'):
78 self.sdktestdir = oeSDKTest.tc.sdktestdir
79 super(oeSDKTest, self).__init__(methodName)
80
81def getmodule(pos=2):
82    # stack returns a list of tuples containing frame information
83    # The first element of the list is the current frame; the caller is at index 1
84 frameinfo = inspect.stack()[pos]
85 modname = inspect.getmodulename(frameinfo[1])
86 #modname = inspect.getmodule(frameinfo[0]).__name__
87 return modname
88
89def skipModule(reason, pos=2):
90 modname = getmodule(pos)
91 if modname not in oeTest.tc.testsrequired:
92 raise unittest.SkipTest("%s: %s" % (modname, reason))
93 else:
94 raise Exception("\nTest %s wants to be skipped.\nReason is: %s" \
95 "\nTest was required in TEST_SUITES, so either the condition for skipping is wrong" \
96 "\nor the image really doesn't have the required feature/package when it should." % (modname, reason))
97
98def skipModuleIf(cond, reason):
99
100 if cond:
101 skipModule(reason, 3)
102
103def skipModuleUnless(cond, reason):
104
105 if not cond:
106 skipModule(reason, 3)
diff --git a/meta/lib/oeqa/runexported.py b/meta/lib/oeqa/runexported.py
new file mode 100755
index 0000000000..e1b6642ec2
--- /dev/null
+++ b/meta/lib/oeqa/runexported.py
@@ -0,0 +1,140 @@
1#!/usr/bin/env python
2
3
4# Copyright (C) 2013 Intel Corporation
5#
6# Released under the MIT license (see COPYING.MIT)
7
8# This script should be used outside of the build system to run image tests.
9# It needs a json file as input, as exported by the build.
10# E.g. for an already built image:
11#- export the tests:
12# TEST_EXPORT_ONLY = "1"
13# TEST_TARGET = "simpleremote"
14# TEST_TARGET_IP = "192.168.7.2"
15# TEST_SERVER_IP = "192.168.7.1"
16# bitbake core-image-sato -c testimage
17# Setup your target, e.g for qemu: runqemu core-image-sato
18# cd build/tmp/testimage/core-image-sato
19# ./runexported.py testdata.json
20
21import sys
22import os
23import time
24from optparse import OptionParser
25
26try:
27 import simplejson as json
28except ImportError:
29 import json
30
31sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "oeqa")))
32
33from oeqa.oetest import runTests
34from oeqa.utils.sshcontrol import SSHControl
35
36# this isn't pretty but we need a fake target object
37# for running the tests externally, as we don't care
38# about deploy/start; we only care about the connection methods (run, copy)
39class FakeTarget(object):
40 def __init__(self, d):
41 self.connection = None
42 self.ip = None
43 self.server_ip = None
44 self.datetime = time.strftime('%Y%m%d%H%M%S',time.gmtime())
45 self.testdir = d.getVar("TEST_LOG_DIR", True)
46 self.pn = d.getVar("PN", True)
47
48 def exportStart(self):
49 self.sshlog = os.path.join(self.testdir, "ssh_target_log.%s" % self.datetime)
50 sshloglink = os.path.join(self.testdir, "ssh_target_log")
51 if os.path.islink(sshloglink):
52 os.unlink(sshloglink)
53 os.symlink(self.sshlog, sshloglink)
54 print("SSH log file: %s" % self.sshlog)
55 self.connection = SSHControl(self.ip, logfile=self.sshlog)
56
57 def run(self, cmd, timeout=None):
58 return self.connection.run(cmd, timeout)
59
60 def copy_to(self, localpath, remotepath):
61 return self.connection.copy_to(localpath, remotepath)
62
63 def copy_from(self, remotepath, localpath):
64 return self.connection.copy_from(remotepath, localpath)
65
66
67class MyDataDict(dict):
68 def getVar(self, key, unused = None):
69 return self.get(key, "")
70
71class TestContext(object):
72 def __init__(self):
73 self.d = None
74 self.target = None
75
76def main():
77
78 usage = "usage: %prog [options] <json file>"
79 parser = OptionParser(usage=usage)
80 parser.add_option("-t", "--target-ip", dest="ip", help="The IP address of the target machine. Use this to \
81 overwrite the value determined from TEST_TARGET_IP at build time")
82 parser.add_option("-s", "--server-ip", dest="server_ip", help="The IP address of this machine. Use this to \
83 overwrite the value determined from TEST_SERVER_IP at build time.")
84    parser.add_option("-d", "--deploy-dir", dest="deploy_dir", help="Full path to the package feeds; this is \
85the contents of what used to be DEPLOY_DIR on the build machine. If not specified it will use the value \
86from the json, provided that directory actually exists, or it will error out.")
87    parser.add_option("-l", "--log-dir", dest="log_dir", help="This sets the path for TEST_LOG_DIR. If not specified \
88the current dir is used. This is usually used for creating an ssh log file and an scp test file.")
89
90 (options, args) = parser.parse_args()
91 if len(args) != 1:
92 parser.error("Incorrect number of arguments. The one and only argument should be a json file exported by the build system")
93
94 with open(args[0], "r") as f:
95 loaded = json.load(f)
96
97 if options.ip:
98 loaded["target"]["ip"] = options.ip
99 if options.server_ip:
100 loaded["target"]["server_ip"] = options.server_ip
101
102 d = MyDataDict()
103 for key in loaded["d"].keys():
104 d[key] = loaded["d"][key]
105
106 if options.log_dir:
107 d["TEST_LOG_DIR"] = options.log_dir
108 else:
109 d["TEST_LOG_DIR"] = os.path.abspath(os.path.dirname(__file__))
110 if options.deploy_dir:
111 d["DEPLOY_DIR"] = options.deploy_dir
112 else:
113 if not os.path.isdir(d["DEPLOY_DIR"]):
114            raise Exception("The path to DEPLOY_DIR does not exist: %s" % d["DEPLOY_DIR"])
115
116
117 target = FakeTarget(d)
118 for key in loaded["target"].keys():
119 setattr(target, key, loaded["target"][key])
120
121 tc = TestContext()
122 setattr(tc, "d", d)
123 setattr(tc, "target", target)
124 for key in loaded.keys():
125 if key != "d" and key != "target":
126 setattr(tc, key, loaded[key])
127
128 target.exportStart()
129 runTests(tc)
130
131 return 0
132
133if __name__ == "__main__":
134 try:
135 ret = main()
136 except Exception:
137 ret = 1
138 import traceback
139 traceback.print_exc(5)
140 sys.exit(ret)
diff --git a/meta/lib/oeqa/runtime/__init__.py b/meta/lib/oeqa/runtime/__init__.py
new file mode 100644
index 0000000000..4cf3fa76b6
--- /dev/null
+++ b/meta/lib/oeqa/runtime/__init__.py
@@ -0,0 +1,3 @@
1# Enable other layers to have tests in the same named directory
2from pkgutil import extend_path
3__path__ = extend_path(__path__, __name__)
diff --git a/meta/lib/oeqa/runtime/_ptest.py b/meta/lib/oeqa/runtime/_ptest.py
new file mode 100644
index 0000000000..4c58dc1d7f
--- /dev/null
+++ b/meta/lib/oeqa/runtime/_ptest.py
@@ -0,0 +1,124 @@
1import unittest, os, shutil
2from oeqa.oetest import oeRuntimeTest, skipModule
3from oeqa.utils.decorators import *
4from oeqa.utils.logparser import *
5from oeqa.utils.httpserver import HTTPService
6import bb
7import glob
8from oe.package_manager import RpmPkgsList
9import subprocess
10
11def setUpModule():
12 if not oeRuntimeTest.hasFeature("package-management"):
13 skipModule("Image doesn't have package management feature")
14 if not oeRuntimeTest.hasPackage("smart"):
15 skipModule("Image doesn't have smart installed")
16 if "package_rpm" != oeRuntimeTest.tc.d.getVar("PACKAGE_CLASSES", True).split()[0]:
17 skipModule("Rpm is not the primary package manager")
18
19class PtestRunnerTest(oeRuntimeTest):
20
21 # a ptest log parser
22 def parse_ptest(self, logfile):
23 parser = Lparser(test_0_pass_regex="^PASS:(.+)", test_0_fail_regex="^FAIL:(.+)", section_0_begin_regex="^BEGIN: .*/(.+)/ptest", section_0_end_regex="^END: .*/(.+)/ptest")
24 parser.init()
25 result = Result()
26
27 with open(logfile) as f:
28 for line in f:
29                result_tuple = parser.parse_line(line)
30                if not result_tuple:
31                    continue
32                line_type, category, status, name = result_tuple
33
34 if line_type == 'section' and status == 'begin':
35 current_section = name
36 continue
37
38 if line_type == 'section' and status == 'end':
39 current_section = None
40 continue
41
42 if line_type == 'test' and status == 'pass':
43 result.store(current_section, name, status)
44 continue
45
46 if line_type == 'test' and status == 'fail':
47 result.store(current_section, name, status)
48 continue
49
50 result.sort_tests()
51 return result
52
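    # Sample log lines the regexes above are meant to match (illustrative):
    #   BEGIN: /usr/lib/zlib/ptest
    #   PASS: test-compress
    #   FAIL: test-inflate
    #   END: /usr/lib/zlib/ptest
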
53 @classmethod
54 def setUpClass(self):
55 #note the existing channels that are on the board before creating new ones
56# self.existingchannels = set()
57# (status, result) = oeRuntimeTest.tc.target.run('smart channel --show | grep "\["', 0)
58# for x in result.split("\n"):
59# self.existingchannels.add(x)
60 self.repo_server = HTTPService(oeRuntimeTest.tc.d.getVar('DEPLOY_DIR', True), oeRuntimeTest.tc.target.server_ip)
61 self.repo_server.start()
62
63 @classmethod
64 def tearDownClass(self):
65 self.repo_server.stop()
66 #remove created channels to be able to repeat the tests on same image
67# (status, result) = oeRuntimeTest.tc.target.run('smart channel --show | grep "\["', 0)
68# for x in result.split("\n"):
69# if x not in self.existingchannels:
70# oeRuntimeTest.tc.target.run('smart channel --remove '+x[1:-1]+' -y', 0)
71
72 def add_smart_channel(self):
73 image_pkgtype = self.tc.d.getVar('IMAGE_PKGTYPE', True)
74 deploy_url = 'http://%s:%s/%s' %(self.target.server_ip, self.repo_server.port, image_pkgtype)
75 pkgarchs = self.tc.d.getVar('PACKAGE_ARCHS', True).replace("-","_").split()
76 for arch in os.listdir('%s/%s' % (self.repo_server.root_dir, image_pkgtype)):
77 if arch in pkgarchs:
78 self.target.run('smart channel -y --add {a} type=rpm-md baseurl={u}/{a}'.format(a=arch, u=deploy_url), 0)
79 self.target.run('smart update', 0)
80
81 def install_complementary(self, globs=None):
82 installed_pkgs_file = os.path.join(oeRuntimeTest.tc.d.getVar('WORKDIR', True),
83 "installed_pkgs.txt")
84 self.pkgs_list = RpmPkgsList(oeRuntimeTest.tc.d, oeRuntimeTest.tc.d.getVar('IMAGE_ROOTFS', True), oeRuntimeTest.tc.d.getVar('arch_var', True), oeRuntimeTest.tc.d.getVar('os_var', True))
85 with open(installed_pkgs_file, "w+") as installed_pkgs:
86 installed_pkgs.write(self.pkgs_list.list("arch"))
87
88 cmd = [bb.utils.which(os.getenv('PATH'), "oe-pkgdata-util"),
89 "glob", oeRuntimeTest.tc.d.getVar('PKGDATA_DIR', True), installed_pkgs_file,
90 globs]
91 try:
92 bb.note("Installing complementary packages ...")
93 complementary_pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
94 except subprocess.CalledProcessError as e:
95 bb.fatal("Could not compute complementary packages list. Command "
96 "'%s' returned %d:\n%s" %
97 (' '.join(cmd), e.returncode, e.output))
98
99 return complementary_pkgs.split()
100
101 def setUp(self):
102 self.buildhist_dir = oeRuntimeTest.tc.d.getVar("BUILDHISTORY_DIR_IMAGE", True)
103 self.assertTrue(os.path.exists(self.buildhist_dir))
104 self.ptest_log = os.path.join(oeRuntimeTest.tc.d.getVar("TEST_LOG_DIR",True), "ptest-%s.log" % oeRuntimeTest.tc.d.getVar('DATETIME', True))
105
106 @skipUnlessPassed('test_ssh')
107 def test_ptestrunner(self):
108 self.add_smart_channel()
109 cond = oeRuntimeTest.hasPackage("ptest-runner") and oeRuntimeTest.hasFeature("ptest") and oeRuntimeTest.hasPackage("-ptest")
110 if not cond:
111 self.install_packages(self.install_complementary("*-ptest"))
112 self.install_packages(['ptest-runner'])
113
114 self.target.run('/usr/bin/ptest-runner > /tmp/ptest.log 2>&1', 0)
115 self.target.copy_from('/tmp/ptest.log', self.ptest_log)
116 shutil.copyfile(self.ptest_log, os.path.join(self.buildhist_dir, "ptest.log"))
117
118 result = self.parse_ptest(os.path.join(self.buildhist_dir, "ptest.log"))
119 log_results_to_location = "./results"
120 if os.path.exists(log_results_to_location):
121 shutil.rmtree(log_results_to_location)
122 os.makedirs(log_results_to_location)
123
124 result.log_as_files(log_results_to_location, test_status = ['fail'])
diff --git a/meta/lib/oeqa/runtime/buildcvs.py b/meta/lib/oeqa/runtime/buildcvs.py
new file mode 100644
index 0000000000..fe6cbfbcd5
--- /dev/null
+++ b/meta/lib/oeqa/runtime/buildcvs.py
@@ -0,0 +1,31 @@
1from oeqa.oetest import oeRuntimeTest, skipModule
2from oeqa.utils.decorators import *
3from oeqa.utils.targetbuild import TargetBuildProject
4
5def setUpModule():
6 if not oeRuntimeTest.hasFeature("tools-sdk"):
7 skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES")
8
9class BuildCvsTest(oeRuntimeTest):
10
11 @classmethod
12 def setUpClass(self):
13 self.project = TargetBuildProject(oeRuntimeTest.tc.target, oeRuntimeTest.tc.d,
14 "http://ftp.gnu.org/non-gnu/cvs/source/feature/1.12.13/cvs-1.12.13.tar.bz2")
15 self.project.download_archive()
16
17 @testcase(205)
18 @skipUnlessPassed("test_ssh")
19 def test_cvs(self):
20 self.assertEqual(self.project.run_configure(), 0,
21 msg="Running configure failed")
22
23 self.assertEqual(self.project.run_make(), 0,
24 msg="Running make failed")
25
26 self.assertEqual(self.project.run_install(), 0,
27 msg="Running make install failed")
28
29 @classmethod
30 def tearDownClass(self):
31 self.project.clean()
diff --git a/meta/lib/oeqa/runtime/buildiptables.py b/meta/lib/oeqa/runtime/buildiptables.py
new file mode 100644
index 0000000000..09e252df8c
--- /dev/null
+++ b/meta/lib/oeqa/runtime/buildiptables.py
@@ -0,0 +1,31 @@
1from oeqa.oetest import oeRuntimeTest, skipModule
2from oeqa.utils.decorators import *
3from oeqa.utils.targetbuild import TargetBuildProject
4
5def setUpModule():
6 if not oeRuntimeTest.hasFeature("tools-sdk"):
7 skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES")
8
9class BuildIptablesTest(oeRuntimeTest):
10
11 @classmethod
12 def setUpClass(self):
13 self.project = TargetBuildProject(oeRuntimeTest.tc.target, oeRuntimeTest.tc.d,
14 "http://netfilter.org/projects/iptables/files/iptables-1.4.13.tar.bz2")
15 self.project.download_archive()
16
17 @testcase(206)
18 @skipUnlessPassed("test_ssh")
19 def test_iptables(self):
20 self.assertEqual(self.project.run_configure(), 0,
21 msg="Running configure failed")
22
23 self.assertEqual(self.project.run_make(), 0,
24 msg="Running make failed")
25
26 self.assertEqual(self.project.run_install(), 0,
27 msg="Running make install failed")
28
29 @classmethod
30 def tearDownClass(self):
31 self.project.clean()
diff --git a/meta/lib/oeqa/runtime/buildsudoku.py b/meta/lib/oeqa/runtime/buildsudoku.py
new file mode 100644
index 0000000000..802b060010
--- /dev/null
+++ b/meta/lib/oeqa/runtime/buildsudoku.py
@@ -0,0 +1,28 @@
1from oeqa.oetest import oeRuntimeTest, skipModule
2from oeqa.utils.decorators import *
3from oeqa.utils.targetbuild import TargetBuildProject
4
5def setUpModule():
6 if not oeRuntimeTest.hasFeature("tools-sdk"):
7 skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES")
8
9class SudokuTest(oeRuntimeTest):
10
11 @classmethod
12 def setUpClass(self):
13 self.project = TargetBuildProject(oeRuntimeTest.tc.target, oeRuntimeTest.tc.d,
14 "http://downloads.sourceforge.net/project/sudoku-savant/sudoku-savant/sudoku-savant-1.3/sudoku-savant-1.3.tar.bz2")
15 self.project.download_archive()
16
17 @testcase(207)
18 @skipUnlessPassed("test_ssh")
19 def test_sudoku(self):
20 self.assertEqual(self.project.run_configure(), 0,
21 msg="Running configure failed")
22
23 self.assertEqual(self.project.run_make(), 0,
24 msg="Running make failed")
25
26 @classmethod
27 def tearDownClass(self):
28 self.project.clean()
diff --git a/meta/lib/oeqa/runtime/connman.py b/meta/lib/oeqa/runtime/connman.py
new file mode 100644
index 0000000000..cc537f7766
--- /dev/null
+++ b/meta/lib/oeqa/runtime/connman.py
@@ -0,0 +1,30 @@
1import unittest
2from oeqa.oetest import oeRuntimeTest, skipModule
3from oeqa.utils.decorators import *
4
5def setUpModule():
6 if not oeRuntimeTest.hasPackage("connman"):
7 skipModule("No connman package in image")
8
9
10class ConnmanTest(oeRuntimeTest):
11
12 def service_status(self, service):
13 if oeRuntimeTest.hasFeature("systemd"):
14 (status, output) = self.target.run('systemctl status -l %s' % service)
15 return output
16 else:
17 return "Unable to get status or logs for %s" % service
18
19 @skipUnlessPassed('test_ssh')
20 def test_connmand_help(self):
21 (status, output) = self.target.run('/usr/sbin/connmand --help')
22 self.assertEqual(status, 0, msg="status and output: %s and %s" % (status,output))
23
24 @testcase(221)
25 @skipUnlessPassed('test_connmand_help')
26 def test_connmand_running(self):
27 (status, output) = self.target.run(oeRuntimeTest.pscmd + ' | grep [c]onnmand')
28 if status != 0:
29 print self.service_status("connman")
30 self.fail("No connmand process running")
diff --git a/meta/lib/oeqa/runtime/date.py b/meta/lib/oeqa/runtime/date.py
new file mode 100644
index 0000000000..97e8ee42ad
--- /dev/null
+++ b/meta/lib/oeqa/runtime/date.py
@@ -0,0 +1,23 @@
1from oeqa.oetest import oeRuntimeTest
2from oeqa.utils.decorators import *
3import re
4
5class DateTest(oeRuntimeTest):
6
7 @testcase(211)
8 @skipUnlessPassed("test_ssh")
9 def test_date(self):
10 (status, output) = self.target.run('date +"%Y-%m-%d %T"')
11 self.assertEqual(status, 0, msg="Failed to get initial date, output: %s" % output)
12 oldDate = output
13
14 sampleDate = '"2016-08-09 10:00:00"'
15 (status, output) = self.target.run("date -s %s" % sampleDate)
16 self.assertEqual(status, 0, msg="Date set failed, output: %s" % output)
17
18 (status, output) = self.target.run("date -R")
19 p = re.match('Tue, 09 Aug 2016 10:00:.. \+0000', output)
20 self.assertTrue(p, msg="The date was not set correctly, output: %s" % output)
21
22 (status, output) = self.target.run('date -s "%s"' % oldDate)
23 self.assertEqual(status, 0, msg="Failed to reset date, output: %s" % output)
diff --git a/meta/lib/oeqa/runtime/df.py b/meta/lib/oeqa/runtime/df.py
new file mode 100644
index 0000000000..09569d5ff6
--- /dev/null
+++ b/meta/lib/oeqa/runtime/df.py
@@ -0,0 +1,12 @@
1import unittest
2from oeqa.oetest import oeRuntimeTest
3from oeqa.utils.decorators import *
4
5
6class DfTest(oeRuntimeTest):
7
8 @testcase(234)
9 @skipUnlessPassed("test_ssh")
10 def test_df(self):
11 (status,output) = self.target.run("df / | sed -n '2p' | awk '{print $4}'")
12 self.assertTrue(int(output)>5120, msg="Not enough space on image. Current size is %s" % output)
diff --git a/meta/lib/oeqa/runtime/dmesg.py b/meta/lib/oeqa/runtime/dmesg.py
new file mode 100644
index 0000000000..5831471e50
--- /dev/null
+++ b/meta/lib/oeqa/runtime/dmesg.py
@@ -0,0 +1,12 @@
1import unittest
2from oeqa.oetest import oeRuntimeTest
3from oeqa.utils.decorators import *
4
5
6class DmesgTest(oeRuntimeTest):
7
8 @testcase(215)
9 @skipUnlessPassed('test_ssh')
10 def test_dmesg(self):
11 (status, output) = self.target.run('dmesg | grep -v mmci-pl18x | grep -v "error changing net interface name" | grep -iv "dma timeout" | grep -v usbhid | grep -i error')
12 self.assertEqual(status, 1, msg = "Error messages in dmesg log: %s" % output)
diff --git a/meta/lib/oeqa/runtime/files/hellomod.c b/meta/lib/oeqa/runtime/files/hellomod.c
new file mode 100644
index 0000000000..a383397e93
--- /dev/null
+++ b/meta/lib/oeqa/runtime/files/hellomod.c
@@ -0,0 +1,19 @@
1#include <linux/module.h>
2#include <linux/kernel.h>
3#include <linux/init.h>
4
5static int __init hello_init(void)
6{
7 printk(KERN_INFO "Hello world!\n");
8 return 0;
9}
10
11static void __exit hello_cleanup(void)
12{
13 printk(KERN_INFO "Cleaning up hellomod.\n");
14}
15
16module_init(hello_init);
17module_exit(hello_cleanup);
18
19MODULE_LICENSE("GPL");
diff --git a/meta/lib/oeqa/runtime/files/hellomod_makefile b/meta/lib/oeqa/runtime/files/hellomod_makefile
new file mode 100644
index 0000000000..b92d5c8fe0
--- /dev/null
+++ b/meta/lib/oeqa/runtime/files/hellomod_makefile
@@ -0,0 +1,8 @@
1obj-m := hellomod.o
2KDIR := /usr/src/kernel
3
4all:
5 $(MAKE) -C $(KDIR) M=$(PWD) modules
6
7clean:
8 $(MAKE) -C $(KDIR) M=$(PWD) clean
diff --git a/meta/lib/oeqa/runtime/files/test.c b/meta/lib/oeqa/runtime/files/test.c
new file mode 100644
index 0000000000..2d8389c92e
--- /dev/null
+++ b/meta/lib/oeqa/runtime/files/test.c
@@ -0,0 +1,26 @@
1#include <stdio.h>
2#include <math.h>
3#include <stdlib.h>
4
5double convert(long long l)
6{
7 return (double)l;
8}
9
10int main(int argc, char * argv[]) {
11
12 long long l = 10;
13 double f;
14 double check = 10.0;
15
16 f = convert(l);
17 printf("convert: %lld => %f\n", l, f);
18 if ( f != check ) exit(1);
19
20 f = 1234.67;
21 check = 1234.0;
22 printf("floorf(%f) = %f\n", f, floorf(f));
23 if ( floorf(f) != check) exit(1);
24
25 return 0;
26}
diff --git a/meta/lib/oeqa/runtime/files/test.cpp b/meta/lib/oeqa/runtime/files/test.cpp
new file mode 100644
index 0000000000..9e1a76473d
--- /dev/null
+++ b/meta/lib/oeqa/runtime/files/test.cpp
@@ -0,0 +1,3 @@
1#include <limits>
2
3int main() {} \ No newline at end of file
diff --git a/meta/lib/oeqa/runtime/files/test.pl b/meta/lib/oeqa/runtime/files/test.pl
new file mode 100644
index 0000000000..689c8f1635
--- /dev/null
+++ b/meta/lib/oeqa/runtime/files/test.pl
@@ -0,0 +1,2 @@
1$a = 9.01e+21 - 9.01e+21 + 0.01;
2print ("the value of a is ", $a, "\n");
diff --git a/meta/lib/oeqa/runtime/files/test.py b/meta/lib/oeqa/runtime/files/test.py
new file mode 100644
index 0000000000..f3a2273c52
--- /dev/null
+++ b/meta/lib/oeqa/runtime/files/test.py
@@ -0,0 +1,6 @@
1import os
2
3os.system('touch /tmp/testfile.python')
4
5a = 9.01e+21 - 9.01e+21 + 0.01
6print "the value of a is %s" % a
diff --git a/meta/lib/oeqa/runtime/files/testmakefile b/meta/lib/oeqa/runtime/files/testmakefile
new file mode 100644
index 0000000000..ca1844e930
--- /dev/null
+++ b/meta/lib/oeqa/runtime/files/testmakefile
@@ -0,0 +1,5 @@
1test: test.o
2 gcc -o test test.o -lm
3test.o: test.c
4 gcc -c test.c
5
diff --git a/meta/lib/oeqa/runtime/gcc.py b/meta/lib/oeqa/runtime/gcc.py
new file mode 100644
index 0000000000..a7f62e1758
--- /dev/null
+++ b/meta/lib/oeqa/runtime/gcc.py
@@ -0,0 +1,46 @@
1import unittest
2import os
3from oeqa.oetest import oeRuntimeTest, skipModule
4from oeqa.utils.decorators import *
5
6def setUpModule():
7 if not oeRuntimeTest.hasFeature("tools-sdk"):
8 skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES")
9
10
11class GccCompileTest(oeRuntimeTest):
12
13 @classmethod
14 def setUpClass(self):
15 oeRuntimeTest.tc.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "test.c"), "/tmp/test.c")
16 oeRuntimeTest.tc.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "testmakefile"), "/tmp/testmakefile")
17 oeRuntimeTest.tc.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "test.cpp"), "/tmp/test.cpp")
18
19 @testcase(203)
20 def test_gcc_compile(self):
21 (status, output) = self.target.run('gcc /tmp/test.c -o /tmp/test -lm')
22 self.assertEqual(status, 0, msg="gcc compile failed, output: %s" % output)
23 (status, output) = self.target.run('/tmp/test')
24 self.assertEqual(status, 0, msg="running compiled file failed, output %s" % output)
25
26 @testcase(200)
27 def test_gpp_compile(self):
28 (status, output) = self.target.run('g++ /tmp/test.c -o /tmp/test -lm')
29 self.assertEqual(status, 0, msg="g++ compile failed, output: %s" % output)
30 (status, output) = self.target.run('/tmp/test')
31 self.assertEqual(status, 0, msg="running compiled file failed, output %s" % output)
32
33 def test_gpp2_compile(self):
34 (status, output) = self.target.run('g++ /tmp/test.cpp -o /tmp/test -lm')
35 self.assertEqual(status, 0, msg="g++ compile failed, output: %s" % output)
36 (status, output) = self.target.run('/tmp/test')
37 self.assertEqual(status, 0, msg="running compiled file failed, output %s" % output)
38
39 @testcase(204)
40 def test_make(self):
41 (status, output) = self.target.run('cd /tmp; make -f testmakefile')
42 self.assertEqual(status, 0, msg="running make failed, output %s" % output)
43
44 @classmethod
45 def tearDownClass(self):
46 oeRuntimeTest.tc.target.run("rm /tmp/test.c /tmp/test.o /tmp/test /tmp/testmakefile")
diff --git a/meta/lib/oeqa/runtime/kernelmodule.py b/meta/lib/oeqa/runtime/kernelmodule.py
new file mode 100644
index 0000000000..2e81720327
--- /dev/null
+++ b/meta/lib/oeqa/runtime/kernelmodule.py
@@ -0,0 +1,34 @@
1import unittest
2import os
3from oeqa.oetest import oeRuntimeTest, skipModule
4from oeqa.utils.decorators import *
5
6def setUpModule():
7 if not oeRuntimeTest.hasFeature("tools-sdk"):
8 skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES")
9
10
11class KernelModuleTest(oeRuntimeTest):
12
13 def setUp(self):
14 self.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "hellomod.c"), "/tmp/hellomod.c")
15 self.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "hellomod_makefile"), "/tmp/Makefile")
16
17	    @testcase(316)
18 @skipUnlessPassed('test_ssh')
19 @skipUnlessPassed('test_gcc_compile')
20 def test_kernel_module(self):
21 cmds = [
22 'cd /usr/src/kernel && make scripts',
23 'cd /tmp && make',
24 'cd /tmp && insmod hellomod.ko',
25 'lsmod | grep hellomod',
26 'dmesg | grep Hello',
27 'rmmod hellomod', 'dmesg | grep "Cleaning up hellomod"'
28 ]
29 for cmd in cmds:
30 (status, output) = self.target.run(cmd, 900)
31 self.assertEqual(status, 0, msg="\n".join([cmd, output]))
32
33 def tearDown(self):
34 self.target.run('rm -f /tmp/Makefile /tmp/hellomod.c')
diff --git a/meta/lib/oeqa/runtime/ldd.py b/meta/lib/oeqa/runtime/ldd.py
new file mode 100644
index 0000000000..bce56c4270
--- /dev/null
+++ b/meta/lib/oeqa/runtime/ldd.py
@@ -0,0 +1,20 @@
1import unittest
2from oeqa.oetest import oeRuntimeTest, skipModule
3from oeqa.utils.decorators import *
4
5def setUpModule():
6 if not oeRuntimeTest.hasFeature("tools-sdk"):
7 skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES")
8
9class LddTest(oeRuntimeTest):
10
11 @skipUnlessPassed('test_ssh')
12 def test_ldd_exists(self):
13 (status, output) = self.target.run('which ldd')
14 self.assertEqual(status, 0, msg = "ldd does not exist in PATH: which ldd: %s" % output)
15
16 @testcase(239)
17 @skipUnlessPassed('test_ldd_exists')
18 def test_ldd_rtldlist_check(self):
19 (status, output) = self.target.run('for i in $(which ldd | xargs cat | grep "^RTLDLIST"|cut -d\'=\' -f2|tr -d \'"\'); do test -f $i && echo $i && break; done')
20 self.assertEqual(status, 0, msg = "ldd path not correct or RTLDLIST files don't exist. ")
diff --git a/meta/lib/oeqa/runtime/logrotate.py b/meta/lib/oeqa/runtime/logrotate.py
new file mode 100644
index 0000000000..86d791c300
--- /dev/null
+++ b/meta/lib/oeqa/runtime/logrotate.py
@@ -0,0 +1,28 @@
1# This test should cover https://bugzilla.yoctoproject.org/tr_show_case.cgi?case_id=289 testcase
2# Note that the image under test must have logrotate installed
3
4import unittest
5from oeqa.oetest import oeRuntimeTest, skipModule
6from oeqa.utils.decorators import *
7
8def setUpModule():
9 if not oeRuntimeTest.hasPackage("logrotate"):
10 skipModule("No logrotate package in image")
11
12
13class LogrotateTest(oeRuntimeTest):
14
15 @skipUnlessPassed("test_ssh")
16 def test_1_logrotate_setup(self):
17 (status, output) = self.target.run('mkdir /home/root/logrotate_dir')
18 self.assertEqual(status, 0, msg = "Could not create logrotate_dir. Output: %s" % output)
19 (status, output) = self.target.run("sed -i 's#wtmp {#wtmp {\\n olddir /home/root/logrotate_dir#' /etc/logrotate.conf")
20 self.assertEqual(status, 0, msg = "Could not write to logrotate.conf file. Status and output: %s and %s)" % (status, output))
21
22 @testcase(289)
23 @skipUnlessPassed("test_1_logrotate_setup")
24 def test_2_logrotate(self):
25 (status, output) = self.target.run('logrotate -f /etc/logrotate.conf')
26 self.assertEqual(status, 0, msg = "logrotate service could not be reloaded. Status and output: %s and %s" % (status, output))
27 output = self.target.run('ls -la /home/root/logrotate_dir/ | wc -l')[1]
28	        self.assertTrue(int(output)>=4, msg = "no new logfile created ('ls -la' on an empty directory already prints 3 lines). List of files within log directory: %s" %(self.target.run('ls -la /home/root/logrotate_dir')[1]))
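
The sed expression in test_1_logrotate_setup inserts an olddir directive right after the wtmp stanza opens, so rotated copies land in a directory the test can then count. A sketch of the same text transformation:

    def add_olddir(conf_text, olddir="/home/root/logrotate_dir"):
        # Equivalent of: sed 's#wtmp {#wtmp {\n    olddir <dir>#'
        return conf_text.replace("wtmp {", "wtmp {\n    olddir %s" % olddir)

    print(add_olddir("/var/log/wtmp {\n    monthly\n}"))
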
diff --git a/meta/lib/oeqa/runtime/multilib.py b/meta/lib/oeqa/runtime/multilib.py
new file mode 100644
index 0000000000..ab0a6ccd69
--- /dev/null
+++ b/meta/lib/oeqa/runtime/multilib.py
@@ -0,0 +1,18 @@
1import unittest
2from oeqa.oetest import oeRuntimeTest, skipModule
3from oeqa.utils.decorators import *
4
5def setUpModule():
6 multilibs = oeRuntimeTest.tc.d.getVar("MULTILIBS", True) or ""
7 if "multilib:lib32" not in multilibs:
8 skipModule("this isn't a multilib:lib32 image")
9
10
11class MultilibTest(oeRuntimeTest):
12
13	    @testcase(279)
14 @skipUnlessPassed('test_ssh')
15 def test_file_connman(self):
16 self.assertTrue(oeRuntimeTest.hasPackage('connman-gnome'), msg="This test assumes connman-gnome is installed")
17 (status, output) = self.target.run("readelf -h /usr/bin/connman-applet | sed -n '3p' | awk '{print $2}'")
18 self.assertEqual(output, "ELF32", msg="connman-applet isn't an ELF32 binary. readelf says: %s" % self.target.run("readelf -h /usr/bin/connman-applet")[1])
diff --git a/meta/lib/oeqa/runtime/pam.py b/meta/lib/oeqa/runtime/pam.py
new file mode 100644
index 0000000000..c8205c9abc
--- /dev/null
+++ b/meta/lib/oeqa/runtime/pam.py
@@ -0,0 +1,25 @@
1# This test should cover https://bugzilla.yoctoproject.org/tr_show_case.cgi?case_id=287 testcase
2# Note that the image under test must have "pam" in DISTRO_FEATURES
3
4import unittest
5from oeqa.oetest import oeRuntimeTest, skipModule
6from oeqa.utils.decorators import *
7
8def setUpModule():
9 if not oeRuntimeTest.hasFeature("pam"):
10 skipModule("target doesn't have 'pam' in DISTRO_FEATURES")
11
12
13class PamBasicTest(oeRuntimeTest):
14
15 @testcase(287)
16 @skipUnlessPassed('test_ssh')
17 def test_pam(self):
18 (status, output) = self.target.run('login --help')
19	        self.assertEqual(status, 1, msg = "login command does not work as expected. Status and output: %s and %s" % (status, output))
20	        (status, output) = self.target.run('passwd --help')
21	        self.assertEqual(status, 0, msg = "passwd command does not work as expected. Status and output: %s and %s" % (status, output))
22	        (status, output) = self.target.run('su --help')
23	        self.assertEqual(status, 0, msg = "su command does not work as expected. Status and output: %s and %s" % (status, output))
24	        (status, output) = self.target.run('useradd --help')
25	        self.assertEqual(status, 0, msg = "useradd command does not work as expected. Status and output: %s and %s" % (status, output))
diff --git a/meta/lib/oeqa/runtime/parselogs.py b/meta/lib/oeqa/runtime/parselogs.py
new file mode 100644
index 0000000000..42cb1b5e6f
--- /dev/null
+++ b/meta/lib/oeqa/runtime/parselogs.py
@@ -0,0 +1,178 @@
1import os
2import unittest
3from oeqa.oetest import oeRuntimeTest
4from oeqa.utils.decorators import *
5
6# In the future these lists could be moved outside of this module.
7errors = ["error", "cannot", "can\'t", "failed"]
8
9common_errors = [
10 '(WW) warning, (EE) error, (NI) not implemented, (??) unknown.',
11 'dma timeout',
12 'can\'t add hid device:',
13 'usbhid: probe of ',
14 ]
15
16x86_common = [
17 '[drm:psb_do_init] *ERROR* Debug is',
18 'wrong ELF class',
19 'Could not enable PowerButton event',
20 'probe of LNXPWRBN:00 failed with error -22',
21] + common_errors
22
23qemux86_common = [
24 'Fast TSC calibration',
25 '_OSC failed (AE_NOT_FOUND); disabling ASPM',
26 'Open ACPI failed (/var/run/acpid.socket) (No such file or directory)',
27 'Failed to load module "vesa"',
28 'Failed to load module "modesetting"',
29 'Failed to load module "glx"',
30 'wrong ELF class',
31] + common_errors
32
33ignore_errors = {
34 'default' : common_errors,
35 'qemux86' : [
36 'Failed to access perfctr msr (MSR c1 is 0)',
37 "fail to add MMCONFIG information, can't access extended PCI configuration space under this bridge.",
38 ] + qemux86_common,
39 'qemux86-64' : qemux86_common,
40 'qemumips' : [
41 'Failed to load module "glx"',
42 ] + common_errors,
43 'qemuppc' : [
44 'PCI 0000:00 Cannot reserve Legacy IO [io 0x0000-0x0fff]',
45 'mode "640x480" test failed',
46 'Failed to load module "glx"',
47 ] + common_errors,
48 'qemuarm' : [
49 'mmci-pl18x: probe of fpga:05 failed with error -22',
50 'mmci-pl18x: probe of fpga:0b failed with error -22',
51 'Failed to load module "glx"'
52 ] + common_errors,
53 'emenlow' : x86_common,
54 'crownbay' : x86_common,
55 'genericx86' : x86_common,
56 'genericx86-64' : x86_common,
57}
58
59log_locations = ["/var/log/","/var/log/dmesg", "/tmp/dmesg_output.log"]
60
61class ParseLogsTest(oeRuntimeTest):
62
63 @classmethod
64 def setUpClass(self):
65 self.errors = errors
66 self.ignore_errors = ignore_errors
67 self.log_locations = log_locations
68 self.msg = ""
69
70 def getMachine(self):
71 (status, output) = self.target.run("uname -n")
72 return output
73
74	    # Get some CPU information to display at the start of the output; it can be useful when triaging failures.
75 def getHardwareInfo(self):
76 hwi = ""
77 (status, cpu_name) = self.target.run("cat /proc/cpuinfo | grep \"model name\" | head -n1 | awk 'BEGIN{FS=\":\"}{print $2}'")
78 (status, cpu_physical_cores) = self.target.run("cat /proc/cpuinfo | grep \"cpu cores\" | head -n1 | awk {'print $4'}")
79 (status, cpu_logical_cores) = self.target.run("cat /proc/cpuinfo | grep \"processor\" | wc -l")
80 (status, cpu_arch) = self.target.run("uname -m")
81 hwi += "Machine information: \n"
82 hwi += "*******************************\n"
83 hwi += "Machine name: "+self.getMachine()+"\n"
84 hwi += "CPU: "+str(cpu_name)+"\n"
85 hwi += "Arch: "+str(cpu_arch)+"\n"
86 hwi += "Physical cores: "+str(cpu_physical_cores)+"\n"
87 hwi += "Logical cores: "+str(cpu_logical_cores)+"\n"
88 hwi += "*******************************\n"
89 return hwi
90
91	    # Go through the log locations provided; if a location is a directory,
92	    # collect its .log files, and if it is a file add it directly to the list.
93 def getLogList(self, log_locations):
94 logs = []
95 for location in log_locations:
96 (status, output) = self.target.run("test -f "+str(location))
97 if (status == 0):
98 logs.append(str(location))
99 else:
100 (status, output) = self.target.run("test -d "+str(location))
101 if (status == 0):
102 (status, output) = self.target.run("find "+str(location)+"/*.log -maxdepth 1 -type f")
103 output = output.splitlines()
104 for logfile in output:
105 logs.append(os.path.join(location,str(logfile)))
106 return logs
107
108 #build the grep command to be used with filters and exclusions
109 def build_grepcmd(self, errors, ignore_errors, log):
110 grepcmd = "grep "
111 grepcmd +="-Ei \""
112 for error in errors:
113 grepcmd += error+"|"
114 grepcmd = grepcmd[:-1]
115 grepcmd += "\" "+str(log)+" | grep -Eiv \'"
116 try:
117 errorlist = ignore_errors[self.getMachine()]
118 except KeyError:
119 self.msg += "No ignore list found for this machine, using default\n"
120 errorlist = ignore_errors['default']
121 for ignore_error in errorlist:
122 ignore_error = ignore_error.replace("(", "\(")
123 ignore_error = ignore_error.replace(")", "\)")
124 ignore_error = ignore_error.replace("'", ".")
125 ignore_error = ignore_error.replace("?", "\?")
126 ignore_error = ignore_error.replace("[", "\[")
127 ignore_error = ignore_error.replace("]", "\]")
128 ignore_error = ignore_error.replace("*", "\*")
129 grepcmd += ignore_error+"|"
130 grepcmd = grepcmd[:-1]
131 grepcmd += "\'"
132 return grepcmd
133
134	    # Grep only the errors so that their context can be collected. Default context is 10 lines before and after the error itself.
135 def parse_logs(self, errors, ignore_errors, logs, lines_before = 10, lines_after = 10):
136 results = {}
137	        result = ""  # pre-initialize: target.run() may raise before assigning result
138 for log in logs:
139 thegrep = self.build_grepcmd(errors, ignore_errors, log)
140 try:
141 (status, result) = self.target.run(thegrep)
142 except:
143 pass
144 if result:
145 results[log] = {}
146 rez = result.splitlines()
147 for xrez in rez:
148 command = "grep \"\\"+str(xrez)+"\" -B "+str(lines_before)+" -A "+str(lines_after)+" "+str(log)
149 try:
150 (status, yrez) = self.target.run(command)
151 except:
152 pass
153 results[log][xrez]=yrez
154 return results
155
156	    # Get the output of dmesg and write it to a file; that file is added to log_locations.
157 def write_dmesg(self):
158 (status, dmesg) = self.target.run("dmesg")
159 (status, dmesg2) = self.target.run("echo \""+str(dmesg)+"\" > /tmp/dmesg_output.log")
160
161 @skipUnlessPassed('test_ssh')
162 def test_parselogs(self):
163 self.write_dmesg()
164 log_list = self.getLogList(self.log_locations)
165 result = self.parse_logs(self.errors, self.ignore_errors, log_list)
166 print self.getHardwareInfo()
167 errcount = 0
168 for log in result:
169 self.msg += "Log: "+log+"\n"
170 self.msg += "-----------------------\n"
171 for error in result[log]:
172 errcount += 1
173 self.msg += "Central error: "+str(error)+"\n"
174 self.msg += "***********************\n"
175 self.msg += result[str(log)][str(error)]+"\n"
176 self.msg += "***********************\n"
177 self.msg += "%s errors found in logs." % errcount
178 self.assertEqual(errcount, 0, msg=self.msg)
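
The character-by-character escaping in build_grepcmd() can be approximated with re.escape(); note that the test feeds grep -E, whose metacharacter set differs slightly from Python's, so this is a sketch rather than a drop-in replacement:

    import re

    def build_ignore_pattern(ignore_list):
        # Alternation of literally-escaped ignore strings, as build_grepcmd
        # assembles by hand above.
        return "|".join(re.escape(entry) for entry in ignore_list)

    print(build_ignore_pattern(["dma timeout", "usbhid: probe of "]))
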
diff --git a/meta/lib/oeqa/runtime/perl.py b/meta/lib/oeqa/runtime/perl.py
new file mode 100644
index 0000000000..65da028d4b
--- /dev/null
+++ b/meta/lib/oeqa/runtime/perl.py
@@ -0,0 +1,29 @@
1import unittest
2import os
3from oeqa.oetest import oeRuntimeTest, skipModule
4from oeqa.utils.decorators import *
5
6def setUpModule():
7 if not oeRuntimeTest.hasPackage("perl"):
8 skipModule("No perl package in the image")
9
10
11class PerlTest(oeRuntimeTest):
12
13 @classmethod
14 def setUpClass(self):
15 oeRuntimeTest.tc.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "test.pl"), "/tmp/test.pl")
16
17 def test_perl_exists(self):
18 (status, output) = self.target.run('which perl')
19 self.assertEqual(status, 0, msg="Perl binary not in PATH or not on target.")
20
21 @testcase(208)
22 def test_perl_works(self):
23 (status, output) = self.target.run('perl /tmp/test.pl')
24 self.assertEqual(status, 0, msg="Exit status was not 0. Output: %s" % output)
25 self.assertEqual(output, "the value of a is 0.01", msg="Incorrect output: %s" % output)
26
27 @classmethod
28 def tearDownClass(self):
29 oeRuntimeTest.tc.target.run("rm /tmp/test.pl")
diff --git a/meta/lib/oeqa/runtime/ping.py b/meta/lib/oeqa/runtime/ping.py
new file mode 100644
index 0000000000..a73c72402a
--- /dev/null
+++ b/meta/lib/oeqa/runtime/ping.py
@@ -0,0 +1,20 @@
1import subprocess
2import unittest
3import sys
4import time
5from oeqa.oetest import oeRuntimeTest
6
7class PingTest(oeRuntimeTest):
8
9 def test_ping(self):
10 output = ''
11 count = 0
12 endtime = time.time() + 60
13 while count < 5 and time.time() < endtime:
14 proc = subprocess.Popen("ping -c 1 %s" % self.target.ip, shell=True, stdout=subprocess.PIPE)
15 output += proc.communicate()[0]
16 if proc.poll() == 0:
17 count += 1
18 else:
19 count = 0
20 self.assertEqual(count, 5, msg = "Expected 5 consecutive replies, got %d.\nping output is:\n%s" % (count,output))
diff --git a/meta/lib/oeqa/runtime/python.py b/meta/lib/oeqa/runtime/python.py
new file mode 100644
index 0000000000..0387b9a03e
--- /dev/null
+++ b/meta/lib/oeqa/runtime/python.py
@@ -0,0 +1,34 @@
1import unittest
2import os
3from oeqa.oetest import oeRuntimeTest, skipModule
4from oeqa.utils.decorators import *
5
6def setUpModule():
7 if not oeRuntimeTest.hasPackage("python"):
8 skipModule("No python package in the image")
9
10
11class PythonTest(oeRuntimeTest):
12
13 @classmethod
14 def setUpClass(self):
15 oeRuntimeTest.tc.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "test.py"), "/tmp/test.py")
16
17 def test_python_exists(self):
18 (status, output) = self.target.run('which python')
19 self.assertEqual(status, 0, msg="Python binary not in PATH or not on target.")
20
21 @testcase(965)
22 def test_python_stdout(self):
23 (status, output) = self.target.run('python /tmp/test.py')
24 self.assertEqual(status, 0, msg="Exit status was not 0. Output: %s" % output)
25 self.assertEqual(output, "the value of a is 0.01", msg="Incorrect output: %s" % output)
26
27 def test_python_testfile(self):
28 (status, output) = self.target.run('ls /tmp/testfile.python')
29 self.assertEqual(status, 0, msg="Python test file generate failed.")
30
31
32 @classmethod
33 def tearDownClass(self):
34 oeRuntimeTest.tc.target.run("rm /tmp/test.py /tmp/testfile.python")
diff --git a/meta/lib/oeqa/runtime/rpm.py b/meta/lib/oeqa/runtime/rpm.py
new file mode 100644
index 0000000000..b17e8b46a8
--- /dev/null
+++ b/meta/lib/oeqa/runtime/rpm.py
@@ -0,0 +1,53 @@
1import unittest
2import os
3import fnmatch
4from oeqa.oetest import oeRuntimeTest, skipModule
5from oeqa.utils.decorators import *
6
7def setUpModule():
8 if not oeRuntimeTest.hasFeature("package-management"):
9 skipModule("rpm module skipped: target doesn't have package-management in IMAGE_FEATURES")
10 if "package_rpm" != oeRuntimeTest.tc.d.getVar("PACKAGE_CLASSES", True).split()[0]:
11 skipModule("rpm module skipped: target doesn't have rpm as primary package manager")
12
13
14class RpmBasicTest(oeRuntimeTest):
15
16 @skipUnlessPassed('test_ssh')
17 def test_rpm_help(self):
18 (status, output) = self.target.run('rpm --help')
19 self.assertEqual(status, 0, msg="status and output: %s and %s" % (status,output))
20
21 @testcase(191)
22 @skipUnlessPassed('test_rpm_help')
23 def test_rpm_query(self):
24 (status, output) = self.target.run('rpm -q rpm')
25 self.assertEqual(status, 0, msg="status and output: %s and %s" % (status,output))
26
27class RpmInstallRemoveTest(oeRuntimeTest):
28
29 @classmethod
30 def setUpClass(self):
31 pkgarch = oeRuntimeTest.tc.d.getVar('TUNE_PKGARCH', True).replace("-", "_")
32 rpmdir = os.path.join(oeRuntimeTest.tc.d.getVar('DEPLOY_DIR', True), "rpm", pkgarch)
33 # pick rpm-doc as a test file to get installed, because it's small and it will always be built for standard targets
34 for f in fnmatch.filter(os.listdir(rpmdir), "rpm-doc-*.%s.rpm" % pkgarch):
35 testrpmfile = f
36 oeRuntimeTest.tc.target.copy_to(os.path.join(rpmdir,testrpmfile), "/tmp/rpm-doc.rpm")
37
38 @testcase(192)
39 @skipUnlessPassed('test_rpm_help')
40 def test_rpm_install(self):
41 (status, output) = self.target.run('rpm -ivh /tmp/rpm-doc.rpm')
42 self.assertEqual(status, 0, msg="Failed to install rpm-doc package: %s" % output)
43
44 @testcase(194)
45 @skipUnlessPassed('test_rpm_install')
46 def test_rpm_remove(self):
47 (status,output) = self.target.run('rpm -e rpm-doc')
48 self.assertEqual(status, 0, msg="Failed to remove rpm-doc package: %s" % output)
49
50 @classmethod
51 def tearDownClass(self):
52 oeRuntimeTest.tc.target.run('rm -f /tmp/rpm-doc.rpm')
53
diff --git a/meta/lib/oeqa/runtime/scanelf.py b/meta/lib/oeqa/runtime/scanelf.py
new file mode 100644
index 0000000000..43a024ab9a
--- /dev/null
+++ b/meta/lib/oeqa/runtime/scanelf.py
@@ -0,0 +1,28 @@
1import unittest
2from oeqa.oetest import oeRuntimeTest, skipModule
3from oeqa.utils.decorators import *
4
5def setUpModule():
6 if not oeRuntimeTest.hasPackage("pax-utils"):
7 skipModule("pax-utils package not installed")
8
9class ScanelfTest(oeRuntimeTest):
10
11 def setUp(self):
12 self.scancmd = 'scanelf --quiet --recursive --mount --ldpath --path'
13
14 @testcase(966)
15 @skipUnlessPassed('test_ssh')
16 def test_scanelf_textrel(self):
17 # print TEXTREL information
18 self.scancmd += " --textrel"
19 (status, output) = self.target.run(self.scancmd)
20 self.assertEqual(output.strip(), "", "\n".join([self.scancmd, output]))
21
22 @testcase(967)
23 @skipUnlessPassed('test_ssh')
24 def test_scanelf_rpath(self):
25 # print RPATH information
26 self.scancmd += " --rpath"
27 (status, output) = self.target.run(self.scancmd)
28 self.assertEqual(output.strip(), "", "\n".join([self.scancmd, output]))
diff --git a/meta/lib/oeqa/runtime/scp.py b/meta/lib/oeqa/runtime/scp.py
new file mode 100644
index 0000000000..48e87d2d0b
--- /dev/null
+++ b/meta/lib/oeqa/runtime/scp.py
@@ -0,0 +1,22 @@
1import os
2from oeqa.oetest import oeRuntimeTest, skipModule
3from oeqa.utils.decorators import skipUnlessPassed, testcase
4
5def setUpModule():
6 if not (oeRuntimeTest.hasPackage("dropbear") or oeRuntimeTest.hasPackage("openssh-sshd")):
7 skipModule("No ssh package in image")
8
9class ScpTest(oeRuntimeTest):
10
11 @testcase(220)
12 @skipUnlessPassed('test_ssh')
13 def test_scp_file(self):
14 test_log_dir = oeRuntimeTest.tc.d.getVar("TEST_LOG_DIR", True)
15 test_file_path = os.path.join(test_log_dir, 'test_scp_file')
16 with open(test_file_path, 'w') as test_scp_file:
17 test_scp_file.seek(2 ** 22 - 1)
18 test_scp_file.write(os.linesep)
19 (status, output) = self.target.copy_to(test_file_path, '/tmp/test_scp_file')
20 self.assertEqual(status, 0, msg = "File could not be copied. Output: %s" % output)
21 (status, output) = self.target.run("ls -la /tmp/test_scp_file")
22 self.assertEqual(status, 0, msg = "SCP test failed")
diff --git a/meta/lib/oeqa/runtime/skeletoninit.py b/meta/lib/oeqa/runtime/skeletoninit.py
new file mode 100644
index 0000000000..7c7f402e5d
--- /dev/null
+++ b/meta/lib/oeqa/runtime/skeletoninit.py
@@ -0,0 +1,29 @@
1# This test should cover https://bugzilla.yoctoproject.org/tr_show_case.cgi?case_id=284 testcase
2# Note that the image under test must have meta-skeleton layer in bblayers and IMAGE_INSTALL_append = " service" in local.conf
3
4import unittest
5from oeqa.oetest import oeRuntimeTest, skipModule
6from oeqa.utils.decorators import *
7
8def setUpModule():
9 if not oeRuntimeTest.hasPackage("service"):
10 skipModule("No service package in image")
11
12
13class SkeletonBasicTest(oeRuntimeTest):
14
15 @skipUnlessPassed('test_ssh')
16 @unittest.skipIf("systemd" == oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager"), "Not appropiate for systemd image")
17 def test_skeleton_availability(self):
18 (status, output) = self.target.run('ls /etc/init.d/skeleton')
19 self.assertEqual(status, 0, msg = "skeleton init script not found. Output:\n%s " % output)
20 (status, output) = self.target.run('ls /usr/sbin/skeleton-test')
21 self.assertEqual(status, 0, msg = "skeleton-test not found. Output:\n%s" % output)
22
23 @testcase(284)
24 @skipUnlessPassed('test_skeleton_availability')
25 @unittest.skipIf("systemd" == oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager"), "Not appropiate for systemd image")
26 def test_skeleton_script(self):
27 output1 = self.target.run("/etc/init.d/skeleton start")[1]
28 (status, output2) = self.target.run(oeRuntimeTest.pscmd + ' | grep [s]keleton-test')
29 self.assertEqual(status, 0, msg = "Skeleton script could not be started:\n%s\n%s" % (output1, output2))
diff --git a/meta/lib/oeqa/runtime/smart.py b/meta/lib/oeqa/runtime/smart.py
new file mode 100644
index 0000000000..3b49314df7
--- /dev/null
+++ b/meta/lib/oeqa/runtime/smart.py
@@ -0,0 +1,121 @@
1import unittest
2import re
3from oeqa.oetest import oeRuntimeTest, skipModule
4from oeqa.utils.decorators import *
5from oeqa.utils.httpserver import HTTPService
6
7def setUpModule():
8 if not oeRuntimeTest.hasFeature("package-management"):
9 skipModule("Image doesn't have package management feature")
10 if not oeRuntimeTest.hasPackage("smart"):
11 skipModule("Image doesn't have smart installed")
12 if "package_rpm" != oeRuntimeTest.tc.d.getVar("PACKAGE_CLASSES", True).split()[0]:
13 skipModule("Rpm is not the primary package manager")
14
15class SmartTest(oeRuntimeTest):
16
17 @skipUnlessPassed('test_smart_help')
18 def smart(self, command, expected = 0):
19 command = 'smart %s' % command
20 status, output = self.target.run(command, 1500)
21 message = os.linesep.join([command, output])
22 self.assertEqual(status, expected, message)
23 self.assertFalse("Cannot allocate memory" in output, message)
24 return output
25
26class SmartBasicTest(SmartTest):
27
28 @testcase(716)
29 @skipUnlessPassed('test_ssh')
30 def test_smart_help(self):
31 self.smart('--help')
32
33 def test_smart_version(self):
34 self.smart('--version')
35
36 @testcase(721)
37 def test_smart_info(self):
38 self.smart('info python-smartpm')
39
40 @testcase(421)
41 def test_smart_query(self):
42 self.smart('query python-smartpm')
43
44 @testcase(720)
45 def test_smart_search(self):
46 self.smart('search python-smartpm')
47
48 @testcase(722)
49 def test_smart_stats(self):
50 self.smart('stats')
51
52class SmartRepoTest(SmartTest):
53
54 @classmethod
55 def setUpClass(self):
56 self.repo_server = HTTPService(oeRuntimeTest.tc.d.getVar('DEPLOY_DIR', True), oeRuntimeTest.tc.target.server_ip)
57 self.repo_server.start()
58
59 @classmethod
60 def tearDownClass(self):
61 self.repo_server.stop()
62
63 def test_smart_channel(self):
64 self.smart('channel', 1)
65
66 @testcase(719)
67 def test_smart_channel_add(self):
68 image_pkgtype = self.tc.d.getVar('IMAGE_PKGTYPE', True)
69 deploy_url = 'http://%s:%s/%s' %(self.target.server_ip, self.repo_server.port, image_pkgtype)
70 pkgarchs = self.tc.d.getVar('PACKAGE_ARCHS', True).replace("-","_").split()
71 for arch in os.listdir('%s/%s' % (self.repo_server.root_dir, image_pkgtype)):
72 if arch in pkgarchs:
73 self.smart('channel -y --add {a} type=rpm-md baseurl={u}/{a}'.format(a=arch, u=deploy_url))
74 self.smart('update')
75
76 def test_smart_channel_help(self):
77 self.smart('channel --help')
78
79 def test_smart_channel_list(self):
80 self.smart('channel --list')
81
82 def test_smart_channel_show(self):
83 self.smart('channel --show')
84
85 @testcase(717)
86 def test_smart_channel_rpmsys(self):
87 self.smart('channel --show rpmsys')
88 self.smart('channel --disable rpmsys')
89 self.smart('channel --enable rpmsys')
90
91 @skipUnlessPassed('test_smart_channel_add')
92 def test_smart_install(self):
93 self.smart('remove -y psplash-default')
94 self.smart('install -y psplash-default')
95
96 @testcase(728)
97 @skipUnlessPassed('test_smart_install')
98 def test_smart_install_dependency(self):
99 self.smart('remove -y psplash')
100 self.smart('install -y psplash-default')
101
102 @testcase(723)
103 @skipUnlessPassed('test_smart_channel_add')
104 def test_smart_install_from_disk(self):
105 self.smart('remove -y psplash-default')
106 self.smart('download psplash-default')
107 self.smart('install -y ./psplash-default*')
108
109 @testcase(725)
110 @skipUnlessPassed('test_smart_channel_add')
111 def test_smart_install_from_http(self):
112 output = self.smart('download --urls psplash-default')
113 url = re.search('(http://.*/psplash-default.*\.rpm)', output)
114 self.assertTrue(url, msg="Couln't find download url in %s" % output)
115 self.smart('remove -y psplash-default')
116 self.smart('install -y %s' % url.group(0))
117
118 @testcase(729)
119 @skipUnlessPassed('test_smart_install')
120 def test_smart_reinstall(self):
121 self.smart('reinstall -y psplash-default')
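
test_smart_channel_add derives one rpm-md channel per deployed package architecture from the HTTP feed it serves out of DEPLOY_DIR. A sketch of the URL construction it performs, with placeholder server and port values (the real ones come from HTTPService):

    def channel_urls(server_ip, port, image_pkgtype, arches):
        base = "http://%s:%s/%s" % (server_ip, port, image_pkgtype)
        return ["%s/%s" % (base, arch) for arch in arches]

    print(channel_urls("192.168.7.1", 8080, "rpm", ["core2_64", "all"]))
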
diff --git a/meta/lib/oeqa/runtime/ssh.py b/meta/lib/oeqa/runtime/ssh.py
new file mode 100644
index 0000000000..0e76d5d512
--- /dev/null
+++ b/meta/lib/oeqa/runtime/ssh.py
@@ -0,0 +1,19 @@
1import subprocess
2import unittest
3import sys
4from oeqa.oetest import oeRuntimeTest, skipModule
5from oeqa.utils.decorators import *
6
7def setUpModule():
8 if not (oeRuntimeTest.hasPackage("dropbear") or oeRuntimeTest.hasPackage("openssh")):
9 skipModule("No ssh package in image")
10
11class SshTest(oeRuntimeTest):
12
13 @testcase(224)
14 @skipUnlessPassed('test_ping')
15 def test_ssh(self):
16 (status, output) = self.target.run('uname -a')
17 self.assertEqual(status, 0, msg="SSH Test failed: %s" % output)
18 (status, output) = self.target.run('cat /etc/masterimage')
19 self.assertEqual(status, 1, msg="This isn't the right image - /etc/masterimage shouldn't be here %s" % output)
diff --git a/meta/lib/oeqa/runtime/syslog.py b/meta/lib/oeqa/runtime/syslog.py
new file mode 100644
index 0000000000..7fa018e97f
--- /dev/null
+++ b/meta/lib/oeqa/runtime/syslog.py
@@ -0,0 +1,48 @@
1import unittest
2from oeqa.oetest import oeRuntimeTest, skipModule
3from oeqa.utils.decorators import *
4
5def setUpModule():
6 if not oeRuntimeTest.hasPackage("syslog"):
7 skipModule("No syslog package in image")
8
9class SyslogTest(oeRuntimeTest):
10
11 @skipUnlessPassed("test_ssh")
12 def test_syslog_help(self):
13 (status,output) = self.target.run('/sbin/syslogd --help')
14 self.assertEqual(status, 0, msg="status and output: %s and %s" % (status,output))
15
16 @testcase(201)
17 @skipUnlessPassed("test_syslog_help")
18 def test_syslog_running(self):
19 (status,output) = self.target.run(oeRuntimeTest.pscmd + ' | grep -i [s]yslogd')
20 self.assertEqual(status, 0, msg="no syslogd process, ps output: %s" % self.target.run(oeRuntimeTest.pscmd)[1])
21
22
23class SyslogTestConfig(oeRuntimeTest):
24
25 @skipUnlessPassed("test_syslog_running")
26 def test_syslog_logger(self):
27 (status,output) = self.target.run('logger foobar && test -e /var/log/messages && grep foobar /var/log/messages || logread | grep foobar')
28 self.assertEqual(status, 0, msg="Test log string not found in /var/log/messages. Output: %s " % output)
29
30 @skipUnlessPassed("test_syslog_running")
31 def test_syslog_restart(self):
32 if "systemd" != oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager"):
33 (status,output) = self.target.run('/etc/init.d/syslog restart')
34 else:
35 (status,output) = self.target.run('systemctl restart syslog.service')
36
37 @testcase(202)
38 @skipUnlessPassed("test_syslog_restart")
39 @skipUnlessPassed("test_syslog_logger")
40 @unittest.skipIf("systemd" == oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager"), "Not appropiate for systemd image")
41 def test_syslog_startup_config(self):
42 self.target.run('echo "LOGFILE=/var/log/test" >> /etc/syslog-startup.conf')
43 (status,output) = self.target.run('/etc/init.d/syslog restart')
44 self.assertEqual(status, 0, msg="Could not restart syslog service. Status and output: %s and %s" % (status,output))
45 (status,output) = self.target.run('logger foobar && grep foobar /var/log/test')
46 self.assertEqual(status, 0, msg="Test log string not found. Output: %s " % output)
47 self.target.run("sed -i 's#LOGFILE=/var/log/test##' /etc/syslog-startup.conf")
48 self.target.run('/etc/init.d/syslog restart')
diff --git a/meta/lib/oeqa/runtime/systemd.py b/meta/lib/oeqa/runtime/systemd.py
new file mode 100644
index 0000000000..1451698bb3
--- /dev/null
+++ b/meta/lib/oeqa/runtime/systemd.py
@@ -0,0 +1,88 @@
1import unittest
2import re
3from oeqa.oetest import oeRuntimeTest, skipModule
4from oeqa.utils.decorators import *
5
6def setUpModule():
7 if not oeRuntimeTest.hasFeature("systemd"):
8 skipModule("target doesn't have systemd in DISTRO_FEATURES")
9 if "systemd" != oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager", True):
10 skipModule("systemd is not the init manager for this image")
11
12
13class SystemdTest(oeRuntimeTest):
14
15 def systemctl(self, action = '', target = '', expected = 0, verbose = False):
16 command = 'systemctl %s %s' % (action, target)
17 status, output = self.target.run(command)
18 message = '\n'.join([command, output])
19 if status != expected and verbose:
20 message += self.target.run('systemctl status --full %s' % target)[1]
21 self.assertEqual(status, expected, message)
22 return output
23
24
25class SystemdBasicTests(SystemdTest):
26
27 @skipUnlessPassed('test_ssh')
28 def test_systemd_basic(self):
29 self.systemctl('--version')
30
31 @testcase(551)
32	    @skipUnlessPassed('test_systemd_basic')
33 def test_systemd_list(self):
34 self.systemctl('list-unit-files')
35
36 def settle(self):
37 """
38 Block until systemd has finished activating any units being activated,
39 or until two minutes has elapsed.
40
41 Returns a tuple, either (True, '') if all units have finished
42 activating, or (False, message string) if there are still units
43 activating (generally, failing units that restart).
44 """
45 import time
46 endtime = time.time() + (60 * 2)
47 while True:
48 status, output = self.target.run('systemctl --state=activating')
49 if "0 loaded units listed" in output:
50 return (True, '')
51 if time.time() >= endtime:
52 return (False, output)
53 time.sleep(10)
54
55 @testcase(550)
56 @skipUnlessPassed('test_systemd_basic')
57 def test_systemd_failed(self):
58 settled, output = self.settle()
59 self.assertTrue(settled, msg="Timed out waiting for systemd to settle:\n" + output)
60
61 output = self.systemctl('list-units', '--failed')
62 match = re.search("0 loaded units listed", output)
63 if not match:
64 output += self.systemctl('status --full --failed')
65 self.assertTrue(match, msg="Some systemd units failed:\n%s" % output)
66
67
68class SystemdServiceTests(SystemdTest):
69
70 @skipUnlessPassed('test_systemd_basic')
71 def test_systemd_status(self):
72 self.systemctl('status --full', 'avahi-daemon.service')
73
74 @testcase(695)
75 @skipUnlessPassed('test_systemd_status')
76 def test_systemd_stop_start(self):
77 self.systemctl('stop', 'avahi-daemon.service')
78 self.systemctl('is-active', 'avahi-daemon.service', expected=3, verbose=True)
79 self.systemctl('start','avahi-daemon.service')
80 self.systemctl('is-active', 'avahi-daemon.service', verbose=True)
81
82 @testcase(696)
83 @skipUnlessPassed('test_systemd_basic')
84 def test_systemd_disable_enable(self):
85 self.systemctl('disable', 'avahi-daemon.service')
86 self.systemctl('is-enabled', 'avahi-daemon.service', expected=1)
87 self.systemctl('enable', 'avahi-daemon.service')
88 self.systemctl('is-enabled', 'avahi-daemon.service')
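
settle() above is an instance of a poll-until-deadline loop: retry a check every 10 seconds for up to two minutes and return early on success. The pattern in general form, as a sketch:

    import time

    def wait_until(predicate, timeout=120, interval=10):
        # predicate() returns (ok, info); give up once the deadline passes.
        end = time.time() + timeout
        while True:
            ok, info = predicate()
            if ok or time.time() >= end:
                return (ok, info)
            time.sleep(interval)

    print(wait_until(lambda: (True, "")))   # trivially settled
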
diff --git a/meta/lib/oeqa/runtime/vnc.py b/meta/lib/oeqa/runtime/vnc.py
new file mode 100644
index 0000000000..f31deff306
--- /dev/null
+++ b/meta/lib/oeqa/runtime/vnc.py
@@ -0,0 +1,20 @@
1from oeqa.oetest import oeRuntimeTest, skipModuleUnless
2from oeqa.utils.decorators import *
3import re
4
5def setUpModule():
6 skipModuleUnless(oeRuntimeTest.hasPackage('x11vnc'), "No x11vnc package in image")
7
8class VNCTest(oeRuntimeTest):
9
10 @testcase(213)
11 @skipUnlessPassed('test_ssh')
12 def test_vnc(self):
13 (status, output) = self.target.run('x11vnc -display :0 -bg -o x11vnc.log')
14 self.assertEqual(status, 0, msg="x11vnc server failed to start: %s" % output)
15 port = re.search('PORT=[0-9]*', output)
16 self.assertTrue(port, msg="Listening port not specified in command output: %s" %output)
17
18 vncport = port.group(0).split('=')[1]
19 (status, output) = self.target.run('netstat -ntl | grep ":%s"' % vncport)
20 self.assertEqual(status, 0, msg="x11vnc server not running on port %s\n\n%s" % (vncport, self.target.run('netstat -ntl; cat x11vnc.log')[1]))
diff --git a/meta/lib/oeqa/runtime/x32lib.py b/meta/lib/oeqa/runtime/x32lib.py
new file mode 100644
index 0000000000..ce5e214035
--- /dev/null
+++ b/meta/lib/oeqa/runtime/x32lib.py
@@ -0,0 +1,18 @@
1import unittest
2from oeqa.oetest import oeRuntimeTest, skipModule
3from oeqa.utils.decorators import *
4
5def setUpModule():
6	    # Check that DEFAULTTUNE is set and its value contains x86-64-x32
7	    defaulttune = oeRuntimeTest.tc.d.getVar("DEFAULTTUNE", True)
8	    if not defaulttune or "x86-64-x32" not in defaulttune:
9 skipModule("DEFAULTTUNE is not set to x86-64-x32")
10
11class X32libTest(oeRuntimeTest):
12
13 @testcase(281)
14 @skipUnlessPassed("test_ssh")
15 def test_x32_file(self):
16 status1 = self.target.run("readelf -h /bin/ls | grep Class | grep ELF32")[0]
17 status2 = self.target.run("readelf -h /bin/ls | grep Machine | grep X86-64")[0]
18 self.assertTrue(status1 == 0 and status2 == 0, msg="/bin/ls isn't an X86-64 ELF32 binary. readelf says: %s" % self.target.run("readelf -h /bin/ls")[1])
diff --git a/meta/lib/oeqa/runtime/xorg.py b/meta/lib/oeqa/runtime/xorg.py
new file mode 100644
index 0000000000..a07031e5c8
--- /dev/null
+++ b/meta/lib/oeqa/runtime/xorg.py
@@ -0,0 +1,17 @@
1import unittest
2from oeqa.oetest import oeRuntimeTest, skipModule
3from oeqa.utils.decorators import *
4
5def setUpModule():
6 if not oeRuntimeTest.hasFeature("x11-base"):
7 skipModule("target doesn't have x11 in IMAGE_FEATURES")
8
9
10class XorgTest(oeRuntimeTest):
11
12 @skipUnlessPassed('test_ssh')
13 def test_xorg_running(self):
14 (status, output) = self.target.run(oeRuntimeTest.pscmd + ' | grep -v xinit | grep [X]org')
15 self.assertEqual(status, 0, msg="Xorg does not appear to be running %s" % self.target.run(oeRuntimeTest.pscmd)[1])
16
17
diff --git a/meta/lib/oeqa/sdk/__init__.py b/meta/lib/oeqa/sdk/__init__.py
new file mode 100644
index 0000000000..4cf3fa76b6
--- /dev/null
+++ b/meta/lib/oeqa/sdk/__init__.py
@@ -0,0 +1,3 @@
1# Enable other layers to have tests in the same named directory
2from pkgutil import extend_path
3__path__ = extend_path(__path__, __name__)
diff --git a/meta/lib/oeqa/sdk/buildcvs.py b/meta/lib/oeqa/sdk/buildcvs.py
new file mode 100644
index 0000000000..c7146fa4af
--- /dev/null
+++ b/meta/lib/oeqa/sdk/buildcvs.py
@@ -0,0 +1,25 @@
1from oeqa.oetest import oeSDKTest, skipModule
2from oeqa.utils.decorators import *
3from oeqa.utils.targetbuild import SDKBuildProject
4
5class BuildCvsTest(oeSDKTest):
6
7 @classmethod
8 def setUpClass(self):
9 self.project = SDKBuildProject(oeSDKTest.tc.sdktestdir + "/cvs/", oeSDKTest.tc.sdkenv, oeSDKTest.tc.d,
10 "http://ftp.gnu.org/non-gnu/cvs/source/feature/1.12.13/cvs-1.12.13.tar.bz2")
11 self.project.download_archive()
12
13 def test_cvs(self):
14 self.assertEqual(self.project.run_configure(), 0,
15 msg="Running configure failed")
16
17 self.assertEqual(self.project.run_make(), 0,
18 msg="Running make failed")
19
20 self.assertEqual(self.project.run_install(), 0,
21 msg="Running make install failed")
22
23 @classmethod
24 def tearDownClass(self):
25 self.project.clean()
diff --git a/meta/lib/oeqa/sdk/buildiptables.py b/meta/lib/oeqa/sdk/buildiptables.py
new file mode 100644
index 0000000000..062e5316e7
--- /dev/null
+++ b/meta/lib/oeqa/sdk/buildiptables.py
@@ -0,0 +1,26 @@
1from oeqa.oetest import oeSDKTest
2from oeqa.utils.decorators import *
3from oeqa.utils.targetbuild import SDKBuildProject
4
5
6class BuildIptablesTest(oeSDKTest):
7
8 @classmethod
9 def setUpClass(self):
10 self.project = SDKBuildProject(oeSDKTest.tc.sdktestdir + "/iptables/", oeSDKTest.tc.sdkenv, oeSDKTest.tc.d,
11 "http://netfilter.org/projects/iptables/files/iptables-1.4.13.tar.bz2")
12 self.project.download_archive()
13
14 def test_iptables(self):
15 self.assertEqual(self.project.run_configure(), 0,
16 msg="Running configure failed")
17
18 self.assertEqual(self.project.run_make(), 0,
19 msg="Running make failed")
20
21 self.assertEqual(self.project.run_install(), 0,
22 msg="Running make install failed")
23
24 @classmethod
25 def tearDownClass(self):
26 self.project.clean()
diff --git a/meta/lib/oeqa/sdk/buildsudoku.py b/meta/lib/oeqa/sdk/buildsudoku.py
new file mode 100644
index 0000000000..dea77c6599
--- /dev/null
+++ b/meta/lib/oeqa/sdk/buildsudoku.py
@@ -0,0 +1,26 @@
1from oeqa.oetest import oeSDKTest, skipModule
2from oeqa.utils.decorators import *
3from oeqa.utils.targetbuild import SDKBuildProject
4
5def setUpModule():
6 if not oeSDKTest.hasPackage("gtk\+"):
7 skipModule("Image doesn't have gtk+ in manifest")
8
9class SudokuTest(oeSDKTest):
10
11 @classmethod
12 def setUpClass(self):
13 self.project = SDKBuildProject(oeSDKTest.tc.sdktestdir + "/sudoku/", oeSDKTest.tc.sdkenv, oeSDKTest.tc.d,
14 "http://downloads.sourceforge.net/project/sudoku-savant/sudoku-savant/sudoku-savant-1.3/sudoku-savant-1.3.tar.bz2")
15 self.project.download_archive()
16
17 def test_sudoku(self):
18 self.assertEqual(self.project.run_configure(), 0,
19 msg="Running configure failed")
20
21 self.assertEqual(self.project.run_make(), 0,
22 msg="Running make failed")
23
24 @classmethod
25 def tearDownClass(self):
26 self.project.clean()
diff --git a/meta/lib/oeqa/selftest/__init__.py b/meta/lib/oeqa/selftest/__init__.py
new file mode 100644
index 0000000000..3ad9513f40
--- /dev/null
+++ b/meta/lib/oeqa/selftest/__init__.py
@@ -0,0 +1,2 @@
1from pkgutil import extend_path
2__path__ = extend_path(__path__, __name__)
diff --git a/meta/lib/oeqa/selftest/_sstatetests_noauto.py b/meta/lib/oeqa/selftest/_sstatetests_noauto.py
new file mode 100644
index 0000000000..fc9ae7efb9
--- /dev/null
+++ b/meta/lib/oeqa/selftest/_sstatetests_noauto.py
@@ -0,0 +1,95 @@
1import datetime
2import unittest
3import os
4import re
5import shutil
6
7import oeqa.utils.ftools as ftools
8from oeqa.selftest.base import oeSelfTest
9from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_test_layer
10from oeqa.selftest.sstate import SStateBase
11
12
13class RebuildFromSState(SStateBase):
14
15 @classmethod
16 def setUpClass(self):
17	        self.builddir = os.environ.get('BUILDDIR')
18
19 def get_dep_targets(self, primary_targets):
20 found_targets = []
21 bitbake("-g " + ' '.join(map(str, primary_targets)))
22 with open(os.path.join(self.builddir, 'pn-buildlist'), 'r') as pnfile:
23 found_targets = pnfile.read().splitlines()
24 return found_targets
25
26 def configure_builddir(self, builddir):
27 os.mkdir(builddir)
28 self.track_for_cleanup(builddir)
29 os.mkdir(os.path.join(builddir, 'conf'))
30 shutil.copyfile(os.path.join(os.environ.get('BUILDDIR'), 'conf/local.conf'), os.path.join(builddir, 'conf/local.conf'))
31 config = {}
32 config['default_sstate_dir'] = "SSTATE_DIR ?= \"${TOPDIR}/sstate-cache\""
33 config['null_sstate_mirrors'] = "SSTATE_MIRRORS = \"\""
34 config['default_tmp_dir'] = "TMPDIR = \"${TOPDIR}/tmp\""
35 for key in config:
36 ftools.append_file(os.path.join(builddir, 'conf/selftest.inc'), config[key])
37 shutil.copyfile(os.path.join(os.environ.get('BUILDDIR'), 'conf/bblayers.conf'), os.path.join(builddir, 'conf/bblayers.conf'))
38 try:
39 shutil.copyfile(os.path.join(os.environ.get('BUILDDIR'), 'conf/auto.conf'), os.path.join(builddir, 'conf/auto.conf'))
40	        except IOError:  # auto.conf is optional
41 pass
42
43 def hardlink_tree(self, src, dst):
44 os.mkdir(dst)
45 self.track_for_cleanup(dst)
46 for root, dirs, files in os.walk(src):
47 if root == src:
48 continue
49 os.mkdir(os.path.join(dst, root.split(src)[1][1:]))
50 for sstate_file in files:
51 os.link(os.path.join(root, sstate_file), os.path.join(dst, root.split(src)[1][1:], sstate_file))
52
53 def run_test_sstate_rebuild(self, primary_targets, relocate=False, rebuild_dependencies=False):
54 buildA = os.path.join(self.builddir, 'buildA')
55 if relocate:
56 buildB = os.path.join(self.builddir, 'buildB')
57 else:
58 buildB = buildA
59
60 if rebuild_dependencies:
61 rebuild_targets = self.get_dep_targets(primary_targets)
62 else:
63 rebuild_targets = primary_targets
64
65 self.configure_builddir(buildA)
66 runCmd((". %s/oe-init-build-env %s && " % (get_bb_var('COREBASE'), buildA)) + 'bitbake ' + ' '.join(map(str, primary_targets)), shell=True, executable='/bin/bash')
67 self.hardlink_tree(os.path.join(buildA, 'sstate-cache'), os.path.join(self.builddir, 'sstate-cache-buildA'))
68 shutil.rmtree(buildA)
69
70 failed_rebuild = []
71 failed_cleansstate = []
72 for target in rebuild_targets:
73 self.configure_builddir(buildB)
74 self.hardlink_tree(os.path.join(self.builddir, 'sstate-cache-buildA'), os.path.join(buildB, 'sstate-cache'))
75
76 result_cleansstate = runCmd((". %s/oe-init-build-env %s && " % (get_bb_var('COREBASE'), buildB)) + 'bitbake -ccleansstate ' + target, ignore_status=True, shell=True, executable='/bin/bash')
77 if not result_cleansstate.status == 0:
78 failed_cleansstate.append(target)
79 shutil.rmtree(buildB)
80 continue
81
82 result_build = runCmd((". %s/oe-init-build-env %s && " % (get_bb_var('COREBASE'), buildB)) + 'bitbake ' + target, ignore_status=True, shell=True, executable='/bin/bash')
83 if not result_build.status == 0:
84 failed_rebuild.append(target)
85
86 shutil.rmtree(buildB)
87
88 self.assertFalse(failed_rebuild, msg="The following recipes have failed to rebuild: %s" % ' '.join(map(str, failed_rebuild)))
89	        self.assertFalse(failed_cleansstate, msg="The following recipes have failed cleansstate (all others have passed both the cleansstate and rebuild-from-sstate tests): %s" % ' '.join(map(str, failed_cleansstate)))
90
91 def test_sstate_relocation(self):
92 self.run_test_sstate_rebuild(['core-image-sato-sdk'], relocate=True, rebuild_dependencies=True)
93
94 def test_sstate_rebuild(self):
95 self.run_test_sstate_rebuild(['core-image-sato-sdk'], relocate=False, rebuild_dependencies=True)
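
hardlink_tree() mirrors the sstate cache with hard links so each rebuild gets a cheap private copy. An equivalent sketch using os.path.relpath instead of string slicing to compute the destination paths:

    import os

    def hardlink_tree(src, dst):
        os.mkdir(dst)
        for root, dirs, files in os.walk(src):
            if root == src:
                continue    # files directly under src are skipped, as above
            rel = os.path.relpath(root, src)
            os.mkdir(os.path.join(dst, rel))
            for name in files:
                os.link(os.path.join(root, name),
                        os.path.join(dst, rel, name))
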
diff --git a/meta/lib/oeqa/selftest/_toaster.py b/meta/lib/oeqa/selftest/_toaster.py
new file mode 100644
index 0000000000..1cf28a0144
--- /dev/null
+++ b/meta/lib/oeqa/selftest/_toaster.py
@@ -0,0 +1,445 @@
1import unittest
2import os
3import sys
4import shlex, subprocess
5import urllib, commands, time, getpass, re, json
6
7import oeqa.utils.ftools as ftools
8from oeqa.selftest.base import oeSelfTest
9from oeqa.utils.commands import runCmd
10
11sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../../', 'bitbake/lib/toaster')))
12os.environ.setdefault("DJANGO_SETTINGS_MODULE", "toastermain.settings")
13
14import toastermain.settings
15from django.db.models import Q
16from orm.models import *
17from oeqa.utils.decorators import testcase
18
19class ToasterSetup(oeSelfTest):
20
21 def recipe_parse(self, file_path, var):
22 for line in open(file_path,'r'):
23 if line.find(var) > -1:
24 val = line.split(" = ")[1].replace("\"", "").strip()
25 return val
26
27 def fix_file_path(self, file_path):
28 if ":" in file_path:
29 file_path=file_path.split(":")[2]
30 return file_path
31
32class Toaster_DB_Tests(ToasterSetup):
33
34 # Check if build name is unique - tc_id=795
35 @testcase(795)
36 def test_Build_Unique_Name(self):
37 all_builds = Build.objects.all().count()
38 distinct_builds = Build.objects.values('id').distinct().count()
39 self.assertEqual(distinct_builds, all_builds, msg = 'Build name is not unique')
40
41	    # Check if build cooker log path is unique - tc_id=819
42 @testcase(819)
43 def test_Build_Unique_Cooker_Log_Path(self):
44 distinct_path = Build.objects.values('cooker_log_path').distinct().count()
45 total_builds = Build.objects.values('id').count()
46	        self.assertEqual(distinct_path, total_builds, msg = 'Build cooker log path is not unique')
47
48 # Check if the number of errors matches the number of orm_logmessage.level entries with value 2 - tc_id=820
49 @testcase(820)
50 def test_Build_Errors_No(self):
51 builds = Build.objects.values('id', 'errors_no')
52 cnt_err = []
53 for build in builds:
54 log_mess_err_no = LogMessage.objects.filter(build = build['id'], level = 2).count()
55 if (build['errors_no'] != log_mess_err_no):
56 cnt_err.append(build['id'])
57 self.assertEqual(len(cnt_err), 0, msg = 'Errors for build id: %s' % cnt_err)
58
59 # Check if the number of warnings matches the number of orm_logmessage.level entries with value 1 - tc=821
60 @testcase(821)
61 def test_Build_Warnings_No(self):
62 builds = Build.objects.values('id', 'warnings_no')
63 cnt_err = []
64 for build in builds:
65 log_mess_warn_no = LogMessage.objects.filter(build = build['id'], level = 1).count()
66 if (build['warnings_no'] != log_mess_warn_no):
67 cnt_err.append(build['id'])
68 self.assertEqual(len(cnt_err), 0, msg = 'Errors for build id: %s' % cnt_err)
69
70	    # If the build succeeded, errors_no must be 0 - tc_id=822
71	    @testcase(822)
72	    def test_Build_Succeeded_Errors_No(self):
73 builds = Build.objects.filter(outcome = 0).values('id', 'errors_no')
74 cnt_err = []
75 for build in builds:
76 if (build['errors_no'] != 0):
77 cnt_err.append(build['id'])
78 self.assertEqual(len(cnt_err), 0, msg = 'Errors for build id: %s' % cnt_err)
79
80 # Check if task order is unique for one build - tc=824
81 @testcase(824)
82 def test_Task_Unique_Order(self):
83 builds = Build.objects.values('id')
84 cnt_err = []
85 for build in builds:
86 total_task_order = Task.objects.filter(build = build['id']).values('order').count()
87 distinct_task_order = Task.objects.filter(build = build['id']).values('order').distinct().count()
88 if (total_task_order != distinct_task_order):
89 cnt_err.append(build['id'])
90 self.assertEqual(len(cnt_err), 0, msg = 'Errors for build id: %s' % cnt_err)
91
92 # Check task order sequence for one build - tc=825
93 @testcase(825)
94 def test_Task_Order_Sequence(self):
95	        builds = Build.objects.values('id')
96 cnt_err = []
97 for build in builds:
98 tasks = Task.objects.filter(Q(build = build['id']), ~Q(order = None), ~Q(task_name__contains = '_setscene')).values('id', 'order').order_by("order")
99 cnt_tasks = 0
100 for task in tasks:
101 cnt_tasks += 1
102 if (task['order'] != cnt_tasks):
103 cnt_err.append(task['id'])
104 self.assertEqual(len(cnt_err), 0, msg = 'Errors for task id: %s' % cnt_err)
105
106 # Check if disk_io matches the difference between EndTimeIO and StartTimeIO in build stats - tc=828
107 ### this needs to be updated ###
108 #def test_Task_Disk_IO_TC828(self):
109
110 # Check if outcome = 2 (SSTATE) then sstate_result must be 3 (RESTORED) - tc=832
111 @testcase(832)
112 def test_Task_If_Outcome_2_Sstate_Result_Must_Be_3(self):
113 tasks = Task.objects.filter(outcome = 2).values('id', 'sstate_result')
114 cnt_err = []
115 for task in tasks:
116	            if (task['sstate_result'] != 3):
117 cnt_err.append(task['id'])
118 self.assertEqual(len(cnt_err), 0, msg = 'Errors for task id: %s' % cnt_err)
119
120 # Check if outcome = 1 (COVERED) or 3 (EXISTING) then sstate_result must be 0 (SSTATE_NA) - tc=833
121 @testcase(833)
122 def test_Task_If_Outcome_1_3_Sstate_Result_Must_Be_0(self):
123 tasks = Task.objects.filter(outcome__in = (1, 3)).values('id', 'sstate_result')
124 cnt_err = []
125 for task in tasks:
126 if (task['sstate_result'] != 0):
127 cnt_err.append(task['id'])
128 self.assertEqual(len(cnt_err), 0, msg = 'Errors for task id: %s' % cnt_err)
129
130 # Check if outcome is 0 (SUCCESS) or 4 (FAILED) then sstate_result must be 0 (NA), 1 (MISS) or 2 (FAILED) - tc=834
131 @testcase(834)
132 def test_Task_If_Outcome_0_4_Sstate_Result_Must_Be_0_1_2(self):
133 tasks = Task.objects.filter(outcome__in = (0, 4)).values('id', 'sstate_result')
134 cnt_err = []
135 for task in tasks:
136 if (task['sstate_result'] not in [0, 1, 2]):
137 cnt_err.append(task['id'])
138 self.assertEqual(len(cnt_err), 0, msg = 'Errors for task id: %s' % cnt_err)
139
140 # Check if task_executed = TRUE (1), script_type must be 0 (CODING_NA), 2 (CODING_PYTHON), 3 (CODING_SHELL) - tc=891
141 @testcase(891)
142 def test_Task_If_Task_Executed_True_Script_Type_0_2_3(self):
143 tasks = Task.objects.filter(task_executed = 1).values('id', 'script_type')
144 cnt_err = []
145 for task in tasks:
146 if (task['script_type'] not in [0, 2, 3]):
147 cnt_err.append(task['id'])
148 self.assertEqual(len(cnt_err), 0, msg = 'Errors for task id: %s' % cnt_err)
149
150 # Check if task_executed = TRUE (1), outcome must be 0 (SUCCESS) or 4 (FAILED) - tc=836
151 @testcase(836)
152 def test_Task_If_Task_Executed_True_Outcome_0_4(self):
153 tasks = Task.objects.filter(task_executed = 1).values('id', 'outcome')
154 cnt_err = []
155 for task in tasks:
156 if (task['outcome'] not in [0, 4]):
157 cnt_err.append(task['id'])
158 self.assertEqual(len(cnt_err), 0, msg = 'Errors for task id: %s' % cnt_err)
159
160 # Check if task_executed = FALSE (0), script_type must be 0 - tc=890
161 @testcase(890)
162 def test_Task_If_Task_Executed_False_Script_Type_0(self):
163 tasks = Task.objects.filter(task_executed = 0).values('id', 'script_type')
164 cnt_err = []
165 for task in tasks:
166 if (task['script_type'] != 0):
167 cnt_err.append(task['id'])
168 self.assertEqual(len(cnt_err), 0, msg = 'Errors for task id: %s' % cnt_err)
169
170 # Check if task_executed = FALSE (0) and build outcome = SUCCEEDED (0), task outcome must be 1 (COVERED), 2 (CACHED), 3 (PREBUILT), 5 (EMPTY) - tc=837
171 @testcase(837)
172 def test_Task_If_Task_Executed_False_Outcome_1_2_3_5(self):
173 builds = Build.objects.filter(outcome = 0).values('id')
174 cnt_err = []
175 for build in builds:
176 tasks = Task.objects.filter(build = build['id'], task_executed = 0).values('id', 'outcome')
177 for task in tasks:
178 if (task['outcome'] not in [1, 2, 3, 5]):
179 cnt_err.append(task['id'])
180 self.assertEqual(len(cnt_err), 0, msg = 'Errors for task id: %s' % cnt_err)
181
182 # Key verification - tc=888
183 @testcase(888)
184 def test_Target_Installed_Package(self):
185 rows = Target_Installed_Package.objects.values('id', 'target_id', 'package_id')
186 cnt_err = []
187 for row in rows:
188 target = Target.objects.filter(id = row['target_id']).values('id')
189 package = Package.objects.filter(id = row['package_id']).values('id')
190 if (not target or not package):
191 cnt_err.append(row['id'])
192 self.assertEqual(len(cnt_err), 0, msg = 'Errors for target installed package id: %s' % cnt_err)
193
194 # Key verification - tc=889
195 @testcase(889)
196 def test_Task_Dependency(self):
197 rows = Task_Dependency.objects.values('id', 'task_id', 'depends_on_id')
198 cnt_err = []
199 for row in rows:
200 task_id = Task.objects.filter(id = row['task_id']).values('id')
201 depends_on_id = Task.objects.filter(id = row['depends_on_id']).values('id')
202 if (not task_id or not depends_on_id):
203 cnt_err.append(row['id'])
204 self.assertEqual(len(cnt_err), 0, msg = 'Errors for task dependency id: %s' % cnt_err)
205
206	    # For successful builds (outcome=0) and is_image=true targets, check that file_name is populated, the file exists, and its size matches file_size
207 ### Need to add the tc in the test run
208 @testcase(1037)
209 def test_Target_File_Name_Populated(self):
210 builds = Build.objects.filter(outcome = 0).values('id')
211 for build in builds:
212 targets = Target.objects.filter(build_id = build['id'], is_image = 1).values('id')
213 for target in targets:
214 target_files = Target_Image_File.objects.filter(target_id = target['id']).values('id', 'file_name', 'file_size')
215 cnt_err = []
216 for file_info in target_files:
217 target_id = file_info['id']
218 target_file_name = file_info['file_name']
219 target_file_size = file_info['file_size']
220 if (not target_file_name or not target_file_size):
221 cnt_err.append(target_id)
222 else:
223 if (not os.path.exists(target_file_name)):
224 cnt_err.append(target_id)
225 else:
226 if (os.path.getsize(target_file_name) != target_file_size):
227 cnt_err.append(target_id)
228 self.assertEqual(len(cnt_err), 0, msg = 'Errors for target image file id: %s' % cnt_err)
229
230 # Key verification - tc=884
231 @testcase(884)
232 def test_Package_Dependency(self):
233 cnt_err = []
234 deps = Package_Dependency.objects.values('id', 'package_id', 'depends_on_id')
235 for dep in deps:
236 if (dep['package_id'] == dep['depends_on_id']):
237 cnt_err.append(dep['id'])
238 self.assertEqual(len(cnt_err), 0, msg = 'Errors for package dependency id: %s' % cnt_err)
239
240 # Check if recipe name does not start with a number (0-9) - tc=838
241 @testcase(838)
242 def test_Recipe_Name(self):
243 recipes = Recipe.objects.values('id', 'name')
244 cnt_err = []
245 for recipe in recipes:
246            if recipe['name'][0].isdigit():
247 cnt_err.append(recipe['id'])
248 self.assertEqual(len(cnt_err), 0, msg = 'Errors for recipe id: %s' % cnt_err)
249
250 # Check if recipe section matches the content of the SECTION variable (if set) in file_path - tc=839
251 @testcase(839)
252 def test_Recipe_DB_Section_Match_Recipe_File_Section(self):
253 recipes = Recipe.objects.values('id', 'section', 'file_path')
254 cnt_err = []
255 for recipe in recipes:
256 file_path = self.fix_file_path(recipe['file_path'])
257 file_exists = os.path.isfile(file_path)
258            if (not file_path or not file_exists):
259 cnt_err.append(recipe['id'])
260 else:
261 file_section = self.recipe_parse(file_path, "SECTION = ")
262 db_section = recipe['section']
263 if file_section:
264 if (db_section != file_section):
265 cnt_err.append(recipe['id'])
266 self.assertEqual(len(cnt_err), 0, msg = 'Errors for recipe id: %s' % cnt_err)
267
268 # Check if recipe license matches the content of the LICENSE variable (if set) in file_path - tc=840
269 @testcase(840)
270 def test_Recipe_DB_License_Match_Recipe_File_License(self):
271 recipes = Recipe.objects.values('id', 'license', 'file_path')
272 cnt_err = []
273 for recipe in recipes:
274 file_path = self.fix_file_path(recipe['file_path'])
275 file_exists = os.path.isfile(file_path)
276            if (not file_path or not file_exists):
277 cnt_err.append(recipe['id'])
278 else:
279 file_license = self.recipe_parse(file_path, "LICENSE = ")
280 db_license = recipe['license']
281 if file_license:
282 if (db_license != file_license):
283 cnt_err.append(recipe['id'])
284 self.assertEqual(len(cnt_err), 0, msg = 'Errors for recipe id: %s' % cnt_err)
285
286 # Check if recipe homepage matches the content of the HOMEPAGE variable (if set) in file_path - tc=841
287 @testcase(841)
288 def test_Recipe_DB_Homepage_Match_Recipe_File_Homepage(self):
289 recipes = Recipe.objects.values('id', 'homepage', 'file_path')
290 cnt_err = []
291 for recipe in recipes:
292 file_path = self.fix_file_path(recipe['file_path'])
293 file_exists = os.path.isfile(file_path)
294            if (not file_path or not file_exists):
295 cnt_err.append(recipe['id'])
296 else:
297 file_homepage = self.recipe_parse(file_path, "HOMEPAGE = ")
298 db_homepage = recipe['homepage']
299 if file_homepage:
300 if (db_homepage != file_homepage):
301 cnt_err.append(recipe['id'])
302 self.assertEqual(len(cnt_err), 0, msg = 'Errors for recipe id: %s' % cnt_err)
303
304 # Check if recipe bugtracker matches the content of the BUGTRACKER variable (if set) in file_path - tc=842
305 @testcase(842)
306 def test_Recipe_DB_Bugtracker_Match_Recipe_File_Bugtracker(self):
307 recipes = Recipe.objects.values('id', 'bugtracker', 'file_path')
308 cnt_err = []
309 for recipe in recipes:
310 file_path = self.fix_file_path(recipe['file_path'])
311 file_exists = os.path.isfile(file_path)
312            if (not file_path or not file_exists):
313 cnt_err.append(recipe['id'])
314 else:
315 file_bugtracker = self.recipe_parse(file_path, "BUGTRACKER = ")
316 db_bugtracker = recipe['bugtracker']
317 if file_bugtracker:
318 if (db_bugtracker != file_bugtracker):
319 cnt_err.append(recipe['id'])
320 self.assertEqual(len(cnt_err), 0, msg = 'Errors for recipe id: %s' % cnt_err)
321
322    # Recipe key verification: a recipe must not depend on a recipe having the same name - tc=883
323 @testcase(883)
324 def test_Recipe_Dependency(self):
325 deps = Recipe_Dependency.objects.values('id', 'recipe_id', 'depends_on_id')
326 cnt_err = []
327 for dep in deps:
328 if (not dep['recipe_id'] or not dep['depends_on_id']):
329 cnt_err.append(dep['id'])
330 else:
331 name = Recipe.objects.filter(id = dep['recipe_id']).values('name')
332 dep_name = Recipe.objects.filter(id = dep['depends_on_id']).values('name')
333                if (list(name) == list(dep_name)):
334 cnt_err.append(dep['id'])
335 self.assertEqual(len(cnt_err), 0, msg = 'Errors for recipe dependency id: %s' % cnt_err)
336
337 # Check if package name does not start with a number (0-9) - tc=846
338 @testcase(846)
339 def test_Package_Name_For_Number(self):
340 packages = Package.objects.filter(~Q(size = -1)).values('id', 'name')
341 cnt_err = []
342 for package in packages:
343            if package['name'][0].isdigit():
344 cnt_err.append(package['id'])
345 self.assertEqual(len(cnt_err), 0, msg = 'Errors for package id: %s' % cnt_err)
346
347 # Check if package version starts with a number (0-9) - tc=847
348 @testcase(847)
349 def test_Package_Version_Starts_With_Number(self):
350 packages = Package.objects.filter(~Q(size = -1)).values('id', 'version')
351 cnt_err = []
352 for package in packages:
353            if not package['version'][0].isdigit():
354 cnt_err.append(package['id'])
355 self.assertEqual(len(cnt_err), 0, msg = 'Errors for package id: %s' % cnt_err)
356
357 # Check if package revision starts with 'r' - tc=848
358 @testcase(848)
359 def test_Package_Revision_Starts_With_r(self):
360 packages = Package.objects.filter(~Q(size = -1)).values('id', 'revision')
361 cnt_err = []
362 for package in packages:
363            if not package['revision'].startswith("r"):
364 cnt_err.append(package['id'])
365 self.assertEqual(len(cnt_err), 0, msg = 'Errors for package id: %s' % cnt_err)
366
367 # Check the validity of the package build_id
368 ### TC must be added in test run
369 @testcase(1038)
370 def test_Package_Build_Id(self):
371 packages = Package.objects.filter(~Q(size = -1)).values('id', 'build_id')
372 cnt_err = []
373 for package in packages:
374 build_id = Build.objects.filter(id = package['build_id']).values('id')
375 if (not build_id):
376 cnt_err.append(package['id'])
377 self.assertEqual(len(cnt_err), 0, msg = 'Errors for package id: %s' % cnt_err)
378
379 # Check the validity of package recipe_id
380 ### TC must be added in test run
381 @testcase(1039)
382 def test_Package_Recipe_Id(self):
383 packages = Package.objects.filter(~Q(size = -1)).values('id', 'recipe_id')
384 cnt_err = []
385 for package in packages:
386 recipe_id = Recipe.objects.filter(id = package['recipe_id']).values('id')
387 if (not recipe_id):
388 cnt_err.append(package['id'])
389 self.assertEqual(len(cnt_err), 0, msg = 'Errors for package id: %s' % cnt_err)
390
391 # Check if package installed_size field is not null
392    ### TC must be added in test run
393 @testcase(1040)
394 def test_Package_Installed_Size_Not_NULL(self):
395 packages = Package.objects.filter(installed_size__isnull = True).values('id')
396 cnt_err = []
397 for package in packages:
398 cnt_err.append(package['id'])
399 self.assertEqual(len(cnt_err), 0, msg = 'Errors for package id: %s' % cnt_err)
400
401    # Check that requests to all layer index URLs return HTTP status code 200 - tc=843
402 @testcase(843)
403 def test_Layers_Requests_Exit_Code(self):
404 layers = Layer.objects.values('id', 'layer_index_url')
405 cnt_err = []
406 for layer in layers:
407 resp = urllib.urlopen(layer['layer_index_url'])
408 if (resp.getcode() != 200):
409 cnt_err.append(layer['id'])
410 self.assertEqual(len(cnt_err), 0, msg = 'Errors for layer id: %s' % cnt_err)
411
412 # Check if the output of bitbake-layers show_layers matches the info from database - tc=895
413 @testcase(895)
414 def test_Layers_Show_Layers(self):
415 layers = Layer.objects.values('id', 'name', 'local_path')
416 cmd = commands.getoutput('bitbake-layers show_layers')
417 cnt_err = []
418 for layer in layers:
419            if (layer['name'] not in cmd) or (layer['local_path'] not in cmd):
420 cnt_err.append(layer['id'])
421 self.assertEqual(len(cnt_err), 0, msg = 'Errors for layer id: %s' % cnt_err)
422
423 # Check if django server starts regardless of the timezone set on the machine - tc=905
424 @testcase(905)
425 def test_Start_Django_Timezone(self):
426 current_path = os.getcwd()
427 zonefilelist = []
428 ZONEINFOPATH = '/usr/share/zoneinfo/'
429 os.chdir("../bitbake/lib/toaster/")
430        cnt_err = []
431 for filename in os.listdir(ZONEINFOPATH):
432 if os.path.isfile(os.path.join(ZONEINFOPATH, filename)):
433 zonefilelist.append(filename)
434 for k in range(len(zonefilelist)):
435 if k <= 5:
436 files = zonefilelist[k]
437 os.system("export TZ="+str(files)+"; python manage.py runserver > /dev/null 2>&1 &")
438 time.sleep(3)
439 pid = subprocess.check_output("ps aux | grep '[/u]sr/bin/python manage.py runserver' | awk '{print $2}'", shell = True)
440 if pid:
441 os.system("kill -9 "+str(pid))
442 else:
443 cnt_err.append(zonefilelist[k])
444        self.assertEqual(len(cnt_err), 0, msg = 'Django server does not start with timezone(s): %s' % cnt_err)
445 os.chdir(current_path)
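Editor's note: the four DB-vs-file comparison tests above (SECTION, LICENSE, HOMEPAGE, BUGTRACKER) repeat the same shape. A minimal sketch of how they could be factored into one hypothetical helper -- illustrative only, not part of the patch; it assumes the same fix_file_path() and recipe_parse() helpers the tests already rely on:

    # Hypothetical helper, not in the patch -- sketch of the shared pattern.
    def check_recipe_field_matches_file(self, field, variable):
        recipes = Recipe.objects.values('id', field, 'file_path')
        cnt_err = []
        for recipe in recipes:
            file_path = self.fix_file_path(recipe['file_path'])
            if not file_path or not os.path.isfile(file_path):
                cnt_err.append(recipe['id'])
                continue
            # recipe_parse() returns the value of "<variable> = " when set in the file
            file_value = self.recipe_parse(file_path, "%s = " % variable)
            if file_value and recipe[field] != file_value:
                cnt_err.append(recipe['id'])
        return cnt_err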
diff --git a/meta/lib/oeqa/selftest/base.py b/meta/lib/oeqa/selftest/base.py
new file mode 100644
index 0000000000..80b9b4b312
--- /dev/null
+++ b/meta/lib/oeqa/selftest/base.py
@@ -0,0 +1,131 @@
1# Copyright (c) 2013 Intel Corporation
2#
3# Released under the MIT license (see COPYING.MIT)
4
5
6# DESCRIPTION
7# Base class inherited by test classes in meta/lib/oeqa/selftest
8
9import unittest
10import os
11import sys
12import shutil
13import logging
14import errno
15
16import oeqa.utils.ftools as ftools
17from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_test_layer
18from oeqa.utils.decorators import LogResults
19
20@LogResults
21class oeSelfTest(unittest.TestCase):
22
23 log = logging.getLogger("selftest.base")
24 longMessage = True
25
26 def __init__(self, methodName="runTest"):
27 self.builddir = os.environ.get("BUILDDIR")
28 self.localconf_path = os.path.join(self.builddir, "conf/local.conf")
29 self.testinc_path = os.path.join(self.builddir, "conf/selftest.inc")
30 self.testlayer_path = oeSelfTest.testlayer_path
31 self._extra_tear_down_commands = []
32 self._track_for_cleanup = []
33 super(oeSelfTest, self).__init__(methodName)
34
35 def setUp(self):
36 os.chdir(self.builddir)
37 # we don't know what the previous test left around in config or inc files
38        # if it failed, so we need a fresh start
39 try:
40 os.remove(self.testinc_path)
41 except OSError as e:
42 if e.errno != errno.ENOENT:
43 raise
44 for root, _, files in os.walk(self.testlayer_path):
45 for f in files:
46 if f == 'test_recipe.inc':
47 os.remove(os.path.join(root, f))
48 # tests might need their own setup
49        # but if they override this one they have to call
50 # super each time, so let's give them an alternative
51 self.setUpLocal()
52
53 def setUpLocal(self):
54 pass
55
56 def tearDown(self):
57 if self._extra_tear_down_commands:
58 failed_extra_commands = []
59 for command in self._extra_tear_down_commands:
60 result = runCmd(command, ignore_status=True)
61 if not result.status == 0:
62 failed_extra_commands.append(command)
63 if failed_extra_commands:
64 self.log.warning("tearDown commands have failed: %s" % ', '.join(map(str, failed_extra_commands)))
65 self.log.debug("Trying to move on.")
66 self._extra_tear_down_commands = []
67
68 if self._track_for_cleanup:
69 for path in self._track_for_cleanup:
70 if os.path.isdir(path):
71 shutil.rmtree(path)
72 if os.path.isfile(path):
73 os.remove(path)
74 self._track_for_cleanup = []
75
76 self.tearDownLocal()
77
78 def tearDownLocal(self):
79 pass
80
81 # add test specific commands to the tearDown method.
82 def add_command_to_tearDown(self, command):
83 self.log.debug("Adding command '%s' to tearDown for this test." % command)
84 self._extra_tear_down_commands.append(command)
85 # add test specific files or directories to be removed in the tearDown method
86 def track_for_cleanup(self, path):
87 self.log.debug("Adding path '%s' to be cleaned up when test is over" % path)
88 self._track_for_cleanup.append(path)
89
90 # write to <builddir>/conf/selftest.inc
91 def write_config(self, data):
92 self.log.debug("Writing to: %s\n%s\n" % (self.testinc_path, data))
93 ftools.write_file(self.testinc_path, data)
94
95 # append to <builddir>/conf/selftest.inc
96 def append_config(self, data):
97 self.log.debug("Appending to: %s\n%s\n" % (self.testinc_path, data))
98 ftools.append_file(self.testinc_path, data)
99
100 # remove data from <builddir>/conf/selftest.inc
101 def remove_config(self, data):
102        self.log.debug("Removing from: %s\n%s\n" % (self.testinc_path, data))
103 ftools.remove_from_file(self.testinc_path, data)
104
105    # write to meta-selftest/recipes-test/<recipe>/test_recipe.inc
106 def write_recipeinc(self, recipe, data):
107 inc_file = os.path.join(self.testlayer_path, 'recipes-test', recipe, 'test_recipe.inc')
108 self.log.debug("Writing to: %s\n%s\n" % (inc_file, data))
109 ftools.write_file(inc_file, data)
110
111    # append data to meta-selftest/recipes-test/<recipe>/test_recipe.inc
112 def append_recipeinc(self, recipe, data):
113 inc_file = os.path.join(self.testlayer_path, 'recipes-test', recipe, 'test_recipe.inc')
114 self.log.debug("Appending to: %s\n%s\n" % (inc_file, data))
115 ftools.append_file(inc_file, data)
116
117    # remove data from meta-selftest/recipes-test/<recipe>/test_recipe.inc
118 def remove_recipeinc(self, recipe, data):
119 inc_file = os.path.join(self.testlayer_path, 'recipes-test', recipe, 'test_recipe.inc')
120 self.log.debug("Removing from: %s\n%s\n" % (inc_file, data))
121 ftools.remove_from_file(inc_file, data)
122
123    # delete meta-selftest/recipes-test/<recipe>/test_recipe.inc file
124 def delete_recipeinc(self, recipe):
125 inc_file = os.path.join(self.testlayer_path, 'recipes-test', recipe, 'test_recipe.inc')
126 self.log.debug("Deleting file: %s" % inc_file)
127 try:
128 os.remove(inc_file)
129 except OSError as e:
130 if e.errno != errno.ENOENT:
131 raise
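A minimal usage sketch for the base class above (recipe and variable names are illustrative): each test starts with a clean conf/selftest.inc, and cleanup is registered up front rather than done inline.

import os
from oeqa.selftest.base import oeSelfTest
from oeqa.utils.commands import bitbake

class ExampleTests(oeSelfTest):
    def test_example(self):
        # setUp() has already removed conf/selftest.inc, so this starts clean
        self.write_config('TEST_EXAMPLE = "1"')
        # registered paths and commands are handled by tearDown(), even if the test fails
        self.track_for_cleanup(os.path.join(self.builddir, 'tmp-example'))
        self.add_command_to_tearDown('bitbake -ccleansstate m4')
        bitbake('m4')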
diff --git a/meta/lib/oeqa/selftest/bblayers.py b/meta/lib/oeqa/selftest/bblayers.py
new file mode 100644
index 0000000000..1ead8e8671
--- /dev/null
+++ b/meta/lib/oeqa/selftest/bblayers.py
@@ -0,0 +1,43 @@
1import unittest
2import os
3import logging
4import re
5import shutil
6
7import oeqa.utils.ftools as ftools
8from oeqa.selftest.base import oeSelfTest
9from oeqa.utils.commands import runCmd
10from oeqa.utils.decorators import testcase
11
12class BitbakeLayers(oeSelfTest):
13
14 @testcase(756)
15 def test_bitbakelayers_showcrossdepends(self):
16 result = runCmd('bitbake-layers show-cross-depends')
17 self.assertTrue('aspell' in result.output)
18
19 @testcase(83)
20 def test_bitbakelayers_showlayers(self):
21 result = runCmd('bitbake-layers show_layers')
22 self.assertTrue('meta-selftest' in result.output)
23
24 @testcase(93)
25 def test_bitbakelayers_showappends(self):
26 result = runCmd('bitbake-layers show_appends')
27 self.assertTrue('xcursor-transparent-theme_0.1.1.bbappend' in result.output, msg='xcursor-transparent-theme_0.1.1.bbappend file was not recognised')
28
29 @testcase(90)
30 def test_bitbakelayers_showoverlayed(self):
31 result = runCmd('bitbake-layers show_overlayed')
32        self.assertTrue('aspell' in result.output, msg='aspell overlayed recipe was not recognised')
33
34 @testcase(95)
35 def test_bitbakelayers_flatten(self):
36 self.assertFalse(os.path.isdir(os.path.join(self.builddir, 'test')))
37 result = runCmd('bitbake-layers flatten test')
38 bb_file = os.path.join(self.builddir, 'test/recipes-graphics/xcursor-transparent-theme/xcursor-transparent-theme_0.1.1.bb')
39 self.assertTrue(os.path.isfile(bb_file))
40 contents = ftools.read_file(bb_file)
41 find_in_contents = re.search("##### bbappended from meta-selftest #####\n(.*\n)*include test_recipe.inc", contents)
42 shutil.rmtree(os.path.join(self.builddir, 'test'))
43 self.assertTrue(find_in_contents)
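The flatten test above keys on the banner that bitbake-layers writes when it merges bbappend content into the flattened recipe. A standalone check of that regex against a sample of the expected file content (the middle line is illustrative):

import re

sample = ("##### bbappended from meta-selftest #####\n"
          "FILESEXTRAPATHS_prepend := \"${THISDIR}/files:\"\n"
          "include test_recipe.inc")
assert re.search("##### bbappended from meta-selftest #####\n(.*\n)*include test_recipe.inc", sample)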
diff --git a/meta/lib/oeqa/selftest/bbtests.py b/meta/lib/oeqa/selftest/bbtests.py
new file mode 100644
index 0000000000..68f97bd8e3
--- /dev/null
+++ b/meta/lib/oeqa/selftest/bbtests.py
@@ -0,0 +1,178 @@
1import unittest
2import os
3import logging
4import re
5import shutil
6
7import oeqa.utils.ftools as ftools
8from oeqa.selftest.base import oeSelfTest
9from oeqa.utils.commands import runCmd, bitbake, get_bb_var
10from oeqa.utils.decorators import testcase
11
12class BitbakeTests(oeSelfTest):
13
14 @testcase(789)
15 def test_run_bitbake_from_dir_1(self):
16 os.chdir(os.path.join(self.builddir, 'conf'))
17 bitbake('-e')
18
19 @testcase(790)
20 def test_run_bitbake_from_dir_2(self):
21 my_env = os.environ.copy()
22 my_env['BBPATH'] = my_env['BUILDDIR']
23 os.chdir(os.path.dirname(os.environ['BUILDDIR']))
24 bitbake('-e', env=my_env)
25
26 @testcase(806)
27 def test_event_handler(self):
28 self.write_config("INHERIT += \"test_events\"")
29 result = bitbake('m4-native')
30 find_build_started = re.search("NOTE: Test for bb\.event\.BuildStarted(\n.*)*NOTE: Preparing runqueue", result.output)
31 find_build_completed = re.search("Tasks Summary:.*(\n.*)*NOTE: Test for bb\.event\.BuildCompleted", result.output)
32 self.assertTrue(find_build_started, msg = "Match failed in:\n%s" % result.output)
33 self.assertTrue(find_build_completed, msg = "Match failed in:\n%s" % result.output)
34 self.assertFalse('Test for bb.event.InvalidEvent' in result.output)
35
36 @testcase(103)
37 def test_local_sstate(self):
38 bitbake('m4-native -ccleansstate')
39 bitbake('m4-native')
40 bitbake('m4-native -cclean')
41 result = bitbake('m4-native')
42 find_setscene = re.search("m4-native.*do_.*_setscene", result.output)
43 self.assertTrue(find_setscene)
44
45 @testcase(105)
46 def test_bitbake_invalid_recipe(self):
47 result = bitbake('-b asdf', ignore_status=True)
48 self.assertTrue("ERROR: Unable to find any recipe file matching 'asdf'" in result.output)
49
50 @testcase(107)
51 def test_bitbake_invalid_target(self):
52 result = bitbake('asdf', ignore_status=True)
53 self.assertTrue("ERROR: Nothing PROVIDES 'asdf'" in result.output)
54
55 @testcase(106)
56 def test_warnings_errors(self):
57 result = bitbake('-b asdf', ignore_status=True)
58 find_warnings = re.search("Summary: There w.{2,3}? [1-9][0-9]* WARNING messages* shown", result.output)
59 find_errors = re.search("Summary: There w.{2,3}? [1-9][0-9]* ERROR messages* shown", result.output)
60        self.assertTrue(find_warnings, msg="Did not find the number of warnings at the end of the build:\n" + result.output)
61        self.assertTrue(find_errors, msg="Did not find the number of errors at the end of the build:\n" + result.output)
62
63 @testcase(108)
64 def test_invalid_patch(self):
65 self.write_recipeinc('man', 'SRC_URI += "file://man-1.5h1-make.patch"')
66 result = bitbake('man -c patch', ignore_status=True)
67 self.delete_recipeinc('man')
68 bitbake('-cclean man')
69 self.assertTrue("ERROR: Function failed: patch_do_patch" in result.output)
70
71 @testcase(163)
72 def test_force_task(self):
73 bitbake('m4-native')
74 result = bitbake('-C compile m4-native')
75 look_for_tasks = ['do_compile', 'do_install', 'do_populate_sysroot']
76 for task in look_for_tasks:
77 find_task = re.search("m4-native.*%s" % task, result.output)
78 self.assertTrue(find_task)
79
80 @testcase(167)
81 def test_bitbake_g(self):
82 result = bitbake('-g core-image-full-cmdline')
83 self.assertTrue('NOTE: PN build list saved to \'pn-buildlist\'' in result.output)
84 self.assertTrue('openssh' in ftools.read_file(os.path.join(self.builddir, 'pn-buildlist')))
85 for f in ['pn-buildlist', 'pn-depends.dot', 'package-depends.dot', 'task-depends.dot']:
86 os.remove(f)
87
88 @testcase(899)
89 def test_image_manifest(self):
90 bitbake('core-image-minimal')
91 deploydir = get_bb_var("DEPLOY_DIR_IMAGE", target="core-image-minimal")
92 imagename = get_bb_var("IMAGE_LINK_NAME", target="core-image-minimal")
93 manifest = os.path.join(deploydir, imagename + ".manifest")
94 self.assertTrue(os.path.islink(manifest), msg="No manifest file created for image")
95
96 @testcase(168)
97 def test_invalid_recipe_src_uri(self):
98 data = 'SRC_URI = "file://invalid"'
99 self.write_recipeinc('man', data)
100 bitbake('-ccleanall man')
101 result = bitbake('-c fetch man', ignore_status=True)
102 bitbake('-ccleanall man')
103 self.delete_recipeinc('man')
104        self.assertEqual(result.status, 1, msg='Command succeeded when it should have failed')
105 self.assertTrue('Fetcher failure: Unable to find file file://invalid anywhere. The paths that were searched were:' in result.output)
106 self.assertTrue('ERROR: Function failed: Fetcher failure for URL: \'file://invalid\'. Unable to fetch URL from any source.' in result.output)
107
108 @testcase(171)
109 def test_rename_downloaded_file(self):
110 data = 'SRC_URI_append = ";downloadfilename=test-aspell.tar.gz"'
111 self.write_recipeinc('aspell', data)
112 bitbake('-ccleanall aspell')
113 result = bitbake('-c fetch aspell', ignore_status=True)
114 self.delete_recipeinc('aspell')
115 self.assertEqual(result.status, 0)
116 self.assertTrue(os.path.isfile(os.path.join(get_bb_var("DL_DIR"), 'test-aspell.tar.gz')))
117 self.assertTrue(os.path.isfile(os.path.join(get_bb_var("DL_DIR"), 'test-aspell.tar.gz.done')))
118 bitbake('-ccleanall aspell')
119
120 @testcase(1028)
121 def test_environment(self):
122 self.append_config("TEST_ENV=\"localconf\"")
123 result = runCmd('bitbake -e | grep TEST_ENV=')
124 self.assertTrue('localconf' in result.output)
125 self.remove_config("TEST_ENV=\"localconf\"")
126
127 @testcase(1029)
128 def test_dry_run(self):
129 result = runCmd('bitbake -n m4-native')
130 self.assertEqual(0, result.status)
131
132 @testcase(1030)
133 def test_just_parse(self):
134 result = runCmd('bitbake -p')
135 self.assertEqual(0, result.status)
136
137 @testcase(1031)
138 def test_version(self):
139 result = runCmd('bitbake -s | grep wget')
140 find = re.search("wget *:([0-9a-zA-Z\.\-]+)", result.output)
141 self.assertTrue(find)
142
143 @testcase(1032)
144 def test_prefile(self):
145 preconf = os.path.join(self.builddir, 'conf/prefile.conf')
146 self.track_for_cleanup(preconf)
147        ftools.write_file(preconf, "TEST_PREFILE=\"prefile\"")
148 result = runCmd('bitbake -r conf/prefile.conf -e | grep TEST_PREFILE=')
149 self.assertTrue('prefile' in result.output)
150 self.append_config("TEST_PREFILE=\"localconf\"")
151 result = runCmd('bitbake -r conf/prefile.conf -e | grep TEST_PREFILE=')
152 self.assertTrue('localconf' in result.output)
153 self.remove_config("TEST_PREFILE=\"localconf\"")
154
155 @testcase(1033)
156 def test_postfile(self):
157 postconf = os.path.join(self.builddir, 'conf/postfile.conf')
158 self.track_for_cleanup(postconf)
159        ftools.write_file(postconf, "TEST_POSTFILE=\"postfile\"")
160 self.append_config("TEST_POSTFILE=\"localconf\"")
161 result = runCmd('bitbake -R conf/postfile.conf -e | grep TEST_POSTFILE=')
162 self.assertTrue('postfile' in result.output)
163 self.remove_config("TEST_POSTFILE=\"localconf\"")
164
165 @testcase(1034)
166 def test_checkuri(self):
167 result = runCmd('bitbake -c checkuri m4')
168 self.assertEqual(0, result.status)
169
170 @testcase(1035)
171 def test_continue(self):
172        self.write_recipeinc('man', "\ndo_fail_task () {\nexit 1 \n}\n\naddtask do_fail_task before do_fetch\n")
173 runCmd('bitbake -c cleanall man xcursor-transparent-theme')
174 result = runCmd('bitbake man xcursor-transparent-theme -k', ignore_status=True)
175 errorpos = result.output.find('ERROR: Function failed: do_fail_task')
176 manver = re.search("NOTE: recipe xcursor-transparent-theme-(.*?): task do_unpack: Started", result.output)
177 continuepos = result.output.find('NOTE: recipe xcursor-transparent-theme-%s: task do_unpack: Started' % manver.group(1))
178        self.assertLess(errorpos, continuepos)
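Most negative tests in this file share one pattern: run bitbake with ignore_status=True so a non-zero exit does not raise, then assert on both the status and the output. The pattern in isolation (target name reused from test_bitbake_invalid_recipe above):

result = bitbake('-b asdf', ignore_status=True)  # would raise on failure without ignore_status
self.assertNotEqual(result.status, 0, msg='Command succeeded when it should have failed')
self.assertTrue("ERROR: Unable to find any recipe file matching 'asdf'" in result.output)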
diff --git a/meta/lib/oeqa/selftest/buildhistory.py b/meta/lib/oeqa/selftest/buildhistory.py
new file mode 100644
index 0000000000..d8cae4664b
--- /dev/null
+++ b/meta/lib/oeqa/selftest/buildhistory.py
@@ -0,0 +1,45 @@
1import unittest
2import os
3import re
4import shutil
5import datetime
6
7import oeqa.utils.ftools as ftools
8from oeqa.selftest.base import oeSelfTest
9from oeqa.utils.commands import Command, runCmd, bitbake, get_bb_var, get_test_layer
10
11
12class BuildhistoryBase(oeSelfTest):
13
14 def config_buildhistory(self, tmp_bh_location=False):
15        if ('buildhistory' not in get_bb_var('USER_CLASSES')) and ('buildhistory' not in get_bb_var('INHERIT')):
16 add_buildhistory_config = 'INHERIT += "buildhistory"\nBUILDHISTORY_COMMIT = "1"'
17 self.append_config(add_buildhistory_config)
18
19 if tmp_bh_location:
20 # Using a temporary buildhistory location for testing
21 tmp_bh_dir = os.path.join(self.builddir, "tmp_buildhistory_%s" % datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
22 buildhistory_dir_config = "BUILDHISTORY_DIR = \"%s\"" % tmp_bh_dir
23 self.append_config(buildhistory_dir_config)
24 self.track_for_cleanup(tmp_bh_dir)
25
26 def run_buildhistory_operation(self, target, global_config='', target_config='', change_bh_location=False, expect_error=False, error_regex=''):
27 if change_bh_location:
28 tmp_bh_location = True
29 else:
30 tmp_bh_location = False
31 self.config_buildhistory(tmp_bh_location)
32
33 self.append_config(global_config)
34 self.append_recipeinc(target, target_config)
35 bitbake("-cclean %s" % target)
36 result = bitbake(target, ignore_status=True)
37 self.remove_config(global_config)
38 self.remove_recipeinc(target, target_config)
39
40 if expect_error:
41 self.assertEqual(result.status, 1, msg="Error expected for global config '%s' and target config '%s'" % (global_config, target_config))
42 search_for_error = re.search(error_regex, result.output)
43 self.assertTrue(search_for_error, msg="Could not find desired error in output: %s" % error_regex)
44 else:
45 self.assertEqual(result.status, 0, msg="Command 'bitbake %s' has failed unexpectedly: %s" % (target, result.output))
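A usage sketch for run_buildhistory_operation(), mirroring how the PR-backwards test later in this patch drives it: build once at PR r1 in a fresh buildhistory location, then rebuild at r0 and expect the version-going-backwards QA error (error_regex abbreviated here):

target = 'xcursor-transparent-theme'
self.run_buildhistory_operation(target, target_config='PR = "r1"', change_bh_location=True)
self.run_buildhistory_operation(target, target_config='PR = "r0"',
                                change_bh_location=False,
                                expect_error=True, error_regex='went backwards')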
diff --git a/meta/lib/oeqa/selftest/buildoptions.py b/meta/lib/oeqa/selftest/buildoptions.py
new file mode 100644
index 0000000000..a250cae0e1
--- /dev/null
+++ b/meta/lib/oeqa/selftest/buildoptions.py
@@ -0,0 +1,120 @@
1import unittest
2import os
3import logging
4import re
5
6from oeqa.selftest.base import oeSelfTest
7from oeqa.selftest.buildhistory import BuildhistoryBase
8from oeqa.utils.commands import runCmd, bitbake, get_bb_var
9import oeqa.utils.ftools as ftools
10from oeqa.utils.decorators import testcase
11
12class ImageOptionsTests(oeSelfTest):
13
14 @testcase(761)
15 def test_incremental_image_generation(self):
16 bitbake("-c cleanall core-image-minimal")
17 self.write_config('INC_RPM_IMAGE_GEN = "1"')
18 self.append_config('IMAGE_FEATURES += "ssh-server-openssh"')
19 bitbake("core-image-minimal")
20 res = runCmd("grep 'Installing openssh-sshd' %s" % (os.path.join(get_bb_var("WORKDIR", "core-image-minimal"), "temp/log.do_rootfs")), ignore_status=True)
21 self.remove_config('IMAGE_FEATURES += "ssh-server-openssh"')
22 self.assertEqual(0, res.status, msg="No match for openssh-sshd in log.do_rootfs")
23 bitbake("core-image-minimal")
24 res = runCmd("grep 'Removing openssh-sshd' %s" %(os.path.join(get_bb_var("WORKDIR", "core-image-minimal"), "temp/log.do_rootfs")),ignore_status=True)
25 self.assertEqual(0, res.status, msg="openssh-sshd was not removed from image")
26
27 @testcase(925)
28 def test_rm_old_image(self):
29 bitbake("core-image-minimal")
30 deploydir = get_bb_var("DEPLOY_DIR_IMAGE", target="core-image-minimal")
31 imagename = get_bb_var("IMAGE_LINK_NAME", target="core-image-minimal")
32 deploydir_files = os.listdir(deploydir)
33 track_original_files = []
34 for image_file in deploydir_files:
35 if imagename in image_file and os.path.islink(os.path.join(deploydir, image_file)):
36 track_original_files.append(os.path.realpath(os.path.join(deploydir, image_file)))
37 self.append_config("RM_OLD_IMAGE = \"1\"")
38 bitbake("-C rootfs core-image-minimal")
39 deploydir_files = os.listdir(deploydir)
40 remaining_not_expected = [path for path in track_original_files if os.path.basename(path) in deploydir_files]
41        self.assertFalse(remaining_not_expected, msg="\nThe following image files were not removed: %s" % ', '.join(map(str, remaining_not_expected)))
42
43 @testcase(286)
44 def test_ccache_tool(self):
45 bitbake("ccache-native")
46 self.assertTrue(os.path.isfile(os.path.join(get_bb_var('STAGING_BINDIR_NATIVE', 'ccache-native'), "ccache")))
47 self.write_config('INHERIT += "ccache"')
48 bitbake("m4 -c cleansstate")
49 bitbake("m4 -c compile")
50 res = runCmd("grep ccache %s" % (os.path.join(get_bb_var("WORKDIR","m4"),"temp/log.do_compile")), ignore_status=True)
51 self.assertEqual(0, res.status, msg="No match for ccache in m4 log.do_compile")
52 bitbake("ccache-native -ccleansstate")
53
54
55class DiskMonTest(oeSelfTest):
56
57 @testcase(277)
58 def test_stoptask_behavior(self):
59 self.write_config('BB_DISKMON_DIRS = "STOPTASKS,${TMPDIR},100000G,100K"')
60 res = bitbake("m4", ignore_status = True)
61 self.assertTrue('ERROR: No new tasks can be executed since the disk space monitor action is "STOPTASKS"!' in res.output)
62 self.assertEqual(res.status, 1)
63 self.write_config('BB_DISKMON_DIRS = "ABORT,${TMPDIR},100000G,100K"')
64 res = bitbake("m4", ignore_status = True)
65 self.assertTrue('ERROR: Immediately abort since the disk space monitor action is "ABORT"!' in res.output)
66 self.assertEqual(res.status, 1)
67 self.write_config('BB_DISKMON_DIRS = "WARN,${TMPDIR},100000G,100K"')
68 res = bitbake("m4")
69 self.assertTrue('WARNING: The free space' in res.output)
70
71class SanityOptionsTest(oeSelfTest):
72
73 @testcase(927)
74 def test_options_warnqa_errorqa_switch(self):
75 bitbake("xcursor-transparent-theme -ccleansstate")
76
77 if "packages-list" not in get_bb_var("ERROR_QA"):
78 self.write_config("ERROR_QA_append = \" packages-list\"")
79
80 self.write_recipeinc('xcursor-transparent-theme', 'PACKAGES += \"${PN}-dbg\"')
81 res = bitbake("xcursor-transparent-theme", ignore_status=True)
82 self.delete_recipeinc('xcursor-transparent-theme')
83 self.assertTrue("ERROR: QA Issue: xcursor-transparent-theme-dbg is listed in PACKAGES multiple times, this leads to packaging errors." in res.output, msg=res.output)
84 self.assertEqual(res.status, 1)
85 self.write_recipeinc('xcursor-transparent-theme', 'PACKAGES += \"${PN}-dbg\"')
86 self.append_config('ERROR_QA_remove = "packages-list"')
87 self.append_config('WARN_QA_append = " packages-list"')
88 bitbake("xcursor-transparent-theme -ccleansstate")
89 res = bitbake("xcursor-transparent-theme")
90 self.delete_recipeinc('xcursor-transparent-theme')
91 self.assertTrue("WARNING: QA Issue: xcursor-transparent-theme-dbg is listed in PACKAGES multiple times, this leads to packaging errors." in res.output, msg=res.output)
92
93 @testcase(278)
94 def test_sanity_userspace_dependency(self):
95 self.append_config('WARN_QA_append = " unsafe-references-in-binaries unsafe-references-in-scripts"')
96 bitbake("-ccleansstate gzip nfs-utils")
97 res = bitbake("gzip nfs-utils")
98 self.assertTrue("WARNING: QA Issue: gzip" in res.output)
99 self.assertTrue("WARNING: QA Issue: nfs-utils" in res.output)
100
101class BuildhistoryTests(BuildhistoryBase):
102
103 @testcase(293)
104 def test_buildhistory_basic(self):
105 self.run_buildhistory_operation('xcursor-transparent-theme')
106 self.assertTrue(os.path.isdir(get_bb_var('BUILDHISTORY_DIR')))
107
108 @testcase(294)
109 def test_buildhistory_buildtime_pr_backwards(self):
110 self.add_command_to_tearDown('cleanup-workdir')
111 target = 'xcursor-transparent-theme'
112 error = "ERROR: QA Issue: Package version for package %s went backwards which would break package feeds from (.*-r1 to .*-r0)" % target
113 self.run_buildhistory_operation(target, target_config="PR = \"r1\"", change_bh_location=True)
114 self.run_buildhistory_operation(target, target_config="PR = \"r0\"", change_bh_location=False, expect_error=True, error_regex=error)
115
116
117
118
119
120
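DiskMonTest above triggers the disk space monitor by demanding an impossible amount of free space. A hedged reading of the BB_DISKMON_DIRS entries it configures, based on the values used in the tests: "<ACTION>,<directory>,<minimum free space>,<minimum free inodes>".

# 100000G of free TMPDIR space cannot be satisfied, so the chosen action
# (STOPTASKS / ABORT / WARN) always fires and the test can assert on its message.
self.write_config('BB_DISKMON_DIRS = "WARN,${TMPDIR},100000G,100K"')
res = bitbake("m4")
self.assertTrue('WARNING: The free space' in res.output)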
diff --git a/meta/lib/oeqa/selftest/oescripts.py b/meta/lib/oeqa/selftest/oescripts.py
new file mode 100644
index 0000000000..31cd50809c
--- /dev/null
+++ b/meta/lib/oeqa/selftest/oescripts.py
@@ -0,0 +1,54 @@
1import datetime
2import unittest
3import os
4import re
5import shutil
6
7import oeqa.utils.ftools as ftools
8from oeqa.selftest.base import oeSelfTest
9from oeqa.selftest.buildhistory import BuildhistoryBase
10from oeqa.utils.commands import Command, runCmd, bitbake, get_bb_var, get_test_layer
11from oeqa.utils.decorators import testcase
12
13class TestScripts(oeSelfTest):
14
15 @testcase(300)
16 def test_cleanup_workdir(self):
17 path = os.path.dirname(get_bb_var('WORKDIR', 'gzip'))
18 old_version_recipe = os.path.join(get_bb_var('COREBASE'), 'meta/recipes-extended/gzip/gzip_1.3.12.bb')
19 old_version = '1.3.12'
20 bitbake("-ccleansstate gzip")
21 bitbake("-ccleansstate -b %s" % old_version_recipe)
22 if os.path.exists(get_bb_var('WORKDIR', "-b %s" % old_version_recipe)):
23 shutil.rmtree(get_bb_var('WORKDIR', "-b %s" % old_version_recipe))
24 if os.path.exists(get_bb_var('WORKDIR', 'gzip')):
25 shutil.rmtree(get_bb_var('WORKDIR', 'gzip'))
26
27 if os.path.exists(path):
28 initial_contents = os.listdir(path)
29 else:
30 initial_contents = []
31
32 bitbake('gzip')
33 intermediary_contents = os.listdir(path)
34 bitbake("-b %s" % old_version_recipe)
35 runCmd('cleanup-workdir')
36 remaining_contents = os.listdir(path)
37
38 expected_contents = [x for x in intermediary_contents if x not in initial_contents]
39 remaining_not_expected = [x for x in remaining_contents if x not in expected_contents]
40 self.assertFalse(remaining_not_expected, msg="Not all necessary content has been deleted from %s: %s" % (path, ', '.join(map(str, remaining_not_expected))))
41 expected_not_remaining = [x for x in expected_contents if x not in remaining_contents]
42 self.assertFalse(expected_not_remaining, msg="The script removed extra contents from %s: %s" % (path, ', '.join(map(str, expected_not_remaining))))
43
44class BuildhistoryDiffTests(BuildhistoryBase):
45
46 @testcase(295)
47 def test_buildhistory_diff(self):
48 self.add_command_to_tearDown('cleanup-workdir')
49 target = 'xcursor-transparent-theme'
50 self.run_buildhistory_operation(target, target_config="PR = \"r1\"", change_bh_location=True)
51 self.run_buildhistory_operation(target, target_config="PR = \"r0\"", change_bh_location=False, expect_error=True)
52 result = runCmd("buildhistory-diff -p %s" % get_bb_var('BUILDHISTORY_DIR'))
53 expected_output = 'PR changed from "r1" to "r0"'
54 self.assertTrue(expected_output in result.output, msg="Did not find expected output: %s" % result.output)
diff --git a/meta/lib/oeqa/selftest/prservice.py b/meta/lib/oeqa/selftest/prservice.py
new file mode 100644
index 0000000000..fb6d68d3bf
--- /dev/null
+++ b/meta/lib/oeqa/selftest/prservice.py
@@ -0,0 +1,121 @@
1import unittest
2import os
3import logging
4import re
5import shutil
6import datetime
7
8import oeqa.utils.ftools as ftools
9from oeqa.selftest.base import oeSelfTest
10from oeqa.utils.commands import runCmd, bitbake, get_bb_var
11from oeqa.utils.decorators import testcase
12
13class BitbakePrTests(oeSelfTest):
14
15 def get_pr_version(self, package_name):
16 pkgdata_dir = get_bb_var('PKGDATA_DIR')
17 package_data_file = os.path.join(pkgdata_dir, 'runtime', package_name)
18 package_data = ftools.read_file(package_data_file)
19 find_pr = re.search("PKGR: r[0-9]+\.([0-9]+)", package_data)
20 self.assertTrue(find_pr)
21 return int(find_pr.group(1))
22
23 def get_task_stamp(self, package_name, recipe_task):
24 stampdata = get_bb_var('STAMP', target=package_name).split('/')
25 prefix = stampdata[-1]
26 package_stamps_path = "/".join(stampdata[:-1])
27 stamps = []
28 for stamp in os.listdir(package_stamps_path):
29 find_stamp = re.match("%s\.%s\.([a-z0-9]{32})" % (prefix, recipe_task), stamp)
30 if find_stamp:
31 stamps.append(find_stamp.group(1))
32        self.assertFalse(len(stamps) == 0, msg="Could not find stamp for task %s for recipe %s" % (recipe_task, package_name))
33 self.assertFalse(len(stamps) > 1, msg="Found multiple %s stamps for the %s recipe in the %s directory." % (recipe_task, package_name, package_stamps_path))
34 return str(stamps[0])
35
36 def increment_package_pr(self, package_name):
37 inc_data = "do_package_append() {\nbb.build.exec_func('do_test_prserv', d)\n}\ndo_test_prserv() {\necho \"The current date is: %s\"\n}" % datetime.datetime.now()
38 self.write_recipeinc(package_name, inc_data)
39 bitbake("-ccleansstate %s" % package_name)
40 res = bitbake(package_name, ignore_status=True)
41 self.delete_recipeinc(package_name)
42 self.assertEqual(res.status, 0, msg=res.output)
43 self.assertTrue("NOTE: Started PRServer with DBfile" in res.output, msg=res.output)
44
45 def config_pr_tests(self, package_name, package_type='rpm', pr_socket='localhost:0'):
46 config_package_data = 'PACKAGE_CLASSES = "package_%s"' % package_type
47 self.write_config(config_package_data)
48 config_server_data = 'PRSERV_HOST = "%s"' % pr_socket
49 self.append_config(config_server_data)
50
51 def run_test_pr_service(self, package_name, package_type='rpm', track_task='do_package', pr_socket='localhost:0'):
52 self.config_pr_tests(package_name, package_type, pr_socket)
53
54 self.increment_package_pr(package_name)
55 pr_1 = self.get_pr_version(package_name)
56 stamp_1 = self.get_task_stamp(package_name, track_task)
57
58 self.increment_package_pr(package_name)
59 pr_2 = self.get_pr_version(package_name)
60 stamp_2 = self.get_task_stamp(package_name, track_task)
61
62 bitbake("-ccleansstate %s" % package_name)
63        self.assertEqual(pr_2 - pr_1, 1)
64        self.assertNotEqual(stamp_1, stamp_2)
65
66 def run_test_pr_export_import(self, package_name, replace_current_db=True):
67 self.config_pr_tests(package_name)
68
69 self.increment_package_pr(package_name)
70 pr_1 = self.get_pr_version(package_name)
71
72 exported_db_path = os.path.join(self.builddir, 'export.inc')
73 export_result = runCmd("bitbake-prserv-tool export %s" % exported_db_path, ignore_status=True)
74 self.assertEqual(export_result.status, 0, msg="PR Service database export failed: %s" % export_result.output)
75
76 if replace_current_db:
77 current_db_path = os.path.join(get_bb_var('PERSISTENT_DIR'), 'prserv.sqlite3')
78 self.assertTrue(os.path.exists(current_db_path), msg="Path to current PR Service database is invalid: %s" % current_db_path)
79 os.remove(current_db_path)
80
81 import_result = runCmd("bitbake-prserv-tool import %s" % exported_db_path, ignore_status=True)
82 os.remove(exported_db_path)
83 self.assertEqual(import_result.status, 0, msg="PR Service database import failed: %s" % import_result.output)
84
85 self.increment_package_pr(package_name)
86 pr_2 = self.get_pr_version(package_name)
87
88 bitbake("-ccleansstate %s" % package_name)
89        self.assertEqual(pr_2 - pr_1, 1)
90
91 @testcase(930)
92 def test_import_export_replace_db(self):
93 self.run_test_pr_export_import('m4')
94
95 @testcase(931)
96 def test_import_export_override_db(self):
97 self.run_test_pr_export_import('m4', replace_current_db=False)
98
99 @testcase(932)
100 def test_pr_service_rpm_arch_dep(self):
101 self.run_test_pr_service('m4', 'rpm', 'do_package')
102
103 @testcase(934)
104 def test_pr_service_deb_arch_dep(self):
105 self.run_test_pr_service('m4', 'deb', 'do_package')
106
107 @testcase(933)
108 def test_pr_service_ipk_arch_dep(self):
109 self.run_test_pr_service('m4', 'ipk', 'do_package')
110
111 @testcase(935)
112 def test_pr_service_rpm_arch_indep(self):
113 self.run_test_pr_service('xcursor-transparent-theme', 'rpm', 'do_package')
114
115 @testcase(937)
116 def test_pr_service_deb_arch_indep(self):
117 self.run_test_pr_service('xcursor-transparent-theme', 'deb', 'do_package')
118
119 @testcase(936)
120 def test_pr_service_ipk_arch_indep(self):
121 self.run_test_pr_service('xcursor-transparent-theme', 'ipk', 'do_package')
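get_pr_version() above extracts the suffix the PR service appends to PKGR. A standalone illustration of the format its regex parses (the sample line is illustrative):

import re

# pkgdata runtime files carry lines like "PKGR: r0.1"; the digits after the
# dot are what the PR service increments between builds of the same recipe.
find_pr = re.search("PKGR: r[0-9]+\.([0-9]+)", "PKGR: r0.1")
assert find_pr and int(find_pr.group(1)) == 1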
diff --git a/meta/lib/oeqa/selftest/sstate.py b/meta/lib/oeqa/selftest/sstate.py
new file mode 100644
index 0000000000..5989724432
--- /dev/null
+++ b/meta/lib/oeqa/selftest/sstate.py
@@ -0,0 +1,53 @@
1import datetime
2import unittest
3import os
4import re
5import shutil
6
7import oeqa.utils.ftools as ftools
8from oeqa.selftest.base import oeSelfTest
9from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_test_layer
10
11
12class SStateBase(oeSelfTest):
13
14 def setUpLocal(self):
15 self.temp_sstate_location = None
16 self.sstate_path = get_bb_var('SSTATE_DIR')
17 self.distro = get_bb_var('NATIVELSBSTRING')
18 self.distro_specific_sstate = os.path.join(self.sstate_path, self.distro)
19
20 # Creates a special sstate configuration with the option to add sstate mirrors
21 def config_sstate(self, temp_sstate_location=False, add_local_mirrors=[]):
22 self.temp_sstate_location = temp_sstate_location
23
24 if self.temp_sstate_location:
25 temp_sstate_path = os.path.join(self.builddir, "temp_sstate_%s" % datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
26 config_temp_sstate = "SSTATE_DIR = \"%s\"" % temp_sstate_path
27 self.append_config(config_temp_sstate)
28 self.track_for_cleanup(temp_sstate_path)
29 self.sstate_path = get_bb_var('SSTATE_DIR')
30 self.distro = get_bb_var('NATIVELSBSTRING')
31 self.distro_specific_sstate = os.path.join(self.sstate_path, self.distro)
32
33 if add_local_mirrors:
34 config_set_sstate_if_not_set = 'SSTATE_MIRRORS ?= ""'
35 self.append_config(config_set_sstate_if_not_set)
36 for local_mirror in add_local_mirrors:
37                self.assertNotEqual(local_mirror, self.sstate_path, msg='Cannot add the current sstate path as an sstate mirror')
38 config_sstate_mirror = "SSTATE_MIRRORS += \"file://.* file:///%s/PATH\"" % local_mirror
39 self.append_config(config_sstate_mirror)
40
41 # Returns a list containing sstate files
42 def search_sstate(self, filename_regex, distro_specific=True, distro_nonspecific=True):
43 result = []
44 for root, dirs, files in os.walk(self.sstate_path):
45 if distro_specific and re.search("%s/[a-z0-9]{2}$" % self.distro, root):
46 for f in files:
47 if re.search(filename_regex, f):
48 result.append(f)
49 if distro_nonspecific and re.search("%s/[a-z0-9]{2}$" % self.sstate_path, root):
50 for f in files:
51 if re.search(filename_regex, f):
52 result.append(f)
53 return result
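A usage sketch for search_sstate(), matching how SStateTests below calls it: the regex is matched against sstate file names, and the two flags select the distro-specific and distro-nonspecific halves of SSTATE_DIR (target name illustrative):

files = self.search_sstate('m4.*?\.tgz$', distro_specific=True, distro_nonspecific=True)
self.assertTrue(files, msg="Could not find sstate files for: m4")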
diff --git a/meta/lib/oeqa/selftest/sstatetests.py b/meta/lib/oeqa/selftest/sstatetests.py
new file mode 100644
index 0000000000..d578ddd489
--- /dev/null
+++ b/meta/lib/oeqa/selftest/sstatetests.py
@@ -0,0 +1,204 @@
1import datetime
2import unittest
3import os
4import re
5import shutil
6
7import oeqa.utils.ftools as ftools
8from oeqa.selftest.base import oeSelfTest
9from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_test_layer
10from oeqa.selftest.sstate import SStateBase
11from oeqa.utils.decorators import testcase
12
13class SStateTests(SStateBase):
14
15 # Test sstate files creation and their location
16 def run_test_sstate_creation(self, targets, distro_specific=True, distro_nonspecific=True, temp_sstate_location=True, should_pass=True):
17 self.config_sstate(temp_sstate_location)
18
19 if self.temp_sstate_location:
20 bitbake(['-cclean'] + targets)
21 else:
22 bitbake(['-ccleansstate'] + targets)
23
24 bitbake(targets)
25 file_tracker = self.search_sstate('|'.join(map(str, targets)), distro_specific, distro_nonspecific)
26 if should_pass:
27            self.assertTrue(file_tracker, msg="Could not find sstate files for: %s" % ', '.join(map(str, targets)))
28        else:
29            self.assertFalse(file_tracker, msg="Found sstate files in the wrong place for: %s" % ', '.join(map(str, targets)))
30
31 @testcase(975)
32 def test_sstate_creation_distro_specific_pass(self):
33 targetarch = get_bb_var('TUNE_ARCH')
34 self.run_test_sstate_creation(['binutils-cross-'+ targetarch, 'binutils-native'], distro_specific=True, distro_nonspecific=False, temp_sstate_location=True)
35
36 @testcase(975)
37 def test_sstate_creation_distro_specific_fail(self):
38 targetarch = get_bb_var('TUNE_ARCH')
39 self.run_test_sstate_creation(['binutils-cross-'+ targetarch, 'binutils-native'], distro_specific=False, distro_nonspecific=True, temp_sstate_location=True, should_pass=False)
40
41 @testcase(976)
42 def test_sstate_creation_distro_nonspecific_pass(self):
43 self.run_test_sstate_creation(['glibc-initial'], distro_specific=False, distro_nonspecific=True, temp_sstate_location=True)
44
45 @testcase(976)
46 def test_sstate_creation_distro_nonspecific_fail(self):
47 self.run_test_sstate_creation(['glibc-initial'], distro_specific=True, distro_nonspecific=False, temp_sstate_location=True, should_pass=False)
48
49
50 # Test the sstate files deletion part of the do_cleansstate task
51 def run_test_cleansstate_task(self, targets, distro_specific=True, distro_nonspecific=True, temp_sstate_location=True):
52 self.config_sstate(temp_sstate_location)
53
54 bitbake(['-ccleansstate'] + targets)
55
56 bitbake(targets)
57 tgz_created = self.search_sstate('|'.join(map(str, [s + '.*?\.tgz$' for s in targets])), distro_specific, distro_nonspecific)
58 self.assertTrue(tgz_created, msg="Could not find sstate .tgz files for: %s" % ', '.join(map(str, targets)))
59
60 siginfo_created = self.search_sstate('|'.join(map(str, [s + '.*?\.siginfo$' for s in targets])), distro_specific, distro_nonspecific)
61 self.assertTrue(siginfo_created, msg="Could not find sstate .siginfo files for: %s" % ', '.join(map(str, targets)))
62
63 bitbake(['-ccleansstate'] + targets)
64 tgz_removed = self.search_sstate('|'.join(map(str, [s + '.*?\.tgz$' for s in targets])), distro_specific, distro_nonspecific)
65        self.assertFalse(tgz_removed, msg="do_cleansstate didn't remove .tgz sstate files for: %s" % ', '.join(map(str, targets)))
66
67 @testcase(977)
68 def test_cleansstate_task_distro_specific_nonspecific(self):
69 targetarch = get_bb_var('TUNE_ARCH')
70 self.run_test_cleansstate_task(['binutils-cross-' + targetarch, 'binutils-native', 'glibc-initial'], distro_specific=True, distro_nonspecific=True, temp_sstate_location=True)
71
72 @testcase(977)
73 def test_cleansstate_task_distro_nonspecific(self):
74 self.run_test_cleansstate_task(['glibc-initial'], distro_specific=False, distro_nonspecific=True, temp_sstate_location=True)
75
76 @testcase(977)
77 def test_cleansstate_task_distro_specific(self):
78 targetarch = get_bb_var('TUNE_ARCH')
79 self.run_test_cleansstate_task(['binutils-cross-'+ targetarch, 'binutils-native', 'glibc-initial'], distro_specific=True, distro_nonspecific=False, temp_sstate_location=True)
80
81
82 # Test rebuilding of distro-specific sstate files
83 def run_test_rebuild_distro_specific_sstate(self, targets, temp_sstate_location=True):
84 self.config_sstate(temp_sstate_location)
85
86 bitbake(['-ccleansstate'] + targets)
87
88 bitbake(targets)
89        self.assertFalse(self.search_sstate('|'.join(map(str, [s + '.*?\.tgz$' for s in targets])), distro_specific=False, distro_nonspecific=True), msg="Found distro non-specific sstate for: %s" % ', '.join(map(str, targets)))
90        file_tracker_1 = self.search_sstate('|'.join(map(str, [s + '.*?\.tgz$' for s in targets])), distro_specific=True, distro_nonspecific=False)
91        self.assertTrue(len(file_tracker_1) >= len(targets), msg = "Not all sstate files were created for: %s" % ', '.join(map(str, targets)))
92
93 self.track_for_cleanup(self.distro_specific_sstate + "_old")
94 shutil.copytree(self.distro_specific_sstate, self.distro_specific_sstate + "_old")
95 shutil.rmtree(self.distro_specific_sstate)
96
97 bitbake(['-cclean'] + targets)
98 bitbake(targets)
99 file_tracker_2 = self.search_sstate('|'.join(map(str, [s + '.*?\.tgz$' for s in targets])), distro_specific=True, distro_nonspecific=False)
100        self.assertTrue(len(file_tracker_2) >= len(targets), msg = "Not all sstate files were created for: %s" % ', '.join(map(str, targets)))
101
102 not_recreated = [x for x in file_tracker_1 if x not in file_tracker_2]
103        self.assertFalse(not_recreated, msg="The following sstate files were not recreated: %s" % ', '.join(map(str, not_recreated)))
104
105 created_once = [x for x in file_tracker_2 if x not in file_tracker_1]
106        self.assertFalse(created_once, msg="The following sstate files were created only in the second run: %s" % ', '.join(map(str, created_once)))
107
108 @testcase(175)
109 def test_rebuild_distro_specific_sstate_cross_native_targets(self):
110 targetarch = get_bb_var('TUNE_ARCH')
111 self.run_test_rebuild_distro_specific_sstate(['binutils-cross-' + targetarch, 'binutils-native'], temp_sstate_location=True)
112
113 @testcase(175)
114 def test_rebuild_distro_specific_sstate_cross_target(self):
115 targetarch = get_bb_var('TUNE_ARCH')
116 self.run_test_rebuild_distro_specific_sstate(['binutils-cross-' + targetarch], temp_sstate_location=True)
117
118 @testcase(175)
119 def test_rebuild_distro_specific_sstate_native_target(self):
120 self.run_test_rebuild_distro_specific_sstate(['binutils-native'], temp_sstate_location=True)
121
122
123 # Test the sstate-cache-management script. Each element in the global_config list is used with the corresponding element in the target_config list
124    # global_config elements are expected not to generate any sstate files that would be removed by sstate-cache-management.sh (such as changing the value of MACHINE)
125 def run_test_sstate_cache_management_script(self, target, global_config=[''], target_config=[''], ignore_patterns=[]):
126 self.assertTrue(global_config)
127 self.assertTrue(target_config)
128 self.assertTrue(len(global_config) == len(target_config), msg='Lists global_config and target_config should have the same number of elements')
129 self.config_sstate(temp_sstate_location=True, add_local_mirrors=[self.sstate_path])
130
131 # If buildhistory is enabled, we need to disable version-going-backwards QA checks for this test. It may report errors otherwise.
132 if ('buildhistory' in get_bb_var('USER_CLASSES')) or ('buildhistory' in get_bb_var('INHERIT')):
133 remove_errors_config = 'ERROR_QA_remove = "version-going-backwards"'
134 self.append_config(remove_errors_config)
135
136        # For now this only checks that random sstate tasks are handled correctly as a group.
137 # In the future we should add control over what tasks we check for.
138
139 sstate_archs_list = []
140 expected_remaining_sstate = []
141 for idx in range(len(target_config)):
142 self.append_config(global_config[idx])
143 self.append_recipeinc(target, target_config[idx])
144 sstate_arch = get_bb_var('SSTATE_PKGARCH', target)
145            if sstate_arch not in sstate_archs_list:
146 sstate_archs_list.append(sstate_arch)
147 if target_config[idx] == target_config[-1]:
148 target_sstate_before_build = self.search_sstate(target + '.*?\.tgz$')
149 bitbake("-cclean %s" % target)
150 result = bitbake(target, ignore_status=True)
151 if target_config[idx] == target_config[-1]:
152 target_sstate_after_build = self.search_sstate(target + '.*?\.tgz$')
153 expected_remaining_sstate += [x for x in target_sstate_after_build if x not in target_sstate_before_build if not any(pattern in x for pattern in ignore_patterns)]
154 self.remove_config(global_config[idx])
155 self.remove_recipeinc(target, target_config[idx])
156 self.assertEqual(result.status, 0)
157
158 runCmd("sstate-cache-management.sh -y --cache-dir=%s --remove-duplicated --extra-archs=%s" % (self.sstate_path, ','.join(map(str, sstate_archs_list))))
159 actual_remaining_sstate = [x for x in self.search_sstate(target + '.*?\.tgz$') if not any(pattern in x for pattern in ignore_patterns)]
160
161 actual_not_expected = [x for x in actual_remaining_sstate if x not in expected_remaining_sstate]
162        self.assertFalse(actual_not_expected, msg="Files should have been removed but were not: %s" % ', '.join(map(str, actual_not_expected)))
163 expected_not_actual = [x for x in expected_remaining_sstate if x not in actual_remaining_sstate]
164        self.assertFalse(expected_not_actual, msg="Extra files were removed: %s" % ', '.join(map(str, expected_not_actual)))
165
166 @testcase(973)
167 def test_sstate_cache_management_script_using_pr_1(self):
168 global_config = []
169 target_config = []
170 global_config.append('')
171 target_config.append('PR = "0"')
172 self.run_test_sstate_cache_management_script('m4', global_config, target_config, ignore_patterns=['populate_lic'])
173
174 @testcase(978)
175 def test_sstate_cache_management_script_using_pr_2(self):
176 global_config = []
177 target_config = []
178 global_config.append('')
179 target_config.append('PR = "0"')
180 global_config.append('')
181 target_config.append('PR = "1"')
182 self.run_test_sstate_cache_management_script('m4', global_config, target_config, ignore_patterns=['populate_lic'])
183
184 @testcase(979)
185 def test_sstate_cache_management_script_using_pr_3(self):
186 global_config = []
187 target_config = []
188 global_config.append('MACHINE = "qemux86-64"')
189 target_config.append('PR = "0"')
190 global_config.append(global_config[0])
191 target_config.append('PR = "1"')
192 global_config.append('MACHINE = "qemux86"')
193 target_config.append('PR = "1"')
194 self.run_test_sstate_cache_management_script('m4', global_config, target_config, ignore_patterns=['populate_lic'])
195
196 @testcase(974)
197 def test_sstate_cache_management_script_using_machine(self):
198 global_config = []
199 target_config = []
200 global_config.append('MACHINE = "qemux86-64"')
201 target_config.append('')
202 global_config.append('MACHINE = "qemux86"')
203 target_config.append('')
204 self.run_test_sstate_cache_management_script('m4', global_config, target_config, ignore_patterns=['populate_lic'])
diff --git a/meta/lib/oeqa/targetcontrol.py b/meta/lib/oeqa/targetcontrol.py
new file mode 100644
index 0000000000..cc582dd1ad
--- /dev/null
+++ b/meta/lib/oeqa/targetcontrol.py
@@ -0,0 +1,199 @@
1# Copyright (C) 2013 Intel Corporation
2#
3# Released under the MIT license (see COPYING.MIT)
4
5# This module is used by testimage.bbclass for setting up and controlling a target machine.
6
7import os
8import shutil
9import subprocess
10import bb
11import traceback
12import sys
13from oeqa.utils.sshcontrol import SSHControl
14from oeqa.utils.qemurunner import QemuRunner
15from oeqa.controllers.testtargetloader import TestTargetLoader
16from abc import ABCMeta, abstractmethod
17
18def get_target_controller(d):
19 testtarget = d.getVar("TEST_TARGET", True)
20 # old, simple names
21 if testtarget == "qemu":
22 return QemuTarget(d)
23 elif testtarget == "simpleremote":
24 return SimpleRemoteTarget(d)
25 else:
26 # use the class name
27 try:
28 # is it a core class defined here?
29 controller = getattr(sys.modules[__name__], testtarget)
30 except AttributeError:
31 # nope, perhaps a layer defined one
32 try:
33 bbpath = d.getVar("BBPATH", True).split(':')
34 testtargetloader = TestTargetLoader()
35 controller = testtargetloader.get_controller_module(testtarget, bbpath)
36 except ImportError as e:
37                bb.fatal("Failed to import {0} from available controller modules:\n{1}".format(testtarget, traceback.format_exc()))
38 except AttributeError as e:
39 bb.fatal("Invalid TEST_TARGET - " + str(e))
40 return controller(d)
41
42
43class BaseTarget(object):
44
45 __metaclass__ = ABCMeta
46
47 supported_image_fstypes = []
48
49 def __init__(self, d):
50 self.connection = None
51 self.ip = None
52 self.server_ip = None
53 self.datetime = d.getVar('DATETIME', True)
54 self.testdir = d.getVar("TEST_LOG_DIR", True)
55 self.pn = d.getVar("PN", True)
56
57 @abstractmethod
58 def deploy(self):
59
60 self.sshlog = os.path.join(self.testdir, "ssh_target_log.%s" % self.datetime)
61 sshloglink = os.path.join(self.testdir, "ssh_target_log")
62 if os.path.islink(sshloglink):
63 os.unlink(sshloglink)
64 os.symlink(self.sshlog, sshloglink)
65 bb.note("SSH log file: %s" % self.sshlog)
66
67 @abstractmethod
68 def start(self, params=None):
69 pass
70
71 @abstractmethod
72 def stop(self):
73 pass
74
75    @classmethod
76    def get_extra_files(cls):
77        return None
78
79    @classmethod
80    def match_image_fstype(cls, d, image_fstypes=None):
81        if not image_fstypes:
82            image_fstypes = d.getVar('IMAGE_FSTYPES', True).split(' ')
83        possible_image_fstypes = [fstype for fstype in cls.supported_image_fstypes if fstype in image_fstypes]
84 if possible_image_fstypes:
85 return possible_image_fstypes[0]
86 else:
87 return None
88
89 def get_image_fstype(self, d):
90 image_fstype = self.match_image_fstype(d)
91 if image_fstype:
92 return image_fstype
93 else:
94 bb.fatal("IMAGE_FSTYPES should contain a Target Controller supported image fstype: %s " % ', '.join(map(str, self.supported_image_fstypes)))
95
96 def restart(self, params=None):
97 self.stop()
98 self.start(params)
99
100 def run(self, cmd, timeout=None):
101 return self.connection.run(cmd, timeout)
102
103 def copy_to(self, localpath, remotepath):
104 return self.connection.copy_to(localpath, remotepath)
105
106 def copy_from(self, remotepath, localpath):
107 return self.connection.copy_from(remotepath, localpath)
108
109
110
111class QemuTarget(BaseTarget):
112
113 supported_image_fstypes = ['ext3']
114
115 def __init__(self, d):
116
117 super(QemuTarget, self).__init__(d)
118
119 self.image_fstype = self.get_image_fstype(d)
120 self.qemulog = os.path.join(self.testdir, "qemu_boot_log.%s" % self.datetime)
121 self.origrootfs = os.path.join(d.getVar("DEPLOY_DIR_IMAGE", True), d.getVar("IMAGE_LINK_NAME", True) + '.' + self.image_fstype)
122 self.rootfs = os.path.join(self.testdir, d.getVar("IMAGE_LINK_NAME", True) + '-testimage.' + self.image_fstype)
123
124 self.runner = QemuRunner(machine=d.getVar("MACHINE", True),
125 rootfs=self.rootfs,
126 tmpdir = d.getVar("TMPDIR", True),
127 deploy_dir_image = d.getVar("DEPLOY_DIR_IMAGE", True),
128 display = d.getVar("BB_ORIGENV", False).getVar("DISPLAY", True),
129 logfile = self.qemulog,
130 boottime = int(d.getVar("TEST_QEMUBOOT_TIMEOUT", True)))
131
132 def deploy(self):
133 try:
134 shutil.copyfile(self.origrootfs, self.rootfs)
135 except Exception as e:
136 bb.fatal("Error copying rootfs: %s" % e)
137
138 qemuloglink = os.path.join(self.testdir, "qemu_boot_log")
139 if os.path.islink(qemuloglink):
140 os.unlink(qemuloglink)
141 os.symlink(self.qemulog, qemuloglink)
142
143 bb.note("rootfs file: %s" % self.rootfs)
144 bb.note("Qemu log file: %s" % self.qemulog)
145 super(QemuTarget, self).deploy()
146
147 def start(self, params=None):
148 if self.runner.start(params):
149 self.ip = self.runner.ip
150 self.server_ip = self.runner.server_ip
151 self.connection = SSHControl(ip=self.ip, logfile=self.sshlog)
152 else:
153 self.stop()
154 raise bb.build.FuncFailed("%s - FAILED to start qemu - check the task log and the boot log" % self.pn)
155
156 def stop(self):
157 self.runner.stop()
158 self.connection = None
159 self.ip = None
160 self.server_ip = None
161
162 def restart(self, params=None):
163 if self.runner.restart(params):
164 self.ip = self.runner.ip
165 self.server_ip = self.runner.server_ip
166 self.connection = SSHControl(ip=self.ip, logfile=self.sshlog)
167 else:
168 raise bb.build.FuncFailed("%s - FAILED to re-start qemu - check the task log and the boot log" % self.pn)
169
170
171class SimpleRemoteTarget(BaseTarget):
172
173 def __init__(self, d):
174 super(SimpleRemoteTarget, self).__init__(d)
175 addr = d.getVar("TEST_TARGET_IP", True) or bb.fatal('Please set TEST_TARGET_IP with the IP address of the machine you want to run the tests on.')
176 self.ip = addr.split(":")[0]
177 try:
178 self.port = addr.split(":")[1]
179 except IndexError:
180 self.port = None
181 bb.note("Target IP: %s" % self.ip)
182 self.server_ip = d.getVar("TEST_SERVER_IP", True)
183 if not self.server_ip:
184 try:
185 self.server_ip = subprocess.check_output(['ip', 'route', 'get', self.ip ]).split("\n")[0].split()[-1]
186 except Exception as e:
187 bb.fatal("Failed to determine the host IP address (alternatively you can set TEST_SERVER_IP with the IP address of this machine): %s" % e)
188 bb.note("Server IP: %s" % self.server_ip)
189
190 def deploy(self):
191 super(SimpleRemoteTarget, self).deploy()
192
193 def start(self, params=None):
194 self.connection = SSHControl(self.ip, logfile=self.sshlog, port=self.port)
195
196 def stop(self):
197 self.connection = None
198 self.ip = None
199 self.server_ip = None
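
Because get_target_controller() falls back to resolving TEST_TARGET as a class name (via TestTargetLoader for layer-provided modules), a layer can ship its own controller and select it with TEST_TARGET = "MyRemoteTarget" in local.conf. A minimal sketch, assuming an invented MyRemoteTarget class and a hypothetical power_cycle() helper that is not part of this patch:

    # hypothetical <layer>/lib/oeqa/controllers/mycontroller.py
    from oeqa.targetcontrol import SimpleRemoteTarget

    class MyRemoteTarget(SimpleRemoteTarget):
        # reuse the ssh plumbing, but power-cycle the board before connecting
        def start(self, params=None):
            self.power_cycle()  # assumed board-specific helper, not provided here
            super(MyRemoteTarget, self).start(params)
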
diff --git a/meta/lib/oeqa/utils/__init__.py b/meta/lib/oeqa/utils/__init__.py
new file mode 100644
index 0000000000..2260046026
--- /dev/null
+++ b/meta/lib/oeqa/utils/__init__.py
@@ -0,0 +1,15 @@
1# Enable other layers to have modules in the same named directory
2from pkgutil import extend_path
3__path__ = extend_path(__path__, __name__)
4
5
6# Borrowed from CalledProcessError
7
8class CommandError(Exception):
9 def __init__(self, retcode, cmd, output = None):
10 self.retcode = retcode
11 self.cmd = cmd
12 self.output = output
13 def __str__(self):
14 return "Command '%s' returned non-zero exit status %d with output: %s" % (self.cmd, self.retcode, self.output)
15
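CommandError mirrors subprocess.CalledProcessError but also carries the captured output; a minimal usage sketch (the command and output strings are placeholders):

    from oeqa.utils import CommandError

    try:
        raise CommandError(1, "bitbake -e", "ERROR: ...")
    except CommandError as e:
        print("'%s' failed with status %d" % (e.cmd, e.retcode))
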
diff --git a/meta/lib/oeqa/utils/commands.py b/meta/lib/oeqa/utils/commands.py
new file mode 100644
index 0000000000..802bc2f208
--- /dev/null
+++ b/meta/lib/oeqa/utils/commands.py
@@ -0,0 +1,154 @@
1# Copyright (c) 2013-2014 Intel Corporation
2#
3# Released under the MIT license (see COPYING.MIT)
4
5# DESCRIPTION
6# This module is mainly used by scripts/oe-selftest and modules under meta/lib/oeqa/selftest
7# It provides a class and methods for running commands on the host in a convenient way for tests.
8
9
10
11import os
12import sys
13import signal
14import subprocess
15import threading
16import logging
17from oeqa.utils import CommandError
18from oeqa.utils import ftools
19
20class Command(object):
21 def __init__(self, command, bg=False, timeout=None, data=None, **options):
22
23 self.defaultopts = {
24 "stdout": subprocess.PIPE,
25 "stderr": subprocess.STDOUT,
26 "stdin": None,
27 "shell": False,
28 "bufsize": -1,
29 }
30
31 self.cmd = command
32 self.bg = bg
33 self.timeout = timeout
34 self.data = data
35
36 self.options = dict(self.defaultopts)
37 if isinstance(self.cmd, basestring):
38 self.options["shell"] = True
39 if self.data:
40 self.options['stdin'] = subprocess.PIPE
41 self.options.update(options)
42
43 self.status = None
44 self.output = None
45 self.error = None
46 self.thread = None
47
48 self.log = logging.getLogger("utils.commands")
49
50 def run(self):
51 self.process = subprocess.Popen(self.cmd, **self.options)
52
53 def commThread():
54 self.output, self.error = self.process.communicate(self.data)
55
56 self.thread = threading.Thread(target=commThread)
57 self.thread.start()
58
59 self.log.debug("Running command '%s'" % self.cmd)
60
61 if not self.bg:
62 self.thread.join(self.timeout)
63 self.stop()
64
65 def stop(self):
66 if self.thread.isAlive():
67 self.process.terminate()
68 # let's give it more time to terminate gracefully before killing it
69 self.thread.join(5)
70 if self.thread.isAlive():
71 self.process.kill()
72 self.thread.join()
73
74 self.output = self.output.rstrip()
75 self.status = self.process.poll()
76
77 self.log.debug("Command '%s' returned %d as exit code." % (self.cmd, self.status))
78 # logging the complete output is insane
79 # bitbake -e output is really big
80 # and makes the log file useless
81 if self.status:
82 lout = "\n".join(self.output.splitlines()[-20:])
83 self.log.debug("Last 20 lines:\n%s" % lout)
84
85
86class Result(object):
87 pass
88
89
90def runCmd(command, ignore_status=False, timeout=None, assert_error=True, **options):
91 result = Result()
92
93 cmd = Command(command, timeout=timeout, **options)
94 cmd.run()
95
96 result.command = command
97 result.status = cmd.status
98 result.output = cmd.output
99 result.pid = cmd.process.pid
100
101 if result.status and not ignore_status:
102 if assert_error:
103 raise AssertionError("Command '%s' returned non-zero exit status %d:\n%s" % (command, result.status, result.output))
104 else:
105 raise CommandError(result.status, command, result.output)
106
107 return result
108
109
110def bitbake(command, ignore_status=False, timeout=None, postconfig=None, **options):
111
112 if postconfig:
113 postconfig_file = os.path.join(os.environ.get('BUILDDIR'), 'oeqa-post.conf')
114 ftools.write_file(postconfig_file, postconfig)
115 extra_args = "-R %s" % postconfig_file
116 else:
117 extra_args = ""
118
119 if isinstance(command, basestring):
120 cmd = "bitbake " + extra_args + " " + command
121 else:
122 cmd = [ "bitbake" ] + [a for a in (command + extra_args.split(" ")) if a not in [""]]
123
124 try:
125 return runCmd(cmd, ignore_status, timeout, **options)
126 finally:
127 if postconfig:
128 os.remove(postconfig_file)
129
130
131def get_bb_env(target=None, postconfig=None):
132 if target:
133 return bitbake("-e %s" % target, postconfig=postconfig).output
134 else:
135 return bitbake("-e", postconfig=postconfig).output
136
137def get_bb_var(var, target=None, postconfig=None):
138 val = None
139 bbenv = get_bb_env(target, postconfig=postconfig)
140 for line in bbenv.splitlines():
141 if line.startswith(var + "="):
142            val = line.split('=', 1)[1]
143 val = val.replace('\"','')
144 break
145 return val
146
147def get_test_layer():
148 layers = get_bb_var("BBLAYERS").split()
149 testlayer = None
150 for l in layers:
151 if "/meta-selftest" in l and os.path.isdir(l):
152 testlayer = l
153 break
154 return testlayer
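
Together these helpers form the host-side API used by the selftests; a rough usage sketch (the recipe and variable names are only examples):

    from oeqa.utils.commands import runCmd, bitbake, get_bb_var

    result = runCmd("uname -m")   # raises AssertionError on failure by default
    print(result.output)

    bitbake("m4-native")
    deploy_dir = get_bb_var("DEPLOY_DIR_IMAGE")
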
diff --git a/meta/lib/oeqa/utils/decorators.py b/meta/lib/oeqa/utils/decorators.py
new file mode 100644
index 0000000000..40bd4ef2db
--- /dev/null
+++ b/meta/lib/oeqa/utils/decorators.py
@@ -0,0 +1,158 @@
1# Copyright (C) 2013 Intel Corporation
2#
3# Released under the MIT license (see COPYING.MIT)
4
5# Some custom decorators that can be used by unittests
6# Most useful is skipUnlessPassed which can be used for
7# creating dependencies between two test methods.
8
9import os
10import logging
11import sys
12import unittest
13
14#get the "result" object from one of the upper frames provided that one of these upper frames is a unittest.case frame
15class getResults(object):
16 def __init__(self):
17 #dynamically determine the unittest.case frame and use it to get the name of the test method
18 upperf = sys._current_frames().values()[0]
19 while (upperf.f_globals['__name__'] != 'unittest.case'):
20 upperf = upperf.f_back
21
22 def handleList(items):
23 ret = []
24 # items is a list of tuples, (test, failure) or (_ErrorHandler(), Exception())
25 for i in items:
26 s = i[0].id()
27 #Handle the _ErrorHolder objects from skipModule failures
28 if "setUpModule (" in s:
29 ret.append(s.replace("setUpModule (", "").replace(")",""))
30 else:
31 ret.append(s)
32 return ret
33 self.faillist = handleList(upperf.f_locals['result'].failures)
34 self.errorlist = handleList(upperf.f_locals['result'].errors)
35 self.skiplist = handleList(upperf.f_locals['result'].skipped)
36
37 def getFailList(self):
38 return self.faillist
39
40 def getErrorList(self):
41 return self.errorlist
42
43 def getSkipList(self):
44 return self.skiplist
45
46class skipIfFailure(object):
47
48 def __init__(self,testcase):
49 self.testcase = testcase
50
51 def __call__(self,f):
52 def wrapped_f(*args):
53 res = getResults()
54            if self.testcase in res.getFailList() or self.testcase in res.getErrorList():
55 raise unittest.SkipTest("Testcase dependency not met: %s" % self.testcase)
56 return f(*args)
57 wrapped_f.__name__ = f.__name__
58 return wrapped_f
59
60class skipIfSkipped(object):
61
62 def __init__(self,testcase):
63 self.testcase = testcase
64
65 def __call__(self,f):
66 def wrapped_f(*args):
67 res = getResults()
68 if self.testcase in res.getSkipList():
69 raise unittest.SkipTest("Testcase dependency not met: %s" % self.testcase)
70 return f(*args)
71 wrapped_f.__name__ = f.__name__
72 return wrapped_f
73
74class skipUnlessPassed(object):
75
76 def __init__(self,testcase):
77 self.testcase = testcase
78
79 def __call__(self,f):
80 def wrapped_f(*args):
81 res = getResults()
82 if self.testcase in res.getSkipList() or \
83 self.testcase in res.getFailList() or \
84 self.testcase in res.getErrorList():
85 raise unittest.SkipTest("Testcase dependency not met: %s" % self.testcase)
86 return f(*args)
87 wrapped_f.__name__ = f.__name__
88 return wrapped_f
89
90class testcase(object):
91
92 def __init__(self, test_case):
93 self.test_case = test_case
94
95 def __call__(self, func):
96 def wrapped_f(*args):
97 return func(*args)
98 wrapped_f.test_case = self.test_case
99 return wrapped_f
100
101class NoParsingFilter(logging.Filter):
102 def filter(self, record):
103 return record.levelno == 100
104
105def LogResults(original_class):
106 orig_method = original_class.run
107
108 #rewrite the run method of unittest.TestCase to add testcase logging
109 def run(self, result, *args, **kws):
110 orig_method(self, result, *args, **kws)
111 passed = True
112 testMethod = getattr(self, self._testMethodName)
113
114        #if the test case is decorated then use its number, else use its name
115 try:
116 test_case = testMethod.test_case
117 except AttributeError:
118 test_case = self._testMethodName
119
120 #create custom logging level for filtering.
121 custom_log_level = 100
122 logging.addLevelName(custom_log_level, 'RESULTS')
123 caller = os.path.basename(sys.argv[0])
124
125 def results(self, message, *args, **kws):
126 if self.isEnabledFor(custom_log_level):
127 self.log(custom_log_level, message, *args, **kws)
128 logging.Logger.results = results
129
130 logging.basicConfig(filename=os.path.join(os.getcwd(),'results-'+caller+'.log'),
131 filemode='w',
132 format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
133 datefmt='%H:%M:%S',
134 level=custom_log_level)
135 for handler in logging.root.handlers:
136 handler.addFilter(NoParsingFilter())
137 local_log = logging.getLogger(caller)
138
139 #check status of tests and record it
140 for (name, msg) in result.errors:
141 if self._testMethodName == str(name).split(' ')[0]:
142 local_log.results("Testcase "+str(test_case)+": ERROR")
143 local_log.results("Testcase "+str(test_case)+":\n"+msg)
144 passed = False
145 for (name, msg) in result.failures:
146 if self._testMethodName == str(name).split(' ')[0]:
147 local_log.results("Testcase "+str(test_case)+": FAILED")
148 local_log.results("Testcase "+str(test_case)+":\n"+msg)
149 passed = False
150 for (name, msg) in result.skipped:
151 if self._testMethodName == str(name).split(' ')[0]:
152 local_log.results("Testcase "+str(test_case)+": SKIPPED")
153 passed = False
154 if passed:
155 local_log.results("Testcase "+str(test_case)+": PASSED")
156
157 original_class.run = run
158 return original_class
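
A short sketch of how a test class might combine these decorators (the class name, method names and testcase number are illustrative):

    import unittest
    from oeqa.utils.decorators import testcase, skipUnlessPassed, LogResults

    @LogResults
    class SSHTest(unittest.TestCase):

        @testcase(224)
        def test_ssh(self):
            pass

        # only runs if test_ssh neither failed, errored nor was skipped
        @skipUnlessPassed("test_ssh")
        def test_scp(self):
            pass
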
diff --git a/meta/lib/oeqa/utils/ftools.py b/meta/lib/oeqa/utils/ftools.py
new file mode 100644
index 0000000000..64ebe3d217
--- /dev/null
+++ b/meta/lib/oeqa/utils/ftools.py
@@ -0,0 +1,27 @@
1import os
2import re
3
4def write_file(path, data):
5 wdata = data.rstrip() + "\n"
6 with open(path, "w") as f:
7 f.write(wdata)
8
9def append_file(path, data):
10 wdata = data.rstrip() + "\n"
11 with open(path, "a") as f:
12 f.write(wdata)
13
14def read_file(path):
15 data = None
16 with open(path) as f:
17 data = f.read()
18 return data
19
20def remove_from_file(path, data):
21 lines = read_file(path).splitlines()
22 rmdata = data.strip().splitlines()
23 for l in rmdata:
24 for c in range(0, lines.count(l)):
25 i = lines.index(l)
26 del(lines[i])
27 write_file(path, "\n".join(lines))
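
These helpers always terminate files with a newline, and remove_from_file() deletes every occurrence of each line given in data. A quick sketch (the path and variable assignments are placeholders):

    import oeqa.utils.ftools as ftools

    ftools.write_file("/tmp/extra.conf", 'MACHINE = "qemux86"')
    ftools.append_file("/tmp/extra.conf", 'IMAGE_FSTYPES += "ext3"')
    ftools.remove_from_file("/tmp/extra.conf", 'MACHINE = "qemux86"')
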
diff --git a/meta/lib/oeqa/utils/httpserver.py b/meta/lib/oeqa/utils/httpserver.py
new file mode 100644
index 0000000000..76518d8ef9
--- /dev/null
+++ b/meta/lib/oeqa/utils/httpserver.py
@@ -0,0 +1,35 @@
1import SimpleHTTPServer
2import multiprocessing
3import os
4
5class HTTPServer(SimpleHTTPServer.BaseHTTPServer.HTTPServer):
6
7 def server_start(self, root_dir):
8 import signal
9 signal.signal(signal.SIGTERM, signal.SIG_DFL)
10 os.chdir(root_dir)
11 self.serve_forever()
12
13class HTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
14
15 def log_message(self, format_str, *args):
16 pass
17
18class HTTPService(object):
19
20 def __init__(self, root_dir, host=''):
21 self.root_dir = root_dir
22 self.host = host
23 self.port = 0
24
25 def start(self):
26 self.server = HTTPServer((self.host, self.port), HTTPRequestHandler)
27 if self.port == 0:
28 self.port = self.server.server_port
29 self.process = multiprocessing.Process(target=self.server.server_start, args=[self.root_dir])
30 self.process.start()
31
32 def stop(self):
33 self.server.server_close()
34 self.process.terminate()
35 self.process.join()
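
HTTPService binds to port 0 so the kernel assigns a free port, then serves root_dir from a child process; a minimal sketch (the directory is a placeholder):

    from oeqa.utils.httpserver import HTTPService

    service = HTTPService("/tmp/deploy", host="0.0.0.0")
    service.start()
    print("serving on port %d" % service.port)
    service.stop()
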
diff --git a/meta/lib/oeqa/utils/logparser.py b/meta/lib/oeqa/utils/logparser.py
new file mode 100644
index 0000000000..87b50354cd
--- /dev/null
+++ b/meta/lib/oeqa/utils/logparser.py
@@ -0,0 +1,125 @@
1#!/usr/bin/env python
2
3import sys
4import os
5import re
6import ftools
7
8
9# A parser that can be used to identify whether a line is a test result or a section statement.
10class Lparser(object):
11
12 def __init__(self, test_0_pass_regex, test_0_fail_regex, section_0_begin_regex=None, section_0_end_regex=None, **kwargs):
13 # Initialize the arguments dictionary
14 if kwargs:
15 self.args = kwargs
16 else:
17 self.args = {}
18
19 # Add the default args to the dictionary
20 self.args['test_0_pass_regex'] = test_0_pass_regex
21 self.args['test_0_fail_regex'] = test_0_fail_regex
22 if section_0_begin_regex:
23 self.args['section_0_begin_regex'] = section_0_begin_regex
24 if section_0_end_regex:
25 self.args['section_0_end_regex'] = section_0_end_regex
26
27 self.test_possible_status = ['pass', 'fail', 'error']
28 self.section_possible_status = ['begin', 'end']
29
30 self.initialized = False
31
32
33 # Initialize the parser with the current configuration
34 def init(self):
35
36 # extra arguments can be added by the user to define new test and section categories. They must follow a pre-defined pattern: <type>_<category_name>_<status>_regex
37 self.test_argument_pattern = "^test_(.+?)_(%s)_regex" % '|'.join(map(str, self.test_possible_status))
38 self.section_argument_pattern = "^section_(.+?)_(%s)_regex" % '|'.join(map(str, self.section_possible_status))
39
40 # Initialize the test and section regex dictionaries
41 self.test_regex = {}
42 self.section_regex ={}
43
44 for arg, value in self.args.items():
45 if not value:
46                raise Exception('The value of the provided argument %s is %s. It should have a valid value.' % (arg, value))
47 is_test = re.search(self.test_argument_pattern, arg)
48 is_section = re.search(self.section_argument_pattern, arg)
49 if is_test:
50 if not is_test.group(1) in self.test_regex:
51 self.test_regex[is_test.group(1)] = {}
52 self.test_regex[is_test.group(1)][is_test.group(2)] = re.compile(value)
53 elif is_section:
54 if not is_section.group(1) in self.section_regex:
55 self.section_regex[is_section.group(1)] = {}
56 self.section_regex[is_section.group(1)][is_section.group(2)] = re.compile(value)
57 else:
58 # TODO: Make these call a traceback instead of a simple exception..
59 raise Exception("The provided argument name does not correspond to any valid type. Please give one of the following types:\nfor tests: %s\nfor sections: %s" % (self.test_argument_pattern, self.section_argument_pattern))
60
61 self.initialized = True
62
63    # Parse a line and return a list containing the result type (test/section), its category, status and name
64 def parse_line(self, line):
65 if not self.initialized:
66            raise Exception("The parser is not initialized.")
67
68 for test_category, test_status_list in self.test_regex.items():
69 for test_status, status_regex in test_status_list.items():
70 test_name = status_regex.search(line)
71 if test_name:
72 return ['test', test_category, test_status, test_name.group(1)]
73
74 for section_category, section_status_list in self.section_regex.items():
75 for section_status, status_regex in section_status_list.items():
76 section_name = status_regex.search(line)
77 if section_name:
78 return ['section', section_category, section_status, section_name.group(1)]
79 return None
80
81
82class Result(object):
83
84 def __init__(self):
85 self.result_dict = {}
86
87 def store(self, section, test, status):
88 if not section in self.result_dict:
89 self.result_dict[section] = []
90
91 self.result_dict[section].append((test, status))
92
93    # Sort the tests in each section by test name (the first element of the tuple). This keeps results in a stable order, which helps when diffing the logged files with git.
94 def sort_tests(self):
95 for package in self.result_dict:
96 sorted_results = sorted(self.result_dict[package], key=lambda tup: tup[0])
97 self.result_dict[package] = sorted_results
98
99 # Log the results as files. The file name is the section name and the contents are the tests in that section.
100 def log_as_files(self, target_dir, test_status):
101        if not isinstance(test_status, list):
102            raise Exception("test_status should be a list. Got " + str(test_status) + " instead.")
103        status_regex = re.compile('|'.join(map(str, test_status)))
104 if not os.path.exists(target_dir):
105 raise Exception("Target directory does not exist: %s" % target_dir)
106
107 for section, test_results in self.result_dict.items():
108 prefix = ''
109 for x in test_status:
110 prefix +=x+'.'
111 if (section != ''):
112 prefix += section
113 section_file = os.path.join(target_dir, prefix)
114 # purge the file contents if it exists
115 open(section_file, 'w').close()
116 for test_result in test_results:
117 (test_name, status) = test_result
118 # we log only the tests with status in the test_status list
119 match_status = status_regex.search(status)
120 if match_status:
121 ftools.append_file(section_file, status + ": " + test_name)
122
123 # Not yet implemented!
124 def log_to_lava(self):
125 pass
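
Since extra categories follow the <type>_<category_name>_<status>_regex convention, they can be passed to Lparser as keyword arguments. A sketch against made-up ptest-style output (the regexes and log lines are illustrative):

    from oeqa.utils.logparser import Lparser, Result

    parser = Lparser(test_0_pass_regex="^PASS: (.+)",
                     test_0_fail_regex="^FAIL: (.+)",
                     section_0_begin_regex="^BEGIN: (.+)",
                     section_0_end_regex="^END: (.+)")
    parser.init()

    result = Result()
    section = ''
    for line in ["BEGIN: busybox", "PASS: cat", "END: busybox"]:
        parsed = parser.parse_line(line)
        if not parsed:
            continue
        kind, category, status, name = parsed
        if kind == 'section':
            section = name if status == 'begin' else ''
        else:
            result.store(section, name, status)
    result.sort_tests()
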
diff --git a/meta/lib/oeqa/utils/qemurunner.py b/meta/lib/oeqa/utils/qemurunner.py
new file mode 100644
index 0000000000..f1a7e24ab7
--- /dev/null
+++ b/meta/lib/oeqa/utils/qemurunner.py
@@ -0,0 +1,237 @@
1# Copyright (C) 2013 Intel Corporation
2#
3# Released under the MIT license (see COPYING.MIT)
4
5# This module provides a class for starting qemu images using runqemu.
6# It's used by testimage.bbclass.
7
8import subprocess
9import os
10import time
11import signal
12import re
13import socket
14import select
15import bb
16
17class QemuRunner:
18
19 def __init__(self, machine, rootfs, display, tmpdir, deploy_dir_image, logfile, boottime):
20
21 # Popen object for runqemu
22 self.runqemu = None
23 # pid of the qemu process that runqemu will start
24 self.qemupid = None
25 # target ip - from the command line
26 self.ip = None
27 # host ip - where qemu is running
28 self.server_ip = None
29
30 self.machine = machine
31 self.rootfs = rootfs
32 self.display = display
33 self.tmpdir = tmpdir
34 self.deploy_dir_image = deploy_dir_image
35 self.logfile = logfile
36 self.boottime = boottime
37
38 self.runqemutime = 60
39
40 self.create_socket()
41
42
43 def create_socket(self):
44
45 self.bootlog = ''
46 self.qemusock = None
47
48 try:
49 self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
50 self.server_socket.setblocking(0)
51 self.server_socket.bind(("127.0.0.1",0))
52 self.server_socket.listen(2)
53 self.serverport = self.server_socket.getsockname()[1]
54 bb.note("Created listening socket for qemu serial console on: 127.0.0.1:%s" % self.serverport)
55 except socket.error, msg:
56 self.server_socket.close()
57 bb.fatal("Failed to create listening socket: %s" %msg[1])
58
59
60 def log(self, msg):
61 if self.logfile:
62 with open(self.logfile, "a") as f:
63 f.write("%s" % msg)
64
65 def start(self, qemuparams = None):
66
67 if self.display:
68 os.environ["DISPLAY"] = self.display
69 else:
70            bb.error("To start qemu I need an X desktop, please set DISPLAY correctly (e.g. DISPLAY=:1)")
71 return False
72 if not os.path.exists(self.rootfs):
73 bb.error("Invalid rootfs %s" % self.rootfs)
74 return False
75 if not os.path.exists(self.tmpdir):
76 bb.error("Invalid TMPDIR path %s" % self.tmpdir)
77 return False
78 else:
79 os.environ["OE_TMPDIR"] = self.tmpdir
80 if not os.path.exists(self.deploy_dir_image):
81 bb.error("Invalid DEPLOY_DIR_IMAGE path %s" % self.deploy_dir_image)
82 return False
83 else:
84 os.environ["DEPLOY_DIR_IMAGE"] = self.deploy_dir_image
85
86 # Set this flag so that Qemu doesn't do any grabs as SDL grabs interact
87 # badly with screensavers.
88 os.environ["QEMU_DONT_GRAB"] = "1"
89 self.qemuparams = 'bootparams="console=tty1 console=ttyS0,115200n8" qemuparams="-serial tcp:127.0.0.1:%s"' % self.serverport
90 if qemuparams:
91 self.qemuparams = self.qemuparams[:-1] + " " + qemuparams + " " + '\"'
92
93 launch_cmd = 'runqemu %s %s %s' % (self.machine, self.rootfs, self.qemuparams)
94 self.runqemu = subprocess.Popen(launch_cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT,preexec_fn=os.setpgrp)
95
96 bb.note("runqemu started, pid is %s" % self.runqemu.pid)
97 bb.note("waiting at most %s seconds for qemu pid" % self.runqemutime)
98 endtime = time.time() + self.runqemutime
99 while not self.is_alive() and time.time() < endtime:
100 time.sleep(1)
101
102 if self.is_alive():
103            bb.note("qemu started - qemu process pid is %s" % self.qemupid)
104 cmdline = ''
105 with open('/proc/%s/cmdline' % self.qemupid) as p:
106 cmdline = p.read()
107 ips = re.findall("((?:[0-9]{1,3}\.){3}[0-9]{1,3})", cmdline.split("ip=")[1])
108 if not ips or len(ips) != 3:
109 bb.note("Couldn't get ip from qemu process arguments! Here is the qemu command line used: %s" % cmdline)
110 self.stop()
111 return False
112 else:
113 self.ip = ips[0]
114 self.server_ip = ips[1]
115 bb.note("Target IP: %s" % self.ip)
116 bb.note("Server IP: %s" % self.server_ip)
117 bb.note("Waiting at most %d seconds for login banner" % self.boottime )
118 endtime = time.time() + self.boottime
119 socklist = [self.server_socket]
120 reachedlogin = False
121 stopread = False
122 while time.time() < endtime and not stopread:
123 sread, swrite, serror = select.select(socklist, [], [], 5)
124 for sock in sread:
125 if sock is self.server_socket:
126 self.qemusock, addr = self.server_socket.accept()
127 self.qemusock.setblocking(0)
128 socklist.append(self.qemusock)
129 socklist.remove(self.server_socket)
130 bb.note("Connection from %s:%s" % addr)
131 else:
132 data = sock.recv(1024)
133 if data:
134 self.log(data)
135 self.bootlog += data
136 if re.search("qemu.* login:", self.bootlog):
137 stopread = True
138 reachedlogin = True
139 bb.note("Reached login banner")
140 else:
141 socklist.remove(sock)
142 sock.close()
143 stopread = True
144
145 if not reachedlogin:
146            bb.note("Target didn't reach the login banner in %d seconds" % self.boottime)
147 lines = "\n".join(self.bootlog.splitlines()[-5:])
148 bb.note("Last 5 lines of text:\n%s" % lines)
149 bb.note("Check full boot log: %s" % self.logfile)
150 self.stop()
151 return False
152 else:
153            bb.note("Qemu pid didn't appear within %s seconds" % self.runqemutime)
154 output = self.runqemu.stdout
155 self.stop()
156 bb.note("Output from runqemu:\n%s" % output.read())
157 return False
158
159 return self.is_alive()
160
161 def stop(self):
162
163 if self.runqemu:
164 bb.note("Sending SIGTERM to runqemu")
165 os.killpg(self.runqemu.pid, signal.SIGTERM)
166 endtime = time.time() + self.runqemutime
167 while self.runqemu.poll() is None and time.time() < endtime:
168 time.sleep(1)
169 if self.runqemu.poll() is None:
170 bb.note("Sending SIGKILL to runqemu")
171 os.killpg(self.runqemu.pid, signal.SIGKILL)
172 self.runqemu = None
173 if self.server_socket:
174 self.server_socket.close()
175 self.server_socket = None
176 self.qemupid = None
177 self.ip = None
178
179 def restart(self, qemuparams = None):
180 bb.note("Restarting qemu process")
181 if self.runqemu.poll() is None:
182 self.stop()
183 self.create_socket()
184 if self.start(qemuparams):
185 return True
186 return False
187
188 def is_alive(self):
189 qemu_child = self.find_child(str(self.runqemu.pid))
190 if qemu_child:
191 self.qemupid = qemu_child[0]
192 if os.path.exists("/proc/" + str(self.qemupid)):
193 return True
194 return False
195
196 def find_child(self,parent_pid):
197 #
198    # Walk the process tree from the specified process looking for a qemu-system. Return its [pid, cmd]
199 #
200 ps = subprocess.Popen(['ps', 'axww', '-o', 'pid,ppid,command'], stdout=subprocess.PIPE).communicate()[0]
201 processes = ps.split('\n')
202 nfields = len(processes[0].split()) - 1
203 pids = {}
204 commands = {}
205 for row in processes[1:]:
206 data = row.split(None, nfields)
207 if len(data) != 3:
208 continue
209 if data[1] not in pids:
210 pids[data[1]] = []
211
212 pids[data[1]].append(data[0])
213 commands[data[0]] = data[2]
214
215 if parent_pid not in pids:
216 return []
217
218 parents = []
219 newparents = pids[parent_pid]
220 while newparents:
221 next = []
222 for p in newparents:
223 if p in pids:
224 for n in pids[p]:
225 if n not in parents and n not in next:
226 next.append(n)
227 if p not in parents:
228 parents.append(p)
229 newparents = next
230 #print "Children matching %s:" % str(parents)
231 for p in parents:
232 # Need to be careful here since runqemu-internal runs "ldd qemu-system-xxxx"
233 # Also, old versions of ldd (2.11) run "LD_XXXX qemu-system-xxxx"
234 basecmd = commands[p].split()[0]
235 basecmd = os.path.basename(basecmd)
236 if "qemu-system" in basecmd and "-serial tcp" in commands[p]:
237 return [int(p),commands[p]]
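
QemuTarget in targetcontrol.py is the in-tree consumer of this class; a stripped-down standalone sketch (every path and value below is a placeholder, and a runqemu-capable build environment is assumed):

    from oeqa.utils.qemurunner import QemuRunner

    runner = QemuRunner(machine="qemux86",
                        rootfs="/path/to/core-image-minimal-qemux86.ext3",
                        display=":0",
                        tmpdir="/path/to/tmp",
                        deploy_dir_image="/path/to/deploy/images",
                        logfile="/tmp/qemu_boot.log",
                        boottime=300)
    if runner.start():
        print("qemu is up at %s (pid %s)" % (runner.ip, runner.qemupid))
        runner.stop()
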
diff --git a/meta/lib/oeqa/utils/sshcontrol.py b/meta/lib/oeqa/utils/sshcontrol.py
new file mode 100644
index 0000000000..1c81795a87
--- /dev/null
+++ b/meta/lib/oeqa/utils/sshcontrol.py
@@ -0,0 +1,138 @@
1# Copyright (C) 2013 Intel Corporation
2#
3# Released under the MIT license (see COPYING.MIT)
4
5# Provides a class for setting up ssh connections,
6# running commands and copying files to/from a target.
7# It's used by testimage.bbclass and tests in lib/oeqa/runtime.
8
9import subprocess
10import time
11import os
12import select
13
14
15class SSHProcess(object):
16 def __init__(self, **options):
17
18 self.defaultopts = {
19 "stdout": subprocess.PIPE,
20 "stderr": subprocess.STDOUT,
21 "stdin": None,
22 "shell": False,
23 "bufsize": -1,
24 "preexec_fn": os.setsid,
25 }
26 self.options = dict(self.defaultopts)
27 self.options.update(options)
28 self.status = None
29 self.output = None
30 self.process = None
31 self.starttime = None
32 self.logfile = None
33
34 def log(self, msg):
35 if self.logfile:
36 with open(self.logfile, "a") as f:
37 f.write("%s" % msg)
38
39 def run(self, command, timeout=None, logfile=None):
40 self.logfile = logfile
41 self.starttime = time.time()
42 output = ''
43 self.process = subprocess.Popen(command, **self.options)
44 if timeout:
45 endtime = self.starttime + timeout
46 eof = False
47 while time.time() < endtime and not eof:
48 if select.select([self.process.stdout], [], [], 5)[0] != []:
49 data = os.read(self.process.stdout.fileno(), 1024)
50 if not data:
51 self.process.stdout.close()
52 eof = True
53 else:
54 output += data
55 self.log(data)
56 endtime = time.time() + timeout
57
58
59 # process hasn't returned yet
60 if not eof:
61 self.process.terminate()
62 time.sleep(5)
63 try:
64 self.process.kill()
65 except OSError:
66 pass
67 lastline = "\nProcess killed - no output for %d seconds. Total running time: %d seconds." % (timeout, time.time() - self.starttime)
68 self.log(lastline)
69 output += lastline
70 else:
71 output = self.process.communicate()[0]
72 self.log(output.rstrip())
73
74 self.status = self.process.wait()
75 self.output = output.rstrip()
76 return (self.status, self.output)
77
78
79class SSHControl(object):
80 def __init__(self, ip, logfile=None, timeout=300, user='root', port=None):
81 self.ip = ip
82 self.defaulttimeout = timeout
83 self.ignore_status = True
84 self.logfile = logfile
85 self.user = user
86 self.ssh_options = [
87 '-o', 'UserKnownHostsFile=/dev/null',
88 '-o', 'StrictHostKeyChecking=no',
89 '-o', 'LogLevel=ERROR'
90 ]
91 self.ssh = ['ssh', '-l', self.user ] + self.ssh_options
92 self.scp = ['scp'] + self.ssh_options
93 if port:
94 self.ssh = self.ssh + [ '-p', port ]
95 self.scp = self.scp + [ '-P', port ]
96
97 def log(self, msg):
98 if self.logfile:
99 with open(self.logfile, "a") as f:
100 f.write("%s\n" % msg)
101
102 def _internal_run(self, command, timeout=None, ignore_status = True):
103 self.log("[Running]$ %s" % " ".join(command))
104
105 proc = SSHProcess()
106 status, output = proc.run(command, timeout, logfile=self.logfile)
107
108 self.log("[Command returned '%d' after %.2f seconds]" % (status, time.time() - proc.starttime))
109
110 if status and not ignore_status:
111 raise AssertionError("Command '%s' returned non-zero exit status %d:\n%s" % (command, status, output))
112
113 return (status, output)
114
115 def run(self, command, timeout=None):
116 """
117 command - ssh command to run
118 timeout=<val> - kill command if there is no output after <val> seconds
119        timeout=None - kill command if there is no output after a default number of seconds
120 timeout=0 - no timeout, let command run until it returns
121 """
122
123 # We need to source /etc/profile for a proper PATH on the target
124 command = self.ssh + [self.ip, ' . /etc/profile; ' + command]
125
126 if timeout is None:
127 return self._internal_run(command, self.defaulttimeout, self.ignore_status)
128 if timeout == 0:
129 return self._internal_run(command, None, self.ignore_status)
130 return self._internal_run(command, timeout, self.ignore_status)
131
132 def copy_to(self, localpath, remotepath):
133 command = self.scp + [localpath, '%s@%s:%s' % (self.user, self.ip, remotepath)]
134 return self._internal_run(command, ignore_status=False)
135
136 def copy_from(self, remotepath, localpath):
137 command = self.scp + ['%s@%s:%s' % (self.user, self.ip, remotepath), localpath]
138 return self._internal_run(command, ignore_status=False)
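
A brief sketch of the timeout conventions documented in run() (the IP and paths are placeholders):

    from oeqa.utils.sshcontrol import SSHControl

    ssh = SSHControl("192.168.7.2", logfile="/tmp/ssh_log")
    status, output = ssh.run("uname -a")               # default: kill after 300s with no output
    status, output = ssh.run("tar xf archive.tar", 0)  # no timeout at all
    ssh.copy_to("/tmp/local.file", "/tmp/remote.file")
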
diff --git a/meta/lib/oeqa/utils/targetbuild.py b/meta/lib/oeqa/utils/targetbuild.py
new file mode 100644
index 0000000000..eeb08ba716
--- /dev/null
+++ b/meta/lib/oeqa/utils/targetbuild.py
@@ -0,0 +1,132 @@
1# Copyright (C) 2013 Intel Corporation
2#
3# Released under the MIT license (see COPYING.MIT)
4
5# Provides a class for automating build tests for projects
6
7import os
8import re
9import bb.utils
10import subprocess
11from abc import ABCMeta, abstractmethod
12
13class BuildProject():
14
15 __metaclass__ = ABCMeta
16
17 def __init__(self, d, uri, foldername=None, tmpdir="/tmp/"):
18 self.d = d
19 self.uri = uri
20 self.archive = os.path.basename(uri)
21 self.localarchive = os.path.join(tmpdir,self.archive)
22        self.fname = re.sub(r'\.tar\.(bz2|gz)$', '', self.archive)
23 if foldername:
24 self.fname = foldername
25
26 # Download self.archive to self.localarchive
27 def _download_archive(self):
28
29 exportvars = ['HTTP_PROXY', 'http_proxy',
30 'HTTPS_PROXY', 'https_proxy',
31 'FTP_PROXY', 'ftp_proxy',
32 'FTPS_PROXY', 'ftps_proxy',
33 'NO_PROXY', 'no_proxy',
34 'ALL_PROXY', 'all_proxy',
35 'SOCKS5_USER', 'SOCKS5_PASSWD']
36
37 cmd = ''
38 for var in exportvars:
39 val = self.d.getVar(var, True)
40 if val:
41 cmd = 'export ' + var + '=\"%s\"; %s' % (val, cmd)
42
43 cmd = cmd + "wget -O %s %s" % (self.localarchive, self.uri)
44 subprocess.check_call(cmd, shell=True)
45
46 # This method should provide a way to run a command in the desired environment.
47 @abstractmethod
48 def _run(self, cmd):
49 pass
50
51 # The timeout parameter of target.run is set to 0 to make the ssh command
52 # run with no timeout.
53 def run_configure(self, configure_args=''):
54 return self._run('cd %s; ./configure %s' % (self.targetdir, configure_args))
55
56 def run_make(self, make_args=''):
57 return self._run('cd %s; make %s' % (self.targetdir, make_args))
58
59 def run_install(self, install_args=''):
60 return self._run('cd %s; make install %s' % (self.targetdir, install_args))
61
62 def clean(self):
63 self._run('rm -rf %s' % self.targetdir)
64 subprocess.call('rm -f %s' % self.localarchive, shell=True)
65 pass
66
67class TargetBuildProject(BuildProject):
68
69 def __init__(self, target, d, uri, foldername=None):
70 self.target = target
71 self.targetdir = "~/"
72 BuildProject.__init__(self, d, uri, foldername, tmpdir="/tmp")
73
74 def download_archive(self):
75
76 self._download_archive()
77
78 (status, output) = self.target.copy_to(self.localarchive, self.targetdir)
79 if status != 0:
80 raise Exception("Failed to copy archive to target, output: %s" % output)
81
82 (status, output) = self.target.run('tar xf %s%s -C %s' % (self.targetdir, self.archive, self.targetdir))
83 if status != 0:
84 raise Exception("Failed to extract archive, output: %s" % output)
85
86 #Change targetdir to project folder
87 self.targetdir = self.targetdir + self.fname
88
89 # The timeout parameter of target.run is set to 0 to make the ssh command
90 # run with no timeout.
91 def _run(self, cmd):
92 return self.target.run(cmd, 0)[0]
93
94
95class SDKBuildProject(BuildProject):
96
97 def __init__(self, testpath, sdkenv, d, uri, foldername=None):
98 self.sdkenv = sdkenv
99 self.testdir = testpath
100 self.targetdir = testpath
101 bb.utils.mkdirhier(testpath)
102 self.datetime = d.getVar('DATETIME', True)
103 self.testlogdir = d.getVar("TEST_LOG_DIR", True)
104 bb.utils.mkdirhier(self.testlogdir)
105 self.logfile = os.path.join(self.testlogdir, "sdk_target_log.%s" % self.datetime)
106 BuildProject.__init__(self, d, uri, foldername, tmpdir=testpath)
107
108 def download_archive(self):
109
110 self._download_archive()
111
112        cmd = 'tar xf %s -C %s' % (self.localarchive, self.targetdir)
113 subprocess.check_call(cmd, shell=True)
114
115 #Change targetdir to project folder
116        self.targetdir = os.path.join(self.targetdir, self.fname)
117
118 def run_configure(self, configure_args=''):
119 return super(SDKBuildProject, self).run_configure(configure_args=(configure_args or '$CONFIGURE_FLAGS'))
120
121 def run_install(self, install_args=''):
122 return super(SDKBuildProject, self).run_install(install_args=(install_args or "DESTDIR=%s/../install" % self.targetdir))
123
124 def log(self, msg):
125 if self.logfile:
126 with open(self.logfile, "a") as f:
127 f.write("%s\n" % msg)
128
129 def _run(self, cmd):
130 self.log("Running source %s; " % self.sdkenv + cmd)
131 return subprocess.call("source %s; " % self.sdkenv + cmd, shell=True)
132
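
As a closing sketch, this is roughly how a runtime build test would drive TargetBuildProject (the URI is an example tarball; target and d are assumed to come from the running test's context):

    from oeqa.utils.targetbuild import TargetBuildProject

    project = TargetBuildProject(target, d, "http://example.com/hello-1.0.tar.gz")
    project.download_archive()
    assert project.run_configure() == 0
    assert project.run_make() == 0
    assert project.run_install() == 0
    project.clean()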