author    Tudor Florea <tudor.florea@enea.com>  2015-10-09 20:59:03 (GMT)
committer Tudor Florea <tudor.florea@enea.com>  2015-10-09 20:59:03 (GMT)
commit    972dcfcdbfe75dcfeb777150c136576cf1a71e99 (patch)
tree      97a61cd7e293d7ae9d56ef7ed0f81253365bb026 /meta/lib
download  poky-972dcfcdbfe75dcfeb777150c136576cf1a71e99.tar.gz
initial commit for Enea Linux 5.0 arm
Signed-off-by: Tudor Florea <tudor.florea@enea.com>
Diffstat (limited to 'meta/lib')
-rw-r--r--  meta/lib/oe/__init__.py                          2
-rw-r--r--  meta/lib/oe/buildhistory_analysis.py           456
-rw-r--r--  meta/lib/oe/cachedpath.py                      233
-rw-r--r--  meta/lib/oe/classextend.py                     118
-rw-r--r--  meta/lib/oe/classutils.py                       43
-rw-r--r--  meta/lib/oe/data.py                             17
-rw-r--r--  meta/lib/oe/distro_check.py                    383
-rw-r--r--  meta/lib/oe/image.py                           345
-rw-r--r--  meta/lib/oe/license.py                         116
-rw-r--r--  meta/lib/oe/lsb.py                              81
-rw-r--r--  meta/lib/oe/maketype.py                         99
-rw-r--r--  meta/lib/oe/manifest.py                        345
-rw-r--r--  meta/lib/oe/package.py                          99
-rw-r--r--  meta/lib/oe/package_manager.py                1797
-rw-r--r--  meta/lib/oe/packagedata.py                      94
-rw-r--r--  meta/lib/oe/packagegroup.py                     36
-rw-r--r--  meta/lib/oe/patch.py                           447
-rw-r--r--  meta/lib/oe/path.py                            243
-rw-r--r--  meta/lib/oe/prservice.py                       126
-rw-r--r--  meta/lib/oe/qa.py                              111
-rw-r--r--  meta/lib/oe/rootfs.py                          800
-rw-r--r--  meta/lib/oe/sdk.py                             326
-rw-r--r--  meta/lib/oe/sstatesig.py                       276
-rw-r--r--  meta/lib/oe/terminal.py                        208
-rw-r--r--  meta/lib/oe/tests/__init__.py                    0
-rw-r--r--  meta/lib/oe/tests/test_license.py               68
-rw-r--r--  meta/lib/oe/tests/test_path.py                  89
-rw-r--r--  meta/lib/oe/tests/test_types.py                 62
-rw-r--r--  meta/lib/oe/tests/test_utils.py                 51
-rw-r--r--  meta/lib/oe/types.py                           153
-rw-r--r--  meta/lib/oe/utils.py                           182
-rw-r--r--  meta/lib/oeqa/__init__.py                        0
-rw-r--r--  meta/lib/oeqa/controllers/__init__.py            3
-rw-r--r--  meta/lib/oeqa/controllers/masterimage.py       201
-rw-r--r--  meta/lib/oeqa/controllers/testtargetloader.py   70
-rw-r--r--  meta/lib/oeqa/oetest.py                        106
-rwxr-xr-x  meta/lib/oeqa/runexported.py                   140
-rw-r--r--  meta/lib/oeqa/runtime/__init__.py                3
-rw-r--r--  meta/lib/oeqa/runtime/_ptest.py                124
-rw-r--r--  meta/lib/oeqa/runtime/buildcvs.py               31
-rw-r--r--  meta/lib/oeqa/runtime/buildiptables.py          31
-rw-r--r--  meta/lib/oeqa/runtime/buildsudoku.py            28
-rw-r--r--  meta/lib/oeqa/runtime/connman.py                30
-rw-r--r--  meta/lib/oeqa/runtime/date.py                   23
-rw-r--r--  meta/lib/oeqa/runtime/df.py                     12
-rw-r--r--  meta/lib/oeqa/runtime/dmesg.py                  12
-rw-r--r--  meta/lib/oeqa/runtime/files/hellomod.c          19
-rw-r--r--  meta/lib/oeqa/runtime/files/hellomod_makefile    8
-rw-r--r--  meta/lib/oeqa/runtime/files/test.c              26
-rw-r--r--  meta/lib/oeqa/runtime/files/test.cpp             3
-rw-r--r--  meta/lib/oeqa/runtime/files/test.pl              2
-rw-r--r--  meta/lib/oeqa/runtime/files/test.py              6
-rw-r--r--  meta/lib/oeqa/runtime/files/testmakefile         5
-rw-r--r--  meta/lib/oeqa/runtime/gcc.py                    46
-rw-r--r--  meta/lib/oeqa/runtime/kernelmodule.py           34
-rw-r--r--  meta/lib/oeqa/runtime/ldd.py                    20
-rw-r--r--  meta/lib/oeqa/runtime/logrotate.py              28
-rw-r--r--  meta/lib/oeqa/runtime/multilib.py               18
-rw-r--r--  meta/lib/oeqa/runtime/pam.py                    25
-rw-r--r--  meta/lib/oeqa/runtime/parselogs.py             178
-rw-r--r--  meta/lib/oeqa/runtime/perl.py                   29
-rw-r--r--  meta/lib/oeqa/runtime/ping.py                   20
-rw-r--r--  meta/lib/oeqa/runtime/python.py                 34
-rw-r--r--  meta/lib/oeqa/runtime/rpm.py                    53
-rw-r--r--  meta/lib/oeqa/runtime/scanelf.py                28
-rw-r--r--  meta/lib/oeqa/runtime/scp.py                    22
-rw-r--r--  meta/lib/oeqa/runtime/skeletoninit.py           29
-rw-r--r--  meta/lib/oeqa/runtime/smart.py                 121
-rw-r--r--  meta/lib/oeqa/runtime/ssh.py                    19
-rw-r--r--  meta/lib/oeqa/runtime/syslog.py                 48
-rw-r--r--  meta/lib/oeqa/runtime/systemd.py                88
-rw-r--r--  meta/lib/oeqa/runtime/vnc.py                    20
-rw-r--r--  meta/lib/oeqa/runtime/x32lib.py                 18
-rw-r--r--  meta/lib/oeqa/runtime/xorg.py                   17
-rw-r--r--  meta/lib/oeqa/sdk/__init__.py                    3
-rw-r--r--  meta/lib/oeqa/sdk/buildcvs.py                   25
-rw-r--r--  meta/lib/oeqa/sdk/buildiptables.py              26
-rw-r--r--  meta/lib/oeqa/sdk/buildsudoku.py                26
-rw-r--r--  meta/lib/oeqa/selftest/__init__.py               2
-rw-r--r--  meta/lib/oeqa/selftest/_sstatetests_noauto.py   95
-rw-r--r--  meta/lib/oeqa/selftest/_toaster.py             445
-rw-r--r--  meta/lib/oeqa/selftest/base.py                 131
-rw-r--r--  meta/lib/oeqa/selftest/bblayers.py              43
-rw-r--r--  meta/lib/oeqa/selftest/bbtests.py              178
-rw-r--r--  meta/lib/oeqa/selftest/buildhistory.py          45
-rw-r--r--  meta/lib/oeqa/selftest/buildoptions.py         120
-rw-r--r--  meta/lib/oeqa/selftest/oescripts.py             54
-rw-r--r--  meta/lib/oeqa/selftest/prservice.py            121
-rw-r--r--  meta/lib/oeqa/selftest/sstate.py                53
-rw-r--r--  meta/lib/oeqa/selftest/sstatetests.py          204
-rw-r--r--  meta/lib/oeqa/targetcontrol.py                 199
-rw-r--r--  meta/lib/oeqa/utils/__init__.py                 15
-rw-r--r--  meta/lib/oeqa/utils/commands.py                154
-rw-r--r--  meta/lib/oeqa/utils/decorators.py              158
-rw-r--r--  meta/lib/oeqa/utils/ftools.py                   27
-rw-r--r--  meta/lib/oeqa/utils/httpserver.py               35
-rw-r--r--  meta/lib/oeqa/utils/logparser.py               125
-rw-r--r--  meta/lib/oeqa/utils/qemurunner.py              237
-rw-r--r--  meta/lib/oeqa/utils/sshcontrol.py              138
-rw-r--r--  meta/lib/oeqa/utils/targetbuild.py             132
100 files changed, 11975 insertions, 0 deletions
diff --git a/meta/lib/oe/__init__.py b/meta/lib/oe/__init__.py
new file mode 100644
index 0000000..3ad9513
--- /dev/null
+++ b/meta/lib/oe/__init__.py
@@ -0,0 +1,2 @@
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
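Usage note (not part of the patch): the two lines above make oe a namespace package. pkgutil.extend_path() scans sys.path for other directories that also provide an oe/ subdirectory, so additional layers can contribute modules to the same package. A rough, self-contained illustration, with a hypothetical layer path:

import pkgutil
# Returns the given list plus any other sys.path entries containing an oe/
# package, which is how layer lib/ directories merge into one 'oe' namespace.
print(pkgutil.extend_path(['/path/to/meta/lib/oe'], 'oe'))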
diff --git a/meta/lib/oe/buildhistory_analysis.py b/meta/lib/oe/buildhistory_analysis.py
new file mode 100644
index 0000000..5395c76
--- /dev/null
+++ b/meta/lib/oe/buildhistory_analysis.py
@@ -0,0 +1,456 @@
# Report significant differences in the buildhistory repository since a specific revision
#
# Copyright (C) 2012 Intel Corporation
# Author: Paul Eggleton <paul.eggleton@linux.intel.com>
#
# Note: requires GitPython 0.3.1+
#
# You can use this from the command line by running scripts/buildhistory-diff
#

import sys
import os.path
import difflib
import git
import re
import bb.utils


# How to display fields
list_fields = ['DEPENDS', 'RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS', 'FILES', 'FILELIST', 'USER_CLASSES', 'IMAGE_CLASSES', 'IMAGE_FEATURES', 'IMAGE_LINGUAS', 'IMAGE_INSTALL', 'BAD_RECOMMENDATIONS', 'PACKAGE_EXCLUDE']
list_order_fields = ['PACKAGES']
defaultval_map = {'PKG': 'PKG', 'PKGE': 'PE', 'PKGV': 'PV', 'PKGR': 'PR'}
numeric_fields = ['PKGSIZE', 'IMAGESIZE']
# Fields to monitor
monitor_fields = ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RREPLACES', 'RCONFLICTS', 'PACKAGES', 'FILELIST', 'PKGSIZE', 'IMAGESIZE', 'PKG']
ver_monitor_fields = ['PKGE', 'PKGV', 'PKGR']
# Percentage change to alert for numeric fields
monitor_numeric_threshold = 10
# Image files to monitor (note that image-info.txt is handled separately)
img_monitor_files = ['installed-package-names.txt', 'files-in-image.txt']
# Related context fields for reporting (note: PE, PV & PR are always reported for monitored package fields)
related_fields = {}
related_fields['RDEPENDS'] = ['DEPENDS']
related_fields['RRECOMMENDS'] = ['DEPENDS']
related_fields['FILELIST'] = ['FILES']
related_fields['PKGSIZE'] = ['FILELIST']
related_fields['files-in-image.txt'] = ['installed-package-names.txt', 'USER_CLASSES', 'IMAGE_CLASSES', 'ROOTFS_POSTPROCESS_COMMAND', 'IMAGE_POSTPROCESS_COMMAND']
related_fields['installed-package-names.txt'] = ['IMAGE_FEATURES', 'IMAGE_LINGUAS', 'IMAGE_INSTALL', 'BAD_RECOMMENDATIONS', 'NO_RECOMMENDATIONS', 'PACKAGE_EXCLUDE']


class ChangeRecord:
    def __init__(self, path, fieldname, oldvalue, newvalue, monitored):
        self.path = path
        self.fieldname = fieldname
        self.oldvalue = oldvalue
        self.newvalue = newvalue
        self.monitored = monitored
        self.related = []
        self.filechanges = None

    def __str__(self):
        return self._str_internal(True)

    def _str_internal(self, outer):
        if outer:
            if '/image-files/' in self.path:
                prefix = '%s: ' % self.path.split('/image-files/')[0]
            else:
                prefix = '%s: ' % self.path
        else:
            prefix = ''

        def pkglist_combine(depver):
            pkglist = []
            for k,v in depver.iteritems():
                if v:
                    pkglist.append("%s (%s)" % (k,v))
                else:
                    pkglist.append(k)
            return pkglist

        if self.fieldname in list_fields or self.fieldname in list_order_fields:
            if self.fieldname in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
                (depvera, depverb) = compare_pkg_lists(self.oldvalue, self.newvalue)
                aitems = pkglist_combine(depvera)
                bitems = pkglist_combine(depverb)
            else:
                aitems = self.oldvalue.split()
                bitems = self.newvalue.split()
            removed = list(set(aitems) - set(bitems))
            added = list(set(bitems) - set(aitems))

            if removed or added:
                if removed and not bitems:
                    out = '%s: removed all items "%s"' % (self.fieldname, ' '.join(removed))
                else:
                    out = '%s:%s%s' % (self.fieldname, ' removed "%s"' % ' '.join(removed) if removed else '', ' added "%s"' % ' '.join(added) if added else '')
            else:
                out = '%s changed order' % self.fieldname
        elif self.fieldname in numeric_fields:
            aval = int(self.oldvalue or 0)
            bval = int(self.newvalue or 0)
            if aval != 0:
                percentchg = ((bval - aval) / float(aval)) * 100
            else:
                percentchg = 100
            out = '%s changed from %s to %s (%s%d%%)' % (self.fieldname, self.oldvalue or "''", self.newvalue or "''", '+' if percentchg > 0 else '', percentchg)
        elif self.fieldname in defaultval_map:
            out = '%s changed from %s to %s' % (self.fieldname, self.oldvalue, self.newvalue)
            if self.fieldname == 'PKG' and '[default]' in self.newvalue:
                out += ' - may indicate debian renaming failure'
        elif self.fieldname in ['pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm']:
            if self.oldvalue and self.newvalue:
                out = '%s changed:\n  ' % self.fieldname
            elif self.newvalue:
                out = '%s added:\n  ' % self.fieldname
            elif self.oldvalue:
                out = '%s cleared:\n  ' % self.fieldname
            alines = self.oldvalue.splitlines()
            blines = self.newvalue.splitlines()
            diff = difflib.unified_diff(alines, blines, self.fieldname, self.fieldname, lineterm='')
            out += '\n  '.join(list(diff)[2:])
            out += '\n  --'
        elif self.fieldname in img_monitor_files or '/image-files/' in self.path:
            fieldname = self.fieldname
            if '/image-files/' in self.path:
                fieldname = os.path.join('/' + self.path.split('/image-files/')[1], self.fieldname)
                out = 'Changes to %s:\n  ' % fieldname
            else:
                if outer:
                    prefix = 'Changes to %s ' % self.path
                out = '(%s):\n  ' % self.fieldname
            if self.filechanges:
                out += '\n  '.join(['%s' % i for i in self.filechanges])
            else:
                alines = self.oldvalue.splitlines()
                blines = self.newvalue.splitlines()
                diff = difflib.unified_diff(alines, blines, fieldname, fieldname, lineterm='')
                out += '\n  '.join(list(diff))
                out += '\n  --'
        else:
            out = '%s changed from "%s" to "%s"' % (self.fieldname, self.oldvalue, self.newvalue)

        if self.related:
            for chg in self.related:
                if not outer and chg.fieldname in ['PE', 'PV', 'PR']:
                    continue
                for line in chg._str_internal(False).splitlines():
                    out += '\n  * %s' % line

        return '%s%s' % (prefix, out)

class FileChange:
    changetype_add = 'A'
    changetype_remove = 'R'
    changetype_type = 'T'
    changetype_perms = 'P'
    changetype_ownergroup = 'O'
    changetype_link = 'L'

    def __init__(self, path, changetype, oldvalue = None, newvalue = None):
        self.path = path
        self.changetype = changetype
        self.oldvalue = oldvalue
        self.newvalue = newvalue

    def _ftype_str(self, ftype):
        if ftype == '-':
            return 'file'
        elif ftype == 'd':
            return 'directory'
        elif ftype == 'l':
            return 'symlink'
        elif ftype == 'c':
            return 'char device'
        elif ftype == 'b':
            return 'block device'
        elif ftype == 'p':
            return 'fifo'
        elif ftype == 's':
            return 'socket'
        else:
            return 'unknown (%s)' % ftype

    def __str__(self):
        if self.changetype == self.changetype_add:
            return '%s was added' % self.path
        elif self.changetype == self.changetype_remove:
            return '%s was removed' % self.path
        elif self.changetype == self.changetype_type:
            return '%s changed type from %s to %s' % (self.path, self._ftype_str(self.oldvalue), self._ftype_str(self.newvalue))
        elif self.changetype == self.changetype_perms:
            return '%s changed permissions from %s to %s' % (self.path, self.oldvalue, self.newvalue)
        elif self.changetype == self.changetype_ownergroup:
            return '%s changed owner/group from %s to %s' % (self.path, self.oldvalue, self.newvalue)
        elif self.changetype == self.changetype_link:
            return '%s changed symlink target from %s to %s' % (self.path, self.oldvalue, self.newvalue)
        else:
            return '%s changed (unknown)' % self.path


def blob_to_dict(blob):
    alines = blob.data_stream.read().splitlines()
    adict = {}
    for line in alines:
        splitv = [i.strip() for i in line.split('=',1)]
        if len(splitv) > 1:
            adict[splitv[0]] = splitv[1]
    return adict


def file_list_to_dict(lines):
    adict = {}
    for line in lines:
        # Leave the last few fields intact so we handle file names containing spaces
        splitv = line.split(None,4)
        # Grab the path and remove the leading .
        path = splitv[4][1:].strip()
        # Handle symlinks
        if(' -> ' in path):
            target = path.split(' -> ')[1]
            path = path.split(' -> ')[0]
            adict[path] = splitv[0:3] + [target]
        else:
            adict[path] = splitv[0:3]
    return adict


def compare_file_lists(alines, blines):
    adict = file_list_to_dict(alines)
    bdict = file_list_to_dict(blines)
    filechanges = []
    for path, splitv in adict.iteritems():
        newsplitv = bdict.pop(path, None)
        if newsplitv:
            # Check type
            oldvalue = splitv[0][0]
            newvalue = newsplitv[0][0]
            if oldvalue != newvalue:
                filechanges.append(FileChange(path, FileChange.changetype_type, oldvalue, newvalue))
            # Check permissions
            oldvalue = splitv[0][1:]
            newvalue = newsplitv[0][1:]
            if oldvalue != newvalue:
                filechanges.append(FileChange(path, FileChange.changetype_perms, oldvalue, newvalue))
            # Check owner/group
            oldvalue = '%s/%s' % (splitv[1], splitv[2])
            newvalue = '%s/%s' % (newsplitv[1], newsplitv[2])
            if oldvalue != newvalue:
                filechanges.append(FileChange(path, FileChange.changetype_ownergroup, oldvalue, newvalue))
            # Check symlink target
            if newsplitv[0][0] == 'l':
                if len(splitv) > 3:
                    oldvalue = splitv[3]
                else:
                    oldvalue = None
                newvalue = newsplitv[3]
                if oldvalue != newvalue:
                    filechanges.append(FileChange(path, FileChange.changetype_link, oldvalue, newvalue))
        else:
            filechanges.append(FileChange(path, FileChange.changetype_remove))

    # Whatever is left over has been added
    for path in bdict:
        filechanges.append(FileChange(path, FileChange.changetype_add))

    return filechanges


def compare_lists(alines, blines):
    removed = list(set(alines) - set(blines))
    added = list(set(blines) - set(alines))

    filechanges = []
    for pkg in removed:
        filechanges.append(FileChange(pkg, FileChange.changetype_remove))
    for pkg in added:
        filechanges.append(FileChange(pkg, FileChange.changetype_add))

    return filechanges


def compare_pkg_lists(astr, bstr):
    depvera = bb.utils.explode_dep_versions2(astr)
    depverb = bb.utils.explode_dep_versions2(bstr)

    # Strip out changes where the version has increased
    remove = []
    for k in depvera:
        if k in depverb:
            dva = depvera[k]
            dvb = depverb[k]
            if dva and dvb and len(dva) == len(dvb):
                # Since length is the same, sort so that prefixes (e.g. >=) will line up
                dva.sort()
                dvb.sort()
                removeit = True
                for dvai, dvbi in zip(dva, dvb):
                    if dvai != dvbi:
                        aiprefix = dvai.split(' ')[0]
                        biprefix = dvbi.split(' ')[0]
                        if aiprefix == biprefix and aiprefix in ['>=', '=']:
                            if bb.utils.vercmp(bb.utils.split_version(dvai), bb.utils.split_version(dvbi)) > 0:
                                removeit = False
                                break
                        else:
                            removeit = False
                            break
                if removeit:
                    remove.append(k)

    for k in remove:
        depvera.pop(k)
        depverb.pop(k)

    return (depvera, depverb)


def compare_dict_blobs(path, ablob, bblob, report_all, report_ver):
    adict = blob_to_dict(ablob)
    bdict = blob_to_dict(bblob)

    pkgname = os.path.basename(path)

    defaultvals = {}
    defaultvals['PKG'] = pkgname
    defaultvals['PKGE'] = '0'

    changes = []
    keys = list(set(adict.keys()) | set(bdict.keys()) | set(defaultval_map.keys()))
    for key in keys:
        astr = adict.get(key, '')
        bstr = bdict.get(key, '')
        if key in ver_monitor_fields:
            monitored = report_ver or astr or bstr
        else:
            monitored = key in monitor_fields
        mapped_key = defaultval_map.get(key, '')
        if mapped_key:
            if not astr:
                astr = '%s [default]' % adict.get(mapped_key, defaultvals.get(key, ''))
            if not bstr:
                bstr = '%s [default]' % bdict.get(mapped_key, defaultvals.get(key, ''))

        if astr != bstr:
            if (not report_all) and key in numeric_fields:
                aval = int(astr or 0)
                bval = int(bstr or 0)
                if aval != 0:
                    percentchg = ((bval - aval) / float(aval)) * 100
                else:
                    percentchg = 100
                if abs(percentchg) < monitor_numeric_threshold:
                    continue
            elif (not report_all) and key in list_fields:
                if key == "FILELIST" and path.endswith("-dbg") and bstr.strip() != '':
                    continue
                if key in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
                    (depvera, depverb) = compare_pkg_lists(astr, bstr)
                    if depvera == depverb:
                        continue
                alist = astr.split()
                alist.sort()
                blist = bstr.split()
                blist.sort()
                # We don't care about the removal of self-dependencies
                if pkgname in alist and not pkgname in blist:
                    alist.remove(pkgname)
                if ' '.join(alist) == ' '.join(blist):
                    continue

            chg = ChangeRecord(path, key, astr, bstr, monitored)
            changes.append(chg)
    return changes


def process_changes(repopath, revision1, revision2='HEAD', report_all=False, report_ver=False):
    repo = git.Repo(repopath)
    assert repo.bare == False
    commit = repo.commit(revision1)
    diff = commit.diff(revision2)

    changes = []
    for d in diff.iter_change_type('M'):
        path = os.path.dirname(d.a_blob.path)
        if path.startswith('packages/'):
            filename = os.path.basename(d.a_blob.path)
            if filename == 'latest':
                changes.extend(compare_dict_blobs(path, d.a_blob, d.b_blob, report_all, report_ver))
            elif filename.startswith('latest.'):
                chg = ChangeRecord(path, filename, d.a_blob.data_stream.read(), d.b_blob.data_stream.read(), True)
                changes.append(chg)
        elif path.startswith('images/'):
            filename = os.path.basename(d.a_blob.path)
            if filename in img_monitor_files:
                if filename == 'files-in-image.txt':
                    alines = d.a_blob.data_stream.read().splitlines()
                    blines = d.b_blob.data_stream.read().splitlines()
                    filechanges = compare_file_lists(alines,blines)
                    if filechanges:
                        chg = ChangeRecord(path, filename, None, None, True)
                        chg.filechanges = filechanges
                        changes.append(chg)
                elif filename == 'installed-package-names.txt':
                    alines = d.a_blob.data_stream.read().splitlines()
                    blines = d.b_blob.data_stream.read().splitlines()
                    filechanges = compare_lists(alines,blines)
                    if filechanges:
                        chg = ChangeRecord(path, filename, None, None, True)
                        chg.filechanges = filechanges
                        changes.append(chg)
                else:
                    chg = ChangeRecord(path, filename, d.a_blob.data_stream.read(), d.b_blob.data_stream.read(), True)
                    changes.append(chg)
            elif filename == 'image-info.txt':
                changes.extend(compare_dict_blobs(path, d.a_blob, d.b_blob, report_all, report_ver))
            elif '/image-files/' in path:
                chg = ChangeRecord(path, filename, d.a_blob.data_stream.read(), d.b_blob.data_stream.read(), True)
                changes.append(chg)

    # Look for added preinst/postinst/prerm/postrm
    # (without reporting newly added recipes)
    addedpkgs = []
    addedchanges = []
    for d in diff.iter_change_type('A'):
        path = os.path.dirname(d.b_blob.path)
        if path.startswith('packages/'):
            filename = os.path.basename(d.b_blob.path)
            if filename == 'latest':
                addedpkgs.append(path)
            elif filename.startswith('latest.'):
                chg = ChangeRecord(path, filename[7:], '', d.b_blob.data_stream.read(), True)
                addedchanges.append(chg)
    for chg in addedchanges:
        found = False
        for pkg in addedpkgs:
            if chg.path.startswith(pkg):
                found = True
                break
        if not found:
            changes.append(chg)

    # Look for cleared preinst/postinst/prerm/postrm
    for d in diff.iter_change_type('D'):
        path = os.path.dirname(d.a_blob.path)
        if path.startswith('packages/'):
            filename = os.path.basename(d.a_blob.path)
            if filename != 'latest' and filename.startswith('latest.'):
                chg = ChangeRecord(path, filename[7:], d.a_blob.data_stream.read(), '', True)
                changes.append(chg)

    # Link related changes
    for chg in changes:
        if chg.monitored:
            for chg2 in changes:
                # (Check dirname in the case of fields from recipe info files)
                if chg.path == chg2.path or os.path.dirname(chg.path) == chg2.path:
                    if chg2.fieldname in related_fields.get(chg.fieldname, []):
                        chg.related.append(chg2)
                elif chg.path == chg2.path and chg.path.startswith('packages/') and chg2.fieldname in ['PE', 'PV', 'PR']:
                    chg.related.append(chg2)

    if report_all:
        return changes
    else:
        return [chg for chg in changes if chg.monitored]
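As a usage sketch (not part of the patch), this is roughly what scripts/buildhistory-diff does with the module. It needs GitPython 0.3.1+ and a bitbake-aware PYTHONPATH (the module imports bb.utils); the buildhistory path and revisions here are hypothetical:

import oe.buildhistory_analysis

# Report monitored changes between the previous and current revision of a
# buildhistory repository.
changes = oe.buildhistory_analysis.process_changes('/path/to/buildhistory', 'HEAD~1', 'HEAD')
for chg in changes:
    print(str(chg))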
diff --git a/meta/lib/oe/cachedpath.py b/meta/lib/oe/cachedpath.py
new file mode 100644
index 0000000..0840cc4
--- /dev/null
+++ b/meta/lib/oe/cachedpath.py
@@ -0,0 +1,233 @@
#
# Based on standard python library functions but avoid
# repeated stat calls. It's assumed the files will not change from under us
# so we can cache stat calls.
#

import os
import errno
import stat as statmod

class CachedPath(object):
    def __init__(self):
        self.statcache = {}
        self.lstatcache = {}
        self.normpathcache = {}
        return

    def updatecache(self, x):
        x = self.normpath(x)
        if x in self.statcache:
            del self.statcache[x]
        if x in self.lstatcache:
            del self.lstatcache[x]

    def normpath(self, path):
        if path in self.normpathcache:
            return self.normpathcache[path]
        newpath = os.path.normpath(path)
        self.normpathcache[path] = newpath
        return newpath

    def _callstat(self, path):
        if path in self.statcache:
            return self.statcache[path]
        try:
            st = os.stat(path)
            self.statcache[path] = st
            return st
        except os.error:
            self.statcache[path] = False
            return False

    # We might as well call lstat and then only
    # call stat as well in the symbolic link case
    # since this turns out to be much more efficient
    # in real world usage of this cache
    def callstat(self, path):
        path = self.normpath(path)
        self.calllstat(path)
        return self.statcache[path]

    def calllstat(self, path):
        path = self.normpath(path)
        if path in self.lstatcache:
            return self.lstatcache[path]
        #bb.error("LStatpath:" + path)
        try:
            lst = os.lstat(path)
            self.lstatcache[path] = lst
            if not statmod.S_ISLNK(lst.st_mode):
                self.statcache[path] = lst
            else:
                self._callstat(path)
            return lst
        except (os.error, AttributeError):
            self.lstatcache[path] = False
            self.statcache[path] = False
            return False

    # This follows symbolic links, so both islink() and isdir() can be true
    # for the same path on systems that support symlinks
    def isfile(self, path):
        """Test whether a path is a regular file"""
        st = self.callstat(path)
        if not st:
            return False
        return statmod.S_ISREG(st.st_mode)

    # Is a path a directory?
    # This follows symbolic links, so both islink() and isdir()
    # can be true for the same path on systems that support symlinks
    def isdir(self, s):
        """Return true if the pathname refers to an existing directory."""
        st = self.callstat(s)
        if not st:
            return False
        return statmod.S_ISDIR(st.st_mode)

    def islink(self, path):
        """Test whether a path is a symbolic link"""
        st = self.calllstat(path)
        if not st:
            return False
        return statmod.S_ISLNK(st.st_mode)

    # Does a path exist?
    # This is false for dangling symbolic links on systems that support them.
    def exists(self, path):
        """Test whether a path exists. Returns False for broken symbolic links"""
        if self.callstat(path):
            return True
        return False

    def lexists(self, path):
        """Test whether a path exists. Returns True for broken symbolic links"""
        if self.calllstat(path):
            return True
        return False

    def stat(self, path):
        return self.callstat(path)

    def lstat(self, path):
        return self.calllstat(path)

    def walk(self, top, topdown=True, onerror=None, followlinks=False):
        # Matches os.walk, not os.path.walk()

        # We may not have read permission for top, in which case we can't
        # get a list of the files the directory contains. os.path.walk
        # always suppressed the exception then, rather than blow up for a
        # minor reason when (say) a thousand readable directories are still
        # left to visit. That logic is copied here.
        try:
            names = os.listdir(top)
        except os.error as err:
            if onerror is not None:
                onerror(err)
            return

        dirs, nondirs = [], []
        for name in names:
            if self.isdir(os.path.join(top, name)):
                dirs.append(name)
            else:
                nondirs.append(name)

        if topdown:
            yield top, dirs, nondirs
        for name in dirs:
            new_path = os.path.join(top, name)
            if followlinks or not self.islink(new_path):
                for x in self.walk(new_path, topdown, onerror, followlinks):
                    yield x
        if not topdown:
            yield top, dirs, nondirs

    ## realpath() related functions
    def __is_path_below(self, file, root):
        return (file + os.path.sep).startswith(root)

    def __realpath_rel(self, start, rel_path, root, loop_cnt, assume_dir):
        """Calculates real path of symlink 'start' + 'rel_path' below
        'root'; no part of 'start' below 'root' must contain symlinks. """
        have_dir = True

        for d in rel_path.split(os.path.sep):
            if not have_dir and not assume_dir:
                raise OSError(errno.ENOENT, "no such directory %s" % start)

            if d == os.path.pardir: # '..'
                if len(start) >= len(root):
                    # do not follow '..' before root
                    start = os.path.dirname(start)
                else:
                    # emit warning?
                    pass
            else:
                (start, have_dir) = self.__realpath(os.path.join(start, d),
                                                    root, loop_cnt, assume_dir)

            assert(self.__is_path_below(start, root))

        return start

    def __realpath(self, file, root, loop_cnt, assume_dir):
        while self.islink(file) and len(file) >= len(root):
            if loop_cnt == 0:
                raise OSError(errno.ELOOP, file)

            loop_cnt -= 1
            target = os.path.normpath(os.readlink(file))

            if not os.path.isabs(target):
                tdir = os.path.dirname(file)
                assert(self.__is_path_below(tdir, root))
            else:
                tdir = root

            file = self.__realpath_rel(tdir, target, root, loop_cnt, assume_dir)

        try:
            is_dir = self.isdir(file)
        except:
            is_dir = False

        return (file, is_dir)

    def realpath(self, file, root, use_physdir = True, loop_cnt = 100, assume_dir = False):
        """ Returns the canonical path of 'file' with assuming a
        toplevel 'root' directory. When 'use_physdir' is set, all
        preceding path components of 'file' will be resolved first;
        this flag should be set unless it is guaranteed that there is
        no symlink in the path. When 'assume_dir' is not set, missing
        path components will raise an ENOENT error"""

        root = os.path.normpath(root)
        file = os.path.normpath(file)

        if not root.endswith(os.path.sep):
            # letting root end with '/' makes some things easier
            root = root + os.path.sep

        if not self.__is_path_below(file, root):
            raise OSError(errno.EINVAL, "file '%s' is not below root" % file)

        try:
            if use_physdir:
                file = self.__realpath_rel(root, file[(len(root) - 1):], root, loop_cnt, assume_dir)
            else:
                file = self.__realpath(file, root, loop_cnt, assume_dir)[0]
        except OSError as e:
            if e.errno == errno.ELOOP:
                # make ELOOP more readable; without catching it here, a
                # backtrace with hundreds of OSError exceptions would be
                # printed otherwise
                raise OSError(errno.ELOOP,
                              "too many recursions while resolving '%s'; loop in '%s'" %
                              (file, e.strerror))

            raise

        return file
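A short usage sketch (not part of the patch): results are cached per normalized path, so repeated type queries over the same tree avoid extra stat() system calls; updatecache() must be called if a path changes on disk after being queried:

from oe.cachedpath import CachedPath

cpath = CachedPath()
# The first query stats '/tmp'; later isdir()/islink()/exists() calls on the
# same path are answered from the cache.
if cpath.isdir('/tmp'):
    for top, dirs, files in cpath.walk('/tmp'):
        print(top, len(dirs), len(files))
        break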
diff --git a/meta/lib/oe/classextend.py b/meta/lib/oe/classextend.py
new file mode 100644
index 0000000..8da87b7
--- /dev/null
+++ b/meta/lib/oe/classextend.py
@@ -0,0 +1,118 @@
class ClassExtender(object):
    def __init__(self, extname, d):
        self.extname = extname
        self.d = d
        self.pkgs_mapping = []

    def extend_name(self, name):
        if name.startswith("kernel-") or name == "virtual/kernel":
            return name
        if name.startswith("rtld"):
            return name
        if name.endswith("-crosssdk"):
            return name
        if name.endswith("-" + self.extname):
            name = name.replace("-" + self.extname, "")
        if name.startswith("virtual/"):
            subs = name.split("/", 1)[1]
            if not subs.startswith(self.extname):
                return "virtual/" + self.extname + "-" + subs
            return name
        if not name.startswith(self.extname):
            return self.extname + "-" + name
        return name

    def map_variable(self, varname, setvar = True):
        var = self.d.getVar(varname, True)
        if not var:
            return ""
        var = var.split()
        newvar = []
        for v in var:
            newvar.append(self.extend_name(v))
        newdata = " ".join(newvar)
        if setvar:
            self.d.setVar(varname, newdata)
        return newdata

    def map_regexp_variable(self, varname, setvar = True):
        var = self.d.getVar(varname, True)
        if not var:
            return ""
        var = var.split()
        newvar = []
        for v in var:
            if v.startswith("^" + self.extname):
                newvar.append(v)
            elif v.startswith("^"):
                newvar.append("^" + self.extname + "-" + v[1:])
            else:
                newvar.append(self.extend_name(v))
        newdata = " ".join(newvar)
        if setvar:
            self.d.setVar(varname, newdata)
        return newdata

    def map_depends(self, dep):
        if dep.endswith(("-native", "-native-runtime")) or ('nativesdk-' in dep) or ('cross-canadian' in dep) or ('-crosssdk-' in dep):
            return dep
        else:
            # Do not extend dependencies that already have a multilib prefix
            var = self.d.getVar("MULTILIB_VARIANTS", True)
            if var:
                var = var.split()
                for v in var:
                    if dep.startswith(v):
                        return dep
            return self.extend_name(dep)

    def map_depends_variable(self, varname, suffix = ""):
        # We need to preserve EXTENDPKGV so it can be expanded correctly later
        if suffix:
            varname = varname + "_" + suffix
        orig = self.d.getVar("EXTENDPKGV", False)
        self.d.setVar("EXTENDPKGV", "EXTENDPKGV")
        deps = self.d.getVar(varname, True)
        if not deps:
            self.d.setVar("EXTENDPKGV", orig)
            return
        deps = bb.utils.explode_dep_versions2(deps)
        newdeps = {}
        for dep in deps:
            newdeps[self.map_depends(dep)] = deps[dep]

        self.d.setVar(varname, bb.utils.join_deps(newdeps, False).replace("EXTENDPKGV", "${EXTENDPKGV}"))
        self.d.setVar("EXTENDPKGV", orig)

    def map_packagevars(self):
        for pkg in (self.d.getVar("PACKAGES", True).split() + [""]):
            self.map_depends_variable("RDEPENDS", pkg)
            self.map_depends_variable("RRECOMMENDS", pkg)
            self.map_depends_variable("RSUGGESTS", pkg)
            self.map_depends_variable("RPROVIDES", pkg)
            self.map_depends_variable("RREPLACES", pkg)
            self.map_depends_variable("RCONFLICTS", pkg)
            self.map_depends_variable("PKG", pkg)

    def rename_packages(self):
        for pkg in (self.d.getVar("PACKAGES", True) or "").split():
            if pkg.startswith(self.extname):
                self.pkgs_mapping.append([pkg.split(self.extname + "-")[1], pkg])
                continue
            self.pkgs_mapping.append([pkg, self.extend_name(pkg)])

        self.d.setVar("PACKAGES", " ".join([row[1] for row in self.pkgs_mapping]))

    def rename_package_variables(self, variables):
        for pkg_mapping in self.pkgs_mapping:
            for subs in variables:
                self.d.renameVar("%s_%s" % (subs, pkg_mapping[0]), "%s_%s" % (subs, pkg_mapping[1]))

class NativesdkClassExtender(ClassExtender):
    def map_depends(self, dep):
        if dep.endswith(("-gcc-initial", "-gcc", "-g++")):
            return dep + "-crosssdk"
        elif dep.endswith(("-native", "-native-runtime")) or ('nativesdk-' in dep) or ('-cross-' in dep) or ('-crosssdk-' in dep):
            return dep
        else:
            return self.extend_name(dep)
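To see what extend_name() does in isolation, here is a sketch (not part of the patch) with a throwaway stand-in for the bitbake datastore; extend_name() itself never touches d, so a do-nothing stub is enough, and the 'lib32' variant name is illustrative:

from oe.classextend import ClassExtender

class FakeData(object):
    # Minimal stand-in for the bitbake datastore.
    def getVar(self, name, expand=True):
        return None

ext = ClassExtender('lib32', FakeData())
print(ext.extend_name('zlib'))            # -> lib32-zlib
print(ext.extend_name('virtual/libc'))    # -> virtual/lib32-libc
print(ext.extend_name('virtual/kernel'))  # -> virtual/kernel (left unchanged)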
diff --git a/meta/lib/oe/classutils.py b/meta/lib/oe/classutils.py
new file mode 100644
index 0000000..58188fd
--- /dev/null
+++ b/meta/lib/oe/classutils.py
@@ -0,0 +1,43 @@
class ClassRegistry(type):
    """Maintain a registry of classes, indexed by name.

Note that this implementation requires that the names be unique, as it uses
a dictionary to hold the classes by name.

The name in the registry can be overridden via the 'name' attribute of the
class, and the 'priority' attribute controls priority. The prioritized()
method returns the registered classes in priority order.

Subclasses of ClassRegistry may define an 'implemented' property to exert
control over whether the class will be added to the registry (e.g. to keep
abstract base classes out of the registry)."""
    priority = 0
    class __metaclass__(type):
        """Give each ClassRegistry their own registry"""
        def __init__(cls, name, bases, attrs):
            cls.registry = {}
            type.__init__(cls, name, bases, attrs)

    def __init__(cls, name, bases, attrs):
        super(ClassRegistry, cls).__init__(name, bases, attrs)
        try:
            if not cls.implemented:
                return
        except AttributeError:
            pass

        try:
            cls.name
        except AttributeError:
            cls.name = name
        cls.registry[cls.name] = cls

    @classmethod
    def prioritized(tcls):
        return sorted(tcls.registry.values(),
                      key=lambda v: v.priority, reverse=True)

    def unregister(cls):
        for key in cls.registry.keys():
            if cls.registry[key] is cls:
                del cls.registry[key]
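Typical use is the pattern oe/terminal.py relies on: make ClassRegistry (or a subclass of it) the metaclass of a base class, and every subclass then registers itself by name. A minimal Python 2 sketch (not part of the patch; the class names are illustrative):

from oe.classutils import ClassRegistry

class Backend(object):
    __metaclass__ = ClassRegistry   # Python 2 metaclass assignment
    priority = 0

class Fast(Backend):
    priority = 10

class Slow(Backend):
    priority = 1

# Highest priority first; prints ['Fast', 'Slow', 'Backend']
print([cls.name for cls in Backend.prioritized()])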
diff --git a/meta/lib/oe/data.py b/meta/lib/oe/data.py
new file mode 100644
index 0000000..4cc0e02
--- /dev/null
+++ b/meta/lib/oe/data.py
@@ -0,0 +1,17 @@
import oe.maketype

def typed_value(key, d):
    """Construct a value for the specified metadata variable, using its flags
    to determine the type and parameters for construction."""
    var_type = d.getVarFlag(key, 'type')
    flags = d.getVarFlags(key)
    if flags is not None:
        flags = dict((flag, d.expand(value))
                     for flag, value in flags.iteritems())
    else:
        flags = {}

    try:
        return oe.maketype.create(d.getVar(key, True) or '', var_type, **flags)
    except (TypeError, ValueError), exc:
        bb.msg.fatal("Data", "%s: %s" % (key, str(exc)))
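A sketch of how the flags drive construction (not part of the patch); this has to run inside bitbake (e.g. in a python task) where d is a real datastore, and the variable name is illustrative:

import oe.data

d.setVar('ENABLE_FOO', '1')
d.setVarFlag('ENABLE_FOO', 'type', 'boolean')
# The 'type' flag selects the constructor used by oe.maketype
assert oe.data.typed_value('ENABLE_FOO', d) is True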
diff --git a/meta/lib/oe/distro_check.py b/meta/lib/oe/distro_check.py
new file mode 100644
index 0000000..8ed5b0e
--- /dev/null
+++ b/meta/lib/oe/distro_check.py
@@ -0,0 +1,383 @@
def get_links_from_url(url):
    "Return all the href links found on the web location"

    import urllib, sgmllib

    class LinksParser(sgmllib.SGMLParser):
        def parse(self, s):
            "Parse the given string 's'."
            self.feed(s)
            self.close()

        def __init__(self, verbose=0):
            "Initialise an object passing 'verbose' to the superclass."
            sgmllib.SGMLParser.__init__(self, verbose)
            self.hyperlinks = []

        def start_a(self, attributes):
            "Process a hyperlink and its 'attributes'."
            for name, value in attributes:
                if name == "href":
                    self.hyperlinks.append(value.strip('/'))

        def get_hyperlinks(self):
            "Return the list of hyperlinks."
            return self.hyperlinks

    sock = urllib.urlopen(url)
    webpage = sock.read()
    sock.close()

    linksparser = LinksParser()
    linksparser.parse(webpage)
    return linksparser.get_hyperlinks()

def find_latest_numeric_release(url):
    "Find the latest listed numeric release on the given url"
    max=0
    maxstr=""
    for link in get_links_from_url(url):
        try:
            release = float(link)
        except:
            release = 0
        if release > max:
            max = release
            maxstr = link
    return maxstr

def is_src_rpm(name):
    "Check if the link is pointing to a src.rpm file"
    if name[-8:] == ".src.rpm":
        return True
    else:
        return False

def package_name_from_srpm(srpm):
    "Strip out the package name from the src.rpm filename"
    strings = srpm.split('-')
    package_name = strings[0]
    for i in range(1, len (strings) - 1):
        str = strings[i]
        if not str[0].isdigit():
            package_name += '-' + str
    return package_name

def clean_package_list(package_list):
    "Remove duplicate entries from the package list"
    set = {}
    map(set.__setitem__, package_list, [])
    return set.keys()


def get_latest_released_meego_source_package_list():
    "Returns a list of all the package names in the latest meego distro"

    package_names = []
    try:
        f = open("/tmp/Meego-1.1", "r")
        for line in f:
            package_names.append(line[:-1] + ":" + "main") # Also strip the '\n' at the end
    except IOError: pass
    package_list=clean_package_list(package_names)
    return "1.0", package_list

def get_source_package_list_from_url(url, section):
    "Return a sectioned list of package names from a URL list"

    bb.note("Reading %s: %s" % (url, section))
    links = get_links_from_url(url)
    srpms = filter(is_src_rpm, links)
    names_list = map(package_name_from_srpm, srpms)

    new_pkgs = []
    for pkgs in names_list:
        new_pkgs.append(pkgs + ":" + section)

    return new_pkgs

def get_latest_released_fedora_source_package_list():
    "Returns a list of all the package names in the latest fedora distro"
    latest = find_latest_numeric_release("http://archive.fedoraproject.org/pub/fedora/linux/releases/")

    package_names = get_source_package_list_from_url("http://archive.fedoraproject.org/pub/fedora/linux/releases/%s/Fedora/source/SRPMS/" % latest, "main")

#    package_names += get_source_package_list_from_url("http://download.fedora.redhat.com/pub/fedora/linux/releases/%s/Everything/source/SPRMS/" % latest, "everything")
    package_names += get_source_package_list_from_url("http://archive.fedoraproject.org/pub/fedora/linux/updates/%s/SRPMS/" % latest, "updates")

    package_list=clean_package_list(package_names)

    return latest, package_list

def get_latest_released_opensuse_source_package_list():
    "Returns a list of all the package names in the latest opensuse distro"
    latest = find_latest_numeric_release("http://download.opensuse.org/source/distribution/")

    package_names = get_source_package_list_from_url("http://download.opensuse.org/source/distribution/%s/repo/oss/suse/src/" % latest, "main")
    package_names += get_source_package_list_from_url("http://download.opensuse.org/update/%s/rpm/src/" % latest, "updates")

    package_list=clean_package_list(package_names)
    return latest, package_list

def get_latest_released_mandriva_source_package_list():
    "Returns a list of all the package names in the latest mandriva distro"
    latest = find_latest_numeric_release("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/")
    package_names = get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/main/release/" % latest, "main")
#    package_names += get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/contrib/release/" % latest, "contrib")
    package_names += get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/main/updates/" % latest, "updates")

    package_list=clean_package_list(package_names)
    return latest, package_list

def find_latest_debian_release(url):
    "Find the latest listed debian release on the given url"

    releases = []
    for link in get_links_from_url(url):
        if link[:6] == "Debian":
            if ';' not in link:
                releases.append(link)
    releases.sort()
    try:
        return releases.pop()[6:]
    except:
        return "_NotFound_"

def get_debian_style_source_package_list(url, section):
    "Return the list of package-names stored in the debian style Sources.gz file"
    import urllib
    sock = urllib.urlopen(url)
    import tempfile
    tmpfile = tempfile.NamedTemporaryFile(mode='wb', prefix='oecore.', suffix='.tmp', delete=False)
    tmpfilename=tmpfile.name
    tmpfile.write(sock.read())
    sock.close()
    tmpfile.close()
    import gzip
    bb.note("Reading %s: %s" % (url, section))

    f = gzip.open(tmpfilename)
    package_names = []
    for line in f:
        if line[:9] == "Package: ":
            package_names.append(line[9:-1] + ":" + section) # Also strip the '\n' at the end
    os.unlink(tmpfilename)

    return package_names

def get_latest_released_debian_source_package_list():
    "Returns a list of all the package names in the latest debian distro"
    latest = find_latest_debian_release("http://ftp.debian.org/debian/dists/")
    url = "http://ftp.debian.org/debian/dists/stable/main/source/Sources.gz"
    package_names = get_debian_style_source_package_list(url, "main")
#    url = "http://ftp.debian.org/debian/dists/stable/contrib/source/Sources.gz"
#    package_names += get_debian_style_source_package_list(url, "contrib")
    url = "http://ftp.debian.org/debian/dists/stable-proposed-updates/main/source/Sources.gz"
    package_names += get_debian_style_source_package_list(url, "updates")
    package_list=clean_package_list(package_names)
    return latest, package_list

def find_latest_ubuntu_release(url):
    "Find the latest listed ubuntu release on the given url"
    url += "?C=M;O=D" # Descending sort by last modified
    for link in get_links_from_url(url):
        if link[-8:] == "-updates":
            return link[:-8]
    return "_NotFound_"

def get_latest_released_ubuntu_source_package_list():
    "Returns a list of all the package names in the latest ubuntu distro"
    latest = find_latest_ubuntu_release("http://archive.ubuntu.com/ubuntu/dists/")
    url = "http://archive.ubuntu.com/ubuntu/dists/%s/main/source/Sources.gz" % latest
    package_names = get_debian_style_source_package_list(url, "main")
#    url = "http://archive.ubuntu.com/ubuntu/dists/%s/multiverse/source/Sources.gz" % latest
#    package_names += get_debian_style_source_package_list(url, "multiverse")
#    url = "http://archive.ubuntu.com/ubuntu/dists/%s/universe/source/Sources.gz" % latest
#    package_names += get_debian_style_source_package_list(url, "universe")
    url = "http://archive.ubuntu.com/ubuntu/dists/%s-updates/main/source/Sources.gz" % latest
    package_names += get_debian_style_source_package_list(url, "updates")
    package_list=clean_package_list(package_names)
    return latest, package_list

def create_distro_packages_list(distro_check_dir):
    pkglst_dir = os.path.join(distro_check_dir, "package_lists")
    if not os.path.isdir (pkglst_dir):
        os.makedirs(pkglst_dir)
    # first clear old stuff
    for file in os.listdir(pkglst_dir):
        os.unlink(os.path.join(pkglst_dir, file))

    per_distro_functions = [
                            ["Debian", get_latest_released_debian_source_package_list],
                            ["Ubuntu", get_latest_released_ubuntu_source_package_list],
                            ["Fedora", get_latest_released_fedora_source_package_list],
                            ["OpenSuSE", get_latest_released_opensuse_source_package_list],
                            ["Mandriva", get_latest_released_mandriva_source_package_list],
                            ["Meego", get_latest_released_meego_source_package_list]
                           ]

    from datetime import datetime
    begin = datetime.now()
    for distro in per_distro_functions:
        name = distro[0]
        release, package_list = distro[1]()
        bb.note("Distro: %s, Latest Release: %s, # src packages: %d" % (name, release, len(package_list)))
        package_list_file = os.path.join(pkglst_dir, name + "-" + release)
        f = open(package_list_file, "w+b")
        for pkg in package_list:
            f.write(pkg + "\n")
        f.close()
    end = datetime.now()
    delta = end - begin
    bb.note("package_list generation took this much time: %d seconds" % delta.seconds)

def update_distro_data(distro_check_dir, datetime):
    """
    If the distro package list data is old then rebuild it.
    The operation has to be protected by a lock so that
    only one thread performs it at a time.
    """
    if not os.path.isdir (distro_check_dir):
        try:
            bb.note ("Making new directory: %s" % distro_check_dir)
            os.makedirs (distro_check_dir)
        except OSError:
            raise Exception('Unable to create directory %s' % (distro_check_dir))


    datetime_file = os.path.join(distro_check_dir, "build_datetime")
    saved_datetime = "_invalid_"
    import fcntl
    try:
        if not os.path.exists(datetime_file):
            open(datetime_file, 'w+b').close() # touch the file so that the next open won't fail

        f = open(datetime_file, "r+b")
        fcntl.lockf(f, fcntl.LOCK_EX)
        saved_datetime = f.read()
        if saved_datetime[0:8] != datetime[0:8]:
            bb.note("The build datetime did not match: saved:%s current:%s" % (saved_datetime, datetime))
            bb.note("Regenerating distro package lists")
            create_distro_packages_list(distro_check_dir)
            f.seek(0)
            f.write(datetime)

    except OSError:
        raise Exception('Unable to read/write this file: %s' % (datetime_file))
    finally:
        fcntl.lockf(f, fcntl.LOCK_UN)
        f.close()

def compare_in_distro_packages_list(distro_check_dir, d):
    if not os.path.isdir(distro_check_dir):
        raise Exception("compare_in_distro_packages_list: invalid distro_check_dir passed")

    localdata = bb.data.createCopy(d)
    pkglst_dir = os.path.join(distro_check_dir, "package_lists")
    matching_distros = []
    pn = d.getVar('PN', True)
    recipe_name = d.getVar('PN', True)
    bb.note("Checking: %s" % pn)

    trim_dict = dict({"-native":"-native", "-cross":"-cross", "-initial":"-initial"})

    if pn.find("-native") != -1:
        pnstripped = pn.split("-native")
        localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
        bb.data.update_data(localdata)
        recipe_name = pnstripped[0]

    if pn.startswith("nativesdk-"):
        pnstripped = pn.split("nativesdk-")
        localdata.setVar('OVERRIDES', "pn-" + pnstripped[1] + ":" + d.getVar('OVERRIDES', True))
        bb.data.update_data(localdata)
        recipe_name = pnstripped[1]

    if pn.find("-cross") != -1:
        pnstripped = pn.split("-cross")
        localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
        bb.data.update_data(localdata)
        recipe_name = pnstripped[0]

    if pn.find("-initial") != -1:
        pnstripped = pn.split("-initial")
        localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
        bb.data.update_data(localdata)
        recipe_name = pnstripped[0]

    bb.note("Recipe: %s" % recipe_name)
    tmp = localdata.getVar('DISTRO_PN_ALIAS', True)

    distro_exceptions = dict({"OE-Core":'OE-Core', "OpenedHand":'OpenedHand', "Intel":'Intel', "Upstream":'Upstream', "Windriver":'Windriver', "OSPDT":'OSPDT Approved', "Poky":'poky'})

    if tmp:
        list = tmp.split(' ')
        for str in list:
            if str and str.find("=") == -1 and distro_exceptions[str]:
                matching_distros.append(str)

    distro_pn_aliases = {}
    if tmp:
        list = tmp.split(' ')
        for str in list:
            if str.find("=") != -1:
                (dist, pn_alias) = str.split('=')
                distro_pn_aliases[dist.strip().lower()] = pn_alias.strip()

    for file in os.listdir(pkglst_dir):
        (distro, distro_release) = file.split("-")
        f = open(os.path.join(pkglst_dir, file), "rb")
        for line in f:
            (pkg, section) = line.split(":")
            if distro.lower() in distro_pn_aliases:
                pn = distro_pn_aliases[distro.lower()]
            else:
                pn = recipe_name
            if pn == pkg:
                matching_distros.append(distro + "-" + section[:-1]) # strip the \n at the end
                f.close()
                break
        f.close()


    if tmp != None:
        list = tmp.split(' ')
        for item in list:
            matching_distros.append(item)
    bb.note("Matching: %s" % matching_distros)
    return matching_distros

def create_log_file(d, logname):
    import subprocess
    logpath = d.getVar('LOG_DIR', True)
    bb.utils.mkdirhier(logpath)
    logfn, logsuffix = os.path.splitext(logname)
    logfile = os.path.join(logpath, "%s.%s%s" % (logfn, d.getVar('DATETIME', True), logsuffix))
    if not os.path.exists(logfile):
        slogfile = os.path.join(logpath, logname)
        if os.path.exists(slogfile):
            os.remove(slogfile)
        subprocess.call("touch %s" % logfile, shell=True)
        os.symlink(logfile, slogfile)
        d.setVar('LOG_FILE', logfile)
    return logfile


def save_distro_check_result(result, datetime, result_file, d):
    pn = d.getVar('PN', True)
    logdir = d.getVar('LOG_DIR', True)
    if not logdir:
        bb.error("LOG_DIR variable is not defined, can't write the distro_check results")
        return
    if not os.path.isdir(logdir):
        os.makedirs(logdir)
    line = pn
    for i in result:
        line = line + "," + i
    f = open(result_file, "a")
    import fcntl
    fcntl.lockf(f, fcntl.LOCK_EX)
    f.seek(0, os.SEEK_END) # seek to the end of file
    f.write(line + "\n")
    fcntl.lockf(f, fcntl.LOCK_UN)
    f.close()
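As a quick sanity check of the srpm filename parsing above (not part of the patch; the filename is a made-up but typical name-version-release string):

from oe.distro_check import is_src_rpm, package_name_from_srpm

name = 'gcc-c++-4.9.1-1.fc21.src.rpm'   # hypothetical srpm filename
print(is_src_rpm(name))                 # True
print(package_name_from_srpm(name))     # 'gcc-c++'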
diff --git a/meta/lib/oe/image.py b/meta/lib/oe/image.py
new file mode 100644
index 0000000..7e080b0
--- /dev/null
+++ b/meta/lib/oe/image.py
@@ -0,0 +1,345 @@
1from oe.utils import execute_pre_post_process
2import os
3import subprocess
4import multiprocessing
5
6
7def generate_image(arg):
8 (type, subimages, create_img_cmd) = arg
9
10 bb.note("Running image creation script for %s: %s ..." %
11 (type, create_img_cmd))
12
13 try:
14 subprocess.check_output(create_img_cmd, stderr=subprocess.STDOUT)
15 except subprocess.CalledProcessError as e:
16 return("Error: The image creation script '%s' returned %d:\n%s" %
17 (e.cmd, e.returncode, e.output))
18
19 return None
20
21
22"""
23This class will help compute IMAGE_FSTYPE dependencies and group them in batches
24that can be executed in parallel.
25
26The next example is for illustration purposes, highly unlikely to happen in real life.
27It's just one of the test cases I used to test the algorithm:
28
29For:
30IMAGE_FSTYPES = "i1 i2 i3 i4 i5"
31IMAGE_TYPEDEP_i4 = "i2"
32IMAGE_TYPEDEP_i5 = "i6 i4"
33IMAGE_TYPEDEP_i6 = "i7"
34IMAGE_TYPEDEP_i7 = "i2"
35
36We get the following list of batches that can be executed in parallel, having the
37dependencies satisfied:
38
39[['i1', 'i3', 'i2'], ['i4', 'i7'], ['i6'], ['i5']]
40"""
41class ImageDepGraph(object):
42 def __init__(self, d):
43 self.d = d
44 self.graph = dict()
45 self.deps_array = dict()
46
47 def _construct_dep_graph(self, image_fstypes):
48 graph = dict()
49
50 def add_node(node):
51 deps = (self.d.getVar('IMAGE_TYPEDEP_' + node, True) or "")
52 if deps != "":
53 graph[node] = deps
54
55 for dep in deps.split():
56 if not dep in graph:
57 add_node(dep)
58 else:
59 graph[node] = ""
60
61 for fstype in image_fstypes:
62 add_node(fstype)
63
64 return graph
65
66 def _clean_graph(self):
67 # Live and VMDK images will be processed via inheriting
68 # bbclass and does not get processed here. Remove them from the fstypes
69 # graph. Their dependencies are already added, so no worries here.
70 remove_list = (self.d.getVar('IMAGE_TYPES_MASKED', True) or "").split()
71
72 for item in remove_list:
73 self.graph.pop(item, None)
74
75 def _compute_dependencies(self):
76 """
77 returns dict object of nodes with [no_of_depends_on, no_of_depended_by]
78 for each node
79 """
80 deps_array = dict()
81 for node in self.graph:
82 deps_array[node] = [0, 0]
83
84 for node in self.graph:
85 deps = self.graph[node].split()
86 deps_array[node][0] += len(deps)
87 for dep in deps:
88 deps_array[dep][1] += 1
89
90 return deps_array
91
92 def _sort_graph(self):
93 sorted_list = []
94 group = []
95 for node in self.graph:
96 if node not in self.deps_array:
97 continue
98
99 depends_on = self.deps_array[node][0]
100
101 if depends_on == 0:
102 group.append(node)
103
104 if len(group) == 0 and len(self.deps_array) != 0:
105 bb.fatal("possible fstype circular dependency...")
106
107 sorted_list.append(group)
108
109 # remove added nodes from deps_array
110 for item in group:
111 for node in self.graph:
112 if item in self.graph[node].split():
113 self.deps_array[node][0] -= 1
114
115 self.deps_array.pop(item, None)
116
117 if len(self.deps_array):
118 # recursive call, to find the next group
119 sorted_list += self._sort_graph()
120
121 return sorted_list
122
123 def group_fstypes(self, image_fstypes):
124 self.graph = self._construct_dep_graph(image_fstypes)
125
126 self._clean_graph()
127
128 self.deps_array = self._compute_dependencies()
129
130 alltypes = [node for node in self.graph]
131
132 return (alltypes, self._sort_graph())
133
134
135class Image(ImageDepGraph):
136 def __init__(self, d):
137 self.d = d
138
139 super(Image, self).__init__(d)
140
141 def _get_rootfs_size(self):
142 """compute the rootfs size"""
143 rootfs_alignment = int(self.d.getVar('IMAGE_ROOTFS_ALIGNMENT', True))
144 overhead_factor = float(self.d.getVar('IMAGE_OVERHEAD_FACTOR', True))
145 rootfs_req_size = int(self.d.getVar('IMAGE_ROOTFS_SIZE', True))
146 rootfs_extra_space = eval(self.d.getVar('IMAGE_ROOTFS_EXTRA_SPACE', True))
147 rootfs_maxsize = self.d.getVar('IMAGE_ROOTFS_MAXSIZE', True)
148
149 output = subprocess.check_output(['du', '-ks',
150 self.d.getVar('IMAGE_ROOTFS', True)])
151 size_kb = int(output.split()[0])
152 base_size = size_kb * overhead_factor
153 base_size = (base_size, rootfs_req_size)[base_size < rootfs_req_size] + \
154 rootfs_extra_space
155
156 if base_size != int(base_size):
157 base_size = int(base_size + 1)
158
159 base_size += rootfs_alignment - 1
160 base_size -= base_size % rootfs_alignment
161
162 # Check the rootfs size against IMAGE_ROOTFS_MAXSIZE (if set)
163 if rootfs_maxsize:
164 rootfs_maxsize_int = int(rootfs_maxsize)
165 if base_size > rootfs_maxsize_int:
166 bb.fatal("The rootfs size %d(K) overrides the max size %d(K)" % \
167 (base_size, rootfs_maxsize_int))
168
169 return base_size
170
171 def _create_symlinks(self, subimages):
172 """create symlinks to the newly created image"""
173 deploy_dir = self.d.getVar('DEPLOY_DIR_IMAGE', True)
174 img_name = self.d.getVar('IMAGE_NAME', True)
175 link_name = self.d.getVar('IMAGE_LINK_NAME', True)
176 manifest_name = self.d.getVar('IMAGE_MANIFEST', True)
177
178 os.chdir(deploy_dir)
179
180 if link_name is not None:
181 for type in subimages:
182 if os.path.exists(img_name + ".rootfs." + type):
183 dst = link_name + "." + type
184 src = img_name + ".rootfs." + type
185 bb.note("Creating symlink: %s -> %s" % (dst, src))
186 os.symlink(src, dst)
187
188 if manifest_name is not None and \
189 os.path.exists(manifest_name) and \
190 not os.path.exists(link_name + ".manifest"):
191 os.symlink(os.path.basename(manifest_name),
192 link_name + ".manifest")
193
194 def _remove_old_symlinks(self):
195 """remove the symlinks to old binaries"""
196
197 if self.d.getVar('IMAGE_LINK_NAME', True):
198 deploy_dir = self.d.getVar('DEPLOY_DIR_IMAGE', True)
199 for img in os.listdir(deploy_dir):
200 if img.find(self.d.getVar('IMAGE_LINK_NAME', True)) == 0:
201 img = os.path.join(deploy_dir, img)
202 if os.path.islink(img):
203 if self.d.getVar('RM_OLD_IMAGE', True) == "1" and \
204 os.path.exists(os.path.realpath(img)):
205 os.remove(os.path.realpath(img))
206
207 os.remove(img)
208
209 """
210 This function will just filter out the compressed image types from the
211 fstype groups returning a (filtered_fstype_groups, cimages) tuple.
212 """
213 def _filter_out_commpressed(self, fstype_groups):
214 ctypes = self.d.getVar('COMPRESSIONTYPES', True).split()
215 cimages = {}
216
217 filtered_groups = []
218 for group in fstype_groups:
219 filtered_group = []
220 for type in group:
221 basetype = None
222 for ctype in ctypes:
223 if type.endswith("." + ctype):
224 basetype = type[:-len("." + ctype)]
225 if basetype not in filtered_group:
226 filtered_group.append(basetype)
227 if basetype not in cimages:
228 cimages[basetype] = []
229 if ctype not in cimages[basetype]:
230 cimages[basetype].append(ctype)
231 break
232 if not basetype and type not in filtered_group:
233 filtered_group.append(type)
234
235 filtered_groups.append(filtered_group)
236
237 return (filtered_groups, cimages)
238
239 def _get_image_types(self):
240 """returns a (types, cimages) tuple"""
241
242 alltypes, fstype_groups = self.group_fstypes(self.d.getVar('IMAGE_FSTYPES', True).split())
243
244 filtered_groups, cimages = self._filter_out_commpressed(fstype_groups)
245
246 return (alltypes, filtered_groups, cimages)
247
248 def _write_script(self, type, cmds):
249 tempdir = self.d.getVar('T', True)
250 script_name = os.path.join(tempdir, "create_image." + type)
251
252 self.d.setVar('img_creation_func', '\n'.join(cmds))
253 self.d.setVarFlag('img_creation_func', 'func', 1)
254 self.d.setVarFlag('img_creation_func', 'fakeroot', 1)
255
256 with open(script_name, "w+") as script:
257 script.write("%s" % bb.build.shell_trap_code())
258 script.write("export ROOTFS_SIZE=%d\n" % self._get_rootfs_size())
259 bb.data.emit_func('img_creation_func', script, self.d)
260 script.write("img_creation_func\n")
261
262 os.chmod(script_name, 0775)
263
264 return script_name
265
266 def _get_imagecmds(self):
267 old_overrides = self.d.getVar('OVERRIDES', 0)
268
269 alltypes, fstype_groups, cimages = self._get_image_types()
270
271 image_cmd_groups = []
272
273 bb.note("The image creation groups are: %s" % str(fstype_groups))
274 for fstype_group in fstype_groups:
275 image_cmds = []
276 for type in fstype_group:
277 cmds = []
278 subimages = []
279
280 localdata = bb.data.createCopy(self.d)
281 localdata.setVar('OVERRIDES', '%s:%s' % (type, old_overrides))
282 bb.data.update_data(localdata)
283 localdata.setVar('type', type)
284
285 cmds.append("\t" + localdata.getVar("IMAGE_CMD", True))
286 cmds.append(localdata.expand("\tcd ${DEPLOY_DIR_IMAGE}"))
287
288 if type in cimages:
289 for ctype in cimages[type]:
290 cmds.append("\t" + localdata.getVar("COMPRESS_CMD_" + ctype, True))
291 subimages.append(type + "." + ctype)
292
293 if type not in alltypes:
294 cmds.append(localdata.expand("\trm ${IMAGE_NAME}.rootfs.${type}"))
295 else:
296 subimages.append(type)
297
298 script_name = self._write_script(type, cmds)
299
300 image_cmds.append((type, subimages, script_name))
301
302 image_cmd_groups.append(image_cmds)
303
304 return image_cmd_groups
305
306 def create(self):
307 bb.note("###### Generate images #######")
308 pre_process_cmds = self.d.getVar("IMAGE_PREPROCESS_COMMAND", True)
309 post_process_cmds = self.d.getVar("IMAGE_POSTPROCESS_COMMAND", True)
310
311 execute_pre_post_process(self.d, pre_process_cmds)
312
313 self._remove_old_symlinks()
314
315 image_cmd_groups = self._get_imagecmds()
316
317 for image_cmds in image_cmd_groups:
318 # create the images in parallel
319 nproc = multiprocessing.cpu_count()
320 pool = bb.utils.multiprocessingpool(nproc)
321 results = list(pool.imap(generate_image, image_cmds))
322 pool.close()
323 pool.join()
324
325 for result in results:
326 if result is not None:
327 bb.fatal(result)
328
329 for image_type, subimages, script in image_cmds:
330 bb.note("Creating symlinks for %s image ..." % image_type)
331 self._create_symlinks(subimages)
332
333 execute_pre_post_process(self.d, post_process_cmds)
334
335
336def create_image(d):
337 Image(d).create()
338
339if __name__ == "__main__":
340    """
341    Image creation can be invoked independently of the bitbake environment.
342    TBD
343    """
diff --git a/meta/lib/oe/license.py b/meta/lib/oe/license.py
new file mode 100644
index 0000000..340da61
--- /dev/null
+++ b/meta/lib/oe/license.py
@@ -0,0 +1,116 @@
1# vi:sts=4:sw=4:et
2"""Code for parsing OpenEmbedded license strings"""
3
4import ast
5import re
6from fnmatch import fnmatchcase as fnmatch
7
8class LicenseError(Exception):
9 pass
10
11class LicenseSyntaxError(LicenseError):
12 def __init__(self, licensestr, exc):
13 self.licensestr = licensestr
14 self.exc = exc
15 LicenseError.__init__(self)
16
17 def __str__(self):
18 return "error in '%s': %s" % (self.licensestr, self.exc)
19
20class InvalidLicense(LicenseError):
21 def __init__(self, license):
22 self.license = license
23 LicenseError.__init__(self)
24
25 def __str__(self):
26 return "invalid characters in license '%s'" % self.license
27
28license_operator = re.compile('([&|() ])')
29license_pattern = re.compile('[a-zA-Z0-9.+_\-]+$')
30
31class LicenseVisitor(ast.NodeVisitor):
32 """Syntax tree visitor which can accept OpenEmbedded license strings"""
33 def visit_string(self, licensestr):
34 new_elements = []
35 elements = filter(lambda x: x.strip(), license_operator.split(licensestr))
36 for pos, element in enumerate(elements):
37 if license_pattern.match(element):
38 if pos > 0 and license_pattern.match(elements[pos-1]):
39 new_elements.append('&')
40 element = '"' + element + '"'
41 elif not license_operator.match(element):
42 raise InvalidLicense(element)
43 new_elements.append(element)
44
45 self.visit(ast.parse(' '.join(new_elements)))
46
47class FlattenVisitor(LicenseVisitor):
48 """Flatten a license tree (parsed from a string) by selecting one of each
49 set of OR options, in the way the user specifies"""
50 def __init__(self, choose_licenses):
51 self.choose_licenses = choose_licenses
52 self.licenses = []
53 LicenseVisitor.__init__(self)
54
55 def visit_Str(self, node):
56 self.licenses.append(node.s)
57
58 def visit_BinOp(self, node):
59 if isinstance(node.op, ast.BitOr):
60 left = FlattenVisitor(self.choose_licenses)
61 left.visit(node.left)
62
63 right = FlattenVisitor(self.choose_licenses)
64 right.visit(node.right)
65
66 selected = self.choose_licenses(left.licenses, right.licenses)
67 self.licenses.extend(selected)
68 else:
69 self.generic_visit(node)
70
71def flattened_licenses(licensestr, choose_licenses):
72 """Given a license string and choose_licenses function, return a flat list of licenses"""
73 flatten = FlattenVisitor(choose_licenses)
74 try:
75 flatten.visit_string(licensestr)
76 except SyntaxError as exc:
77 raise LicenseSyntaxError(licensestr, exc)
78 return flatten.licenses
79
80def is_included(licensestr, whitelist=None, blacklist=None):
81 """Given a license string and whitelist and blacklist, determine if the
82 license string matches the whitelist and does not match the blacklist.
83
84 Returns a tuple holding the boolean state and a list of the applicable
85 licenses which were excluded (or None, if the state is True)
86 """
87
88 def include_license(license):
89 return any(fnmatch(license, pattern) for pattern in whitelist)
90
91 def exclude_license(license):
92 return any(fnmatch(license, pattern) for pattern in blacklist)
93
94 def choose_licenses(alpha, beta):
95 """Select the option in an OR which is the 'best' (has the most
96 included licenses)."""
97 alpha_weight = len(filter(include_license, alpha))
98 beta_weight = len(filter(include_license, beta))
99 if alpha_weight > beta_weight:
100 return alpha
101 else:
102 return beta
103
104 if not whitelist:
105 whitelist = ['*']
106
107 if not blacklist:
108 blacklist = []
109
110 licenses = flattened_licenses(licensestr, choose_licenses)
111 excluded = filter(lambda lic: exclude_license(lic), licenses)
112 included = filter(lambda lic: include_license(lic), licenses)
113 if excluded:
114 return False, excluded
115 else:
116 return True, included
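# Usage sketch (within a BitBake environment where oe.license is
# importable). In a license string '|' offers a choice; '&' or simple
# juxtaposition requires both:
#
#   ok, lics = is_included("GPLv2 & (MPL-1.1 | BSD)",
#                          whitelist=['GPLv2', 'BSD'])
#   # ok -> True, lics -> ['GPLv2', 'BSD']
#   # (BSD wins the OR: it has more whitelisted licenses than MPL-1.1)
#
#   ok, lics = is_included("GPLv3 & BSD", blacklist=['GPLv3'])
#   # ok -> False, lics -> ['GPLv3']   (the excluded licenses)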
diff --git a/meta/lib/oe/lsb.py b/meta/lib/oe/lsb.py
new file mode 100644
index 0000000..b53f361
--- /dev/null
+++ b/meta/lib/oe/lsb.py
@@ -0,0 +1,81 @@
1import os
2import bb
3
4def release_dict():
2 """Return the output of lsb_release -ir as a dictionary"""
3 from subprocess import PIPE
4
5 try:
6 output, err = bb.process.run(['lsb_release', '-ir'], stderr=PIPE)
7 except bb.process.CmdError as exc:
8 return None
9
10 data = {}
11 for line in output.splitlines():
12 try:
13 key, value = line.split(":\t", 1)
14 except ValueError:
15 continue
16 else:
17 data[key] = value
18 return data
19
20def release_dict_file():
21    """Try to gather LSB release information manually when the lsb_release tool is unavailable"""
22 data = None
23 try:
24 if os.path.exists('/etc/lsb-release'):
25 data = {}
26 with open('/etc/lsb-release') as f:
27 for line in f:
28 key, value = line.split("=", 1)
29 data[key] = value.strip()
30 elif os.path.exists('/etc/redhat-release'):
31 data = {}
32 with open('/etc/redhat-release') as f:
33 distro = f.readline().strip()
34 import re
35 match = re.match(r'(.*) release (.*) \((.*)\)', distro)
36 if match:
37 data['DISTRIB_ID'] = match.group(1)
38 data['DISTRIB_RELEASE'] = match.group(2)
39 elif os.path.exists('/etc/SuSE-release'):
40 data = {}
41 data['DISTRIB_ID'] = 'SUSE LINUX'
42 with open('/etc/SuSE-release') as f:
43 for line in f:
44 if line.startswith('VERSION = '):
45 data['DISTRIB_RELEASE'] = line[10:].rstrip()
46 break
47 elif os.path.exists('/etc/os-release'):
48 data = {}
49 with open('/etc/os-release') as f:
50 for line in f:
51 if line.startswith('NAME='):
52 data['DISTRIB_ID'] = line[5:].rstrip().strip('"')
53 if line.startswith('VERSION_ID='):
54 data['DISTRIB_RELEASE'] = line[11:].rstrip().strip('"')
55 except IOError:
56 return None
57 return data
58
59def distro_identifier(adjust_hook=None):
60 """Return a distro identifier string based upon lsb_release -ri,
61 with optional adjustment via a hook"""
62
63 lsb_data = release_dict()
64 if lsb_data:
65 distro_id, release = lsb_data['Distributor ID'], lsb_data['Release']
66 else:
67 lsb_data_file = release_dict_file()
68 if lsb_data_file:
69 distro_id, release = lsb_data_file['DISTRIB_ID'], lsb_data_file.get('DISTRIB_RELEASE', None)
70 else:
71 distro_id, release = None, None
72
73 if adjust_hook:
74 distro_id, release = adjust_hook(distro_id, release)
75 if not distro_id:
76 return "Unknown"
77 if release:
78 id_str = '{0}-{1}'.format(distro_id, release)
79 else:
80 id_str = distro_id
81 return id_str.replace(' ','-').replace('/','-')
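# Usage sketch; the outputs are illustrative and depend on the build host:
#
#   distro_identifier()              # e.g. 'Ubuntu-14.04', or 'Unknown'
#
#   # an adjust_hook can normalise the detected values:
#   def drop_minor(distro_id, release):
#       return distro_id, release.split('.')[0] if release else release
#
#   distro_identifier(drop_minor)    # e.g. 'Ubuntu-14'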
diff --git a/meta/lib/oe/maketype.py b/meta/lib/oe/maketype.py
new file mode 100644
index 0000000..139f333
--- /dev/null
+++ b/meta/lib/oe/maketype.py
@@ -0,0 +1,99 @@
1"""OpenEmbedded variable typing support
2
3Types are defined in the metadata by name, using the 'type' flag on a
4variable. Other flags may be utilized in the construction of the types. See
5the arguments of the type's factory for details.
6"""
7
8import inspect
9import types
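# note: under Python 2's implicit relative imports, this picks up
# oe/types.py (the OE type factories registered at the bottom of this
# file), not the stdlib 'types' module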
10
11available_types = {}
12
13class MissingFlag(TypeError):
14 """A particular flag is required to construct the type, but has not been
15 provided."""
16 def __init__(self, flag, type):
17 self.flag = flag
18 self.type = type
19 TypeError.__init__(self)
20
21 def __str__(self):
22 return "Type '%s' requires flag '%s'" % (self.type, self.flag)
23
24def factory(var_type):
25 """Return the factory for a specified type."""
26 if var_type is None:
27 raise TypeError("No type specified. Valid types: %s" %
28 ', '.join(available_types))
29 try:
30 return available_types[var_type]
31 except KeyError:
32 raise TypeError("Invalid type '%s':\n Valid types: %s" %
33 (var_type, ', '.join(available_types)))
34
35def create(value, var_type, **flags):
36 """Create an object of the specified type, given the specified flags and
37 string value."""
38 obj = factory(var_type)
39 objflags = {}
40 for flag in obj.flags:
41 if flag not in flags:
42 if flag not in obj.optflags:
43 raise MissingFlag(flag, var_type)
44 else:
45 objflags[flag] = flags[flag]
46
47 return obj(value, **objflags)
48
49def get_callable_args(obj):
50 """Grab all but the first argument of the specified callable, returning
51 the list, as well as a list of which of the arguments have default
52 values."""
53 if type(obj) is type:
54 obj = obj.__init__
55
56 args, varargs, keywords, defaults = inspect.getargspec(obj)
57 flaglist = []
58 if args:
59 if len(args) > 1 and args[0] == 'self':
60 args = args[1:]
61 flaglist.extend(args)
62
63 optional = set()
64 if defaults:
65 optional |= set(flaglist[-len(defaults):])
66 return flaglist, optional
67
68def factory_setup(name, obj):
69 """Prepare a factory for use."""
70 args, optional = get_callable_args(obj)
71 extra_args = args[1:]
72 if extra_args:
73        obj.flags = extra_args
74 obj.optflags = set(optional)
75 else:
76 obj.flags = obj.optflags = ()
77
78 if not hasattr(obj, 'name'):
79 obj.name = name
80
81def register(name, factory):
82 """Register a type, given its name and a factory callable.
83
84 Determines the required and optional flags from the factory's
85 arguments."""
86 factory_setup(name, factory)
87 available_types[factory.name] = factory
88
89
90# Register all our included types
91for name in dir(types):
92 if name.startswith('_'):
93 continue
94
95 obj = getattr(types, name)
96 if not callable(obj):
97 continue
98
99 register(name, obj)
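# Usage sketch: registering a custom factory and creating typed values.
# 'hexint' is a made-up illustrative type, not one shipped in oe/types.py:
#
#   def hexint(value, width='8'):
#       """Parse a hex string; the optional 'width' flag masks the result."""
#       return int(value, 16) & ((1 << int(width) * 4) - 1)
#
#   register('hexint', hexint)
#   create('ff', 'hexint')                # -> 255
#   create('1ff', 'hexint', width='2')    # -> 255 (masked to 2 hex digits)
#
# In the metadata this corresponds to setting the 'type' flag on a
# variable, plus one flag per factory argument (here: 'width').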
diff --git a/meta/lib/oe/manifest.py b/meta/lib/oe/manifest.py
new file mode 100644
index 0000000..42832f1
--- /dev/null
+++ b/meta/lib/oe/manifest.py
@@ -0,0 +1,345 @@
1from abc import ABCMeta, abstractmethod
2import os
3import re
4import bb
5
6
7class Manifest(object):
8 """
9 This is an abstract class. Do not instantiate this directly.
10 """
11 __metaclass__ = ABCMeta
12
13 PKG_TYPE_MUST_INSTALL = "mip"
14 PKG_TYPE_MULTILIB = "mlp"
15 PKG_TYPE_LANGUAGE = "lgp"
16 PKG_TYPE_ATTEMPT_ONLY = "aop"
17
18 MANIFEST_TYPE_IMAGE = "image"
19 MANIFEST_TYPE_SDK_HOST = "sdk_host"
20 MANIFEST_TYPE_SDK_TARGET = "sdk_target"
21
22 var_maps = {
23 MANIFEST_TYPE_IMAGE: {
24 "PACKAGE_INSTALL": PKG_TYPE_MUST_INSTALL,
25 "PACKAGE_INSTALL_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY,
26 "LINGUAS_INSTALL": PKG_TYPE_LANGUAGE
27 },
28 MANIFEST_TYPE_SDK_HOST: {
29 "TOOLCHAIN_HOST_TASK": PKG_TYPE_MUST_INSTALL,
30 "TOOLCHAIN_HOST_TASK_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY
31 },
32 MANIFEST_TYPE_SDK_TARGET: {
33 "TOOLCHAIN_TARGET_TASK": PKG_TYPE_MUST_INSTALL,
34 "TOOLCHAIN_TARGET_TASK_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY
35 }
36 }
37
38 INSTALL_ORDER = [
39 PKG_TYPE_LANGUAGE,
40 PKG_TYPE_MUST_INSTALL,
41 PKG_TYPE_ATTEMPT_ONLY,
42 PKG_TYPE_MULTILIB
43 ]
44
45 initial_manifest_file_header = \
46 "# This file was generated automatically and contains the packages\n" \
47 "# passed on to the package manager in order to create the rootfs.\n\n" \
48 "# Format:\n" \
49 "# <package_type>,<package_name>\n" \
50 "# where:\n" \
51 "# <package_type> can be:\n" \
52 "# 'mip' = must install package\n" \
53 "# 'aop' = attempt only package\n" \
54 "# 'mlp' = multilib package\n" \
55 "# 'lgp' = language package\n\n"
56
57 def __init__(self, d, manifest_dir=None, manifest_type=MANIFEST_TYPE_IMAGE):
58 self.d = d
59 self.manifest_type = manifest_type
60
61 if manifest_dir is None:
62 if manifest_type != self.MANIFEST_TYPE_IMAGE:
63 self.manifest_dir = self.d.getVar('SDK_DIR', True)
64 else:
65 self.manifest_dir = self.d.getVar('WORKDIR', True)
66 else:
67 self.manifest_dir = manifest_dir
68
69 bb.utils.mkdirhier(self.manifest_dir)
70
71 self.initial_manifest = os.path.join(self.manifest_dir, "%s_initial_manifest" % manifest_type)
72 self.final_manifest = os.path.join(self.manifest_dir, "%s_final_manifest" % manifest_type)
73 self.full_manifest = os.path.join(self.manifest_dir, "%s_full_manifest" % manifest_type)
74
75 # packages in the following vars will be split in 'must install' and
76 # 'multilib'
77 self.vars_to_split = ["PACKAGE_INSTALL",
78 "TOOLCHAIN_HOST_TASK",
79 "TOOLCHAIN_TARGET_TASK"]
80
81 """
82 This creates a standard initial manifest for core-image-(minimal|sato|sato-sdk).
83 This will be used for testing until the class is implemented properly!
84 """
85 def _create_dummy_initial(self):
86 image_rootfs = self.d.getVar('IMAGE_ROOTFS', True)
87 pkg_list = dict()
88 if image_rootfs.find("core-image-sato-sdk") > 0:
89 pkg_list[self.PKG_TYPE_MUST_INSTALL] = \
90 "packagegroup-core-x11-sato-games packagegroup-base-extended " \
91 "packagegroup-core-x11-sato packagegroup-core-x11-base " \
92 "packagegroup-core-sdk packagegroup-core-tools-debug " \
93 "packagegroup-core-boot packagegroup-core-tools-testapps " \
94 "packagegroup-core-eclipse-debug packagegroup-core-qt-demoapps " \
95 "apt packagegroup-core-tools-profile psplash " \
96 "packagegroup-core-standalone-sdk-target " \
97 "packagegroup-core-ssh-openssh dpkg kernel-dev"
98 pkg_list[self.PKG_TYPE_LANGUAGE] = \
99 "locale-base-en-us locale-base-en-gb"
100 elif image_rootfs.find("core-image-sato") > 0:
101 pkg_list[self.PKG_TYPE_MUST_INSTALL] = \
102 "packagegroup-core-ssh-dropbear packagegroup-core-x11-sato-games " \
103 "packagegroup-core-x11-base psplash apt dpkg packagegroup-base-extended " \
104 "packagegroup-core-x11-sato packagegroup-core-boot"
105            pkg_list[self.PKG_TYPE_LANGUAGE] = \
106 "locale-base-en-us locale-base-en-gb"
107 elif image_rootfs.find("core-image-minimal") > 0:
108 pkg_list[self.PKG_TYPE_MUST_INSTALL] = "run-postinsts packagegroup-core-boot"
109
110 with open(self.initial_manifest, "w+") as manifest:
111 manifest.write(self.initial_manifest_file_header)
112
113 for pkg_type in pkg_list:
114 for pkg in pkg_list[pkg_type].split():
115 manifest.write("%s,%s\n" % (pkg_type, pkg))
116
117    """
118    This creates the initial manifest, which is used by the Rootfs class
119    to generate the rootfs.
120    """
121 @abstractmethod
122 def create_initial(self):
123 pass
124
125 """
126 This creates the manifest after everything has been installed.
127 """
128 @abstractmethod
129 def create_final(self):
130 pass
131
132    """
133    This creates the manifest after the packages in the initial manifest
134    have been dummy-installed. It lists all *to be installed* packages.
135    There is no real installation, just a test.
136    """
137 @abstractmethod
138 def create_full(self, pm):
139 pass
140
141 """
142 The following function parses an initial manifest and returns a dictionary
143 object with the must install, attempt only, multilib and language packages.
144 """
145 def parse_initial_manifest(self):
146 pkgs = dict()
147
148 with open(self.initial_manifest) as manifest:
149 for line in manifest.read().split('\n'):
150 comment = re.match("^#.*", line)
151 pattern = "^(%s|%s|%s|%s),(.*)$" % \
152 (self.PKG_TYPE_MUST_INSTALL,
153 self.PKG_TYPE_ATTEMPT_ONLY,
154 self.PKG_TYPE_MULTILIB,
155 self.PKG_TYPE_LANGUAGE)
156 pkg = re.match(pattern, line)
157
158 if comment is not None:
159 continue
160
161 if pkg is not None:
162 pkg_type = pkg.group(1)
163 pkg_name = pkg.group(2)
164
165 if not pkg_type in pkgs:
166 pkgs[pkg_type] = [pkg_name]
167 else:
168 pkgs[pkg_type].append(pkg_name)
169
170 return pkgs
171
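    # Worked example: an initial manifest containing
    #
    #   # comments are skipped
    #   mip,packagegroup-core-boot
    #   mip,run-postinsts
    #   lgp,locale-base-en-us
    #
    # is parsed by parse_initial_manifest() into
    #
    #   {'mip': ['packagegroup-core-boot', 'run-postinsts'],
    #    'lgp': ['locale-base-en-us']}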
172    '''
173    The following function parses a full manifest and returns a list of
174    the packages it contains.
175    '''
176 def parse_full_manifest(self):
177 installed_pkgs = list()
178 if not os.path.exists(self.full_manifest):
179            bb.note('full manifest does not exist')
180 return installed_pkgs
181
182 with open(self.full_manifest, 'r') as manifest:
183 for pkg in manifest.read().split('\n'):
184 installed_pkgs.append(pkg.strip())
185
186 return installed_pkgs
187
188
189class RpmManifest(Manifest):
190 """
191 Returns a dictionary object with mip and mlp packages.
192 """
193 def _split_multilib(self, pkg_list):
194 pkgs = dict()
195
196 for pkg in pkg_list.split():
197 pkg_type = self.PKG_TYPE_MUST_INSTALL
198
199 ml_variants = self.d.getVar('MULTILIB_VARIANTS', True).split()
200
201 for ml_variant in ml_variants:
202 if pkg.startswith(ml_variant + '-'):
203 pkg_type = self.PKG_TYPE_MULTILIB
204
205 if not pkg_type in pkgs:
206 pkgs[pkg_type] = pkg
207 else:
208 pkgs[pkg_type] += " " + pkg
209
210 return pkgs
211
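    # Worked example, assuming MULTILIB_VARIANTS = "lib32" (illustrative):
    #
    #   _split_multilib("bash lib32-glibc lib32-bash")
    #       -> {'mip': 'bash', 'mlp': 'lib32-glibc lib32-bash'}
    #
    # packages carrying a multilib variant prefix go to the 'mlp'
    # (multilib) bucket, everything else to 'mip' (must install)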
212 def create_initial(self):
213 pkgs = dict()
214
215 with open(self.initial_manifest, "w+") as manifest:
216 manifest.write(self.initial_manifest_file_header)
217
218 for var in self.var_maps[self.manifest_type]:
219 if var in self.vars_to_split:
220 split_pkgs = self._split_multilib(self.d.getVar(var, True))
221 if split_pkgs is not None:
222 pkgs = dict(pkgs.items() + split_pkgs.items())
223 else:
224 pkg_list = self.d.getVar(var, True)
225 if pkg_list is not None:
226 pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var, True)
227
228 for pkg_type in pkgs:
229 for pkg in pkgs[pkg_type].split():
230 manifest.write("%s,%s\n" % (pkg_type, pkg))
231
232 def create_final(self):
233 pass
234
235 def create_full(self, pm):
236 pass
237
238
239class OpkgManifest(Manifest):
240 """
241 Returns a dictionary object with mip and mlp packages.
242 """
243 def _split_multilib(self, pkg_list):
244 pkgs = dict()
245
246 for pkg in pkg_list.split():
247 pkg_type = self.PKG_TYPE_MUST_INSTALL
248
249 ml_variants = self.d.getVar('MULTILIB_VARIANTS', True).split()
250
251 for ml_variant in ml_variants:
252 if pkg.startswith(ml_variant + '-'):
253 pkg_type = self.PKG_TYPE_MULTILIB
254
255 if not pkg_type in pkgs:
256 pkgs[pkg_type] = pkg
257 else:
258 pkgs[pkg_type] += " " + pkg
259
260 return pkgs
261
262 def create_initial(self):
263 pkgs = dict()
264
265 with open(self.initial_manifest, "w+") as manifest:
266 manifest.write(self.initial_manifest_file_header)
267
268 for var in self.var_maps[self.manifest_type]:
269 if var in self.vars_to_split:
270 split_pkgs = self._split_multilib(self.d.getVar(var, True))
271 if split_pkgs is not None:
272 pkgs = dict(pkgs.items() + split_pkgs.items())
273 else:
274 pkg_list = self.d.getVar(var, True)
275 if pkg_list is not None:
276 pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var, True)
277
278 for pkg_type in pkgs:
279 for pkg in pkgs[pkg_type].split():
280 manifest.write("%s,%s\n" % (pkg_type, pkg))
281
282 def create_final(self):
283 pass
284
285 def create_full(self, pm):
286 if not os.path.exists(self.initial_manifest):
287 self.create_initial()
288
289 initial_manifest = self.parse_initial_manifest()
290 pkgs_to_install = list()
291 for pkg_type in initial_manifest:
292 pkgs_to_install += initial_manifest[pkg_type]
293 if len(pkgs_to_install) == 0:
294 return
295
296 output = pm.dummy_install(pkgs_to_install)
297
298 with open(self.full_manifest, 'w+') as manifest:
299 pkg_re = re.compile('^Installing ([^ ]+) [^ ].*')
300 for line in set(output.split('\n')):
301 m = pkg_re.match(line)
302 if m:
303 manifest.write(m.group(1) + '\n')
304
305 return
306
307
308class DpkgManifest(Manifest):
309 def create_initial(self):
310 with open(self.initial_manifest, "w+") as manifest:
311 manifest.write(self.initial_manifest_file_header)
312
313 for var in self.var_maps[self.manifest_type]:
314 pkg_list = self.d.getVar(var, True)
315
316 if pkg_list is None:
317 continue
318
319 for pkg in pkg_list.split():
320 manifest.write("%s,%s\n" %
321 (self.var_maps[self.manifest_type][var], pkg))
322
323 def create_final(self):
324 pass
325
326 def create_full(self, pm):
327 pass
328
329
330def create_manifest(d, final_manifest=False, manifest_dir=None,
331 manifest_type=Manifest.MANIFEST_TYPE_IMAGE):
332 manifest_map = {'rpm': RpmManifest,
333 'ipk': OpkgManifest,
334 'deb': DpkgManifest}
335
336 manifest = manifest_map[d.getVar('IMAGE_PKGTYPE', True)](d, manifest_dir, manifest_type)
337
338 if final_manifest:
339 manifest.create_final()
340 else:
341 manifest.create_initial()
342
343
344if __name__ == "__main__":
345 pass
diff --git a/meta/lib/oe/package.py b/meta/lib/oe/package.py
new file mode 100644
index 0000000..f8b5322
--- /dev/null
+++ b/meta/lib/oe/package.py
@@ -0,0 +1,99 @@
1def runstrip(arg):
2 # Function to strip a single file, called from split_and_strip_files below
3 # A working 'file' (one which works on the target architecture)
4 #
5 # The elftype is a bit pattern (explained in split_and_strip_files) to tell
6 # us what type of file we're processing...
7 # 4 - executable
8 # 8 - shared library
9 # 16 - kernel module
10
11    import os, stat, subprocess
12
13 (file, elftype, strip) = arg
14
15 newmode = None
16    if not os.access(file, os.W_OK) or not os.access(file, os.R_OK):
17 origmode = os.stat(file)[stat.ST_MODE]
18 newmode = origmode | stat.S_IWRITE | stat.S_IREAD
19 os.chmod(file, newmode)
20
21 extraflags = ""
22
23 # kernel module
24 if elftype & 16:
25 extraflags = "--strip-debug --remove-section=.comment --remove-section=.note --preserve-dates"
26 # .so and shared library
27 elif ".so" in file and elftype & 8:
28 extraflags = "--remove-section=.comment --remove-section=.note --strip-unneeded"
29 # shared or executable:
30 elif elftype & 8 or elftype & 4:
31 extraflags = "--remove-section=.comment --remove-section=.note"
32
33 stripcmd = "'%s' %s '%s'" % (strip, extraflags, file)
34 bb.debug(1, "runstrip: %s" % stripcmd)
35
36 ret = subprocess.call(stripcmd, shell=True)
37
38 if newmode:
39 os.chmod(file, origmode)
40
41 if ret:
42 bb.error("runstrip: '%s' strip command failed" % stripcmd)
43
44 return
45
46
47def file_translate(file):
48 ft = file.replace("@", "@at@")
49 ft = ft.replace(" ", "@space@")
50 ft = ft.replace("\t", "@tab@")
51 ft = ft.replace("[", "@openbrace@")
52 ft = ft.replace("]", "@closebrace@")
53 ft = ft.replace("_", "@underscore@")
54 return ft
55
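# Worked example (the '@' escape runs first, so the inserted '@...@'
# tokens survive the later replacements):
#
#   file_translate("my pkg_[v1].so")
#       -> 'my@space@pkg@underscore@@openbrace@v1@closebrace@.so'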
56def filedeprunner(arg):
57 import re, subprocess, shlex
58
59 (pkg, pkgfiles, rpmdeps, pkgdest) = arg
60 provides = {}
61 requires = {}
62
63 r = re.compile(r'[<>=]+ +[^ ]*')
64
65 def process_deps(pipe, pkg, pkgdest, provides, requires):
66 for line in pipe:
67 f = line.split(" ", 1)[0].strip()
68 line = line.split(" ", 1)[1].strip()
69
70 if line.startswith("Requires:"):
71 i = requires
72 elif line.startswith("Provides:"):
73 i = provides
74 else:
75 continue
76
77 file = f.replace(pkgdest + "/" + pkg, "")
78 file = file_translate(file)
79 value = line.split(":", 1)[1].strip()
80 value = r.sub(r'(\g<0>)', value)
81
82 if value.startswith("rpmlib("):
83 continue
84 if value == "python":
85 continue
86 if file not in i:
87 i[file] = []
88 i[file].append(value)
89
90 return provides, requires
91
92 try:
93 dep_popen = subprocess.Popen(shlex.split(rpmdeps) + pkgfiles, stdout=subprocess.PIPE)
94 provides, requires = process_deps(dep_popen.stdout, pkg, pkgdest, provides, requires)
95 except OSError as e:
96 bb.error("rpmdeps: '%s' command failed, '%s'" % (shlex.split(rpmdeps) + pkgfiles, e))
97 raise e
98
99 return (pkg, provides, requires)
diff --git a/meta/lib/oe/package_manager.py b/meta/lib/oe/package_manager.py
new file mode 100644
index 0000000..5055095
--- /dev/null
+++ b/meta/lib/oe/package_manager.py
@@ -0,0 +1,1797 @@
1from abc import ABCMeta, abstractmethod
2import os
3import glob
4import subprocess
5import shutil
6import multiprocessing
7import re
8import bb
9import tempfile
10import oe.utils
11
12
13# this can be used by all PM backends to create the index files in parallel
14def create_index(arg):
15 index_cmd = arg
16
17 try:
18 bb.note("Executing '%s' ..." % index_cmd)
19 result = subprocess.check_output(index_cmd, stderr=subprocess.STDOUT, shell=True)
20 except subprocess.CalledProcessError as e:
21 return("Index creation command '%s' failed with return code %d:\n%s" %
22 (e.cmd, e.returncode, e.output))
23
24 if result:
25 bb.note(result)
26
27 return None
28
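# Usage sketch: the backends below build one index command per feed
# directory and fan them out with oe.utils.multiprocess_exec, e.g.
# (the path is illustrative):
#
#   index_cmds = ['createrepo --update -q /path/to/deploy/rpm/core2_64']
#   results = oe.utils.multiprocess_exec(index_cmds, create_index)
#   if results:   # a non-empty list means at least one command failed
#       bb.fatal('\n'.join(results))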
29
30class Indexer(object):
31 __metaclass__ = ABCMeta
32
33 def __init__(self, d, deploy_dir):
34 self.d = d
35 self.deploy_dir = deploy_dir
36
37 @abstractmethod
38 def write_index(self):
39 pass
40
41
42class RpmIndexer(Indexer):
43 def get_ml_prefix_and_os_list(self, arch_var=None, os_var=None):
44 package_archs = {
45 'default': [],
46 }
47
48 target_os = {
49 'default': "",
50 }
51
52 if arch_var is not None and os_var is not None:
53 package_archs['default'] = self.d.getVar(arch_var, True).split()
54 package_archs['default'].reverse()
55 target_os['default'] = self.d.getVar(os_var, True).strip()
56 else:
57 package_archs['default'] = self.d.getVar("PACKAGE_ARCHS", True).split()
58 # arch order is reversed. This ensures the -best- match is
59 # listed first!
60 package_archs['default'].reverse()
61 target_os['default'] = self.d.getVar("TARGET_OS", True).strip()
62 multilibs = self.d.getVar('MULTILIBS', True) or ""
63 for ext in multilibs.split():
64 eext = ext.split(':')
65 if len(eext) > 1 and eext[0] == 'multilib':
66 localdata = bb.data.createCopy(self.d)
67 default_tune_key = "DEFAULTTUNE_virtclass-multilib-" + eext[1]
68 default_tune = localdata.getVar(default_tune_key, False)
69 if default_tune is None:
70 default_tune_key = "DEFAULTTUNE_ML_" + eext[1]
71 default_tune = localdata.getVar(default_tune_key, False)
72 if default_tune:
73 localdata.setVar("DEFAULTTUNE", default_tune)
74 bb.data.update_data(localdata)
75 package_archs[eext[1]] = localdata.getVar('PACKAGE_ARCHS',
76 True).split()
77 package_archs[eext[1]].reverse()
78 target_os[eext[1]] = localdata.getVar("TARGET_OS",
79 True).strip()
80
81 ml_prefix_list = dict()
82 for mlib in package_archs:
83 if mlib == 'default':
84 ml_prefix_list[mlib] = package_archs[mlib]
85 else:
86 ml_prefix_list[mlib] = list()
87 for arch in package_archs[mlib]:
88 if arch in ['all', 'noarch', 'any']:
89 ml_prefix_list[mlib].append(arch)
90 else:
91 ml_prefix_list[mlib].append(mlib + "_" + arch)
92
93 return (ml_prefix_list, target_os)
94
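    # Worked example with illustrative values: MULTILIBS = "multilib:lib32",
    # PACKAGE_ARCHS = "all any noarch x86_64 core2-64", TARGET_OS = "linux",
    # and a lib32 tune whose PACKAGE_ARCHS is "all any noarch x86 i586":
    #
    #   ml_prefix_list = {
    #       'default': ['core2-64', 'x86_64', 'noarch', 'any', 'all'],
    #       'lib32': ['lib32_i586', 'lib32_x86', 'noarch', 'any', 'all'],
    #   }
    #   target_os = {'default': 'linux', 'lib32': 'linux'}
    #
    # ('all', 'noarch' and 'any' are never given the multilib prefix)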
95 def write_index(self):
96 sdk_pkg_archs = (self.d.getVar('SDK_PACKAGE_ARCHS', True) or "").replace('-', '_').split()
97 all_mlb_pkg_archs = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").replace('-', '_').split()
98
99 mlb_prefix_list = self.get_ml_prefix_and_os_list()[0]
100
101 archs = set()
102 for item in mlb_prefix_list:
103 archs = archs.union(set(i.replace('-', '_') for i in mlb_prefix_list[item]))
104
105 if len(archs) == 0:
106 archs = archs.union(set(all_mlb_pkg_archs))
107
108 archs = archs.union(set(sdk_pkg_archs))
109
110 rpm_createrepo = bb.utils.which(os.getenv('PATH'), "createrepo")
111 index_cmds = []
112 rpm_dirs_found = False
113 for arch in archs:
114 arch_dir = os.path.join(self.deploy_dir, arch)
115 if not os.path.isdir(arch_dir):
116 continue
117
118 index_cmds.append("%s --update -q %s" % (rpm_createrepo, arch_dir))
119
120 rpm_dirs_found = True
121
122 if not rpm_dirs_found:
123 bb.note("There are no packages in %s" % self.deploy_dir)
124 return
125
126 result = oe.utils.multiprocess_exec(index_cmds, create_index)
127 if result:
128 bb.fatal('%s' % ('\n'.join(result)))
129
130
131class OpkgIndexer(Indexer):
132 def write_index(self):
133 arch_vars = ["ALL_MULTILIB_PACKAGE_ARCHS",
134 "SDK_PACKAGE_ARCHS",
135 "MULTILIB_ARCHS"]
136
137 opkg_index_cmd = bb.utils.which(os.getenv('PATH'), "opkg-make-index")
138
139 if not os.path.exists(os.path.join(self.deploy_dir, "Packages")):
140 open(os.path.join(self.deploy_dir, "Packages"), "w").close()
141
142 index_cmds = []
143 for arch_var in arch_vars:
144 archs = self.d.getVar(arch_var, True)
145 if archs is None:
146 continue
147
148 for arch in archs.split():
149 pkgs_dir = os.path.join(self.deploy_dir, arch)
150 pkgs_file = os.path.join(pkgs_dir, "Packages")
151
152 if not os.path.isdir(pkgs_dir):
153 continue
154
155 if not os.path.exists(pkgs_file):
156 open(pkgs_file, "w").close()
157
158 index_cmds.append('%s -r %s -p %s -m %s' %
159 (opkg_index_cmd, pkgs_file, pkgs_file, pkgs_dir))
160
161 if len(index_cmds) == 0:
162 bb.note("There are no packages in %s!" % self.deploy_dir)
163 return
164
165 result = oe.utils.multiprocess_exec(index_cmds, create_index)
166 if result:
167 bb.fatal('%s' % ('\n'.join(result)))
168
169
170
171class DpkgIndexer(Indexer):
172 def write_index(self):
173 pkg_archs = self.d.getVar('PACKAGE_ARCHS', True)
174 if pkg_archs is not None:
175 arch_list = pkg_archs.split()
176 sdk_pkg_archs = self.d.getVar('SDK_PACKAGE_ARCHS', True)
177 if sdk_pkg_archs is not None:
178 for a in sdk_pkg_archs.split():
179 if a not in pkg_archs:
180 arch_list.append(a)
181
182 all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").replace('-', '_').split()
183 arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in arch_list)
184
185 apt_ftparchive = bb.utils.which(os.getenv('PATH'), "apt-ftparchive")
186 gzip = bb.utils.which(os.getenv('PATH'), "gzip")
187
188 index_cmds = []
189 deb_dirs_found = False
190 for arch in arch_list:
191 arch_dir = os.path.join(self.deploy_dir, arch)
192 if not os.path.isdir(arch_dir):
193 continue
194
195 cmd = "cd %s; PSEUDO_UNLOAD=1 %s packages . > Packages;" % (arch_dir, apt_ftparchive)
196
197 cmd += "%s -fc Packages > Packages.gz;" % gzip
198
199 with open(os.path.join(arch_dir, "Release"), "w+") as release:
200 release.write("Label: %s\n" % arch)
201
202 cmd += "PSEUDO_UNLOAD=1 %s release . >> Release" % apt_ftparchive
203
204 index_cmds.append(cmd)
205
206 deb_dirs_found = True
207
208 if not deb_dirs_found:
209 bb.note("There are no packages in %s" % self.deploy_dir)
210 return
211
212 result = oe.utils.multiprocess_exec(index_cmds, create_index)
213 if result:
214 bb.fatal('%s' % ('\n'.join(result)))
215
216
217
218class PkgsList(object):
219 __metaclass__ = ABCMeta
220
221 def __init__(self, d, rootfs_dir):
222 self.d = d
223 self.rootfs_dir = rootfs_dir
224
225 @abstractmethod
226 def list(self, format=None):
227 pass
228
229
230class RpmPkgsList(PkgsList):
231 def __init__(self, d, rootfs_dir, arch_var=None, os_var=None):
232 super(RpmPkgsList, self).__init__(d, rootfs_dir)
233
234 self.rpm_cmd = bb.utils.which(os.getenv('PATH'), "rpm")
235 self.image_rpmlib = os.path.join(self.rootfs_dir, 'var/lib/rpm')
236
237 self.ml_prefix_list, self.ml_os_list = \
238 RpmIndexer(d, rootfs_dir).get_ml_prefix_and_os_list(arch_var, os_var)
239
240 # Determine rpm version
241 cmd = "%s --version" % self.rpm_cmd
242 try:
243 output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
244 except subprocess.CalledProcessError as e:
245 bb.fatal("Getting rpm version failed. Command '%s' "
246 "returned %d:\n%s" % (cmd, e.returncode, e.output))
247 self.rpm_version = int(output.split()[-1].split('.')[0])
248
249 '''
250 Translate the RPM/Smart format names to the OE multilib format names
251 '''
252 def _pkg_translate_smart_to_oe(self, pkg, arch):
253 new_pkg = pkg
254 new_arch = arch
255 fixed_arch = arch.replace('_', '-')
256 found = 0
257 for mlib in self.ml_prefix_list:
258 for cmp_arch in self.ml_prefix_list[mlib]:
259 fixed_cmp_arch = cmp_arch.replace('_', '-')
260 if fixed_arch == fixed_cmp_arch:
261 if mlib == 'default':
262 new_pkg = pkg
263 new_arch = cmp_arch
264 else:
265 new_pkg = mlib + '-' + pkg
266 # We need to strip off the ${mlib}_ prefix on the arch
267 new_arch = cmp_arch.replace(mlib + '_', '')
268
269 # Workaround for bug 3565. Simply look to see if we
270 # know of a package with that name, if not try again!
271 filename = os.path.join(self.d.getVar('PKGDATA_DIR', True),
272 'runtime-reverse',
273 new_pkg)
274 if os.path.exists(filename):
275 found = 1
276 break
277
278 if found == 1 and fixed_arch == fixed_cmp_arch:
279 break
280 #bb.note('%s, %s -> %s, %s' % (pkg, arch, new_pkg, new_arch))
281 return new_pkg, new_arch
282
283 def _list_pkg_deps(self):
284 cmd = [bb.utils.which(os.getenv('PATH'), "rpmresolve"),
285 "-t", self.image_rpmlib]
286
287 try:
288 output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip()
289 except subprocess.CalledProcessError as e:
290 bb.fatal("Cannot get the package dependencies. Command '%s' "
291 "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output))
292
293 return output
294
295 def list(self, format=None):
296 if format == "deps":
297 if self.rpm_version == 4:
298 bb.fatal("'deps' format dependency listings are not supported with rpm 4 since rpmresolve does not work")
299 return self._list_pkg_deps()
300
301 cmd = self.rpm_cmd + ' --root ' + self.rootfs_dir
302 cmd += ' -D "_dbpath /var/lib/rpm" -qa'
303 if self.rpm_version == 4:
304 cmd += " --qf '[%{NAME} %{ARCH} %{VERSION}\n]'"
305 else:
306 cmd += " --qf '[%{NAME} %{ARCH} %{VERSION} %{PACKAGEORIGIN}\n]'"
307
308 try:
309 # bb.note(cmd)
310 tmp_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip()
311
312 except subprocess.CalledProcessError as e:
313 bb.fatal("Cannot get the installed packages list. Command '%s' "
314 "returned %d:\n%s" % (cmd, e.returncode, e.output))
315
316 output = list()
317 for line in tmp_output.split('\n'):
318 if len(line.strip()) == 0:
319 continue
320 pkg = line.split()[0]
321 arch = line.split()[1]
322 ver = line.split()[2]
323 if self.rpm_version == 4:
324 pkgorigin = "unknown"
325 else:
326 pkgorigin = line.split()[3]
327 new_pkg, new_arch = self._pkg_translate_smart_to_oe(pkg, arch)
328
329 if format == "arch":
330 output.append('%s %s' % (new_pkg, new_arch))
331 elif format == "file":
332 output.append('%s %s %s' % (new_pkg, pkgorigin, new_arch))
333 elif format == "ver":
334 output.append('%s %s %s' % (new_pkg, new_arch, ver))
335 else:
336 output.append('%s' % (new_pkg))
337
338 output.sort()
339
340 return '\n'.join(output)
341
342
343class OpkgPkgsList(PkgsList):
344 def __init__(self, d, rootfs_dir, config_file):
345 super(OpkgPkgsList, self).__init__(d, rootfs_dir)
346
347 self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg-cl")
348 self.opkg_args = "-f %s -o %s " % (config_file, rootfs_dir)
349 self.opkg_args += self.d.getVar("OPKG_ARGS", True)
350
351 def list(self, format=None):
352 opkg_query_cmd = bb.utils.which(os.getenv('PATH'), "opkg-query-helper.py")
353
354 if format == "arch":
355 cmd = "%s %s status | %s -a" % \
356 (self.opkg_cmd, self.opkg_args, opkg_query_cmd)
357 elif format == "file":
358 cmd = "%s %s status | %s -f" % \
359 (self.opkg_cmd, self.opkg_args, opkg_query_cmd)
360 elif format == "ver":
361 cmd = "%s %s status | %s -v" % \
362 (self.opkg_cmd, self.opkg_args, opkg_query_cmd)
363 elif format == "deps":
364 cmd = "%s %s status | %s" % \
365 (self.opkg_cmd, self.opkg_args, opkg_query_cmd)
366 else:
367 cmd = "%s %s list_installed | cut -d' ' -f1" % \
368 (self.opkg_cmd, self.opkg_args)
369
370 try:
371 output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip()
372 except subprocess.CalledProcessError as e:
373 bb.fatal("Cannot get the installed packages list. Command '%s' "
374 "returned %d:\n%s" % (cmd, e.returncode, e.output))
375
376 if output and format == "file":
377 tmp_output = ""
378 for line in output.split('\n'):
379 pkg, pkg_file, pkg_arch = line.split()
380 full_path = os.path.join(self.rootfs_dir, pkg_arch, pkg_file)
381 if os.path.exists(full_path):
382 tmp_output += "%s %s %s\n" % (pkg, full_path, pkg_arch)
383 else:
384 tmp_output += "%s %s %s\n" % (pkg, pkg_file, pkg_arch)
385
386 output = tmp_output
387
388 return output
389
390
391class DpkgPkgsList(PkgsList):
392 def list(self, format=None):
393 cmd = [bb.utils.which(os.getenv('PATH'), "dpkg-query"),
394 "--admindir=%s/var/lib/dpkg" % self.rootfs_dir,
395 "-W"]
396
397 if format == "arch":
398 cmd.append("-f=${Package} ${PackageArch}\n")
399 elif format == "file":
400 cmd.append("-f=${Package} ${Package}_${Version}_${Architecture}.deb ${PackageArch}\n")
401 elif format == "ver":
402 cmd.append("-f=${Package} ${PackageArch} ${Version}\n")
403 elif format == "deps":
404 cmd.append("-f=Package: ${Package}\nDepends: ${Depends}\nRecommends: ${Recommends}\n\n")
405 else:
406 cmd.append("-f=${Package}\n")
407
408 try:
409 output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip()
410 except subprocess.CalledProcessError as e:
411 bb.fatal("Cannot get the installed packages list. Command '%s' "
412 "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output))
413
414 if format == "file":
415 tmp_output = ""
416 for line in tuple(output.split('\n')):
417 pkg, pkg_file, pkg_arch = line.split()
418 full_path = os.path.join(self.rootfs_dir, pkg_arch, pkg_file)
419 if os.path.exists(full_path):
420 tmp_output += "%s %s %s\n" % (pkg, full_path, pkg_arch)
421 else:
422 tmp_output += "%s %s %s\n" % (pkg, pkg_file, pkg_arch)
423
424 output = tmp_output
425 elif format == "deps":
426 opkg_query_cmd = bb.utils.which(os.getenv('PATH'), "opkg-query-helper.py")
427 file_out = tempfile.NamedTemporaryFile()
428 file_out.write(output)
429 file_out.flush()
430
431 try:
432 output = subprocess.check_output("cat %s | %s" %
433 (file_out.name, opkg_query_cmd),
434 stderr=subprocess.STDOUT,
435 shell=True)
436 except subprocess.CalledProcessError as e:
437 file_out.close()
438 bb.fatal("Cannot compute packages dependencies. Command '%s' "
439 "returned %d:\n%s" % (e.cmd, e.returncode, e.output))
440
441 file_out.close()
442
443 return output
444
445
446class PackageManager(object):
447 """
448 This is an abstract class. Do not instantiate this directly.
449 """
450 __metaclass__ = ABCMeta
451
452 def __init__(self, d):
453 self.d = d
454 self.deploy_dir = None
455 self.deploy_lock = None
456 self.feed_uris = self.d.getVar('PACKAGE_FEED_URIS', True) or ""
457
458 """
459 Update the package manager package database.
460 """
461 @abstractmethod
462 def update(self):
463 pass
464
465 """
466 Install a list of packages. 'pkgs' is a list object. If 'attempt_only' is
467 True, installation failures are ignored.
468 """
469 @abstractmethod
470 def install(self, pkgs, attempt_only=False):
471 pass
472
473 """
474    Remove a list of packages. 'pkgs' is a list object. If 'with_dependencies'
475    is False, any dependencies are left in place.
476 """
477 @abstractmethod
478 def remove(self, pkgs, with_dependencies=True):
479 pass
480
481 """
482 This function creates the index files
483 """
484 @abstractmethod
485 def write_index(self):
486 pass
487
488 @abstractmethod
489 def remove_packaging_data(self):
490 pass
491
492 @abstractmethod
493 def list_installed(self, format=None):
494 pass
495
496 @abstractmethod
497 def insert_feeds_uris(self):
498 pass
499
500    """
501    Install complementary packages based upon the list of currently installed
502    packages, e.g. locales, *-dev, *-dbg, etc. Installation is attempt-only:
503    if a matching package does not exist, no error occurs. Note: every
504    backend needs to call this function explicitly after the normal package
505    installation.
506    """
507 def install_complementary(self, globs=None):
508 # we need to write the list of installed packages to a file because the
509 # oe-pkgdata-util reads it from a file
510 installed_pkgs_file = os.path.join(self.d.getVar('WORKDIR', True),
511 "installed_pkgs.txt")
512 with open(installed_pkgs_file, "w+") as installed_pkgs:
513 installed_pkgs.write(self.list_installed("arch"))
514
515 if globs is None:
516 globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY', True)
517 split_linguas = set()
518
519 for translation in self.d.getVar('IMAGE_LINGUAS', True).split():
520 split_linguas.add(translation)
521 split_linguas.add(translation.split('-')[0])
522
523 split_linguas = sorted(split_linguas)
524
525 for lang in split_linguas:
526 globs += " *-locale-%s" % lang
527
528 if globs is None:
529 return
530
531 cmd = [bb.utils.which(os.getenv('PATH'), "oe-pkgdata-util"),
532 "glob", self.d.getVar('PKGDATA_DIR', True), installed_pkgs_file,
533 globs]
534 exclude = self.d.getVar('PACKAGE_EXCLUDE_COMPLEMENTARY', True)
535 if exclude:
536 cmd.extend(['-x', exclude])
537 try:
538 bb.note("Installing complementary packages ...")
539 complementary_pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
540 except subprocess.CalledProcessError as e:
541 bb.fatal("Could not compute complementary packages list. Command "
542 "'%s' returned %d:\n%s" %
543 (' '.join(cmd), e.returncode, e.output))
544
545 self.install(complementary_pkgs.split(), attempt_only=True)
546
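    # Worked example: with IMAGE_INSTALL_COMPLEMENTARY = "*-dev *-dbg" and
    # IMAGE_LINGUAS = "en-us" (illustrative values), the globs passed to
    # oe-pkgdata-util become
    #
    #   "*-dev *-dbg *-locale-en *-locale-en-us"
    #
    # and every installed package with a match (e.g. zlib -> zlib-dev) is
    # installed attempt-only, so missing matches are not an error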
547 def deploy_dir_lock(self):
548 if self.deploy_dir is None:
549 raise RuntimeError("deploy_dir is not set!")
550
551 lock_file_name = os.path.join(self.deploy_dir, "deploy.lock")
552
553 self.deploy_lock = bb.utils.lockfile(lock_file_name)
554
555 def deploy_dir_unlock(self):
556 if self.deploy_lock is None:
557 return
558
559 bb.utils.unlockfile(self.deploy_lock)
560
561 self.deploy_lock = None
562
563
564class RpmPM(PackageManager):
565 def __init__(self,
566 d,
567 target_rootfs,
568 target_vendor,
569 task_name='target',
570 providename=None,
571 arch_var=None,
572 os_var=None):
573 super(RpmPM, self).__init__(d)
574 self.target_rootfs = target_rootfs
575 self.target_vendor = target_vendor
576 self.task_name = task_name
577 self.providename = providename
578 self.fullpkglist = list()
579 self.deploy_dir = self.d.getVar('DEPLOY_DIR_RPM', True)
580 self.etcrpm_dir = os.path.join(self.target_rootfs, "etc/rpm")
581 self.install_dir = os.path.join(self.target_rootfs, "install")
582 self.rpm_cmd = bb.utils.which(os.getenv('PATH'), "rpm")
583 self.smart_cmd = bb.utils.which(os.getenv('PATH'), "smart")
584 self.smart_opt = "--quiet --data-dir=" + os.path.join(target_rootfs,
585 'var/lib/smart')
586 self.scriptlet_wrapper = self.d.expand('${WORKDIR}/scriptlet_wrapper')
587 self.solution_manifest = self.d.expand('${T}/saved/%s_solution' %
588 self.task_name)
589 self.saved_rpmlib = self.d.expand('${T}/saved/%s' % self.task_name)
590 self.image_rpmlib = os.path.join(self.target_rootfs, 'var/lib/rpm')
591
592 if not os.path.exists(self.d.expand('${T}/saved')):
593 bb.utils.mkdirhier(self.d.expand('${T}/saved'))
594
595 self.indexer = RpmIndexer(self.d, self.deploy_dir)
596 self.pkgs_list = RpmPkgsList(self.d, self.target_rootfs, arch_var, os_var)
597 self.rpm_version = self.pkgs_list.rpm_version
598
599 self.ml_prefix_list, self.ml_os_list = self.indexer.get_ml_prefix_and_os_list(arch_var, os_var)
600
601 def insert_feeds_uris(self):
602 if self.feed_uris == "":
603 return
604
605        # The list must be ordered from most preferred to least preferred
606 default_platform_extra = set()
607 platform_extra = set()
608 bbextendvariant = self.d.getVar('BBEXTENDVARIANT', True) or ""
609 for mlib in self.ml_os_list:
610 for arch in self.ml_prefix_list[mlib]:
611 plt = arch.replace('-', '_') + '-.*-' + self.ml_os_list[mlib]
612 if mlib == bbextendvariant:
613 default_platform_extra.add(plt)
614 else:
615 platform_extra.add(plt)
616
617 platform_extra = platform_extra.union(default_platform_extra)
618
619 arch_list = []
620 for canonical_arch in platform_extra:
621 arch = canonical_arch.split('-')[0]
622 if not os.path.exists(os.path.join(self.deploy_dir, arch)):
623 continue
624 arch_list.append(arch)
625
626 uri_iterator = 0
627 channel_priority = 10 + 5 * len(self.feed_uris.split()) * len(arch_list)
628
629 for uri in self.feed_uris.split():
630 for arch in arch_list:
631                bb.note('Note: adding Smart channel url%d-%s (%s)' %
632                        (uri_iterator, arch, channel_priority))
633 self._invoke_smart('channel --add url%d-%s type=rpm-md baseurl=%s/rpm/%s -y'
634 % (uri_iterator, arch, uri, arch))
635 self._invoke_smart('channel --set url%d-%s priority=%d' %
636 (uri_iterator, arch, channel_priority))
637 channel_priority -= 5
638 uri_iterator += 1
639
640    '''
641    Create the configuration files for rpm and smart; multilib is supported.
642    '''
643 def create_configs(self):
644 target_arch = self.d.getVar('TARGET_ARCH', True)
645 platform = '%s%s-%s' % (target_arch.replace('-', '_'),
646 self.target_vendor,
647 self.ml_os_list['default'])
648
649        # The list must be ordered from most preferred to least preferred
650 default_platform_extra = list()
651 platform_extra = list()
652 bbextendvariant = self.d.getVar('BBEXTENDVARIANT', True) or ""
653 for mlib in self.ml_os_list:
654 for arch in self.ml_prefix_list[mlib]:
655 plt = arch.replace('-', '_') + '-.*-' + self.ml_os_list[mlib]
656 if mlib == bbextendvariant:
657 if plt not in default_platform_extra:
658 default_platform_extra.append(plt)
659 else:
660 if plt not in platform_extra:
661 platform_extra.append(plt)
662 platform_extra = default_platform_extra + platform_extra
663
664 self._create_configs(platform, platform_extra)
665
666 def _invoke_smart(self, args):
667 cmd = "%s %s %s" % (self.smart_cmd, self.smart_opt, args)
668 # bb.note(cmd)
669 try:
670 complementary_pkgs = subprocess.check_output(cmd,
671 stderr=subprocess.STDOUT,
672 shell=True)
673 # bb.note(complementary_pkgs)
674 return complementary_pkgs
675 except subprocess.CalledProcessError as e:
676 bb.fatal("Could not invoke smart. Command "
677 "'%s' returned %d:\n%s" % (cmd, e.returncode, e.output))
678
679 def _search_pkg_name_in_feeds(self, pkg, feed_archs):
680 for arch in feed_archs:
681 arch = arch.replace('-', '_')
682 for p in self.fullpkglist:
683 regex_match = r"^%s-[^-]*-[^-]*@%s$" % \
684 (re.escape(pkg), re.escape(arch))
685 if re.match(regex_match, p) is not None:
686 # First found is best match
687 # bb.note('%s -> %s' % (pkg, pkg + '@' + arch))
688 return pkg + '@' + arch
689
690 return ""
691
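    # Worked example: with 'libc6-2.19-r0@core2_64' in self.fullpkglist,
    #
    #   _search_pkg_name_in_feeds('libc6', ['core2-64'])  ->  'libc6@core2_64'
    #
    # ('-' in each feed arch is normalised to '_' before matching, and the
    # first matching arch wins)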
692    '''
693    Translate the OE multilib format names to the RPM/Smart format names.
694    It searches for the RPM/Smart format names in the probable multilib
695    feeds first, then falls back to the default base feed.
696    '''
697 def _pkg_translate_oe_to_smart(self, pkgs, attempt_only=False):
698 new_pkgs = list()
699
700 for pkg in pkgs:
701 new_pkg = pkg
702 # Search new_pkg in probable multilibs first
703 for mlib in self.ml_prefix_list:
704 # Jump the default archs
705 if mlib == 'default':
706 continue
707
708 subst = pkg.replace(mlib + '-', '')
709 # if the pkg in this multilib feed
710 if subst != pkg:
711 feed_archs = self.ml_prefix_list[mlib]
712 new_pkg = self._search_pkg_name_in_feeds(subst, feed_archs)
713 if not new_pkg:
714 # Failed to translate, package not found!
715 err_msg = '%s not found in the %s feeds (%s).\n' % \
716 (pkg, mlib, " ".join(feed_archs))
717 if not attempt_only:
718 err_msg += " ".join(self.fullpkglist)
719 bb.fatal(err_msg)
720 bb.warn(err_msg)
721 else:
722 new_pkgs.append(new_pkg)
723
724 break
725
726 # Apparently not a multilib package...
727 if pkg == new_pkg:
728 # Search new_pkg in default archs
729 default_archs = self.ml_prefix_list['default']
730 new_pkg = self._search_pkg_name_in_feeds(pkg, default_archs)
731 if not new_pkg:
732 err_msg = '%s not found in the base feeds (%s).\n' % \
733 (pkg, ' '.join(default_archs))
734 if not attempt_only:
735 err_msg += " ".join(self.fullpkglist)
736 bb.fatal(err_msg)
737 bb.warn(err_msg)
738 else:
739 new_pkgs.append(new_pkg)
740
741 return new_pkgs
742
743 def _create_configs(self, platform, platform_extra):
744 # Setup base system configuration
745 bb.note("configuring RPM platform settings")
746
747 # Configure internal RPM environment when using Smart
748 os.environ['RPM_ETCRPM'] = self.etcrpm_dir
749 bb.utils.mkdirhier(self.etcrpm_dir)
750
751 # Setup temporary directory -- install...
752 if os.path.exists(self.install_dir):
753 bb.utils.remove(self.install_dir, True)
754 bb.utils.mkdirhier(os.path.join(self.install_dir, 'tmp'))
755
756 channel_priority = 5
757 platform_dir = os.path.join(self.etcrpm_dir, "platform")
758 sdkos = self.d.getVar("SDK_OS", True)
759 with open(platform_dir, "w+") as platform_fd:
760 platform_fd.write(platform + '\n')
761 for pt in platform_extra:
762 channel_priority += 5
763 if sdkos:
764 tmp = re.sub("-%s$" % sdkos, "-%s\n" % sdkos, pt)
765 tmp = re.sub("-linux.*$", "-linux.*\n", tmp)
766 platform_fd.write(tmp)
767
768        # Tell RPM that the "/" directory exists and is available
769 bb.note("configuring RPM system provides")
770 sysinfo_dir = os.path.join(self.etcrpm_dir, "sysinfo")
771 bb.utils.mkdirhier(sysinfo_dir)
772 with open(os.path.join(sysinfo_dir, "Dirnames"), "w+") as dirnames:
773 dirnames.write("/\n")
774
775 if self.providename:
776 providename_dir = os.path.join(sysinfo_dir, "Providename")
777 if not os.path.exists(providename_dir):
778 providename_content = '\n'.join(self.providename)
779 providename_content += '\n'
780 open(providename_dir, "w+").write(providename_content)
781
782 # Configure RPM... we enforce these settings!
783 bb.note("configuring RPM DB settings")
784        # After changing the __db.* cache size, the log file is no longer
785        # generated automatically, which raises some warnings, so touch an
786        # empty log file for rpm to write into.
787 if self.rpm_version == 5:
788 rpmlib_log = os.path.join(self.image_rpmlib, 'log', 'log.0000000001')
789 if not os.path.exists(rpmlib_log):
790 bb.utils.mkdirhier(os.path.join(self.image_rpmlib, 'log'))
791 open(rpmlib_log, 'w+').close()
792
793 DB_CONFIG_CONTENT = "# ================ Environment\n" \
794 "set_data_dir .\n" \
795 "set_create_dir .\n" \
796 "set_lg_dir ./log\n" \
797 "set_tmp_dir ./tmp\n" \
798 "set_flags db_log_autoremove on\n" \
799 "\n" \
800 "# -- thread_count must be >= 8\n" \
801 "set_thread_count 64\n" \
802 "\n" \
803 "# ================ Logging\n" \
804 "\n" \
805 "# ================ Memory Pool\n" \
806 "set_cachesize 0 1048576 0\n" \
807 "set_mp_mmapsize 268435456\n" \
808 "\n" \
809 "# ================ Locking\n" \
810 "set_lk_max_locks 16384\n" \
811 "set_lk_max_lockers 16384\n" \
812 "set_lk_max_objects 16384\n" \
813 "mutex_set_max 163840\n" \
814 "\n" \
815 "# ================ Replication\n"
816
817 db_config_dir = os.path.join(self.image_rpmlib, 'DB_CONFIG')
818 if not os.path.exists(db_config_dir):
819 open(db_config_dir, 'w+').write(DB_CONFIG_CONTENT)
820
821 # Create database so that smart doesn't complain (lazy init)
822 opt = "-qa"
823 if self.rpm_version == 4:
824 opt = "--initdb"
825 cmd = "%s --root %s --dbpath /var/lib/rpm %s > /dev/null" % (
826 self.rpm_cmd, self.target_rootfs, opt)
827 try:
828 subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
829 except subprocess.CalledProcessError as e:
830 bb.fatal("Create rpm database failed. Command '%s' "
831 "returned %d:\n%s" % (cmd, e.returncode, e.output))
832
833 # Configure smart
834 bb.note("configuring Smart settings")
835 bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'),
836 True)
837 self._invoke_smart('config --set rpm-root=%s' % self.target_rootfs)
838 self._invoke_smart('config --set rpm-dbpath=/var/lib/rpm')
839 self._invoke_smart('config --set rpm-extra-macros._var=%s' %
840 self.d.getVar('localstatedir', True))
841 cmd = 'config --set rpm-extra-macros._tmppath=/install/tmp'
842
843 prefer_color = self.d.getVar('RPM_PREFER_ELF_ARCH', True)
844 if prefer_color:
845 if prefer_color not in ['0', '1', '2', '4']:
846 bb.fatal("Invalid RPM_PREFER_ELF_ARCH: %s, it should be one of:\n"
847 "\t1: ELF32 wins\n"
848 "\t2: ELF64 wins\n"
849 "\t4: ELF64 N32 wins (mips64 or mips64el only)" %
850 prefer_color)
851 if prefer_color == "4" and self.d.getVar("TUNE_ARCH", True) not in \
852 ['mips64', 'mips64el']:
853 bb.fatal("RPM_PREFER_ELF_ARCH = \"4\" is for mips64 or mips64el "
854 "only.")
855 self._invoke_smart('config --set rpm-extra-macros._prefer_color=%s'
856 % prefer_color)
857
858 self._invoke_smart(cmd)
859
860 # Write common configuration for host and target usage
861 self._invoke_smart('config --set rpm-nolinktos=1')
862 self._invoke_smart('config --set rpm-noparentdirs=1')
863 check_signature = self.d.getVar('RPM_CHECK_SIGNATURES', True)
864 if check_signature and check_signature.strip() == "0":
865 self._invoke_smart('config --set rpm-check-signatures=false')
866        for i in (self.d.getVar('BAD_RECOMMENDATIONS', True) or "").split():
867 self._invoke_smart('flag --set ignore-recommends %s' % i)
868
869 # Do the following configurations here, to avoid them being
870 # saved for field upgrade
871        if (self.d.getVar('NO_RECOMMENDATIONS', True) or "").strip() == "1":
872 self._invoke_smart('config --set ignore-all-recommends=1')
873 pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE', True) or ""
874 for i in pkg_exclude.split():
875 self._invoke_smart('flag --set exclude-packages %s' % i)
876
877 # Optional debugging
878 # self._invoke_smart('config --set rpm-log-level=debug')
879 # cmd = 'config --set rpm-log-file=/tmp/smart-debug-logfile'
880 # self._invoke_smart(cmd)
881 ch_already_added = []
882 for canonical_arch in platform_extra:
883 arch = canonical_arch.split('-')[0]
884 arch_channel = os.path.join(self.deploy_dir, arch)
885 if os.path.exists(arch_channel) and not arch in ch_already_added:
886 bb.note('Note: adding Smart channel %s (%s)' %
887 (arch, channel_priority))
888 self._invoke_smart('channel --add %s type=rpm-md baseurl=%s -y'
889 % (arch, arch_channel))
890 self._invoke_smart('channel --set %s priority=%d' %
891 (arch, channel_priority))
892 channel_priority -= 5
893
894 ch_already_added.append(arch)
895
896 bb.note('adding Smart RPM DB channel')
897 self._invoke_smart('channel --add rpmsys type=rpm-sys -y')
898
899 # Construct install scriptlet wrapper.
900        # Scripts need to be ordered when executed, this ensures numeric order.
901        # If we ever run into needing more than 899 scripts, we'll have to
902        # change num to start at 1000.
903 #
904 if self.rpm_version == 4:
905 scriptletcmd = "$2 $3 $4\n"
906 else:
907 scriptletcmd = "$2 $1/$3 $4\n"
908
909 SCRIPTLET_FORMAT = "#!/bin/bash\n" \
910 "\n" \
911 "export PATH=%s\n" \
912 "export D=%s\n" \
913 'export OFFLINE_ROOT="$D"\n' \
914 'export IPKG_OFFLINE_ROOT="$D"\n' \
915 'export OPKG_OFFLINE_ROOT="$D"\n' \
916 "export INTERCEPT_DIR=%s\n" \
917 "export NATIVE_ROOT=%s\n" \
918 "\n" \
919 + scriptletcmd + \
920 "if [ $? -ne 0 ]; then\n" \
921 " if [ $4 -eq 1 ]; then\n" \
922 " mkdir -p $1/etc/rpm-postinsts\n" \
923 " num=100\n" \
924 " while [ -e $1/etc/rpm-postinsts/${num}-* ]; do num=$((num + 1)); done\n" \
925 " name=`head -1 $1/$3 | cut -d\' \' -f 2`\n" \
926 ' echo "#!$2" > $1/etc/rpm-postinsts/${num}-${name}\n' \
927 ' echo "# Arg: $4" >> $1/etc/rpm-postinsts/${num}-${name}\n' \
928 " cat $1/$3 >> $1/etc/rpm-postinsts/${num}-${name}\n" \
929 " chmod +x $1/etc/rpm-postinsts/${num}-${name}\n" \
930 " else\n" \
931 ' echo "Error: pre/post remove scriptlet failed"\n' \
932 " fi\n" \
933 "fi\n"
934
        intercept_dir = self.d.expand('${WORKDIR}/intercept_scripts')
        native_root = self.d.getVar('STAGING_DIR_NATIVE', True)
        scriptlet_content = SCRIPTLET_FORMAT % (os.environ['PATH'],
                                                self.target_rootfs,
                                                intercept_dir,
                                                native_root)
        with open(self.scriptlet_wrapper, 'w+') as scriptlet_file:
            scriptlet_file.write(scriptlet_content)

        bb.note("Note: configuring RPM cross-install scriptlet_wrapper")
        os.chmod(self.scriptlet_wrapper, 0755)
        cmd = 'config --set rpm-extra-macros._cross_scriptlet_wrapper=%s' % \
              self.scriptlet_wrapper
        self._invoke_smart(cmd)

        # Debug to show smart config info
        # bb.note(self._invoke_smart('config --show'))

    def update(self):
        self._invoke_smart('update rpmsys')

    '''
    Install packages with smart; the package names are in OE format.
    '''
    def install(self, pkgs, attempt_only=False):
        bb.note("Installing the following packages: %s" % ' '.join(pkgs))
        if attempt_only and len(pkgs) == 0:
            return
        pkgs = self._pkg_translate_oe_to_smart(pkgs, attempt_only)

        if not attempt_only:
            bb.note('to be installed: %s' % ' '.join(pkgs))
            cmd = "%s %s install -y %s" % \
                  (self.smart_cmd, self.smart_opt, ' '.join(pkgs))
            bb.note(cmd)
        else:
            bb.note('installing attempt-only packages...')
            bb.note('Attempting %s' % ' '.join(pkgs))
            cmd = "%s %s install --attempt -y %s" % \
                  (self.smart_cmd, self.smart_opt, ' '.join(pkgs))
        try:
            output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
            bb.note(output)
        except subprocess.CalledProcessError as e:
            bb.fatal("Unable to install packages. Command '%s' "
                     "returned %d:\n%s" % (cmd, e.returncode, e.output))

    '''
    Remove packages with smart; the package names are in smart/rpm format.
    '''
    def remove(self, pkgs, with_dependencies=True):
        bb.note('to be removed: ' + ' '.join(pkgs))

        if not with_dependencies:
            cmd = "%s -e --nodeps " % self.rpm_cmd
            cmd += "--root=%s " % self.target_rootfs
            cmd += "--dbpath=/var/lib/rpm "
            cmd += "--define='_cross_scriptlet_wrapper %s' " % \
                   self.scriptlet_wrapper
            cmd += "--define='_tmppath /install/tmp' %s" % ' '.join(pkgs)
        else:
            # for pkg in pkgs:
            #     bb.note('Debug: What required: %s' % pkg)
            #     bb.note(self._invoke_smart('query %s --show-requiredby' % pkg))

            cmd = "%s %s remove -y %s" % (self.smart_cmd,
                                          self.smart_opt,
                                          ' '.join(pkgs))

        try:
            bb.note(cmd)
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
            bb.note(output)
        except subprocess.CalledProcessError as e:
            bb.note("Unable to remove packages. Command '%s' "
                    "returned %d:\n%s" % (cmd, e.returncode, e.output))

    def upgrade(self):
        bb.note('smart upgrade')
        self._invoke_smart('upgrade')

    def write_index(self):
        result = self.indexer.write_index()

        if result is not None:
            bb.fatal(result)

    def remove_packaging_data(self):
        bb.utils.remove(self.image_rpmlib, True)
        bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'),
                        True)
        bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/opkg'), True)

        # remove the temp directory
        bb.utils.remove(self.d.expand('${IMAGE_ROOTFS}/install'), True)

    def backup_packaging_data(self):
        # Save the rpmlib for incremental rpm image generation
        if os.path.exists(self.saved_rpmlib):
            bb.utils.remove(self.saved_rpmlib, True)
        shutil.copytree(self.image_rpmlib,
                        self.saved_rpmlib,
                        symlinks=True)

    def recovery_packaging_data(self):
        # Move the rpmlib back
        if os.path.exists(self.saved_rpmlib):
            if os.path.exists(self.image_rpmlib):
                bb.utils.remove(self.image_rpmlib, True)

            bb.note('Recovering packaging data')
            shutil.copytree(self.saved_rpmlib,
                            self.image_rpmlib,
                            symlinks=True)

    def list_installed(self, format=None):
        return self.pkgs_list.list(format)

    '''
    For an incremental install we need to determine what we already have,
    what needs to be added, and what should be removed.
    dump_install_solution dumps and saves the new install solution.
    '''
    def dump_install_solution(self, pkgs):
        bb.note('creating new install solution for incremental install')
        if len(pkgs) == 0:
            return

        pkgs = self._pkg_translate_oe_to_smart(pkgs, False)
        install_pkgs = list()

        cmd = "%s %s install -y --dump %s 2>%s" % \
              (self.smart_cmd,
               self.smart_opt,
               ' '.join(pkgs),
               self.solution_manifest)
        try:
            # Disable the rpmsys channel for the fake install
            self._invoke_smart('channel --disable rpmsys')

            subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
            with open(self.solution_manifest, 'r') as manifest:
                for pkg in manifest.read().split('\n'):
                    if '@' in pkg:
                        install_pkgs.append(pkg)
        except subprocess.CalledProcessError as e:
            bb.note("Unable to dump install packages. Command '%s' "
                    "returned %d:\n%s" % (cmd, e.returncode, e.output))
        # Re-enable the rpmsys channel
        self._invoke_smart('channel --enable rpmsys')
        return install_pkgs
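
    # The '@' filter above keeps only real package entries; judging by that
    # filter and by _pkg_translate_oe_to_smart, manifest lines take the form
    # "name-version@arch", e.g. (hypothetical):
    #   psplash-0.1-r15@core2_64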

    '''
    For an incremental install we need to determine what we already have,
    what needs to be added, and what should be removed.
    load_old_install_solution loads the previous install solution.
    '''
    def load_old_install_solution(self):
        bb.note('loading old install solution for incremental install')
        installed_pkgs = list()
        if not os.path.exists(self.solution_manifest):
            bb.note('old install solution does not exist')
            return installed_pkgs

        with open(self.solution_manifest, 'r') as manifest:
            for pkg in manifest.read().split('\n'):
                if '@' in pkg:
                    installed_pkgs.append(pkg.strip())

        return installed_pkgs

    '''
    Dump all packages available in the feeds; this should be invoked after
    the newest rpm index has been created.
    '''
    def dump_all_available_pkgs(self):
        available_manifest = self.d.expand('${T}/saved/available_pkgs.txt')
        available_pkgs = list()
        cmd = "%s %s query --output %s" % \
              (self.smart_cmd, self.smart_opt, available_manifest)
        try:
            subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
            with open(available_manifest, 'r') as manifest:
                for pkg in manifest.read().split('\n'):
                    if '@' in pkg:
                        available_pkgs.append(pkg.strip())
        except subprocess.CalledProcessError as e:
            bb.note("Unable to list all available packages. Command '%s' "
                    "returned %d:\n%s" % (cmd, e.returncode, e.output))

        self.fullpkglist = available_pkgs

    def save_rpmpostinst(self, pkg):
        mlibs = (self.d.getVar('MULTILIB_GLOBAL_VARIANTS', True) or "").split()

        new_pkg = pkg
        # Remove any multilib prefix from the package name
        for mlib in mlibs:
            if mlib in pkg:
                new_pkg = pkg.replace(mlib + '-', '')
                break

        bb.note(' * postponing %s' % new_pkg)
        saved_dir = self.target_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/') + new_pkg

        cmd = self.rpm_cmd + ' -q --scripts --root ' + self.target_rootfs
        cmd += ' --dbpath=/var/lib/rpm ' + new_pkg
        cmd += ' | sed -n -e "/^postinstall scriptlet (using .*):$/,/^.* scriptlet (using .*):$/ {/.*/p}"'
        cmd += ' | sed -e "/postinstall scriptlet (using \(.*\)):$/d"'
        cmd += ' -e "/^.* scriptlet (using .*):$/d" > %s' % saved_dir
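
        # The pipeline extracts just the postinstall body from the
        # "rpm -q --scripts" output, which looks roughly like:
        #
        #   postinstall scriptlet (using /bin/sh):
        #   <script body>
        #   postuninstall scriptlet (using /bin/sh):
        #   ...
        #
        # The first sed prints the postinstall section; the second deletes
        # the "... scriptlet (using ...):" header lines, leaving the body.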

        try:
            bb.note(cmd)
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip()
            bb.note(output)
            os.chmod(saved_dir, 0755)
        except subprocess.CalledProcessError as e:
            bb.fatal("Invoking save_rpmpostinst failed. Command '%s' "
                     "returned %d:\n%s" % (cmd, e.returncode, e.output))

    '''Write common configuration for target usage'''
    def rpm_setup_smart_target_config(self):
        bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'),
                        True)

        self._invoke_smart('config --set rpm-nolinktos=1')
        self._invoke_smart('config --set rpm-noparentdirs=1')
        for i in (self.d.getVar('BAD_RECOMMENDATIONS', True) or "").split():
            self._invoke_smart('flag --set ignore-recommends %s' % i)
        self._invoke_smart('channel --add rpmsys type=rpm-sys -y')

    '''
    The rpm db lock files are left behind after rpm has been invoked to run
    queries on the build system, and they stop rpm from working on the
    target, so we need to unlock the rpm db by removing the lock files.
    '''
    def unlock_rpm_db(self):
        # Remove rpm db lock files
        rpm_db_locks = glob.glob('%s/var/lib/rpm/__db.*' % self.target_rootfs)
        for f in rpm_db_locks:
            bb.utils.remove(f, True)


class OpkgPM(PackageManager):
    def __init__(self, d, target_rootfs, config_file, archs, task_name='target'):
        super(OpkgPM, self).__init__(d)

        self.target_rootfs = target_rootfs
        self.config_file = config_file
        self.pkg_archs = archs
        self.task_name = task_name

        self.deploy_dir = self.d.getVar("DEPLOY_DIR_IPK", True)
        self.deploy_lock_file = os.path.join(self.deploy_dir, "deploy.lock")
        self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg-cl")
        self.opkg_args = "-f %s -o %s " % (self.config_file, target_rootfs)
        self.opkg_args += self.d.getVar("OPKG_ARGS", True)

        opkg_lib_dir = self.d.getVar('OPKGLIBDIR', True)
        if opkg_lib_dir[0] == "/":
            opkg_lib_dir = opkg_lib_dir[1:]

        self.opkg_dir = os.path.join(target_rootfs, opkg_lib_dir, "opkg")

        bb.utils.mkdirhier(self.opkg_dir)

        self.saved_opkg_dir = self.d.expand('${T}/saved/%s' % self.task_name)
        if not os.path.exists(self.d.expand('${T}/saved')):
            bb.utils.mkdirhier(self.d.expand('${T}/saved'))

        if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") != "1":
            self._create_config()
        else:
            self._create_custom_config()

        self.indexer = OpkgIndexer(self.d, self.deploy_dir)
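
    # Example construction (illustrative values; in practice the rootfs and
    # SDK code pass in the real config file path and arch list):
    #
    #   opkg = OpkgPM(d,
    #                 d.getVar('IMAGE_ROOTFS', True),
    #                 d.expand('${WORKDIR}/opkg.conf'),
    #                 d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True))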

    """
    This function will change a package's status in the opkg status file
    (e.g. /var/lib/opkg/status). If 'packages' is None then the status_tag
    will be applied to all packages.
    """
    def mark_packages(self, status_tag, packages=None):
        status_file = os.path.join(self.opkg_dir, "status")

        with open(status_file, "r") as sf:
            with open(status_file + ".tmp", "w+") as tmp_sf:
                if packages is None:
                    tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
                                        r"Package: \1\n\2Status: \3%s" % status_tag,
                                        sf.read()))
                else:
                    if not isinstance(packages, list):
                        raise TypeError("'packages' should be a list object")

                    status = sf.read()
                    for pkg in packages:
                        status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
                                        r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
                                        status)

                    tmp_sf.write(status)

        os.rename(status_file + ".tmp", status_file)
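
    # For example, with status_tag = "installed" an entry like the following
    # (illustrative) is rewritten so its trailing state becomes the new tag:
    #
    #   Package: psplash
    #   Status: install ok unpacked
    #
    # becomes:
    #
    #   Status: install ok installed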

    def _create_custom_config(self):
        bb.note("Building from feeds activated!")

        with open(self.config_file, "w+") as config_file:
            priority = 1
            for arch in self.pkg_archs.split():
                config_file.write("arch %s %d\n" % (arch, priority))
                priority += 5

            for line in (self.d.getVar('IPK_FEED_URIS', True) or "").split():
                feed_match = re.match("^[ \t]*(.*)##([^ \t]*)[ \t]*$", line)

                if feed_match is not None:
                    feed_name = feed_match.group(1)
                    feed_uri = feed_match.group(2)

                    bb.note("Adding %s feed with URL %s" % (feed_name, feed_uri))

                    config_file.write("src/gz %s %s\n" % (feed_name, feed_uri))

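            # IPK_FEED_URIS entries are "name##uri" pairs, per the regex
            # above; a hypothetical example:
            #
            #   IPK_FEED_URIS = "updates##http://example.com/feeds/ipk"
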
1264 """
1265 Allow to use package deploy directory contents as quick devel-testing
1266 feed. This creates individual feed configs for each arch subdir of those
1267 specified as compatible for the current machine.
1268 NOTE: Development-helper feature, NOT a full-fledged feed.
1269 """
1270 if (self.d.getVar('FEED_DEPLOYDIR_BASE_URI', True) or "") != "":
1271 for arch in self.pkg_archs.split():
1272 cfg_file_name = os.path.join(self.target_rootfs,
1273 self.d.getVar("sysconfdir", True),
1274 "opkg",
1275 "local-%s-feed.conf" % arch)
1276
1277 with open(cfg_file_name, "w+") as cfg_file:
1278 cfg_file.write("src/gz local-%s %s/%s" %
1279 (arch,
1280 self.d.getVar('FEED_DEPLOYDIR_BASE_URI', True),
1281 arch))
1282
1283 def _create_config(self):
1284 with open(self.config_file, "w+") as config_file:
1285 priority = 1
1286 for arch in self.pkg_archs.split():
1287 config_file.write("arch %s %d\n" % (arch, priority))