diff options
| author | Adrian Dudau <adrian.dudau@enea.com> | 2014-06-26 14:36:22 +0200 |
|---|---|---|
| committer | Adrian Dudau <adrian.dudau@enea.com> | 2014-06-26 15:32:53 +0200 |
| commit | f4cf9fe05bb3f32fabea4e54dd92d368967a80da (patch) | |
| tree | 487180fa9866985ea7b28e625651765d86f515c3 /meta/lib | |
| download | poky-f4cf9fe05bb3f32fabea4e54dd92d368967a80da.tar.gz | |
initial commit for Enea Linux 4.0
Migrated from the internal git server on the daisy-enea branch
Signed-off-by: Adrian Dudau <adrian.dudau@enea.com>
Diffstat (limited to 'meta/lib')
91 files changed, 10308 insertions, 0 deletions
diff --git a/meta/lib/oe/__init__.py b/meta/lib/oe/__init__.py new file mode 100644 index 0000000000..3ad9513f40 --- /dev/null +++ b/meta/lib/oe/__init__.py | |||
| @@ -0,0 +1,2 @@ | |||
| 1 | from pkgutil import extend_path | ||
| 2 | __path__ = extend_path(__path__, __name__) | ||
diff --git a/meta/lib/oe/buildhistory_analysis.py b/meta/lib/oe/buildhistory_analysis.py new file mode 100644 index 0000000000..5395c768a3 --- /dev/null +++ b/meta/lib/oe/buildhistory_analysis.py | |||
| @@ -0,0 +1,456 @@ | |||
| 1 | # Report significant differences in the buildhistory repository since a specific revision | ||
| 2 | # | ||
| 3 | # Copyright (C) 2012 Intel Corporation | ||
| 4 | # Author: Paul Eggleton <paul.eggleton@linux.intel.com> | ||
| 5 | # | ||
| 6 | # Note: requires GitPython 0.3.1+ | ||
| 7 | # | ||
| 8 | # You can use this from the command line by running scripts/buildhistory-diff | ||
| 9 | # | ||
| 10 | |||
| 11 | import sys | ||
| 12 | import os.path | ||
| 13 | import difflib | ||
| 14 | import git | ||
| 15 | import re | ||
| 16 | import bb.utils | ||
| 17 | |||
| 18 | |||
# How to display fields
# Fields whose values are space-separated lists (reported as added/removed items)
list_fields = ['DEPENDS', 'RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS', 'FILES', 'FILELIST', 'USER_CLASSES', 'IMAGE_CLASSES', 'IMAGE_FEATURES', 'IMAGE_LINGUAS', 'IMAGE_INSTALL', 'BAD_RECOMMENDATIONS', 'PACKAGE_EXCLUDE']
# List fields where a change in ordering alone is reported
list_order_fields = ['PACKAGES']
# Per-package fields that fall back to another variable's value when unset
# (e.g. PKGV defaults to PV); see compare_dict_blobs()
defaultval_map = {'PKG': 'PKG', 'PKGE': 'PE', 'PKGV': 'PV', 'PKGR': 'PR'}
# Fields compared numerically and reported as a percentage change
numeric_fields = ['PKGSIZE', 'IMAGESIZE']
# Fields to monitor
monitor_fields = ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RREPLACES', 'RCONFLICTS', 'PACKAGES', 'FILELIST', 'PKGSIZE', 'IMAGESIZE', 'PKG']
# Version fields; monitored when set (or always, with report_ver)
ver_monitor_fields = ['PKGE', 'PKGV', 'PKGR']
# Percentage change to alert for numeric fields
monitor_numeric_threshold = 10
# Image files to monitor (note that image-info.txt is handled separately)
img_monitor_files = ['installed-package-names.txt', 'files-in-image.txt']
# Related context fields for reporting (note: PE, PV & PR are always reported for monitored package fields)
related_fields = {}
related_fields['RDEPENDS'] = ['DEPENDS']
related_fields['RRECOMMENDS'] = ['DEPENDS']
related_fields['FILELIST'] = ['FILES']
related_fields['PKGSIZE'] = ['FILELIST']
related_fields['files-in-image.txt'] = ['installed-package-names.txt', 'USER_CLASSES', 'IMAGE_CLASSES', 'ROOTFS_POSTPROCESS_COMMAND', 'IMAGE_POSTPROCESS_COMMAND']
related_fields['installed-package-names.txt'] = ['IMAGE_FEATURES', 'IMAGE_LINGUAS', 'IMAGE_INSTALL', 'BAD_RECOMMENDATIONS', 'NO_RECOMMENDATIONS', 'PACKAGE_EXCLUDE']
| 39 | |||
| 40 | |||
class ChangeRecord:
    """A single reported difference between two buildhistory revisions.

    Covers both recipe/package field changes and image file changes.
    Note: this module is Python 2 code (dict.iteritems() is used below).
    """
    def __init__(self, path, fieldname, oldvalue, newvalue, monitored):
        # path: repository-relative directory the change was found in
        self.path = path
        # fieldname: variable name (e.g. 'RDEPENDS') or file name (e.g. 'files-in-image.txt')
        self.fieldname = fieldname
        self.oldvalue = oldvalue
        self.newvalue = newvalue
        # monitored: whether this field is in the monitor_fields set (see module top)
        self.monitored = monitored
        # related: other ChangeRecords providing context (linked up in process_changes)
        self.related = []
        # filechanges: list of FileChange objects, set externally for image file diffs
        self.filechanges = None

    def __str__(self):
        return self._str_internal(True)

    def _str_internal(self, outer):
        """Render the change as text; outer=False is used for related (nested) records."""
        if outer:
            if '/image-files/' in self.path:
                # For files extracted from the image, prefix with the image directory only
                prefix = '%s: ' % self.path.split('/image-files/')[0]
            else:
                prefix = '%s: ' % self.path
        else:
            prefix = ''

        def pkglist_combine(depver):
            # Flatten a {package: version-constraint} dict into "pkg (constraint)" strings
            pkglist = []
            for k,v in depver.iteritems():
                if v:
                    pkglist.append("%s (%s)" % (k,v))
                else:
                    pkglist.append(k)
            return pkglist

        if self.fieldname in list_fields or self.fieldname in list_order_fields:
            if self.fieldname in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
                # Dependency lists: ignore pure version increases (see compare_pkg_lists)
                (depvera, depverb) = compare_pkg_lists(self.oldvalue, self.newvalue)
                aitems = pkglist_combine(depvera)
                bitems = pkglist_combine(depverb)
            else:
                aitems = self.oldvalue.split()
                bitems = self.newvalue.split()
            removed = list(set(aitems) - set(bitems))
            added = list(set(bitems) - set(aitems))

            if removed or added:
                if removed and not bitems:
                    out = '%s: removed all items "%s"' % (self.fieldname, ' '.join(removed))
                else:
                    out = '%s:%s%s' % (self.fieldname, ' removed "%s"' % ' '.join(removed) if removed else '', ' added "%s"' % ' '.join(added) if added else '')
            else:
                # Same set of items but the string differed, so only ordering changed
                out = '%s changed order' % self.fieldname
        elif self.fieldname in numeric_fields:
            aval = int(self.oldvalue or 0)
            bval = int(self.newvalue or 0)
            if aval != 0:
                percentchg = ((bval - aval) / float(aval)) * 100
            else:
                # Avoid division by zero; treat a change from 0 as +100%
                percentchg = 100
            out = '%s changed from %s to %s (%s%d%%)' % (self.fieldname, self.oldvalue or "''", self.newvalue or "''", '+' if percentchg > 0 else '', percentchg)
        elif self.fieldname in defaultval_map:
            out = '%s changed from %s to %s' % (self.fieldname, self.oldvalue, self.newvalue)
            if self.fieldname == 'PKG' and '[default]' in self.newvalue:
                out += ' - may indicate debian renaming failure'
        elif self.fieldname in ['pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm']:
            # Scriptlet contents: show a unified diff (header lines stripped)
            if self.oldvalue and self.newvalue:
                out = '%s changed:\n ' % self.fieldname
            elif self.newvalue:
                out = '%s added:\n ' % self.fieldname
            elif self.oldvalue:
                out = '%s cleared:\n ' % self.fieldname
            alines = self.oldvalue.splitlines()
            blines = self.newvalue.splitlines()
            diff = difflib.unified_diff(alines, blines, self.fieldname, self.fieldname, lineterm='')
            out += '\n '.join(list(diff)[2:])
            out += '\n --'
        elif self.fieldname in img_monitor_files or '/image-files/' in self.path:
            fieldname = self.fieldname
            if '/image-files/' in self.path:
                # Reconstruct the in-image path of the changed file
                fieldname = os.path.join('/' + self.path.split('/image-files/')[1], self.fieldname)
                out = 'Changes to %s:\n ' % fieldname
            else:
                if outer:
                    prefix = 'Changes to %s ' % self.path
                out = '(%s):\n ' % self.fieldname
            if self.filechanges:
                # Pre-computed per-file changes (see compare_file_lists / compare_lists)
                out += '\n '.join(['%s' % i for i in self.filechanges])
            else:
                alines = self.oldvalue.splitlines()
                blines = self.newvalue.splitlines()
                diff = difflib.unified_diff(alines, blines, fieldname, fieldname, lineterm='')
                out += '\n '.join(list(diff))
                out += '\n --'
        else:
            # Fallback for scalar fields
            out = '%s changed from "%s" to "%s"' % (self.fieldname, self.oldvalue, self.newvalue)

        if self.related:
            for chg in self.related:
                # PE/PV/PR context is only interesting at the top level
                if not outer and chg.fieldname in ['PE', 'PV', 'PR']:
                    continue
                for line in chg._str_internal(False).splitlines():
                    out += '\n  * %s' % line

        return '%s%s' % (prefix, out)
| 142 | |||
class FileChange:
    """A single change to one file in an image (add/remove/type/perms/owner/link)."""
    changetype_add = 'A'
    changetype_remove = 'R'
    changetype_type = 'T'
    changetype_perms = 'P'
    changetype_ownergroup = 'O'
    changetype_link = 'L'

    def __init__(self, path, changetype, oldvalue = None, newvalue = None):
        self.path = path
        self.changetype = changetype
        self.oldvalue = oldvalue
        self.newvalue = newvalue

    def _ftype_str(self, ftype):
        """Translate an ls-style file type character into a readable name."""
        names = {
            '-': 'file',
            'd': 'directory',
            'l': 'symlink',
            'c': 'char device',
            'b': 'block device',
            'p': 'fifo',
            's': 'socket',
        }
        if ftype in names:
            return names[ftype]
        return 'unknown (%s)' % ftype

    def __str__(self):
        """Describe the change in one human-readable line."""
        kind = self.changetype
        if kind == self.changetype_add:
            return '%s was added' % self.path
        if kind == self.changetype_remove:
            return '%s was removed' % self.path
        if kind == self.changetype_type:
            return '%s changed type from %s to %s' % (
                self.path, self._ftype_str(self.oldvalue), self._ftype_str(self.newvalue))
        # The remaining known types share an "X changed <what> from A to B" shape
        attributes = {
            self.changetype_perms: 'permissions',
            self.changetype_ownergroup: 'owner/group',
            self.changetype_link: 'symlink target',
        }
        if kind in attributes:
            return '%s changed %s from %s to %s' % (
                self.path, attributes[kind], self.oldvalue, self.newvalue)
        return '%s changed (unknown)' % self.path
| 190 | |||
| 191 | |||
def blob_to_dict(blob):
    """Parse a git blob of "NAME = value" lines into a dict.

    Lines without an '=' are silently skipped; both name and value are
    stripped of surrounding whitespace.
    """
    result = {}
    for raw in blob.data_stream.read().splitlines():
        pair = raw.split('=', 1)
        if len(pair) > 1:
            result[pair[0].strip()] = pair[1].strip()
    return result
| 200 | |||
| 201 | |||
def file_list_to_dict(lines):
    """Convert ls -l style listing lines into {path: [mode, owner, group, (target)]}.

    Each line is expected to look like "MODE OWNER GROUP SIZE ./path";
    symlink entries ("./path -> target") get the target appended as a
    fourth list element.
    """
    entries = {}
    for line in lines:
        # Split into at most 5 fields so file names containing spaces survive
        fields = line.split(None,4)
        # Drop the leading '.' from the path field
        name = fields[4][1:].strip()
        if ' -> ' in name:
            # Symlink: separate path from target
            pieces = name.split(' -> ')
            entries[pieces[0]] = fields[0:3] + [pieces[1]]
        else:
            entries[name] = fields[0:3]
    return entries
| 217 | |||
| 218 | |||
def compare_file_lists(alines, blines):
    """Compare two image file listings and return a list of FileChange objects.

    alines/blines are lists of ls -l style lines (see file_list_to_dict).
    Reports removed and added files, and — for files present in both
    listings — changes of type, permissions, owner/group and symlink target.
    """
    adict = file_list_to_dict(alines)
    bdict = file_list_to_dict(blines)
    filechanges = []
    # Fix: use items() rather than the Python-2-only iteritems(); items()
    # behaves identically here and also works under Python 3
    for path, splitv in adict.items():
        newsplitv = bdict.pop(path, None)
        if newsplitv:
            # Check type (first character of the mode field, e.g. '-', 'd', 'l')
            oldvalue = splitv[0][0]
            newvalue = newsplitv[0][0]
            if oldvalue != newvalue:
                filechanges.append(FileChange(path, FileChange.changetype_type, oldvalue, newvalue))
            # Check permissions (remainder of the mode field)
            oldvalue = splitv[0][1:]
            newvalue = newsplitv[0][1:]
            if oldvalue != newvalue:
                filechanges.append(FileChange(path, FileChange.changetype_perms, oldvalue, newvalue))
            # Check owner/group
            oldvalue = '%s/%s' % (splitv[1], splitv[2])
            newvalue = '%s/%s' % (newsplitv[1], newsplitv[2])
            if oldvalue != newvalue:
                filechanges.append(FileChange(path, FileChange.changetype_ownergroup, oldvalue, newvalue))
            # Check symlink target (old entry may not have been a symlink)
            if newsplitv[0][0] == 'l':
                if len(splitv) > 3:
                    oldvalue = splitv[3]
                else:
                    oldvalue = None
                newvalue = newsplitv[3]
                if oldvalue != newvalue:
                    filechanges.append(FileChange(path, FileChange.changetype_link, oldvalue, newvalue))
        else:
            filechanges.append(FileChange(path, FileChange.changetype_remove))

    # Whatever is left over has been added
    for path in bdict:
        filechanges.append(FileChange(path, FileChange.changetype_add))

    return filechanges
| 258 | |||
| 259 | |||
def compare_lists(alines, blines):
    """Return FileChange records for items removed from / added to a plain list."""
    aset = set(alines)
    bset = set(blines)
    changes = [FileChange(item, FileChange.changetype_remove) for item in aset - bset]
    changes.extend(FileChange(item, FileChange.changetype_add) for item in bset - aset)
    return changes
| 271 | |||
| 272 | |||
def compare_pkg_lists(astr, bstr):
    """Compare two dependency strings, ignoring pure version increases.

    astr/bstr are dependency strings as accepted by
    bb.utils.explode_dep_versions2(). Returns a (depvera, depverb) tuple of
    the exploded dicts with entries removed where the only difference is
    that the version constraint increased (for '>=' / '=' constraints).
    The two dicts compare equal when there is no change worth reporting.
    """
    depvera = bb.utils.explode_dep_versions2(astr)
    depverb = bb.utils.explode_dep_versions2(bstr)

    # Strip out changes where the version has increased
    remove = []
    for k in depvera:
        if k in depverb:
            dva = depvera[k]
            dvb = depverb[k]
            if dva and dvb and len(dva) == len(dvb):
                # Since length is the same, sort so that prefixes (e.g. >=) will line up
                dva.sort()
                dvb.sort()
                removeit = True
                for dvai, dvbi in zip(dva, dvb):
                    if dvai != dvbi:
                        # Constraint strings look like "<prefix> <version>"
                        aiprefix = dvai.split(' ')[0]
                        biprefix = dvbi.split(' ')[0]
                        if aiprefix == biprefix and aiprefix in ['>=', '=']:
                            # Same operator: keep the entry only if the version went down
                            if bb.utils.vercmp(bb.utils.split_version(dvai), bb.utils.split_version(dvbi)) > 0:
                                removeit = False
                                break
                        else:
                            # Different operator (or not a comparable one): report it
                            removeit = False
                            break
                if removeit:
                    remove.append(k)

    for k in remove:
        depvera.pop(k)
        depverb.pop(k)

    return (depvera, depverb)
| 307 | |||
| 308 | |||
def compare_dict_blobs(path, ablob, bblob, report_all, report_ver):
    """Compare two 'latest'-style blobs of NAME = value lines.

    path is the repository-relative directory (its basename is taken as
    the package name). Returns a list of ChangeRecord objects. Unless
    report_all is set, numeric changes below monitor_numeric_threshold and
    uninteresting list changes are filtered out; report_ver forces the
    PKGE/PKGV/PKGR fields to be treated as monitored even when unset.
    """
    adict = blob_to_dict(ablob)
    bdict = blob_to_dict(bblob)

    pkgname = os.path.basename(path)

    # Fallback values used when neither the field nor its mapped source is set
    defaultvals = {}
    defaultvals['PKG'] = pkgname
    defaultvals['PKGE'] = '0'

    changes = []
    # Always consider the defaultval_map keys so defaults get compared too
    keys = list(set(adict.keys()) | set(bdict.keys()) | set(defaultval_map.keys()))
    for key in keys:
        astr = adict.get(key, '')
        bstr = bdict.get(key, '')
        if key in ver_monitor_fields:
            monitored = report_ver or astr or bstr
        else:
            monitored = key in monitor_fields
        mapped_key = defaultval_map.get(key, '')
        if mapped_key:
            # Substitute the defaulted value (e.g. PKGV defaults to PV), marked as such
            if not astr:
                astr = '%s [default]' % adict.get(mapped_key, defaultvals.get(key, ''))
            if not bstr:
                bstr = '%s [default]' % bdict.get(mapped_key, defaultvals.get(key, ''))

        if astr != bstr:
            if (not report_all) and key in numeric_fields:
                aval = int(astr or 0)
                bval = int(bstr or 0)
                if aval != 0:
                    percentchg = ((bval - aval) / float(aval)) * 100
                else:
                    percentchg = 100
                # Skip numeric changes below the reporting threshold
                if abs(percentchg) < monitor_numeric_threshold:
                    continue
            elif (not report_all) and key in list_fields:
                # Ignore -dbg packages gaining files
                if key == "FILELIST" and path.endswith("-dbg") and bstr.strip() != '':
                    continue
                if key in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
                    (depvera, depverb) = compare_pkg_lists(astr, bstr)
                    if depvera == depverb:
                        continue
                alist = astr.split()
                alist.sort()
                blist = bstr.split()
                blist.sort()
                # We don't care about the removal of self-dependencies
                if pkgname in alist and not pkgname in blist:
                    alist.remove(pkgname)
                if ' '.join(alist) == ' '.join(blist):
                    continue

            chg = ChangeRecord(path, key, astr, bstr, monitored)
            changes.append(chg)
    return changes
| 365 | |||
| 366 | |||
def process_changes(repopath, revision1, revision2='HEAD', report_all=False, report_ver=False):
    """Report significant changes between two revisions of a buildhistory repo.

    repopath is a non-bare git repository (requires GitPython). Returns a
    list of ChangeRecord objects; unless report_all is set, only monitored
    changes are returned. report_ver is passed through to
    compare_dict_blobs() for the version fields.
    """
    repo = git.Repo(repopath)
    assert repo.bare == False
    commit = repo.commit(revision1)
    diff = commit.diff(revision2)

    changes = []
    # Modified files
    for d in diff.iter_change_type('M'):
        path = os.path.dirname(d.a_blob.path)
        if path.startswith('packages/'):
            filename = os.path.basename(d.a_blob.path)
            if filename == 'latest':
                # Field-by-field comparison of the package metadata
                changes.extend(compare_dict_blobs(path, d.a_blob, d.b_blob, report_all, report_ver))
            elif filename.startswith('latest.'):
                # Scriptlet files (latest.pkg_postinst etc.): diff whole contents
                chg = ChangeRecord(path, filename, d.a_blob.data_stream.read(), d.b_blob.data_stream.read(), True)
                changes.append(chg)
        elif path.startswith('images/'):
            filename = os.path.basename(d.a_blob.path)
            if filename in img_monitor_files:
                if filename == 'files-in-image.txt':
                    alines = d.a_blob.data_stream.read().splitlines()
                    blines = d.b_blob.data_stream.read().splitlines()
                    filechanges = compare_file_lists(alines,blines)
                    if filechanges:
                        chg = ChangeRecord(path, filename, None, None, True)
                        chg.filechanges = filechanges
                        changes.append(chg)
                elif filename == 'installed-package-names.txt':
                    alines = d.a_blob.data_stream.read().splitlines()
                    blines = d.b_blob.data_stream.read().splitlines()
                    filechanges = compare_lists(alines,blines)
                    if filechanges:
                        chg = ChangeRecord(path, filename, None, None, True)
                        chg.filechanges = filechanges
                        changes.append(chg)
                else:
                    chg = ChangeRecord(path, filename, d.a_blob.data_stream.read(), d.b_blob.data_stream.read(), True)
                    changes.append(chg)
            elif filename == 'image-info.txt':
                changes.extend(compare_dict_blobs(path, d.a_blob, d.b_blob, report_all, report_ver))
            elif '/image-files/' in path:
                # A file extracted from the image itself changed; diff its contents
                chg = ChangeRecord(path, filename, d.a_blob.data_stream.read(), d.b_blob.data_stream.read(), True)
                changes.append(chg)

    # Look for added preinst/postinst/prerm/postrm
    # (without reporting newly added recipes)
    addedpkgs = []
    addedchanges = []
    for d in diff.iter_change_type('A'):
        path = os.path.dirname(d.b_blob.path)
        if path.startswith('packages/'):
            filename = os.path.basename(d.b_blob.path)
            if filename == 'latest':
                addedpkgs.append(path)
            elif filename.startswith('latest.'):
                # filename[7:] strips the 'latest.' prefix, leaving the scriptlet name
                chg = ChangeRecord(path, filename[7:], '', d.b_blob.data_stream.read(), True)
                addedchanges.append(chg)
    for chg in addedchanges:
        # Only report scriptlets added to pre-existing packages
        found = False
        for pkg in addedpkgs:
            if chg.path.startswith(pkg):
                found = True
                break
        if not found:
            changes.append(chg)

    # Look for cleared preinst/postinst/prerm/postrm
    for d in diff.iter_change_type('D'):
        path = os.path.dirname(d.a_blob.path)
        if path.startswith('packages/'):
            filename = os.path.basename(d.a_blob.path)
            if filename != 'latest' and filename.startswith('latest.'):
                chg = ChangeRecord(path, filename[7:], d.a_blob.data_stream.read(), '', True)
                changes.append(chg)

    # Link related changes
    for chg in changes:
        if chg.monitored:
            for chg2 in changes:
                # (Check dirname in the case of fields from recipe info files)
                if chg.path == chg2.path or os.path.dirname(chg.path) == chg2.path:
                    if chg2.fieldname in related_fields.get(chg.fieldname, []):
                        chg.related.append(chg2)
                    elif chg.path == chg2.path and chg.path.startswith('packages/') and chg2.fieldname in ['PE', 'PV', 'PR']:
                        chg.related.append(chg2)

    if report_all:
        return changes
    else:
        return [chg for chg in changes if chg.monitored]
diff --git a/meta/lib/oe/cachedpath.py b/meta/lib/oe/cachedpath.py new file mode 100644 index 0000000000..0840cc4c3f --- /dev/null +++ b/meta/lib/oe/cachedpath.py | |||
| @@ -0,0 +1,233 @@ | |||
| 1 | # | ||
| 2 | # Based on standard python library functions but avoid | ||
# repeated stat calls. It's assumed the files will not change from under us
| 4 | # so we can cache stat calls. | ||
| 5 | # | ||
| 6 | |||
| 7 | import os | ||
| 8 | import errno | ||
| 9 | import stat as statmod | ||
| 10 | |||
class CachedPath(object):
    """Cache for os.stat()/os.lstat()/os.path.normpath() results.

    It's assumed the files will not change from under us, so stat results
    can be cached indefinitely; call updatecache() for any path known to
    have changed. Failed stats are cached as False rather than raising.
    """
    def __init__(self):
        self.statcache = {}      # normalized path -> os.stat() result, or False on failure
        self.lstatcache = {}     # normalized path -> os.lstat() result, or False on failure
        self.normpathcache = {}  # raw path -> os.path.normpath(path)
        # (the original ended with a pointless bare 'return'; removed)

    def updatecache(self, x):
        """Drop the cached stat/lstat entries for path x."""
        x = self.normpath(x)
        if x in self.statcache:
            del self.statcache[x]
        if x in self.lstatcache:
            del self.lstatcache[x]

    def normpath(self, path):
        """Cached os.path.normpath()."""
        if path in self.normpathcache:
            return self.normpathcache[path]
        newpath = os.path.normpath(path)
        self.normpathcache[path] = newpath
        return newpath

    def _callstat(self, path):
        """Cached os.stat() on an already-normalized path; False on error."""
        if path in self.statcache:
            return self.statcache[path]
        try:
            st = os.stat(path)
            self.statcache[path] = st
            return st
        except os.error:
            self.statcache[path] = False
            return False

    # We might as well call lstat and then only
    # call stat as well in the symbolic link case
    # since this turns out to be much more optimal
    # in real world usage of this cache
    def callstat(self, path):
        """Cached os.stat() (follows symlinks); returns False on failure."""
        path = self.normpath(path)
        self.calllstat(path)
        return self.statcache[path]

    def calllstat(self, path):
        """Cached os.lstat() (does not follow symlinks); returns False on failure."""
        path = self.normpath(path)
        if path in self.lstatcache:
            return self.lstatcache[path]
        #bb.error("LStatpath:" + path)
        try:
            lst = os.lstat(path)
            self.lstatcache[path] = lst
            if not statmod.S_ISLNK(lst.st_mode):
                # Not a symlink, so stat() would return the same result
                self.statcache[path] = lst
            else:
                # Symlink: also stat the target and cache that result
                self._callstat(path)
            return lst
        except (os.error, AttributeError):
            self.lstatcache[path] = False
            self.statcache[path] = False
            return False

    # This follows symbolic links, so both islink() and isdir() can be true
    # for the same path on systems that support symlinks
    def isfile(self, path):
        """Test whether a path is a regular file"""
        st = self.callstat(path)
        if not st:
            return False
        return statmod.S_ISREG(st.st_mode)

    # Is a path a directory?
    # This follows symbolic links, so both islink() and isdir()
    # can be true for the same path on systems that support symlinks
    def isdir(self, s):
        """Return true if the pathname refers to an existing directory."""
        st = self.callstat(s)
        if not st:
            return False
        return statmod.S_ISDIR(st.st_mode)

    def islink(self, path):
        """Test whether a path is a symbolic link"""
        st = self.calllstat(path)
        if not st:
            return False
        return statmod.S_ISLNK(st.st_mode)

    # Does a path exist?
    # This is false for dangling symbolic links on systems that support them.
    def exists(self, path):
        """Test whether a path exists. Returns False for broken symbolic links"""
        if self.callstat(path):
            return True
        return False

    def lexists(self, path):
        """Test whether a path exists. Returns True for broken symbolic links"""
        if self.calllstat(path):
            return True
        return False

    def stat(self, path):
        """Cached os.stat(); returns False instead of raising on failure."""
        return self.callstat(path)

    def lstat(self, path):
        """Cached os.lstat(); returns False instead of raising on failure."""
        return self.calllstat(path)

    def walk(self, top, topdown=True, onerror=None, followlinks=False):
        """Directory tree generator; matches os.walk() (not os.path.walk())
        but uses the cached isdir()/islink() results."""
        # We may not have read permission for top, in which case we can't
        # get a list of the files the directory contains. os.path.walk
        # always suppressed the exception then, rather than blow up for a
        # minor reason when (say) a thousand readable directories are still
        # left to visit. That logic is copied here.
        try:
            names = os.listdir(top)
        except os.error as err:
            if onerror is not None:
                onerror(err)
            return

        dirs, nondirs = [], []
        for name in names:
            if self.isdir(os.path.join(top, name)):
                dirs.append(name)
            else:
                nondirs.append(name)

        if topdown:
            yield top, dirs, nondirs
        for name in dirs:
            new_path = os.path.join(top, name)
            if followlinks or not self.islink(new_path):
                for x in self.walk(new_path, topdown, onerror, followlinks):
                    yield x
        if not topdown:
            yield top, dirs, nondirs

    ## realpath() related functions
    def __is_path_below(self, file, root):
        # root is expected to already end with os.path.sep (see realpath())
        return (file + os.path.sep).startswith(root)

    def __realpath_rel(self, start, rel_path, root, loop_cnt, assume_dir):
        """Calculates real path of symlink 'start' + 'rel_path' below
        'root'; no part of 'start' below 'root' must contain symlinks. """
        have_dir = True

        for d in rel_path.split(os.path.sep):
            if not have_dir and not assume_dir:
                raise OSError(errno.ENOENT, "no such directory %s" % start)

            if d == os.path.pardir: # '..'
                if len(start) >= len(root):
                    # do not follow '..' before root
                    start = os.path.dirname(start)
                else:
                    # emit warning?
                    pass
            else:
                (start, have_dir) = self.__realpath(os.path.join(start, d),
                                                    root, loop_cnt, assume_dir)

            assert(self.__is_path_below(start, root))

        return start

    def __realpath(self, file, root, loop_cnt, assume_dir):
        """Resolve symlinks in 'file' (kept below 'root');
        returns (resolved path, is-a-directory)."""
        while self.islink(file) and len(file) >= len(root):
            if loop_cnt == 0:
                raise OSError(errno.ELOOP, file)

            loop_cnt -= 1
            target = os.path.normpath(os.readlink(file))

            if not os.path.isabs(target):
                tdir = os.path.dirname(file)
                assert(self.__is_path_below(tdir, root))
            else:
                # absolute symlink targets are interpreted relative to root
                tdir = root

            file = self.__realpath_rel(tdir, target, root, loop_cnt, assume_dir)

        try:
            is_dir = self.isdir(file)
        # Fix: the original used a bare 'except:' here, which also swallowed
        # KeyboardInterrupt/SystemExit; isdir() only propagates OS-level errors
        except OSError:
            is_dir = False

        return (file, is_dir)

    def realpath(self, file, root, use_physdir = True, loop_cnt = 100, assume_dir = False):
        """ Returns the canonical path of 'file' with assuming a
        toplevel 'root' directory. When 'use_physdir' is set, all
        preceding path components of 'file' will be resolved first;
        this flag should be set unless it is guaranteed that there is
        no symlink in the path. When 'assume_dir' is not set, missing
        path components will raise an ENOENT error"""

        root = os.path.normpath(root)
        file = os.path.normpath(file)

        if not root.endswith(os.path.sep):
            # letting root end with '/' makes some things easier
            root = root + os.path.sep

        if not self.__is_path_below(file, root):
            raise OSError(errno.EINVAL, "file '%s' is not below root" % file)

        try:
            if use_physdir:
                file = self.__realpath_rel(root, file[(len(root) - 1):], root, loop_cnt, assume_dir)
            else:
                file = self.__realpath(file, root, loop_cnt, assume_dir)[0]
        except OSError as e:
            if e.errno == errno.ELOOP:
                # make ELOOP more readable; without catching it, there will
                # be printed a backtrace with 100s of OSError exceptions
                # else
                raise OSError(errno.ELOOP,
                              "too much recursions while resolving '%s'; loop in '%s'" %
                              (file, e.strerror))

            raise

        return file
diff --git a/meta/lib/oe/classextend.py b/meta/lib/oe/classextend.py new file mode 100644 index 0000000000..e2ae7e9f94 --- /dev/null +++ b/meta/lib/oe/classextend.py | |||
| @@ -0,0 +1,104 @@ | |||
| 1 | class ClassExtender(object): | ||
    def __init__(self, extname, d):
        # extname: the name extension/prefix to apply (e.g. "nativesdk")
        self.extname = extname
        # d: the bitbake datastore (accessed via getVar/setVar)
        self.d = d
        # Mapping entries accumulated later — presumably filled in by
        # rename_packages(); verify against that method
        self.pkgs_mapping = []
| 6 | |||
| 7 | def extend_name(self, name): | ||
| 8 | if name.startswith("kernel-") or name == "virtual/kernel": | ||
| 9 | return name | ||
| 10 | if name.startswith("rtld"): | ||
| 11 | return name | ||
| 12 | if name.endswith("-" + self.extname): | ||
| 13 | name = name.replace("-" + self.extname, "") | ||
| 14 | if name.startswith("virtual/"): | ||
| 15 | subs = name.split("/", 1)[1] | ||
| 16 | if not subs.startswith(self.extname): | ||
| 17 | return "virtual/" + self.extname + "-" + subs | ||
| 18 | return name | ||
| 19 | if not name.startswith(self.extname): | ||
| 20 | return self.extname + "-" + name | ||
| 21 | return name | ||
| 22 | |||
| 23 | def map_variable(self, varname, setvar = True): | ||
| 24 | var = self.d.getVar(varname, True) | ||
| 25 | if not var: | ||
| 26 | return "" | ||
| 27 | var = var.split() | ||
| 28 | newvar = [] | ||
| 29 | for v in var: | ||
| 30 | newvar.append(self.extend_name(v)) | ||
| 31 | newdata = " ".join(newvar) | ||
| 32 | if setvar: | ||
| 33 | self.d.setVar(varname, newdata) | ||
| 34 | return newdata | ||
| 35 | |||
| 36 | def map_regexp_variable(self, varname, setvar = True): | ||
| 37 | var = self.d.getVar(varname, True) | ||
| 38 | if not var: | ||
| 39 | return "" | ||
| 40 | var = var.split() | ||
| 41 | newvar = [] | ||
| 42 | for v in var: | ||
| 43 | if v.startswith("^" + self.extname): | ||
| 44 | newvar.append(v) | ||
| 45 | elif v.startswith("^"): | ||
| 46 | newvar.append("^" + self.extname + "-" + v[1:]) | ||
| 47 | else: | ||
| 48 | newvar.append(self.extend_name(v)) | ||
| 49 | newdata = " ".join(newvar) | ||
| 50 | if setvar: | ||
| 51 | self.d.setVar(varname, newdata) | ||
| 52 | return newdata | ||
| 53 | |||
| 54 | def map_depends(self, dep): | ||
| 55 | if dep.endswith(("-native", "-native-runtime", "-crosssdk")) or ('nativesdk-' in dep) or ('cross-canadian' in dep): | ||
| 56 | return dep | ||
| 57 | else: | ||
| 58 | return self.extend_name(dep) | ||
| 59 | |||
| 60 | def map_depends_variable(self, varname, suffix = ""): | ||
| 61 | if suffix: | ||
| 62 | varname = varname + "_" + suffix | ||
| 63 | deps = self.d.getVar(varname, True) | ||
| 64 | if not deps: | ||
| 65 | return | ||
| 66 | deps = bb.utils.explode_dep_versions2(deps) | ||
| 67 | newdeps = {} | ||
| 68 | for dep in deps: | ||
| 69 | newdeps[self.map_depends(dep)] = deps[dep] | ||
| 70 | |||
| 71 | self.d.setVar(varname, bb.utils.join_deps(newdeps, False)) | ||
| 72 | |||
| 73 | def map_packagevars(self): | ||
| 74 | for pkg in (self.d.getVar("PACKAGES", True).split() + [""]): | ||
| 75 | self.map_depends_variable("RDEPENDS", pkg) | ||
| 76 | self.map_depends_variable("RRECOMMENDS", pkg) | ||
| 77 | self.map_depends_variable("RSUGGESTS", pkg) | ||
| 78 | self.map_depends_variable("RPROVIDES", pkg) | ||
| 79 | self.map_depends_variable("RREPLACES", pkg) | ||
| 80 | self.map_depends_variable("RCONFLICTS", pkg) | ||
| 81 | self.map_depends_variable("PKG", pkg) | ||
| 82 | |||
| 83 | def rename_packages(self): | ||
| 84 | for pkg in (self.d.getVar("PACKAGES", True) or "").split(): | ||
| 85 | if pkg.startswith(self.extname): | ||
| 86 | self.pkgs_mapping.append([pkg.split(self.extname + "-")[1], pkg]) | ||
| 87 | continue | ||
| 88 | self.pkgs_mapping.append([pkg, self.extend_name(pkg)]) | ||
| 89 | |||
| 90 | self.d.setVar("PACKAGES", " ".join([row[1] for row in self.pkgs_mapping])) | ||
| 91 | |||
| 92 | def rename_package_variables(self, variables): | ||
| 93 | for pkg_mapping in self.pkgs_mapping: | ||
| 94 | for subs in variables: | ||
| 95 | self.d.renameVar("%s_%s" % (subs, pkg_mapping[0]), "%s_%s" % (subs, pkg_mapping[1])) | ||
| 96 | |||
class NativesdkClassExtender(ClassExtender):
    """ClassExtender variant for nativesdk: toolchain recipes are turned
    into their -crosssdk counterparts rather than being prefixed."""

    def map_depends(self, dep):
        if 'nativesdk-' in dep:
            return dep
        if dep.endswith(("-native", "-native-runtime", "-cross", "-crosssdk")):
            return dep
        if dep.endswith(("-gcc-intermediate", "-gcc-initial", "-gcc", "-g++")):
            # Compilers become crosssdk variants instead.
            return dep + "-crosssdk"
        return self.extend_name(dep)
diff --git a/meta/lib/oe/classutils.py b/meta/lib/oe/classutils.py new file mode 100644 index 0000000000..58188fdd6e --- /dev/null +++ b/meta/lib/oe/classutils.py | |||
| @@ -0,0 +1,43 @@ | |||
class ClassRegistry(type):
    """Maintain a registry of classes, indexed by name.

    Note that this implementation requires that the names be unique, as it uses
    a dictionary to hold the classes by name.

    The name in the registry can be overridden via the 'name' attribute of the
    class, and the 'priority' attribute controls priority. The prioritized()
    method returns the registered classes in priority order.

    Subclasses of ClassRegistry may define an 'implemented' property to exert
    control over whether the class will be added to the registry (e.g. to keep
    abstract base classes out of the registry)."""
    priority = 0
    class __metaclass__(type):
        """Give each ClassRegistry their own registry"""
        def __init__(cls, name, bases, attrs):
            cls.registry = {}
            type.__init__(cls, name, bases, attrs)

    def __init__(cls, name, bases, attrs):
        super(ClassRegistry, cls).__init__(name, bases, attrs)
        # An 'implemented' attribute that evaluates false keeps the class out
        # of the registry; absence of the attribute means "always register".
        try:
            if not cls.implemented:
                return
        except AttributeError:
            pass

        # Default the registry key to the class name unless overridden.
        try:
            cls.name
        except AttributeError:
            cls.name = name
        cls.registry[cls.name] = cls

    @classmethod
    def prioritized(tcls):
        """Return registered classes sorted by descending priority."""
        return sorted(tcls.registry.values(),
                      key=lambda v: v.priority, reverse=True)

    def unregister(cls):
        """Remove every registry entry pointing at this class."""
        # Snapshot the keys first: deleting from a dict while iterating its
        # live key view raises RuntimeError on Python 3 (and is fragile
        # generally).
        for key in list(cls.registry.keys()):
            if cls.registry[key] is cls:
                del cls.registry[key]
diff --git a/meta/lib/oe/data.py b/meta/lib/oe/data.py new file mode 100644 index 0000000000..4cc0e02968 --- /dev/null +++ b/meta/lib/oe/data.py | |||
| @@ -0,0 +1,17 @@ | |||
| 1 | import oe.maketype | ||
| 2 | |||
def typed_value(key, d):
    """Construct a value for the specified metadata variable, using its flags
    to determine the type and parameters for construction.

    Raises a fatal bitbake error if the flags describe an invalid type or
    construction parameters."""
    var_type = d.getVarFlag(key, 'type')
    flags = d.getVarFlags(key)
    if flags is not None:
        # Expand every flag value; .items() works on Python 2 and 3 alike,
        # unlike the Python-2-only .iteritems().
        flags = dict((flag, d.expand(value))
                     for flag, value in flags.items())
    else:
        flags = {}

    try:
        return oe.maketype.create(d.getVar(key, True) or '', var_type, **flags)
    except (TypeError, ValueError) as exc:
        # "except E as exc" replaces the Python-2-only "except E, exc" form
        # (the rest of this codebase already uses "as").
        bb.msg.fatal("Data", "%s: %s" % (key, str(exc)))
diff --git a/meta/lib/oe/distro_check.py b/meta/lib/oe/distro_check.py new file mode 100644 index 0000000000..8ed5b0ec80 --- /dev/null +++ b/meta/lib/oe/distro_check.py | |||
| @@ -0,0 +1,383 @@ | |||
def get_links_from_url(url):
    "Return all the href links found on the web location"

    # NOTE(review): urllib.urlopen and sgmllib are Python-2-only APIs; this
    # module predates any Python 3 migration.
    import urllib, sgmllib

    class LinksParser(sgmllib.SGMLParser):
        def parse(self, s):
            "Parse the given string 's'."
            self.feed(s)
            self.close()

        def __init__(self, verbose=0):
            "Initialise an object passing 'verbose' to the superclass."
            sgmllib.SGMLParser.__init__(self, verbose)
            self.hyperlinks = []  # collected href values, surrounding '/' stripped

        def start_a(self, attributes):
            "Process a hyperlink and its 'attributes'."
            for name, value in attributes:
                if name == "href":
                    self.hyperlinks.append(value.strip('/'))

        def get_hyperlinks(self):
            "Return the list of hyperlinks."
            return self.hyperlinks

    # Fetch the whole page body, then feed it through the parser.
    sock = urllib.urlopen(url)
    webpage = sock.read()
    sock.close()

    linksparser = LinksParser()
    linksparser.parse(webpage)
    return linksparser.get_hyperlinks()
| 34 | |||
def find_latest_numeric_release(url):
    """Find the latest listed numeric release on the given url.

    Non-numeric links are ignored; returns "" when no numeric link exists."""
    best = 0
    best_link = ""
    for link in get_links_from_url(url):
        try:
            release = float(link)
        except ValueError:
            # Not a numeric release directory; skip it.  Catching only
            # ValueError (instead of the original bare except:) avoids
            # masking unrelated bugs.
            continue
        if release > best:
            best = release
            best_link = link
    return best_link
| 48 | |||
def is_src_rpm(name):
    "Check if the link is pointing to a src.rpm file"
    return name.endswith(".src.rpm")
| 55 | |||
def package_name_from_srpm(srpm):
    """Strip version/release/arch out of a src.rpm filename, returning the
    package name.

    The name is rebuilt from the '-'-separated fields: the final field (the
    release + ".src.rpm") is always excluded, and any other field starting
    with a digit is treated as version material and skipped.  Note that a
    digit-led field does not terminate the scan -- later alphabetic fields
    are still appended, matching the historical behaviour."""
    fields = srpm.split('-')
    # Renamed the loop variable: the original shadowed the builtin 'str'.
    package_name = fields[0]
    for field in fields[1:-1]:
        if not field[0].isdigit():
            package_name += '-' + field
    return package_name
| 65 | |||
def clean_package_list(package_list):
    """Remove duplicate entries and return the package list sorted.

    The original implementation abused a dict (shadowing the builtin 'set')
    for deduplication and, despite its docstring, never sorted; sorting also
    makes the output deterministic across runs."""
    return sorted(set(package_list))
| 71 | |||
| 72 | |||
def get_latest_released_meego_source_package_list():
    "Returns list of all the name os packages in the latest meego distro"

    package_names = []
    try:
        # The list is expected to have been fetched into /tmp beforehand;
        # 'with' guarantees the handle is closed (the original leaked it).
        with open("/tmp/Meego-1.1", "r") as f:
            for line in f:
                # Strip the trailing '\n' and tag each name with its section.
                package_names.append(line[:-1] + ":" + "main")
    except IOError:
        # Best effort: a missing file simply yields an empty package list.
        pass
    package_list = clean_package_list(package_names)
    # NOTE(review): the reported release "1.0" does not match the file name
    # "Meego-1.1"; preserved as-is.
    return "1.0", package_list
| 84 | |||
def get_source_package_list_from_url(url, section):
    "Return a sectioned list of package names from a URL list"

    bb.note("Reading %s: %s" % (url, section))
    links = get_links_from_url(url)
    # Keep only src.rpm links; tag each derived package name with the section.
    return [package_name_from_srpm(link) + ":" + section
            for link in links if is_src_rpm(link)]
| 98 | |||
def get_latest_released_fedora_source_package_list():
    "Returns list of all the name os packages in the latest fedora distro"
    latest = find_latest_numeric_release("http://archive.fedoraproject.org/pub/fedora/linux/releases/")

    # (url, section) pairs to harvest; the "everything" repo is disabled.
    sources = [
        ("http://archive.fedoraproject.org/pub/fedora/linux/releases/%s/Fedora/source/SRPMS/" % latest, "main"),
        # ("http://download.fedora.redhat.com/pub/fedora/linux/releases/%s/Everything/source/SPRMS/" % latest, "everything"),
        ("http://archive.fedoraproject.org/pub/fedora/linux/updates/%s/SRPMS/" % latest, "updates"),
    ]
    package_names = []
    for url, section in sources:
        package_names += get_source_package_list_from_url(url, section)

    return latest, clean_package_list(package_names)
| 111 | |||
def get_latest_released_opensuse_source_package_list():
    "Returns list of all the name os packages in the latest opensuse distro"
    latest = find_latest_numeric_release("http://download.opensuse.org/source/distribution/")

    sources = [
        ("http://download.opensuse.org/source/distribution/%s/repo/oss/suse/src/" % latest, "main"),
        ("http://download.opensuse.org/update/%s/rpm/src/" % latest, "updates"),
    ]
    package_names = []
    for url, section in sources:
        package_names += get_source_package_list_from_url(url, section)

    return latest, clean_package_list(package_names)
| 121 | |||
def get_latest_released_mandriva_source_package_list():
    "Returns list of all the name os packages in the latest mandriva distro"
    latest = find_latest_numeric_release("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/")

    # (url, section) pairs to harvest; the "contrib" repo is disabled.
    sources = [
        ("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/main/release/" % latest, "main"),
        # ("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/contrib/release/" % latest, "contrib"),
        ("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/main/updates/" % latest, "updates"),
    ]
    package_names = []
    for url, section in sources:
        package_names += get_source_package_list_from_url(url, section)

    return latest, clean_package_list(package_names)
| 131 | |||
def find_latest_debian_release(url):
    """Find the latest listed Debian release on the given url.

    Returns "_NotFound_" when no suitable "Debian*" link is present."""
    releases = [link for link in get_links_from_url(url)
                if link.startswith("Debian") and ';' not in link]
    releases.sort()
    try:
        # Highest (lexicographically last) entry, minus the "Debian" prefix.
        return releases.pop()[6:]
    except IndexError:
        # Only the empty-list case -- the original's bare except: could hide
        # unrelated failures.
        return "_NotFound_"
| 145 | |||
def get_debian_style_source_package_list(url, section):
    """Return the list of package-names stored in the debian style Sources.gz
    file, each tagged with ':<section>'."""
    import urllib
    import tempfile
    import gzip

    # Download into a named temporary file so gzip can reopen it by name.
    sock = urllib.urlopen(url)
    tmpfile = tempfile.NamedTemporaryFile(mode='wb', prefix='oecore.', suffix='.tmp', delete=False)
    tmpfilename = tmpfile.name
    try:
        tmpfile.write(sock.read())
    finally:
        sock.close()
        tmpfile.close()

    bb.note("Reading %s: %s" % (url, section))

    package_names = []
    f = gzip.open(tmpfilename)
    try:
        for line in f:
            if line[:9] == "Package: ":
                package_names.append(line[9:-1] + ":" + section) # Also strip the '\n' at the end
    finally:
        # Close the gzip handle before unlinking: the original leaked it.
        f.close()
        os.unlink(tmpfilename)

    return package_names
| 167 | |||
def get_latest_released_debian_source_package_list():
    "Returns list of all the name os packages in the latest debian distro"
    latest = find_latest_debian_release("http://ftp.debian.org/debian/dists/")

    # (url, section) pairs to harvest; the "contrib" repo is disabled.
    # Note the URLs use the "stable" alias rather than 'latest'.
    sources = [
        ("http://ftp.debian.org/debian/dists/stable/main/source/Sources.gz", "main"),
        # ("http://ftp.debian.org/debian/dists/stable/contrib/source/Sources.gz", "contrib"),
        ("http://ftp.debian.org/debian/dists/stable-proposed-updates/main/source/Sources.gz", "updates"),
    ]
    package_names = []
    for url, section in sources:
        package_names += get_debian_style_source_package_list(url, section)

    return latest, clean_package_list(package_names)
| 179 | |||
def find_latest_ubuntu_release(url):
    "Find the latest listed ubuntu release on the given url"
    url += "?C=M;O=D" # Descending Sort by Last Modified
    # The first "<release>-updates" link in the modification-sorted listing
    # names the newest release.
    for link in get_links_from_url(url):
        if link.endswith("-updates"):
            return link[:-8]
    return "_NotFound_"
| 187 | |||
def get_latest_released_ubuntu_source_package_list():
    "Returns list of all the name os packages in the latest ubuntu distro"
    latest = find_latest_ubuntu_release("http://archive.ubuntu.com/ubuntu/dists/")

    # (url, section) pairs to harvest; multiverse/universe are disabled.
    sources = [
        ("http://archive.ubuntu.com/ubuntu/dists/%s/main/source/Sources.gz" % latest, "main"),
        # ("http://archive.ubuntu.com/ubuntu/dists/%s/multiverse/source/Sources.gz" % latest, "multiverse"),
        # ("http://archive.ubuntu.com/ubuntu/dists/%s/universe/source/Sources.gz" % latest, "universe"),
        ("http://archive.ubuntu.com/ubuntu/dists/%s-updates/main/source/Sources.gz" % latest, "updates"),
    ]
    package_names = []
    for url, section in sources:
        package_names += get_debian_style_source_package_list(url, section)

    return latest, clean_package_list(package_names)
| 201 | |||
def create_distro_packages_list(distro_check_dir):
    """(Re)generate the per-distro source package lists under
    <distro_check_dir>/package_lists, one "<Distro>-<release>" file each."""
    pkglst_dir = os.path.join(distro_check_dir, "package_lists")
    if not os.path.isdir(pkglst_dir):
        os.makedirs(pkglst_dir)
    # First clear any stale lists from a previous run.
    # (Renamed the loop variable: the original shadowed the builtin 'file'.)
    for entry in os.listdir(pkglst_dir):
        os.unlink(os.path.join(pkglst_dir, entry))

    per_distro_functions = [
        ["Debian", get_latest_released_debian_source_package_list],
        ["Ubuntu", get_latest_released_ubuntu_source_package_list],
        ["Fedora", get_latest_released_fedora_source_package_list],
        ["OpenSuSE", get_latest_released_opensuse_source_package_list],
        ["Mandriva", get_latest_released_mandriva_source_package_list],
        ["Meego", get_latest_released_meego_source_package_list]
    ]

    from datetime import datetime
    begin = datetime.now()
    for name, fetcher in per_distro_functions:
        release, package_list = fetcher()
        bb.note("Distro: %s, Latest Release: %s, # src packages: %d" % (name, release, len(package_list)))
        package_list_file = os.path.join(pkglst_dir, name + "-" + release)
        # Close deterministically even if a write fails.
        f = open(package_list_file, "w+b")
        try:
            for pkg in package_list:
                f.write(pkg + "\n")
        finally:
            f.close()
    end = datetime.now()
    delta = end - begin
    # Typo fixed in the log message ("generatiosn" -> "generation").
    bb.note("package_list generation took this much time: %d seconds" % delta.seconds)
| 233 | |||
def update_distro_data(distro_check_dir, datetime):
    """
    If distro packages list data is old then rebuild it.
    The operations has to be protected by a lock so that
    only one thread performes it at a time.

    'datetime' is the current build datetime string; only its first 8
    characters (the date part) are compared against the saved stamp.
    """
    if not os.path.isdir(distro_check_dir):
        try:
            bb.note("Making new directory: %s" % distro_check_dir)
            os.makedirs(distro_check_dir)
        except OSError:
            raise Exception('Unable to create directory %s' % (distro_check_dir))

    datetime_file = os.path.join(distro_check_dir, "build_datetime")
    saved_datetime = "_invalid_"
    import fcntl
    f = None
    try:
        if not os.path.exists(datetime_file):
            open(datetime_file, 'w').close() # touch the file so that the next open won't fail

        # Text mode: the stamp is compared/written as a plain string.
        f = open(datetime_file, "r+")
        fcntl.lockf(f, fcntl.LOCK_EX)
        saved_datetime = f.read()
        if saved_datetime[0:8] != datetime[0:8]:
            bb.note("The build datetime did not match: saved:%s current:%s" % (saved_datetime, datetime))
            bb.note("Regenerating distro package lists")
            create_distro_packages_list(distro_check_dir)
            f.seek(0)
            f.write(datetime)

    except (IOError, OSError):
        # IOError is what file operations actually raise on Python 2; the
        # original caught only OSError and missed open()/read() failures.
        raise Exception('Unable to read/write this file: %s' % (datetime_file))
    finally:
        # f may be unset if open() itself failed; the original hit a
        # NameError here in that case.
        if f:
            fcntl.lockf(f, fcntl.LOCK_UN)
            f.close()
| 270 | |||
def compare_in_distro_packages_list(distro_check_dir, d):
    # Scan every "<Distro>-<release>" package list under distro_check_dir and
    # return the "<Distro>-<section>" identifiers whose lists contain this
    # recipe, plus any literal tokens from DISTRO_PN_ALIAS.
    if not os.path.isdir(distro_check_dir):
        raise Exception("compare_in_distro_packages_list: invalid distro_check_dir passed")

    localdata = bb.data.createCopy(d)
    pkglst_dir = os.path.join(distro_check_dir, "package_lists")
    matching_distros = []
    pn = d.getVar('PN', True)
    recipe_name = d.getVar('PN', True)
    bb.note("Checking: %s" % pn)

    # NOTE(review): trim_dict is never read below; looks like dead code.
    trim_dict = dict({"-native":"-native", "-cross":"-cross", "-initial":"-initial"})

    # Strip the recipe-name decorations (-native/-cross/-initial, the
    # nativesdk- prefix) so the bare upstream name can be compared, and
    # extend OVERRIDES so pn-specific DISTRO_PN_ALIAS values apply below.
    if pn.find("-native") != -1:
        pnstripped = pn.split("-native")
        localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
        bb.data.update_data(localdata)
        recipe_name = pnstripped[0]

    if pn.startswith("nativesdk-"):
        pnstripped = pn.split("nativesdk-")
        localdata.setVar('OVERRIDES', "pn-" + pnstripped[1] + ":" + d.getVar('OVERRIDES', True))
        bb.data.update_data(localdata)
        recipe_name = pnstripped[1]

    if pn.find("-cross") != -1:
        pnstripped = pn.split("-cross")
        localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
        bb.data.update_data(localdata)
        recipe_name = pnstripped[0]

    if pn.find("-initial") != -1:
        pnstripped = pn.split("-initial")
        localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
        bb.data.update_data(localdata)
        recipe_name = pnstripped[0]

    bb.note("Recipe: %s" % recipe_name)
    # DISTRO_PN_ALIAS holds space-separated tokens: either "Distro=alias"
    # pairs or bare distro names (which must be in distro_exceptions).
    tmp = localdata.getVar('DISTRO_PN_ALIAS', True)

    distro_exceptions = dict({"OE-Core":'OE-Core', "OpenedHand":'OpenedHand', "Intel":'Intel', "Upstream":'Upstream', "Windriver":'Windriver', "OSPDT":'OSPDT Approved', "Poky":'poky'})

    if tmp:
        list = tmp.split(' ')
        for str in list:
            # NOTE(review): a bare token not listed in distro_exceptions
            # raises KeyError here -- presumably intentional validation.
            if str and str.find("=") == -1 and distro_exceptions[str]:
                matching_distros.append(str)

    # Collect the "distro=name" aliases, keyed by lower-cased distro name.
    distro_pn_aliases = {}
    if tmp:
        list = tmp.split(' ')
        for str in list:
            if str.find("=") != -1:
                (dist, pn_alias) = str.split('=')
                distro_pn_aliases[dist.strip().lower()] = pn_alias.strip()

    # Each list file is named "<Distro>-<release>" and holds "pkg:section"
    # lines; use the per-distro alias when one exists.
    for file in os.listdir(pkglst_dir):
        (distro, distro_release) = file.split("-")
        f = open(os.path.join(pkglst_dir, file), "rb")
        for line in f:
            (pkg, section) = line.split(":")
            if distro.lower() in distro_pn_aliases:
                pn = distro_pn_aliases[distro.lower()]
            else:
                pn = recipe_name
            if pn == pkg:
                # First match wins for this distro file; the second f.close()
                # below (after the loop) on the already-closed file is a no-op.
                matching_distros.append(distro + "-" + section[:-1]) # strip the \n at the end
                f.close()
                break
        f.close()


    # NOTE(review): this re-appends every DISTRO_PN_ALIAS token, including
    # the "x=y" pairs, duplicating entries added earlier; preserved as-is.
    if tmp != None:
        list = tmp.split(' ')
        for item in list:
            matching_distros.append(item)
    bb.note("Matching: %s" % matching_distros)
    return matching_distros
| 349 | |||
def create_log_file(d, logname):
    """Create a DATETIME-stamped log file under LOG_DIR plus a convenience
    symlink named 'logname' pointing at it.

    Publishes the path in LOG_FILE and returns it."""
    logpath = d.getVar('LOG_DIR', True)
    bb.utils.mkdirhier(logpath)
    logfn, logsuffix = os.path.splitext(logname)
    logfile = os.path.join(logpath, "%s.%s%s" % (logfn, d.getVar('DATETIME', True), logsuffix))
    if not os.path.exists(logfile):
        slogfile = os.path.join(logpath, logname)
        if os.path.exists(slogfile):
            os.remove(slogfile)
        # Touch the file directly instead of the original
        # subprocess.call("touch %s" % logfile, shell=True), which was both
        # wasteful and unsafe for paths containing spaces/shell metacharacters.
        open(logfile, 'a').close()
        os.symlink(logfile, slogfile)
    d.setVar('LOG_FILE', logfile)
    return logfile
| 364 | |||
| 365 | |||
def save_distro_check_result(result, datetime, result_file, d):
    """Append one CSV line ("PN,<result items>") to result_file under an
    exclusive lock so parallel tasks do not interleave their writes.

    'datetime' is accepted for call-compatibility but unused."""
    pn = d.getVar('PN', True)
    logdir = d.getVar('LOG_DIR', True)
    if not logdir:
        bb.error("LOG_DIR variable is not defined, can't write the distro_check results")
        return
    if not os.path.isdir(logdir):
        os.makedirs(logdir)
    line = pn
    for i in result:
        line = line + "," + i
    import fcntl
    f = open(result_file, "a")
    try:
        # Lock, append at EOF, then always unlock/close -- the original
        # leaked the handle (and the lock) if the write raised.
        fcntl.lockf(f, fcntl.LOCK_EX)
        f.seek(0, os.SEEK_END) # seek to the end of file
        f.write(line + "\n")
    finally:
        fcntl.lockf(f, fcntl.LOCK_UN)
        f.close()
diff --git a/meta/lib/oe/image.py b/meta/lib/oe/image.py new file mode 100644 index 0000000000..c9b9033132 --- /dev/null +++ b/meta/lib/oe/image.py | |||
| @@ -0,0 +1,337 @@ | |||
| 1 | from oe.utils import execute_pre_post_process | ||
| 2 | import os | ||
| 3 | import subprocess | ||
| 4 | import multiprocessing | ||
| 5 | |||
| 6 | |||
def generate_image(arg):
    """Run one image-creation script.

    'arg' is a (fstype, subimages, create_img_cmd) tuple; returns None on
    success or an error string on failure (suitable for collecting results
    from a multiprocessing pool)."""
    fstype, subimages, create_img_cmd = arg

    bb.note("Running image creation script for %s: %s ..." %
            (fstype, create_img_cmd))

    try:
        subprocess.check_output(create_img_cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        return ("Error: The image creation script '%s' returned %d:\n%s" %
                (e.cmd, e.returncode, e.output))

    return None
| 20 | |||
| 21 | |||
| 22 | """ | ||
| 23 | This class will help compute IMAGE_FSTYPE dependencies and group them in batches | ||
| 24 | that can be executed in parallel. | ||
| 25 | |||
| 26 | The next example is for illustration purposes, highly unlikely to happen in real life. | ||
| 27 | It's just one of the test cases I used to test the algorithm: | ||
| 28 | |||
| 29 | For: | ||
| 30 | IMAGE_FSTYPES = "i1 i2 i3 i4 i5" | ||
| 31 | IMAGE_TYPEDEP_i4 = "i2" | ||
| 32 | IMAGE_TYPEDEP_i5 = "i6 i4" | ||
| 33 | IMAGE_TYPEDEP_i6 = "i7" | ||
| 34 | IMAGE_TYPEDEP_i7 = "i2" | ||
| 35 | |||
| 36 | We get the following list of batches that can be executed in parallel, having the | ||
| 37 | dependencies satisfied: | ||
| 38 | |||
| 39 | [['i1', 'i3', 'i2'], ['i4', 'i7'], ['i6'], ['i5']] | ||
| 40 | """ | ||
| 41 | class ImageDepGraph(object): | ||
| 42 | def __init__(self, d): | ||
| 43 | self.d = d | ||
| 44 | self.graph = dict() | ||
| 45 | self.deps_array = dict() | ||
| 46 | |||
| 47 | def _construct_dep_graph(self, image_fstypes): | ||
| 48 | graph = dict() | ||
| 49 | |||
| 50 | def add_node(node): | ||
| 51 | deps = (self.d.getVar('IMAGE_TYPEDEP_' + node, True) or "") | ||
| 52 | if deps != "": | ||
| 53 | graph[node] = deps | ||
| 54 | |||
| 55 | for dep in deps.split(): | ||
| 56 | if not dep in graph: | ||
| 57 | add_node(dep) | ||
| 58 | else: | ||
| 59 | graph[node] = "" | ||
| 60 | |||
| 61 | for fstype in image_fstypes: | ||
| 62 | add_node(fstype) | ||
| 63 | |||
| 64 | return graph | ||
| 65 | |||
| 66 | def _clean_graph(self): | ||
| 67 | # Live and VMDK images will be processed via inheriting | ||
| 68 | # bbclass and does not get processed here. Remove them from the fstypes | ||
| 69 | # graph. Their dependencies are already added, so no worries here. | ||
| 70 | remove_list = (self.d.getVar('IMAGE_TYPES_MASKED', True) or "").split() | ||
| 71 | |||
| 72 | for item in remove_list: | ||
| 73 | self.graph.pop(item, None) | ||
| 74 | |||
| 75 | def _compute_dependencies(self): | ||
| 76 | """ | ||
| 77 | returns dict object of nodes with [no_of_depends_on, no_of_depended_by] | ||
| 78 | for each node | ||
| 79 | """ | ||
| 80 | deps_array = dict() | ||
| 81 | for node in self.graph: | ||
| 82 | deps_array[node] = [0, 0] | ||
| 83 | |||
| 84 | for node in self.graph: | ||
| 85 | deps = self.graph[node].split() | ||
| 86 | deps_array[node][0] += len(deps) | ||
| 87 | for dep in deps: | ||
| 88 | deps_array[dep][1] += 1 | ||
| 89 | |||
| 90 | return deps_array | ||
| 91 | |||
| 92 | def _sort_graph(self): | ||
| 93 | sorted_list = [] | ||
| 94 | group = [] | ||
| 95 | for node in self.graph: | ||
| 96 | if node not in self.deps_array: | ||
| 97 | continue | ||
| 98 | |||
| 99 | depends_on = self.deps_array[node][0] | ||
| 100 | |||
| 101 | if depends_on == 0: | ||
| 102 | group.append(node) | ||
| 103 | |||
| 104 | if len(group) == 0 and len(self.deps_array) != 0: | ||
| 105 | bb.fatal("possible fstype circular dependency...") | ||
| 106 | |||
| 107 | sorted_list.append(group) | ||
| 108 | |||
| 109 | # remove added nodes from deps_array | ||
| 110 | for item in group: | ||
| 111 | for node in self.graph: | ||
| 112 | if item in self.graph[node]: | ||
| 113 | self.deps_array[node][0] -= 1 | ||
| 114 | |||
| 115 | self.deps_array.pop(item, None) | ||
| 116 | |||
| 117 | if len(self.deps_array): | ||
| 118 | # recursive call, to find the next group | ||
| 119 | sorted_list += self._sort_graph() | ||
| 120 | |||
| 121 | return sorted_list | ||
| 122 | |||
| 123 | def group_fstypes(self, image_fstypes): | ||
| 124 | self.graph = self._construct_dep_graph(image_fstypes) | ||
| 125 | |||
| 126 | self._clean_graph() | ||
| 127 | |||
| 128 | self.deps_array = self._compute_dependencies() | ||
| 129 | |||
| 130 | alltypes = [node for node in self.graph] | ||
| 131 | |||
| 132 | return (alltypes, self._sort_graph()) | ||
| 133 | |||
| 134 | |||
| 135 | class Image(ImageDepGraph): | ||
| 136 | def __init__(self, d): | ||
| 137 | self.d = d | ||
| 138 | |||
| 139 | super(Image, self).__init__(d) | ||
| 140 | |||
| 141 | def _get_rootfs_size(self): | ||
| 142 | """compute the rootfs size""" | ||
| 143 | rootfs_alignment = int(self.d.getVar('IMAGE_ROOTFS_ALIGNMENT', True)) | ||
| 144 | overhead_factor = float(self.d.getVar('IMAGE_OVERHEAD_FACTOR', True)) | ||
| 145 | rootfs_req_size = int(self.d.getVar('IMAGE_ROOTFS_SIZE', True)) | ||
| 146 | rootfs_extra_space = eval(self.d.getVar('IMAGE_ROOTFS_EXTRA_SPACE', True)) | ||
| 147 | |||
| 148 | output = subprocess.check_output(['du', '-ks', | ||
| 149 | self.d.getVar('IMAGE_ROOTFS', True)]) | ||
| 150 | size_kb = int(output.split()[0]) | ||
| 151 | base_size = size_kb * overhead_factor | ||
| 152 | base_size = (base_size, rootfs_req_size)[base_size < rootfs_req_size] + \ | ||
| 153 | rootfs_extra_space | ||
| 154 | |||
| 155 | if base_size != int(base_size): | ||
| 156 | base_size = int(base_size + 1) | ||
| 157 | |||
| 158 | base_size += rootfs_alignment - 1 | ||
| 159 | base_size -= base_size % rootfs_alignment | ||
| 160 | |||
| 161 | return base_size | ||
| 162 | |||
    def _create_symlinks(self, subimages):
        """create symlinks to the newly created image"""
        deploy_dir = self.d.getVar('DEPLOY_DIR_IMAGE', True)
        img_name = self.d.getVar('IMAGE_NAME', True)
        link_name = self.d.getVar('IMAGE_LINK_NAME', True)
        manifest_name = self.d.getVar('IMAGE_MANIFEST', True)

        # NOTE(review): changes the process working directory and never
        # restores it; subsequent relative paths rely on this.
        os.chdir(deploy_dir)

        if link_name is not None:
            # One "<link_name>.<type>" symlink per produced subimage.
            for type in subimages:
                if os.path.exists(img_name + ".rootfs." + type):
                    dst = link_name + "." + type
                    src = img_name + ".rootfs." + type
                    bb.note("Creating symlink: %s -> %s" % (dst, src))
                    # NOTE(review): os.symlink fails with EEXIST if dst
                    # already exists -- presumably _remove_old_symlinks()
                    # runs first; confirm against the caller.
                    os.symlink(src, dst)

            # Link the manifest too, if produced and not yet linked.
            if manifest_name is not None and \
                    os.path.exists(manifest_name) and \
                    not os.path.exists(link_name + ".manifest"):
                os.symlink(os.path.basename(manifest_name),
                           link_name + ".manifest")
| 185 | |||
| 186 | def _remove_old_symlinks(self): | ||
| 187 | """remove the symlinks to old binaries""" | ||
| 188 | |||
| 189 | if self.d.getVar('IMAGE_LINK_NAME', True): | ||
| 190 | deploy_dir = self.d.getVar('DEPLOY_DIR_IMAGE', True) | ||
| 191 | for img in os.listdir(deploy_dir): | ||
| 192 | if img.find(self.d.getVar('IMAGE_LINK_NAME', True)) == 0: | ||
| 193 | img = os.path.join(deploy_dir, img) | ||
| 194 | if os.path.islink(img): | ||
| 195 | if self.d.getVar('RM_OLD_IMAGE', True) == "1" and \ | ||
| 196 | os.path.exists(os.path.realpath(img)): | ||
| 197 | os.remove(os.path.realpath(img)) | ||
| 198 | |||
| 199 | os.remove(img) | ||
| 200 | |||
| 201 | """ | ||
| 202 | This function will just filter out the compressed image types from the | ||
| 203 | fstype groups returning a (filtered_fstype_groups, cimages) tuple. | ||
| 204 | """ | ||
| 205 | def _filter_out_commpressed(self, fstype_groups): | ||
| 206 | ctypes = self.d.getVar('COMPRESSIONTYPES', True).split() | ||
| 207 | cimages = {} | ||
| 208 | |||
| 209 | filtered_groups = [] | ||
| 210 | for group in fstype_groups: | ||
| 211 | filtered_group = [] | ||
| 212 | for type in group: | ||
| 213 | basetype = None | ||
| 214 | for ctype in ctypes: | ||
| 215 | if type.endswith("." + ctype): | ||
| 216 | basetype = type[:-len("." + ctype)] | ||
| 217 | if basetype not in filtered_group: | ||
| 218 | filtered_group.append(basetype) | ||
| 219 | if basetype not in cimages: | ||
| 220 | cimages[basetype] = [] | ||
| 221 | if ctype not in cimages[basetype]: | ||
| 222 | cimages[basetype].append(ctype) | ||
| 223 | break | ||
| 224 | if not basetype and type not in filtered_group: | ||
| 225 | filtered_group.append(type) | ||
| 226 | |||
| 227 | filtered_groups.append(filtered_group) | ||
| 228 | |||
| 229 | return (filtered_groups, cimages) | ||
| 230 | |||
| 231 | def _get_image_types(self): | ||
| 232 | """returns a (types, cimages) tuple""" | ||
| 233 | |||
| 234 | alltypes, fstype_groups = self.group_fstypes(self.d.getVar('IMAGE_FSTYPES', True).split()) | ||
| 235 | |||
| 236 | filtered_groups, cimages = self._filter_out_commpressed(fstype_groups) | ||
| 237 | |||
| 238 | return (alltypes, filtered_groups, cimages) | ||
| 239 | |||
| 240 | def _write_script(self, type, cmds): | ||
| 241 | tempdir = self.d.getVar('T', True) | ||
| 242 | script_name = os.path.join(tempdir, "create_image." + type) | ||
| 243 | |||
| 244 | self.d.setVar('img_creation_func', '\n'.join(cmds)) | ||
| 245 | self.d.setVarFlag('img_creation_func', 'func', 1) | ||
| 246 | self.d.setVarFlag('img_creation_func', 'fakeroot', 1) | ||
| 247 | |||
| 248 | with open(script_name, "w+") as script: | ||
| 249 | script.write("%s" % bb.build.shell_trap_code()) | ||
| 250 | script.write("export ROOTFS_SIZE=%d\n" % self._get_rootfs_size()) | ||
| 251 | bb.data.emit_func('img_creation_func', script, self.d) | ||
| 252 | script.write("img_creation_func\n") | ||
| 253 | |||
| 254 | os.chmod(script_name, 0775) | ||
| 255 | |||
| 256 | return script_name | ||
| 257 | |||
    def _get_imagecmds(self):
        # Build, for each group of image types, the list of
        # (type, subimages, script_name) tuples describing how to
        # generate the images; groups are processed in parallel by
        # create() below.
        old_overrides = self.d.getVar('OVERRIDES', 0)

        alltypes, fstype_groups, cimages = self._get_image_types()

        image_cmd_groups = []

        bb.note("The image creation groups are: %s" % str(fstype_groups))
        for fstype_group in fstype_groups:
            image_cmds = []
            for type in fstype_group:
                cmds = []
                subimages = []

                # Prepend the type to OVERRIDES on a datastore copy so
                # type-specific variable overrides take effect for this
                # IMAGE_CMD expansion only.
                localdata = bb.data.createCopy(self.d)
                localdata.setVar('OVERRIDES', '%s:%s' % (type, old_overrides))
                bb.data.update_data(localdata)
                localdata.setVar('type', type)

                cmds.append("\t" + localdata.getVar("IMAGE_CMD", True))
                cmds.append(localdata.expand("\tcd ${DEPLOY_DIR_IMAGE}"))

                # Append a compression command for every compressed
                # variant requested for this base type.
                if type in cimages:
                    for ctype in cimages[type]:
                        cmds.append("\t" + localdata.getVar("COMPRESS_CMD_" + ctype, True))
                        subimages.append(type + "." + ctype)

                # A type absent from the original IMAGE_FSTYPES list was
                # only needed as an intermediate; delete the uncompressed
                # file after compression.
                if type not in alltypes:
                    cmds.append(localdata.expand("\trm ${IMAGE_NAME}.rootfs.${type}"))
                else:
                    subimages.append(type)

                script_name = self._write_script(type, cmds)

                image_cmds.append((type, subimages, script_name))

            image_cmd_groups.append(image_cmds)

        return image_cmd_groups
| 297 | |||
    def create(self):
        """Generate all configured images: run IMAGE_PREPROCESS_COMMAND,
        remove stale symlinks, create each group of images in parallel,
        create symlinks for the results, then run
        IMAGE_POSTPROCESS_COMMAND."""
        bb.note("###### Generate images #######")
        pre_process_cmds = self.d.getVar("IMAGE_PREPROCESS_COMMAND", True)
        post_process_cmds = self.d.getVar("IMAGE_POSTPROCESS_COMMAND", True)

        execute_pre_post_process(self.d, pre_process_cmds)

        self._remove_old_symlinks()

        image_cmd_groups = self._get_imagecmds()

        for image_cmds in image_cmd_groups:
            # create the images in parallel
            nproc = multiprocessing.cpu_count()
            pool = bb.utils.multiprocessingpool(nproc)
            results = list(pool.imap(generate_image, image_cmds))
            pool.close()
            pool.join()

            # A non-None result from generate_image is treated as a
            # fatal error message and aborts the build.
            for result in results:
                if result is not None:
                    bb.fatal(result)

            for image_type, subimages, script in image_cmds:
                bb.note("Creating symlinks for %s image ..." % image_type)
                self._create_symlinks(subimages)

        execute_pre_post_process(self.d, post_process_cmds)
| 326 | |||
| 327 | |||
def create_image(d):
    """Convenience entry point: build every configured image for the
    given datastore."""
    image = Image(d)
    image.create()
| 330 | |||
| 331 | if __name__ == "__main__": | ||
| 332 | """ | ||
| 333 | Image creation can be called independent from bitbake environment. | ||
| 334 | """ | ||
| 335 | """ | ||
| 336 | TBD | ||
| 337 | """ | ||
diff --git a/meta/lib/oe/license.py b/meta/lib/oe/license.py new file mode 100644 index 0000000000..340da61102 --- /dev/null +++ b/meta/lib/oe/license.py | |||
| @@ -0,0 +1,116 @@ | |||
| 1 | # vi:sts=4:sw=4:et | ||
| 2 | """Code for parsing OpenEmbedded license strings""" | ||
| 3 | |||
| 4 | import ast | ||
| 5 | import re | ||
| 6 | from fnmatch import fnmatchcase as fnmatch | ||
| 7 | |||
class LicenseError(Exception):
    """Base class for the license-handling errors raised by this module."""
    pass
| 10 | |||
class LicenseSyntaxError(LicenseError):
    """Raised when a license string cannot be parsed as an expression."""

    def __init__(self, licensestr, exc):
        LicenseError.__init__(self)
        self.licensestr = licensestr
        self.exc = exc

    def __str__(self):
        return "error in '%s': %s" % (self.licensestr, self.exc)
| 19 | |||
class InvalidLicense(LicenseError):
    """Raised when a license token contains characters outside the
    accepted character set."""

    def __init__(self, license):
        LicenseError.__init__(self)
        self.license = license

    def __str__(self):
        return "invalid characters in license '%s'" % self.license
| 27 | |||
# Tokens that act as operators/separators inside a license expression.
license_operator = re.compile(r'([&|() ])')
# A single valid license name; must consume the entire token.
# Raw strings avoid the invalid '\-' escape warning raised by newer
# python versions for the non-raw form.
license_pattern = re.compile(r'[a-zA-Z0-9.+_\-]+$')
| 30 | |||
class LicenseVisitor(ast.NodeVisitor):
    """Syntax tree visitor which can accept OpenEmbedded license strings"""
    def visit_string(self, licensestr):
        """Normalize an OE license string into a python expression and
        visit its AST.

        License names are quoted, bare adjacency of two names is treated
        as '&', and any token that is neither a valid name nor an
        operator raises InvalidLicense.
        """
        new_elements = []
        # Materialize as a list: elements[pos-1] is indexed below, which
        # the lazy filter object returned on python 3 would not allow.
        elements = [x for x in license_operator.split(licensestr) if x.strip()]
        for pos, element in enumerate(elements):
            if license_pattern.match(element):
                # Two adjacent license names imply an AND.
                if pos > 0 and license_pattern.match(elements[pos-1]):
                    new_elements.append('&')
                element = '"' + element + '"'
            elif not license_operator.match(element):
                raise InvalidLicense(element)
            new_elements.append(element)

        self.visit(ast.parse(' '.join(new_elements)))
| 46 | |||
class FlattenVisitor(LicenseVisitor):
    """Flatten a license tree (parsed from a string) by selecting one of each
    set of OR options, in the way the user specifies"""
    def __init__(self, choose_licenses):
        self.choose_licenses = choose_licenses
        self.licenses = []
        LicenseVisitor.__init__(self)

    def visit_Str(self, node):
        self.licenses.append(node.s)

    def visit_Constant(self, node):
        # python 3.8+ parses string literals as ast.Constant rather than
        # ast.Str; handle both so flattening keeps working there.
        if isinstance(node.value, str):
            self.licenses.append(node.value)

    def visit_BinOp(self, node):
        if isinstance(node.op, ast.BitOr):
            # For an OR, flatten each side independently and let the
            # caller-supplied policy pick the preferred alternative.
            left = FlattenVisitor(self.choose_licenses)
            left.visit(node.left)

            right = FlattenVisitor(self.choose_licenses)
            right.visit(node.right)

            selected = self.choose_licenses(left.licenses, right.licenses)
            self.licenses.extend(selected)
        else:
            self.generic_visit(node)
| 70 | |||
def flattened_licenses(licensestr, choose_licenses):
    """Given a license string and choose_licenses function, return a flat list of licenses"""
    visitor = FlattenVisitor(choose_licenses)
    try:
        visitor.visit_string(licensestr)
    except SyntaxError as exc:
        # Surface parse failures with the offending string attached.
        raise LicenseSyntaxError(licensestr, exc)
    return visitor.licenses
| 79 | |||
def is_included(licensestr, whitelist=None, blacklist=None):
    """Given a license string and whitelist and blacklist, determine if the
    license string matches the whitelist and does not match the blacklist.

    Returns a tuple holding the boolean state and a list of the applicable
    licenses which were excluded if the state is False, or the licenses
    which were included if the state is True.
    """

    def include_license(license):
        return any(fnmatch(license, pattern) for pattern in whitelist)

    def exclude_license(license):
        return any(fnmatch(license, pattern) for pattern in blacklist)

    def choose_licenses(alpha, beta):
        """Select the option in an OR which is the 'best' (has the most
        included licenses)."""
        # List comprehensions instead of filter() so len() works on
        # python 3 (where filter returns a lazy iterator) as well as 2.
        alpha_weight = len([lic for lic in alpha if include_license(lic)])
        beta_weight = len([lic for lic in beta if include_license(lic)])
        if alpha_weight > beta_weight:
            return alpha
        else:
            return beta

    # Default: everything is whitelisted, nothing is blacklisted.
    if not whitelist:
        whitelist = ['*']

    if not blacklist:
        blacklist = []

    licenses = flattened_licenses(licensestr, choose_licenses)
    # Materialize lists: on python 3 a filter object is always truthy,
    # which would make the 'if excluded' test below unconditionally true.
    excluded = [lic for lic in licenses if exclude_license(lic)]
    included = [lic for lic in licenses if include_license(lic)]
    if excluded:
        return False, excluded
    else:
        return True, included
diff --git a/meta/lib/oe/lsb.py b/meta/lib/oe/lsb.py new file mode 100644 index 0000000000..b53f361035 --- /dev/null +++ b/meta/lib/oe/lsb.py | |||
| @@ -0,0 +1,81 @@ | |||
def release_dict():
    """Return the output of lsb_release -ir as a dictionary"""
    from subprocess import PIPE

    try:
        output, err = bb.process.run(['lsb_release', '-ir'], stderr=PIPE)
    except bb.process.CmdError:
        return None

    data = {}
    for line in output.splitlines():
        # Each useful line looks like "Distributor ID:\tUbuntu"; skip
        # anything that does not match that shape.
        fields = line.split(":\t", 1)
        if len(fields) != 2:
            continue
        data[fields[0]] = fields[1]
    return data
| 19 | |||
def release_dict_file():
    """ Try to gather LSB release information manually when lsb_release tool is unavailable """
    # This module never imports os at file level; import it locally
    # (matching the local 'import re' below) so the os.path checks do
    # not raise NameError.
    import os

    data = None
    try:
        if os.path.exists('/etc/lsb-release'):
            data = {}
            with open('/etc/lsb-release') as f:
                for line in f:
                    key, value = line.split("=", 1)
                    data[key] = value.strip()
        elif os.path.exists('/etc/redhat-release'):
            data = {}
            with open('/etc/redhat-release') as f:
                distro = f.readline().strip()
                import re
                # e.g. "Fedora release 20 (Heisenbug)"
                match = re.match(r'(.*) release (.*) \((.*)\)', distro)
                if match:
                    data['DISTRIB_ID'] = match.group(1)
                    data['DISTRIB_RELEASE'] = match.group(2)
        elif os.path.exists('/etc/SuSE-release'):
            data = {}
            data['DISTRIB_ID'] = 'SUSE LINUX'
            with open('/etc/SuSE-release') as f:
                for line in f:
                    if line.startswith('VERSION = '):
                        data['DISTRIB_RELEASE'] = line[10:].rstrip()
                        break
        elif os.path.exists('/etc/os-release'):
            data = {}
            with open('/etc/os-release') as f:
                for line in f:
                    if line.startswith('NAME='):
                        data['DISTRIB_ID'] = line[5:].rstrip().strip('"')
                    if line.startswith('VERSION_ID='):
                        data['DISTRIB_RELEASE'] = line[11:].rstrip().strip('"')
    except IOError:
        return None
    return data
| 58 | |||
def distro_identifier(adjust_hook=None):
    """Return a distro identifier string based upon lsb_release -ri,
    with optional adjustment via a hook"""

    distro_id = None
    release = None

    # Prefer the lsb_release tool; fall back to parsing release files.
    lsb_data = release_dict()
    if lsb_data:
        distro_id = lsb_data['Distributor ID']
        release = lsb_data['Release']
    else:
        file_data = release_dict_file()
        if file_data:
            distro_id = file_data['DISTRIB_ID']
            release = file_data.get('DISTRIB_RELEASE', None)

    if adjust_hook:
        distro_id, release = adjust_hook(distro_id, release)
    if not distro_id:
        return "Unknown"
    if release:
        id_str = '{0}-{1}'.format(distro_id, release)
    else:
        id_str = distro_id
    return id_str.replace(' ','-').replace('/','-')
diff --git a/meta/lib/oe/maketype.py b/meta/lib/oe/maketype.py new file mode 100644 index 0000000000..139f333691 --- /dev/null +++ b/meta/lib/oe/maketype.py | |||
| @@ -0,0 +1,99 @@ | |||
| 1 | """OpenEmbedded variable typing support | ||
| 2 | |||
| 3 | Types are defined in the metadata by name, using the 'type' flag on a | ||
| 4 | variable. Other flags may be utilized in the construction of the types. See | ||
| 5 | the arguments of the type's factory for details. | ||
| 6 | """ | ||
| 7 | |||
| 8 | import inspect | ||
| 9 | import types | ||
| 10 | |||
| 11 | available_types = {} | ||
| 12 | |||
class MissingFlag(TypeError):
    """A particular flag is required to construct the type, but has not been
    provided."""

    def __init__(self, flag, type):
        TypeError.__init__(self)
        self.flag = flag
        self.type = type

    def __str__(self):
        return "Type '%s' requires flag '%s'" % (self.type, self.flag)
| 23 | |||
def factory(var_type):
    """Return the factory for a specified type."""
    if var_type is None:
        raise TypeError("No type specified. Valid types: %s" %
                        ', '.join(available_types))
    obj = available_types.get(var_type)
    if obj is None:
        raise TypeError("Invalid type '%s':\n Valid types: %s" %
                        (var_type, ', '.join(available_types)))
    return obj
| 34 | |||
def create(value, var_type, **flags):
    """Create an object of the specified type, given the specified flags and
    string value."""
    obj = factory(var_type)
    objflags = {}
    for flag in obj.flags:
        if flag in flags:
            objflags[flag] = flags[flag]
        elif flag not in obj.optflags:
            # Required flag with no value supplied.
            raise MissingFlag(flag, var_type)

    return obj(value, **objflags)
| 48 | |||
def get_callable_args(obj):
    """Grab all but the first argument of the specified callable, returning
    the list, as well as a set of which of the arguments have default
    values."""
    # For a class, inspect its constructor instead.
    if type(obj) is type:
        obj = obj.__init__

    # inspect.getargspec was removed in python 3.11; prefer
    # getfullargspec (python 3) and fall back for python 2. Both return
    # the positional args at index 0 and their defaults at index 3.
    try:
        spec = inspect.getfullargspec(obj)
    except AttributeError:
        spec = inspect.getargspec(obj)
    args, defaults = spec[0], spec[3]

    flaglist = []
    if args:
        # Drop the implicit 'self' of bound/unbound methods.
        if len(args) > 1 and args[0] == 'self':
            args = args[1:]
        flaglist.extend(args)

    optional = set()
    if defaults:
        # Defaults align with the tail of the argument list.
        optional |= set(flaglist[-len(defaults):])
    return flaglist, optional
| 67 | |||
def factory_setup(name, obj):
    """Prepare a factory for use."""
    args, optional = get_callable_args(obj)
    extra_args = args[1:]
    if not extra_args:
        # Only the value argument: nothing configurable via flags.
        obj.flags = obj.optflags = ()
    else:
        obj.flags = extra_args
        obj.optflags = set(optional)

    # Allow a factory to pre-declare its registration name.
    if not hasattr(obj, 'name'):
        obj.name = name
| 80 | |||
def register(name, factory):
    """Register a type, given its name and a factory callable.

    Determines the required and optional flags from the factory's
    arguments."""
    factory_setup(name, factory)
    # The factory may have overridden its registration name via 'name'.
    available_types[factory.name] = factory
| 88 | |||
| 89 | |||
# Register all our included types
# Seed available_types with every public callable exposed by the
# stdlib 'types' module.
for name in dir(types):
    if name.startswith('_'):
        continue

    obj = getattr(types, name)
    if not callable(obj):
        continue

    register(name, obj)
diff --git a/meta/lib/oe/manifest.py b/meta/lib/oe/manifest.py new file mode 100644 index 0000000000..afda76be66 --- /dev/null +++ b/meta/lib/oe/manifest.py | |||
| @@ -0,0 +1,345 @@ | |||
| 1 | from abc import ABCMeta, abstractmethod | ||
| 2 | import os | ||
| 3 | import re | ||
| 4 | import bb | ||
| 5 | |||
| 6 | |||
class Manifest(object):
    """
    This is an abstract class. Do not instantiate this directly.
    """
    # Python 2 ABC declaration: the @abstractmethod markers below are
    # only enforced through this metaclass.
    __metaclass__ = ABCMeta

    # Package-type tags used in the manifest file format (documented in
    # initial_manifest_file_header below).
    PKG_TYPE_MUST_INSTALL = "mip"
    PKG_TYPE_MULTILIB = "mlp"
    PKG_TYPE_LANGUAGE = "lgp"
    PKG_TYPE_ATTEMPT_ONLY = "aop"

    # The kinds of manifest this class hierarchy can produce.
    MANIFEST_TYPE_IMAGE = "image"
    MANIFEST_TYPE_SDK_HOST = "sdk_host"
    MANIFEST_TYPE_SDK_TARGET = "sdk_target"

    # For each manifest type: which datastore variables supply its
    # package lists, and the package-type tag each variable maps to.
    var_maps = {
        MANIFEST_TYPE_IMAGE: {
            "PACKAGE_INSTALL": PKG_TYPE_MUST_INSTALL,
            "PACKAGE_INSTALL_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY,
            "LINGUAS_INSTALL": PKG_TYPE_LANGUAGE
        },
        MANIFEST_TYPE_SDK_HOST: {
            "TOOLCHAIN_HOST_TASK": PKG_TYPE_MUST_INSTALL,
            "TOOLCHAIN_HOST_TASK_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY
        },
        MANIFEST_TYPE_SDK_TARGET: {
            "TOOLCHAIN_TARGET_TASK": PKG_TYPE_MUST_INSTALL,
            "TOOLCHAIN_TARGET_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY
        }
    }

    # Order in which the package types should be handed to the package
    # manager.
    INSTALL_ORDER = [
        PKG_TYPE_LANGUAGE,
        PKG_TYPE_MUST_INSTALL,
        PKG_TYPE_ATTEMPT_ONLY,
        PKG_TYPE_MULTILIB
    ]

    # Comment block written at the top of every initial manifest; it
    # documents the "<package_type>,<package_name>" line format parsed
    # by parse_initial_manifest below.
    initial_manifest_file_header = \
        "# This file was generated automatically and contains the packages\n" \
        "# passed on to the package manager in order to create the rootfs.\n\n" \
        "# Format:\n" \
        "# <package_type>,<package_name>\n" \
        "# where:\n" \
        "# <package_type> can be:\n" \
        "# 'mip' = must install package\n" \
        "# 'aop' = attempt only package\n" \
        "# 'mlp' = multilib package\n" \
        "# 'lgp' = language package\n\n"

    def __init__(self, d, manifest_dir=None, manifest_type=MANIFEST_TYPE_IMAGE):
        """Store the datastore, choose (and create) the manifest
        directory, and compute the initial/final/full manifest paths."""
        self.d = d
        self.manifest_type = manifest_type

        # Default directory: SDK_DIR for SDK manifests, WORKDIR for
        # image manifests.
        if manifest_dir is None:
            if manifest_type != self.MANIFEST_TYPE_IMAGE:
                self.manifest_dir = self.d.getVar('SDK_DIR', True)
            else:
                self.manifest_dir = self.d.getVar('WORKDIR', True)
        else:
            self.manifest_dir = manifest_dir

        bb.utils.mkdirhier(self.manifest_dir)

        self.initial_manifest = os.path.join(self.manifest_dir, "%s_initial_manifest" % manifest_type)
        self.final_manifest = os.path.join(self.manifest_dir, "%s_final_manifest" % manifest_type)
        self.full_manifest = os.path.join(self.manifest_dir, "%s_full_manifest" % manifest_type)

        # packages in the following vars will be split in 'must install' and
        # 'multilib'
        self.vars_to_split = ["PACKAGE_INSTALL",
                              "TOOLCHAIN_HOST_TASK",
                              "TOOLCHAIN_TARGET_TASK"]

    """
    This creates a standard initial manifest for core-image-(minimal|sato|sato-sdk).
    This will be used for testing until the class is implemented properly!
    """
    def _create_dummy_initial(self):
        # Hard-coded package lists keyed off the IMAGE_ROOTFS path name.
        image_rootfs = self.d.getVar('IMAGE_ROOTFS', True)
        pkg_list = dict()
        if image_rootfs.find("core-image-sato-sdk") > 0:
            pkg_list[self.PKG_TYPE_MUST_INSTALL] = \
                "packagegroup-core-x11-sato-games packagegroup-base-extended " \
                "packagegroup-core-x11-sato packagegroup-core-x11-base " \
                "packagegroup-core-sdk packagegroup-core-tools-debug " \
                "packagegroup-core-boot packagegroup-core-tools-testapps " \
                "packagegroup-core-eclipse-debug packagegroup-core-qt-demoapps " \
                "apt packagegroup-core-tools-profile psplash " \
                "packagegroup-core-standalone-sdk-target " \
                "packagegroup-core-ssh-openssh dpkg kernel-dev"
            pkg_list[self.PKG_TYPE_LANGUAGE] = \
                "locale-base-en-us locale-base-en-gb"
        elif image_rootfs.find("core-image-sato") > 0:
            pkg_list[self.PKG_TYPE_MUST_INSTALL] = \
                "packagegroup-core-ssh-dropbear packagegroup-core-x11-sato-games " \
                "packagegroup-core-x11-base psplash apt dpkg packagegroup-base-extended " \
                "packagegroup-core-x11-sato packagegroup-core-boot"
            pkg_list['lgp'] = \
                "locale-base-en-us locale-base-en-gb"
        elif image_rootfs.find("core-image-minimal") > 0:
            pkg_list[self.PKG_TYPE_MUST_INSTALL] = "run-postinsts packagegroup-core-boot"

        with open(self.initial_manifest, "w+") as manifest:
            manifest.write(self.initial_manifest_file_header)

            for pkg_type in pkg_list:
                for pkg in pkg_list[pkg_type].split():
                    manifest.write("%s,%s\n" % (pkg_type, pkg))

    """
    This will create the initial manifest which will be used by Rootfs class to
    generate the rootfs
    """
    @abstractmethod
    def create_initial(self):
        pass

    """
    This creates the manifest after everything has been installed.
    """
    @abstractmethod
    def create_final(self):
        pass

    """
    This creates the manifest after the package in initial manifest has been
    dummy installed. It lists all *to be installed* packages. There is no real
    installation, just a test.
    """
    @abstractmethod
    def create_full(self, pm):
        pass

    """
    The following function parses an initial manifest and returns a dictionary
    object with the must install, attempt only, multilib and language packages.
    """
    def parse_initial_manifest(self):
        pkgs = dict()

        with open(self.initial_manifest) as manifest:
            for line in manifest.read().split('\n'):
                comment = re.match("^#.*", line)
                # Lines look like "<tag>,<package>" with tag being one of
                # the four PKG_TYPE_* values.
                pattern = "^(%s|%s|%s|%s),(.*)$" % \
                          (self.PKG_TYPE_MUST_INSTALL,
                           self.PKG_TYPE_ATTEMPT_ONLY,
                           self.PKG_TYPE_MULTILIB,
                           self.PKG_TYPE_LANGUAGE)
                pkg = re.match(pattern, line)

                if comment is not None:
                    continue

                if pkg is not None:
                    pkg_type = pkg.group(1)
                    pkg_name = pkg.group(2)

                    if not pkg_type in pkgs:
                        pkgs[pkg_type] = [pkg_name]
                    else:
                        pkgs[pkg_type].append(pkg_name)

        return pkgs

    '''
    This following function parses a full manifest and return a list
    object with packages.
    '''
    def parse_full_manifest(self):
        installed_pkgs = list()
        if not os.path.exists(self.full_manifest):
            bb.note('full manifest not exist')
            return installed_pkgs

        with open(self.full_manifest, 'r') as manifest:
            for pkg in manifest.read().split('\n'):
                installed_pkgs.append(pkg.strip())

        return installed_pkgs
| 187 | |||
| 188 | |||
class RpmManifest(Manifest):
    """
    Returns a dictionary object with mip and mlp packages.
    """
    def _split_multilib(self, pkg_list):
        """Split a whitespace-separated package list into a dict keyed
        by package type: multilib-variant-prefixed packages under
        PKG_TYPE_MULTILIB, everything else under PKG_TYPE_MUST_INSTALL."""
        pkgs = dict()

        # MULTILIB_VARIANTS does not change per package; look it up once.
        ml_variants = self.d.getVar('MULTILIB_VARIANTS', True).split()

        for pkg in pkg_list.split():
            pkg_type = self.PKG_TYPE_MUST_INSTALL

            for ml_variant in ml_variants:
                if pkg.startswith(ml_variant + '-'):
                    pkg_type = self.PKG_TYPE_MULTILIB

            if not pkg_type in pkgs:
                pkgs[pkg_type] = pkg
            else:
                pkgs[pkg_type] += " " + pkg

        return pkgs

    def create_initial(self):
        """Write the initial manifest from the datastore variables
        listed in var_maps for this manifest type."""
        pkgs = dict()

        with open(self.initial_manifest, "w+") as manifest:
            manifest.write(self.initial_manifest_file_header)

            for var in self.var_maps[self.manifest_type]:
                if var in self.vars_to_split:
                    split_pkgs = self._split_multilib(self.d.getVar(var, True))
                    if split_pkgs is not None:
                        # dict.update() instead of the python-2-only
                        # dict(pkgs.items() + split_pkgs.items()) merge.
                        pkgs.update(split_pkgs)
                else:
                    pkg_list = self.d.getVar(var, True)
                    if pkg_list is not None:
                        pkgs[self.var_maps[self.manifest_type][var]] = pkg_list

            for pkg_type in pkgs:
                for pkg in pkgs[pkg_type].split():
                    manifest.write("%s,%s\n" % (pkg_type, pkg))

    def create_final(self):
        # Nothing to do for rpm images.
        pass

    def create_full(self, pm):
        # Nothing to do for rpm images.
        pass
| 237 | |||
| 238 | |||
class OpkgManifest(Manifest):
    """
    Returns a dictionary object with mip and mlp packages.
    """
    def _split_multilib(self, pkg_list):
        """Split a whitespace-separated package list into a dict keyed
        by package type: multilib-variant-prefixed packages under
        PKG_TYPE_MULTILIB, everything else under PKG_TYPE_MUST_INSTALL."""
        pkgs = dict()

        # MULTILIB_VARIANTS does not change per package; look it up once.
        ml_variants = self.d.getVar('MULTILIB_VARIANTS', True).split()

        for pkg in pkg_list.split():
            pkg_type = self.PKG_TYPE_MUST_INSTALL

            for ml_variant in ml_variants:
                if pkg.startswith(ml_variant + '-'):
                    pkg_type = self.PKG_TYPE_MULTILIB

            if not pkg_type in pkgs:
                pkgs[pkg_type] = pkg
            else:
                pkgs[pkg_type] += " " + pkg

        return pkgs

    def create_initial(self):
        """Write the initial manifest from the datastore variables
        listed in var_maps for this manifest type."""
        pkgs = dict()

        with open(self.initial_manifest, "w+") as manifest:
            manifest.write(self.initial_manifest_file_header)

            for var in self.var_maps[self.manifest_type]:
                if var in self.vars_to_split:
                    split_pkgs = self._split_multilib(self.d.getVar(var, True))
                    if split_pkgs is not None:
                        # dict.update() instead of the python-2-only
                        # dict(pkgs.items() + split_pkgs.items()) merge.
                        pkgs.update(split_pkgs)
                else:
                    pkg_list = self.d.getVar(var, True)
                    if pkg_list is not None:
                        pkgs[self.var_maps[self.manifest_type][var]] = pkg_list

            for pkg_type in pkgs:
                for pkg in pkgs[pkg_type].split():
                    manifest.write("%s,%s\n" % (pkg_type, pkg))

    def create_final(self):
        # Nothing to do for ipk images.
        pass

    def create_full(self, pm):
        """Dummy-install the initial manifest's packages and record
        every package the package manager reports it would install."""
        if not os.path.exists(self.initial_manifest):
            self.create_initial()

        initial_manifest = self.parse_initial_manifest()
        pkgs_to_install = list()
        for pkg_type in initial_manifest:
            pkgs_to_install += initial_manifest[pkg_type]
        if len(pkgs_to_install) == 0:
            return

        output = pm.dummy_install(pkgs_to_install)

        with open(self.full_manifest, 'w+') as manifest:
            # Match "Installing <pkg> <rest>" lines from the dummy run.
            pkg_re = re.compile('^Installing ([^ ]+) [^ ].*')
            for line in set(output.split('\n')):
                m = pkg_re.match(line)
                if m:
                    manifest.write(m.group(1) + '\n')

        return
| 306 | |||
| 307 | |||
class DpkgManifest(Manifest):
    """Manifest implementation for dpkg-based images."""

    def create_initial(self):
        """Write the initial manifest straight from the datastore
        variables listed in var_maps; no multilib splitting here."""
        with open(self.initial_manifest, "w+") as manifest:
            manifest.write(self.initial_manifest_file_header)

            manifest_vars = self.var_maps[self.manifest_type]
            for var, pkg_type in manifest_vars.items():
                pkg_list = self.d.getVar(var, True)
                if pkg_list is None:
                    continue
                for pkg in pkg_list.split():
                    manifest.write("%s,%s\n" % (pkg_type, pkg))

    def create_final(self):
        pass

    def create_full(self, pm):
        pass
| 327 | pass | ||
| 328 | |||
| 329 | |||
def create_manifest(d, final_manifest=False, manifest_dir=None,
                    manifest_type=Manifest.MANIFEST_TYPE_IMAGE):
    """Instantiate the manifest backend matching IMAGE_PKGTYPE and
    write either the final or the initial manifest."""
    backends = {'rpm': RpmManifest,
                'ipk': OpkgManifest,
                'deb': DpkgManifest}

    backend_cls = backends[d.getVar('IMAGE_PKGTYPE', True)]
    manifest = backend_cls(d, manifest_dir, manifest_type)

    if final_manifest:
        manifest.create_final()
    else:
        manifest.create_initial()
| 342 | |||
| 343 | |||
| 344 | if __name__ == "__main__": | ||
| 345 | pass | ||
diff --git a/meta/lib/oe/package.py b/meta/lib/oe/package.py new file mode 100644 index 0000000000..f8b532220a --- /dev/null +++ b/meta/lib/oe/package.py | |||
| @@ -0,0 +1,99 @@ | |||
def runstrip(arg):
    # Function to strip a single file, called from split_and_strip_files below
    # A working 'file' (one which works on the target architecture)
    #
    # The elftype is a bit pattern (explained in split_and_strip_files) to tell
    # us what type of file we're processing...
    # 4 - executable
    # 8 - shared library
    # 16 - kernel module

    # The python-2-only 'commands' module was imported here but never
    # used; dropped.
    import stat, subprocess

    (file, elftype, strip) = arg

    # Temporarily make the file readable/writable so strip can work on
    # it; the original mode is restored afterwards.
    # NOTE(review): this condition is true whenever the file is already
    # readable; 'not os.access(file, os.R_OK)' may have been intended.
    # Left as-is because the chmod is harmless and reverted below.
    newmode = None
    if not os.access(file, os.W_OK) or os.access(file, os.R_OK):
        origmode = os.stat(file)[stat.ST_MODE]
        newmode = origmode | stat.S_IWRITE | stat.S_IREAD
        os.chmod(file, newmode)

    extraflags = ""

    # kernel module
    if elftype & 16:
        extraflags = "--strip-debug --remove-section=.comment --remove-section=.note --preserve-dates"
    # .so and shared library
    elif ".so" in file and elftype & 8:
        extraflags = "--remove-section=.comment --remove-section=.note --strip-unneeded"
    # shared or executable:
    elif elftype & 8 or elftype & 4:
        extraflags = "--remove-section=.comment --remove-section=.note"

    stripcmd = "'%s' %s '%s'" % (strip, extraflags, file)
    bb.debug(1, "runstrip: %s" % stripcmd)

    ret = subprocess.call(stripcmd, shell=True)

    if newmode:
        os.chmod(file, origmode)

    if ret:
        bb.error("runstrip: '%s' strip command failed" % stripcmd)

    return
| 45 | |||
| 46 | |||
def file_translate(file):
    """Encode characters that are awkward in dependency file names.

    '@', space, tab, '[', ']' and '_' are each replaced by an @word@
    token.  '@' itself is escaped first so the tokens introduced by the
    later substitutions are not re-escaped.
    """
    substitutions = (
        ("@", "@at@"),
        (" ", "@space@"),
        ("\t", "@tab@"),
        ("[", "@openbrace@"),
        ("]", "@closebrace@"),
        ("_", "@underscore@"),
    )
    translated = file
    for char, token in substitutions:
        translated = translated.replace(char, token)
    return translated
| 55 | |||
def filedeprunner(arg):
    """Run the rpmdeps helper over one package's files.

    'arg' is a tuple (pkg, pkgfiles, rpmdeps, pkgdest).  Returns
    (pkg, provides, requires) where provides/requires map translated
    file names to lists of dependency strings.
    """
    import re, subprocess, shlex

    (pkg, pkgfiles, rpmdeps, pkgdest) = arg
    provides = {}
    requires = {}

    # Wraps version constraints such as ">= 1.2" in parentheses.
    version_re = re.compile(r'[<>=]+ +[^ ]*')

    def process_deps(pipe, pkg, pkgdest, provides, requires):
        # Each line looks like "<path> Requires: ..." or "<path> Provides: ...".
        for line in pipe:
            f = line.split(" ", 1)[0].strip()
            line = line.split(" ", 1)[1].strip()

            if line.startswith("Requires:"):
                bucket = requires
            elif line.startswith("Provides:"):
                bucket = provides
            else:
                continue

            # Strip the per-package staging prefix and encode awkward chars.
            fname = file_translate(f.replace(pkgdest + "/" + pkg, ""))
            value = line.split(":", 1)[1].strip()
            value = version_re.sub(r'(\g<0>)', value)

            # rpm-internal capabilities and the bare "python" dependency
            # are of no interest to us.
            if value.startswith("rpmlib(") or value == "python":
                continue
            bucket.setdefault(fname, []).append(value)

        return provides, requires

    try:
        dep_popen = subprocess.Popen(shlex.split(rpmdeps) + pkgfiles, stdout=subprocess.PIPE)
        provides, requires = process_deps(dep_popen.stdout, pkg, pkgdest, provides, requires)
    except OSError as e:
        bb.error("rpmdeps: '%s' command failed, '%s'" % (shlex.split(rpmdeps) + pkgfiles, e))
        raise e

    return (pkg, provides, requires)
diff --git a/meta/lib/oe/package_manager.py b/meta/lib/oe/package_manager.py new file mode 100644 index 0000000000..a8360fe983 --- /dev/null +++ b/meta/lib/oe/package_manager.py | |||
| @@ -0,0 +1,1721 @@ | |||
| 1 | from abc import ABCMeta, abstractmethod | ||
| 2 | import os | ||
| 3 | import glob | ||
| 4 | import subprocess | ||
| 5 | import shutil | ||
| 6 | import multiprocessing | ||
| 7 | import re | ||
| 8 | import bb | ||
| 9 | |||
| 10 | |||
| 11 | # this can be used by all PM backends to create the index files in parallel | ||
def create_index(arg):
    """Execute one index-creation command.

    Module-level so every PM backend can fan it out over a
    multiprocessing pool.  Returns None on success, or an error-message
    string describing the failure (exceptions don't cross pool workers
    cleanly, so errors are passed back as values).
    """
    index_cmd = arg

    try:
        bb.note("Executing '%s' ..." % index_cmd)
        subprocess.check_output(index_cmd, stderr=subprocess.STDOUT, shell=True)
    except subprocess.CalledProcessError as e:
        return ("Index creation command '%s' failed with return code %d:\n%s" %
                (e.cmd, e.returncode, e.output))
    else:
        return None
| 23 | |||
| 24 | |||
class Indexer(object):
    """Abstract base class for package-index writers.

    Subclasses implement write_index() for a specific packaging backend
    (rpm / opkg / dpkg).
    """
    __metaclass__ = ABCMeta

    def __init__(self, d, deploy_dir):
        # d: the bitbake datastore; deploy_dir: directory containing the
        # packages whose index should be (re)generated.
        self.d = d
        self.deploy_dir = deploy_dir

    @abstractmethod
    def write_index(self):
        """Generate the index files for self.deploy_dir."""
        pass
| 35 | |||
| 36 | |||
class RpmIndexer(Indexer):
    def get_ml_prefix_and_os_list(self, arch_var=None, os_var=None):
        """Return (ml_prefix_list, target_os).

        ml_prefix_list maps each multilib variant name ('default' plus any
        'multilib:<name>' entries from MULTILIBS) to its package arch list,
        best match first, with non-default archs prefixed '<mlib>_'.
        target_os maps the same keys to the variant's TARGET_OS.

        If both arch_var and os_var are given, only the 'default' entry is
        populated (from those variables) and multilibs are not consulted.
        """
        package_archs = {
            'default': [],
        }

        target_os = {
            'default': "",
        }

        if arch_var is not None and os_var is not None:
            package_archs['default'] = self.d.getVar(arch_var, True).split()
            package_archs['default'].reverse()
            target_os['default'] = self.d.getVar(os_var, True).strip()
        else:
            package_archs['default'] = self.d.getVar("PACKAGE_ARCHS", True).split()
            # arch order is reversed.  This ensures the -best- match is
            # listed first!
            package_archs['default'].reverse()
            target_os['default'] = self.d.getVar("TARGET_OS", True).strip()
            multilibs = self.d.getVar('MULTILIBS', True) or ""
            for ext in multilibs.split():
                eext = ext.split(':')
                if len(eext) > 1 and eext[0] == 'multilib':
                    # Re-evaluate PACKAGE_ARCHS/TARGET_OS under the
                    # multilib's DEFAULTTUNE on a copy of the datastore.
                    localdata = bb.data.createCopy(self.d)
                    default_tune_key = "DEFAULTTUNE_virtclass-multilib-" + eext[1]
                    default_tune = localdata.getVar(default_tune_key, False)
                    if default_tune:
                        localdata.setVar("DEFAULTTUNE", default_tune)
                        bb.data.update_data(localdata)
                        package_archs[eext[1]] = localdata.getVar('PACKAGE_ARCHS',
                                                                  True).split()
                        package_archs[eext[1]].reverse()
                        target_os[eext[1]] = localdata.getVar("TARGET_OS",
                                                              True).strip()

        # Prefix non-default multilib archs, leaving arch-independent
        # names ('all', 'noarch', 'any') untouched.
        ml_prefix_list = dict()
        for mlib in package_archs:
            if mlib == 'default':
                ml_prefix_list[mlib] = package_archs[mlib]
            else:
                ml_prefix_list[mlib] = list()
                for arch in package_archs[mlib]:
                    if arch in ['all', 'noarch', 'any']:
                        ml_prefix_list[mlib].append(arch)
                    else:
                        ml_prefix_list[mlib].append(mlib + "_" + arch)

        return (ml_prefix_list, target_os)

    def write_index(self):
        """Run createrepo over every per-arch subdirectory of deploy_dir.

        Returns None on success, or the first error message returned by a
        failed createrepo invocation.
        """
        sdk_pkg_archs = (self.d.getVar('SDK_PACKAGE_ARCHS', True) or "").replace('-', '_').split()
        all_mlb_pkg_archs = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").replace('-', '_').split()

        mlb_prefix_list = self.get_ml_prefix_and_os_list()[0]

        # RPM arch names use '_' where OE uses '-'.
        archs = set()
        for item in mlb_prefix_list:
            archs = archs.union(set(i.replace('-', '_') for i in mlb_prefix_list[item]))

        if len(archs) == 0:
            archs = archs.union(set(all_mlb_pkg_archs))

        archs = archs.union(set(sdk_pkg_archs))

        rpm_createrepo = bb.utils.which(os.getenv('PATH'), "createrepo")
        index_cmds = []
        rpm_dirs_found = False
        for arch in archs:
            arch_dir = os.path.join(self.deploy_dir, arch)
            if not os.path.isdir(arch_dir):
                continue

            index_cmds.append("%s --update -q %s" % (rpm_createrepo, arch_dir))

            rpm_dirs_found = True

        if not rpm_dirs_found:
            bb.note("There are no packages in %s" % self.deploy_dir)
            return

        # Index the arch directories in parallel.
        nproc = multiprocessing.cpu_count()
        pool = bb.utils.multiprocessingpool(nproc)
        results = list(pool.imap(create_index, index_cmds))
        pool.close()
        pool.join()

        for result in results:
            if result is not None:
                return(result)
| 127 | |||
| 128 | |||
class OpkgIndexer(Indexer):
    """Package index writer for the opkg (ipk) backend."""

    def write_index(self):
        """Regenerate the Packages index for every configured arch feed.

        Returns None on success, or the first error message produced by a
        failed opkg-make-index invocation.
        """
        arch_vars = ("ALL_MULTILIB_PACKAGE_ARCHS",
                     "SDK_PACKAGE_ARCHS",
                     "MULTILIB_ARCHS")

        opkg_index_cmd = bb.utils.which(os.getenv('PATH'), "opkg-make-index")

        # opkg-make-index expects the top-level Packages file to exist.
        top_level_packages = os.path.join(self.deploy_dir, "Packages")
        if not os.path.exists(top_level_packages):
            open(top_level_packages, "w").close()

        index_cmds = []
        for arch_var in arch_vars:
            archs = self.d.getVar(arch_var, True)
            if archs is None:
                continue

            for arch in archs.split():
                pkgs_dir = os.path.join(self.deploy_dir, arch)
                if not os.path.isdir(pkgs_dir):
                    continue

                # Same requirement for each per-arch Packages file.
                pkgs_file = os.path.join(pkgs_dir, "Packages")
                if not os.path.exists(pkgs_file):
                    open(pkgs_file, "w").close()

                index_cmds.append('%s -r %s -p %s -m %s' %
                                  (opkg_index_cmd, pkgs_file, pkgs_file, pkgs_dir))

        if not index_cmds:
            bb.note("There are no packages in %s!" % self.deploy_dir)
            return

        # Build the per-arch indexes in parallel.
        pool = bb.utils.multiprocessingpool(multiprocessing.cpu_count())
        results = list(pool.imap(create_index, index_cmds))
        pool.close()
        pool.join()

        for result in results:
            if result is not None:
                return result
| 172 | |||
| 173 | |||
class DpkgIndexer(Indexer):
    """Package index writer for the dpkg (deb) backend."""

    def write_index(self):
        """Run apt-ftparchive over every per-arch subdirectory of deploy_dir.

        Returns None on success, or the first error message returned by a
        failed index command.
        """
        # Fix: arch_list was previously only assigned inside the
        # 'pkg_archs is not None' branch, causing a NameError below when
        # PACKAGE_ARCHS was unset.
        arch_list = []
        pkg_archs = self.d.getVar('PACKAGE_ARCHS', True)
        if pkg_archs is not None:
            arch_list = pkg_archs.split()
        sdk_pkg_archs = self.d.getVar('SDK_PACKAGE_ARCHS', True)
        if sdk_pkg_archs is not None:
            for a in sdk_pkg_archs.split():
                # Fix: membership must be tested against the list; the old
                # 'a not in pkg_archs' was a substring test on the raw
                # string, wrongly skipping archs contained in longer names.
                if a not in arch_list:
                    arch_list.append(a)

        apt_ftparchive = bb.utils.which(os.getenv('PATH'), "apt-ftparchive")
        gzip = bb.utils.which(os.getenv('PATH'), "gzip")

        index_cmds = []
        deb_dirs_found = False
        for arch in arch_list:
            arch_dir = os.path.join(self.deploy_dir, arch)
            if not os.path.isdir(arch_dir):
                continue

            cmd = "cd %s; PSEUDO_UNLOAD=1 %s packages . > Packages;" % (arch_dir, apt_ftparchive)

            cmd += "%s -fc Packages > Packages.gz;" % gzip

            # apt-ftparchive appends to Release, so seed it with the label.
            with open(os.path.join(arch_dir, "Release"), "w+") as release:
                release.write("Label: %s\n" % arch)

            cmd += "PSEUDO_UNLOAD=1 %s release . >> Release" % apt_ftparchive

            index_cmds.append(cmd)

            deb_dirs_found = True

        if not deb_dirs_found:
            bb.note("There are no packages in %s" % self.deploy_dir)
            return

        # Index the arch directories in parallel.
        nproc = multiprocessing.cpu_count()
        pool = bb.utils.multiprocessingpool(nproc)
        results = list(pool.imap(create_index, index_cmds))
        pool.close()
        pool.join()

        for result in results:
            if result is not None:
                return(result)
| 221 | |||
| 222 | |||
class PkgsList(object):
    """Abstract base class for listing the packages installed in a rootfs.

    Subclasses implement list() for a specific packaging backend.
    """
    __metaclass__ = ABCMeta

    def __init__(self, d, rootfs_dir):
        # d: the bitbake datastore; rootfs_dir: root of the image whose
        # installed packages are to be listed.
        self.d = d
        self.rootfs_dir = rootfs_dir

    @abstractmethod
    def list(self, format=None):
        """Return the installed packages; 'format' selects the fields
        (e.g. "arch", "file", "ver", "deps" — see the subclasses)."""
        pass
| 233 | |||
| 234 | |||
class RpmPkgsList(PkgsList):
    """List the packages installed in an RPM-based rootfs."""

    def __init__(self, d, rootfs_dir, arch_var=None, os_var=None):
        super(RpmPkgsList, self).__init__(d, rootfs_dir)

        self.rpm_cmd = bb.utils.which(os.getenv('PATH'), "rpm")
        # Location of the rpm database inside the image.
        self.image_rpmlib = os.path.join(self.rootfs_dir, 'var/lib/rpm')

        self.ml_prefix_list, self.ml_os_list = \
            RpmIndexer(d, rootfs_dir).get_ml_prefix_and_os_list(arch_var, os_var)

    '''
    Translate the RPM/Smart format names to the OE multilib format names
    '''
    def _pkg_translate_smart_to_oe(self, pkg, arch):
        new_pkg = pkg
        # Compare arch names with '-' normalization, since RPM uses '_'
        # where OE uses '-'.
        fixed_arch = arch.replace('_', '-')
        found = 0
        for mlib in self.ml_prefix_list:
            for cmp_arch in self.ml_prefix_list[mlib]:
                fixed_cmp_arch = cmp_arch.replace('_', '-')
                if fixed_arch == fixed_cmp_arch:
                    if mlib == 'default':
                        new_pkg = pkg
                        new_arch = cmp_arch
                    else:
                        new_pkg = mlib + '-' + pkg
                        # We need to strip off the ${mlib}_ prefix on the arch
                        new_arch = cmp_arch.replace(mlib + '_', '')

                    # Workaround for bug 3565. Simply look to see if we
                    # know of a package with that name, if not try again!
                    filename = os.path.join(self.d.getVar('PKGDATA_DIR', True),
                                            'runtime-reverse',
                                            new_pkg)
                    if os.path.exists(filename):
                        found = 1
                        break

            if found == 1 and fixed_arch == fixed_cmp_arch:
                break
        #bb.note('%s, %s -> %s, %s' % (pkg, arch, new_pkg, new_arch))
        # NOTE(review): if 'arch' never matches any feed arch, new_arch is
        # unbound here and the return raises NameError — confirm callers
        # always pass a known arch.
        return new_pkg, new_arch

    def _list_pkg_deps(self):
        # Resolve inter-package dependencies straight from the image's
        # rpm database using the rpmresolve helper.
        cmd = [bb.utils.which(os.getenv('PATH'), "rpmresolve"),
               "-t", self.image_rpmlib]

        try:
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip()
        except subprocess.CalledProcessError as e:
            bb.fatal("Cannot get the package dependencies. Command '%s' "
                     "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output))

        return output

    def list(self, format=None):
        """Query the image rpm database and return one line per package.

        format: None -> name only; "arch" -> name+arch; "file" -> name,
        originating package file, arch; "ver" -> name, arch, version;
        "deps" -> dependency list via rpmresolve.
        """
        if format == "deps":
            return self._list_pkg_deps()

        cmd = self.rpm_cmd + ' --root ' + self.rootfs_dir
        cmd += ' -D "_dbpath /var/lib/rpm" -qa'
        cmd += " --qf '[%{NAME} %{ARCH} %{VERSION} %{PACKAGEORIGIN}\n]'"

        try:
            # bb.note(cmd)
            tmp_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip()

        except subprocess.CalledProcessError as e:
            bb.fatal("Cannot get the installed packages list. Command '%s' "
                     "returned %d:\n%s" % (cmd, e.returncode, e.output))

        output = list()
        for line in tmp_output.split('\n'):
            if len(line.strip()) == 0:
                continue
            pkg = line.split()[0]
            arch = line.split()[1]
            ver = line.split()[2]
            pkgorigin = line.split()[3]
            # Convert the RPM names back to OE multilib names.
            new_pkg, new_arch = self._pkg_translate_smart_to_oe(pkg, arch)

            if format == "arch":
                output.append('%s %s' % (new_pkg, new_arch))
            elif format == "file":
                output.append('%s %s %s' % (new_pkg, pkgorigin, new_arch))
            elif format == "ver":
                output.append('%s %s %s' % (new_pkg, new_arch, ver))
            else:
                output.append('%s' % (new_pkg))

        output.sort()

        return '\n'.join(output)
| 328 | |||
| 329 | |||
class OpkgPkgsList(PkgsList):
    """List the packages installed in an opkg-based rootfs."""

    def __init__(self, d, rootfs_dir, config_file):
        super(OpkgPkgsList, self).__init__(d, rootfs_dir)

        self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg-cl")
        # -f: opkg config file; -o: offline root (the image rootfs).
        self.opkg_args = "-f %s -o %s " % (config_file, rootfs_dir)
        self.opkg_args += self.d.getVar("OPKG_ARGS", True)

    def list(self, format=None):
        """Return the installed packages, one per line.

        format: None -> name only; "arch"/"file"/"ver"/"deps" -> pipe the
        opkg status output through opkg-query-helper.py for the
        corresponding fields.
        """
        opkg_query_cmd = bb.utils.which(os.getenv('PATH'), "opkg-query-helper.py")

        if format == "arch":
            cmd = "%s %s status | %s -a" % \
                (self.opkg_cmd, self.opkg_args, opkg_query_cmd)
        elif format == "file":
            cmd = "%s %s status | %s -f" % \
                (self.opkg_cmd, self.opkg_args, opkg_query_cmd)
        elif format == "ver":
            cmd = "%s %s status | %s -v" % \
                (self.opkg_cmd, self.opkg_args, opkg_query_cmd)
        elif format == "deps":
            cmd = "%s %s status | %s" % \
                (self.opkg_cmd, self.opkg_args, opkg_query_cmd)
        else:
            cmd = "%s %s list_installed | cut -d' ' -f1" % \
                (self.opkg_cmd, self.opkg_args)

        try:
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip()
        except subprocess.CalledProcessError as e:
            bb.fatal("Cannot get the installed packages list. Command '%s' "
                     "returned %d:\n%s" % (cmd, e.returncode, e.output))

        if output and format == "file":
            # Replace the bare package file name with its full path in the
            # deploy area when that file actually exists.
            tmp_output = ""
            for line in output.split('\n'):
                pkg, pkg_file, pkg_arch = line.split()
                full_path = os.path.join(self.rootfs_dir, pkg_arch, pkg_file)
                if os.path.exists(full_path):
                    tmp_output += "%s %s %s\n" % (pkg, full_path, pkg_arch)
                else:
                    tmp_output += "%s %s %s\n" % (pkg, pkg_file, pkg_arch)

            output = tmp_output

        return output
| 376 | |||
| 377 | |||
class DpkgPkgsList(PkgsList):
    """List the packages installed in a dpkg-based rootfs."""

    def list(self, format=None):
        """Return the installed packages via dpkg-query, one per line.

        format: None -> name only; "arch" -> name+arch; "file" -> name,
        .deb file, arch; "ver" -> name, arch, version; "deps" -> the
        Depends/Recommends fields post-processed by opkg-query-helper.py.
        """
        cmd = [bb.utils.which(os.getenv('PATH'), "dpkg-query"),
               "--admindir=%s/var/lib/dpkg" % self.rootfs_dir,
               "-W"]

        if format == "arch":
            cmd.append("-f=${Package} ${PackageArch}\n")
        elif format == "file":
            cmd.append("-f=${Package} ${Package}_${Version}_${Architecture}.deb ${PackageArch}\n")
        elif format == "ver":
            cmd.append("-f=${Package} ${PackageArch} ${Version}\n")
        elif format == "deps":
            cmd.append("-f=Package: ${Package}\nDepends: ${Depends}\nRecommends: ${Recommends}\n\n")
        else:
            cmd.append("-f=${Package}\n")

        try:
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip()
        except subprocess.CalledProcessError as e:
            bb.fatal("Cannot get the installed packages list. Command '%s' "
                     "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output))

        if format == "file":
            # Replace the constructed .deb file name with its full path in
            # the deploy area when that file actually exists.
            tmp_output = ""
            for line in tuple(output.split('\n')):
                pkg, pkg_file, pkg_arch = line.split()
                full_path = os.path.join(self.rootfs_dir, pkg_arch, pkg_file)
                if os.path.exists(full_path):
                    tmp_output += "%s %s %s\n" % (pkg, full_path, pkg_arch)
                else:
                    tmp_output += "%s %s %s\n" % (pkg, pkg_file, pkg_arch)

            output = tmp_output
        elif format == "deps":
            # Reuse the opkg helper: the status-style output above is
            # compatible with its input format.
            opkg_query_cmd = bb.utils.which(os.getenv('PATH'), "opkg-query-helper.py")

            try:
                output = subprocess.check_output("echo -e '%s' | %s" %
                                                 (output, opkg_query_cmd),
                                                 stderr=subprocess.STDOUT,
                                                 shell=True)
            except subprocess.CalledProcessError as e:
                bb.fatal("Cannot compute packages dependencies. Command '%s' "
                         "returned %d:\n%s" % (e.cmd, e.returncode, e.output))

        return output
| 425 | |||
| 426 | |||
class PackageManager(object):
    """
    This is an abstract class. Do not instantiate this directly.
    """
    __metaclass__ = ABCMeta

    def __init__(self, d):
        self.d = d
        self.deploy_dir = None
        self.deploy_lock = None
        self.feed_uris = self.d.getVar('PACKAGE_FEED_URIS', True) or ""

    @abstractmethod
    def update(self):
        """
        Update the package manager package database.
        """
        pass

    @abstractmethod
    def install(self, pkgs, attempt_only=False):
        """
        Install a list of packages. 'pkgs' is a list object. If 'attempt_only'
        is True, installation failures are ignored.
        """
        pass

    @abstractmethod
    def remove(self, pkgs, with_dependencies=True):
        """
        Remove a list of packages. 'pkgs' is a list object. If
        'with_dependencies' is False, any dependencies are left in place.
        """
        pass

    @abstractmethod
    def write_index(self):
        """
        This function creates the index files.
        """
        pass

    @abstractmethod
    def remove_packaging_data(self):
        """Remove the backend's packaging metadata from the rootfs."""
        pass

    @abstractmethod
    def list_installed(self, format=None):
        """Return the installed packages in the requested format."""
        pass

    @abstractmethod
    def insert_feeds_uris(self):
        """Add the PACKAGE_FEED_URIS feeds to the backend configuration."""
        pass

    def install_complementary(self, globs=None):
        """
        Install complementary packages based upon the list of currently
        installed packages e.g. locales, *-dev, *-dbg, etc. This will only
        attempt to install these packages, if they don't exist then no
        error will occur. Note: every backend needs to call this function
        explicitly after the normal package installation.
        """
        # we need to write the list of installed packages to a file because
        # the oe-pkgdata-util reads it from a file
        installed_pkgs_file = os.path.join(self.d.getVar('WORKDIR', True),
                                           "installed_pkgs.txt")
        with open(installed_pkgs_file, "w+") as installed_pkgs:
            installed_pkgs.write(self.list_installed("arch"))

        if globs is None:
            globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY', True)
            # Fix: only append the locale globs when we actually have a glob
            # string; the original did 'globs += ...' unconditionally, which
            # raised TypeError when IMAGE_INSTALL_COMPLEMENTARY was unset.
            if globs is not None:
                split_linguas = set()

                # Collect both the full IMAGE_LINGUAS entries (e.g. en-gb)
                # and their base language (e.g. en).
                for translation in self.d.getVar('IMAGE_LINGUAS', True).split():
                    split_linguas.add(translation)
                    split_linguas.add(translation.split('-')[0])

                split_linguas = sorted(split_linguas)

                for lang in split_linguas:
                    globs += " *-locale-%s" % lang

        if globs is None:
            return

        cmd = [bb.utils.which(os.getenv('PATH'), "oe-pkgdata-util"),
               "glob", self.d.getVar('PKGDATA_DIR', True), installed_pkgs_file,
               globs]
        try:
            bb.note("Installing complementary packages ...")
            complementary_pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            bb.fatal("Could not compute complementary packages list. Command "
                     "'%s' returned %d:\n%s" %
                     (' '.join(cmd), e.returncode, e.output))

        # Best-effort: missing complementary packages are not an error.
        self.install(complementary_pkgs.split(), attempt_only=True)

    def deploy_dir_lock(self):
        """Take an exclusive lock on the backend's deploy directory."""
        if self.deploy_dir is None:
            raise RuntimeError("deploy_dir is not set!")

        lock_file_name = os.path.join(self.deploy_dir, "deploy.lock")

        self.deploy_lock = bb.utils.lockfile(lock_file_name)

    def deploy_dir_unlock(self):
        """Release the lock taken by deploy_dir_lock(); no-op if not held."""
        if self.deploy_lock is None:
            return

        bb.utils.unlockfile(self.deploy_lock)

        self.deploy_lock = None
| 540 | |||
| 541 | |||
| 542 | class RpmPM(PackageManager): | ||
    def __init__(self,
                 d,
                 target_rootfs,
                 target_vendor,
                 task_name='target',
                 providename=None,
                 arch_var=None,
                 os_var=None):
        """Set up paths, helper tools and multilib arch data for managing
        RPM packages in 'target_rootfs'.

        providename: optional list of extra Providename entries written to
        the RPM sysinfo; arch_var/os_var: optional variable names passed
        through to the indexer's multilib arch/os discovery.
        """
        super(RpmPM, self).__init__(d)
        self.target_rootfs = target_rootfs
        self.target_vendor = target_vendor
        self.task_name = task_name
        self.providename = providename
        self.fullpkglist = list()
        self.deploy_dir = self.d.getVar('DEPLOY_DIR_RPM', True)
        self.etcrpm_dir = os.path.join(self.target_rootfs, "etc/rpm")
        self.install_dir = os.path.join(self.target_rootfs, "install")
        self.rpm_cmd = bb.utils.which(os.getenv('PATH'), "rpm")
        self.smart_cmd = bb.utils.which(os.getenv('PATH'), "smart")
        # Keep smart's state inside the image being built.
        self.smart_opt = "--data-dir=" + os.path.join(target_rootfs,
                                                      'var/lib/smart')
        self.scriptlet_wrapper = self.d.expand('${WORKDIR}/scriptlet_wrapper')
        # Per-task saved state used to re-run/restore installs.
        self.solution_manifest = self.d.expand('${T}/saved/%s_solution' %
                                               self.task_name)
        self.saved_rpmlib = self.d.expand('${T}/saved/%s' % self.task_name)
        self.image_rpmlib = os.path.join(self.target_rootfs, 'var/lib/rpm')

        if not os.path.exists(self.d.expand('${T}/saved')):
            bb.utils.mkdirhier(self.d.expand('${T}/saved'))

        self.indexer = RpmIndexer(self.d, self.deploy_dir)
        self.pkgs_list = RpmPkgsList(self.d, self.target_rootfs, arch_var, os_var)

        self.ml_prefix_list, self.ml_os_list = self.indexer.get_ml_prefix_and_os_list(arch_var, os_var)
| 577 | |||
| 578 | def insert_feeds_uris(self): | ||
| 579 | if self.feed_uris == "": | ||
| 580 | return | ||
| 581 | |||
| 582 | # List must be prefered to least preferred order | ||
| 583 | default_platform_extra = set() | ||
| 584 | platform_extra = set() | ||
| 585 | bbextendvariant = self.d.getVar('BBEXTENDVARIANT', True) or "" | ||
| 586 | for mlib in self.ml_os_list: | ||
| 587 | for arch in self.ml_prefix_list[mlib]: | ||
| 588 | plt = arch.replace('-', '_') + '-.*-' + self.ml_os_list[mlib] | ||
| 589 | if mlib == bbextendvariant: | ||
| 590 | default_platform_extra.add(plt) | ||
| 591 | else: | ||
| 592 | platform_extra.add(plt) | ||
| 593 | |||
| 594 | platform_extra = platform_extra.union(default_platform_extra) | ||
| 595 | |||
| 596 | arch_list = [] | ||
| 597 | for canonical_arch in platform_extra: | ||
| 598 | arch = canonical_arch.split('-')[0] | ||
| 599 | if not os.path.exists(os.path.join(self.deploy_dir, arch)): | ||
| 600 | continue | ||
| 601 | arch_list.append(arch) | ||
| 602 | |||
| 603 | uri_iterator = 0 | ||
| 604 | channel_priority = 10 + 5 * len(self.feed_uris.split()) * len(arch_list) | ||
| 605 | |||
| 606 | for uri in self.feed_uris.split(): | ||
| 607 | for arch in arch_list: | ||
| 608 | bb.note('Note: adding Smart channel url%d%s (%s)' % | ||
| 609 | (uri_iterator, arch, channel_priority)) | ||
| 610 | self._invoke_smart('channel --add url%d-%s type=rpm-md baseurl=%s/rpm/%s -y' | ||
| 611 | % (uri_iterator, arch, uri, arch)) | ||
| 612 | self._invoke_smart('channel --set url%d-%s priority=%d' % | ||
| 613 | (uri_iterator, arch, channel_priority)) | ||
| 614 | channel_priority -= 5 | ||
| 615 | uri_iterator += 1 | ||
| 616 | |||
    '''
    Create configs for rpm and smart, and multilib is supported
    '''
    def create_configs(self):
        # Canonical platform string for the default tune, e.g.
        # "x86_64-poky-linux" with '-' in the arch normalized to '_'.
        target_arch = self.d.getVar('TARGET_ARCH', True)
        platform = '%s%s-%s' % (target_arch.replace('-', '_'),
                                self.target_vendor,
                                self.ml_os_list['default'])

        # Lists ordered from most preferred to least preferred
        default_platform_extra = list()
        platform_extra = list()
        bbextendvariant = self.d.getVar('BBEXTENDVARIANT', True) or ""
        for mlib in self.ml_os_list:
            for arch in self.ml_prefix_list[mlib]:
                plt = arch.replace('-', '_') + '-.*-' + self.ml_os_list[mlib]
                if mlib == bbextendvariant:
                    if plt not in default_platform_extra:
                        default_platform_extra.append(plt)
                else:
                    if plt not in platform_extra:
                        platform_extra.append(plt)
        # The current BBEXTENDVARIANT's platforms take precedence.
        platform_extra = default_platform_extra + platform_extra

        self._create_configs(platform, platform_extra)
| 642 | |||
| 643 | def _invoke_smart(self, args): | ||
| 644 | cmd = "%s %s %s" % (self.smart_cmd, self.smart_opt, args) | ||
| 645 | # bb.note(cmd) | ||
| 646 | try: | ||
| 647 | complementary_pkgs = subprocess.check_output(cmd, | ||
| 648 | stderr=subprocess.STDOUT, | ||
| 649 | shell=True) | ||
| 650 | # bb.note(complementary_pkgs) | ||
| 651 | return complementary_pkgs | ||
| 652 | except subprocess.CalledProcessError as e: | ||
| 653 | bb.fatal("Could not invoke smart. Command " | ||
| 654 | "'%s' returned %d:\n%s" % (cmd, e.returncode, e.output)) | ||
| 655 | |||
| 656 | def _search_pkg_name_in_feeds(self, pkg, feed_archs): | ||
| 657 | for arch in feed_archs: | ||
| 658 | arch = arch.replace('-', '_') | ||
| 659 | for p in self.fullpkglist: | ||
| 660 | regex_match = r"^%s-[^-]*-[^-]*@%s$" % \ | ||
| 661 | (re.escape(pkg), re.escape(arch)) | ||
| 662 | if re.match(regex_match, p) is not None: | ||
| 663 | # First found is best match | ||
| 664 | # bb.note('%s -> %s' % (pkg, pkg + '@' + arch)) | ||
| 665 | return pkg + '@' + arch | ||
| 666 | |||
| 667 | return "" | ||
| 668 | |||
    '''
    Translate the OE multilib format names to the RPM/Smart format names
    It searched the RPM/Smart format names in probable multilib feeds first,
    and then searched the default base feed.
    '''
    def _pkg_translate_oe_to_smart(self, pkgs, attempt_only=False):
        # Returns the translated names. With attempt_only a failed lookup
        # only warns and the package is dropped; otherwise it is fatal.
        new_pkgs = list()

        for pkg in pkgs:
            new_pkg = pkg
            # Search new_pkg in probable multilibs first
            for mlib in self.ml_prefix_list:
                # Jump the default archs
                if mlib == 'default':
                    continue

                subst = pkg.replace(mlib + '-', '')
                # if the pkg in this multilib feed
                if subst != pkg:
                    feed_archs = self.ml_prefix_list[mlib]
                    new_pkg = self._search_pkg_name_in_feeds(subst, feed_archs)
                    if not new_pkg:
                        # Failed to translate, package not found!
                        err_msg = '%s not found in the %s feeds (%s).\n' % \
                                  (pkg, mlib, " ".join(feed_archs))
                        if not attempt_only:
                            err_msg += " ".join(self.fullpkglist)
                            bb.fatal(err_msg)
                        bb.warn(err_msg)
                    else:
                        new_pkgs.append(new_pkg)

                    # Only one multilib prefix can match; stop scanning.
                    break

            # Apparently not a multilib package...
            if pkg == new_pkg:
                # Search new_pkg in default archs
                default_archs = self.ml_prefix_list['default']
                new_pkg = self._search_pkg_name_in_feeds(pkg, default_archs)
                if not new_pkg:
                    err_msg = '%s not found in the base feeds (%s).\n' % \
                              (pkg, ' '.join(default_archs))
                    if not attempt_only:
                        err_msg += " ".join(self.fullpkglist)
                        bb.fatal(err_msg)
                    bb.warn(err_msg)
                else:
                    new_pkgs.append(new_pkg)

        return new_pkgs
| 719 | |||
    def _create_configs(self, platform, platform_extra):
        """Write the rpm and smart configuration for the target rootfs.

        Creates the rpm "platform"/"sysinfo" files under self.etcrpm_dir,
        primes the rpm database (DB_CONFIG plus a bare log file), then
        points smart at the rootfs rpm db and registers one rpm-md channel
        per feed arch found under self.deploy_dir.  Finally it generates
        the cross-install scriptlet wrapper used to run/postpone package
        scriptlets.

        platform       -- canonical arch written first (preferred) into
                          the rpm platform file
        platform_extra -- additional canonical archs, in priority order
        """
        # Setup base system configuration
        bb.note("configuring RPM platform settings")

        # Configure internal RPM environment when using Smart
        os.environ['RPM_ETCRPM'] = self.etcrpm_dir
        bb.utils.mkdirhier(self.etcrpm_dir)

        # Setup temporary directory -- install...
        if os.path.exists(self.install_dir):
            bb.utils.remove(self.install_dir, True)
        bb.utils.mkdirhier(os.path.join(self.install_dir, 'tmp'))

        channel_priority = 5
        platform_dir = os.path.join(self.etcrpm_dir, "platform")
        with open(platform_dir, "w+") as platform_fd:
            platform_fd.write(platform + '\n')
            for pt in platform_extra:
                channel_priority += 5
                # Rewrite "<arch>-linux<suffix>" into the wildcard pattern
                # "<arch>-linux.*" (the substitution also supplies the
                # terminating newline for each platform entry).
                platform_fd.write(re.sub("-linux.*$", "-linux.*\n", pt))

        # Tell RPM that the "/" directory exist and is available
        bb.note("configuring RPM system provides")
        sysinfo_dir = os.path.join(self.etcrpm_dir, "sysinfo")
        bb.utils.mkdirhier(sysinfo_dir)
        with open(os.path.join(sysinfo_dir, "Dirnames"), "w+") as dirnames:
            dirnames.write("/\n")

        # Extra "Providename" entries (one per line), only written once.
        if self.providename:
            providename_dir = os.path.join(sysinfo_dir, "Providename")
            if not os.path.exists(providename_dir):
                providename_content = '\n'.join(self.providename)
                providename_content += '\n'
                open(providename_dir, "w+").write(providename_content)

        # Configure RPM... we enforce these settings!
        bb.note("configuring RPM DB settings")
        # After change the __db.* cache size, log file will not be
        # generated automatically, that will raise some warnings,
        # so touch a bare log for rpm write into it.
        rpmlib_log = os.path.join(self.image_rpmlib, 'log', 'log.0000000001')
        if not os.path.exists(rpmlib_log):
            bb.utils.mkdirhier(os.path.join(self.image_rpmlib, 'log'))
            open(rpmlib_log, 'w+').close()

        # Berkeley DB environment configuration for the rpm database.
        DB_CONFIG_CONTENT = "# ================ Environment\n" \
            "set_data_dir .\n" \
            "set_create_dir .\n" \
            "set_lg_dir ./log\n" \
            "set_tmp_dir ./tmp\n" \
            "set_flags db_log_autoremove on\n" \
            "\n" \
            "# -- thread_count must be >= 8\n" \
            "set_thread_count 64\n" \
            "\n" \
            "# ================ Logging\n" \
            "\n" \
            "# ================ Memory Pool\n" \
            "set_cachesize 0 1048576 0\n" \
            "set_mp_mmapsize 268435456\n" \
            "\n" \
            "# ================ Locking\n" \
            "set_lk_max_locks 16384\n" \
            "set_lk_max_lockers 16384\n" \
            "set_lk_max_objects 16384\n" \
            "mutex_set_max 163840\n" \
            "\n" \
            "# ================ Replication\n"

        db_config_dir = os.path.join(self.image_rpmlib, 'DB_CONFIG')
        if not os.path.exists(db_config_dir):
            open(db_config_dir, 'w+').write(DB_CONFIG_CONTENT)

        # Create database so that smart doesn't complain (lazy init)
        cmd = "%s --root %s --dbpath /var/lib/rpm -qa > /dev/null" % (
              self.rpm_cmd,
              self.target_rootfs)
        try:
            subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
        except subprocess.CalledProcessError as e:
            bb.fatal("Create rpm database failed. Command '%s' "
                     "returned %d:\n%s" % (cmd, e.returncode, e.output))

        # Configure smart
        bb.note("configuring Smart settings")
        # Start from a clean smart state in the rootfs.
        bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'),
                        True)
        self._invoke_smart('config --set rpm-root=%s' % self.target_rootfs)
        self._invoke_smart('config --set rpm-dbpath=/var/lib/rpm')
        self._invoke_smart('config --set rpm-extra-macros._var=%s' %
                           self.d.getVar('localstatedir', True))
        cmd = 'config --set rpm-extra-macros._tmppath=/install/tmp'
        self._invoke_smart(cmd)

        # Write common configuration for host and target usage
        self._invoke_smart('config --set rpm-nolinktos=1')
        self._invoke_smart('config --set rpm-noparentdirs=1')
        for i in self.d.getVar('BAD_RECOMMENDATIONS', True).split():
            self._invoke_smart('flag --set ignore-recommends %s' % i)

        # Do the following configurations here, to avoid them being
        # saved for field upgrade
        if self.d.getVar('NO_RECOMMENDATIONS', True).strip() == "1":
            self._invoke_smart('config --set ignore-all-recommends=1')
        pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE', True) or ""
        for i in pkg_exclude.split():
            self._invoke_smart('flag --set exclude-packages %s' % i)

        # Optional debugging
        # self._invoke_smart('config --set rpm-log-level=debug')
        # cmd = 'config --set rpm-log-file=/tmp/smart-debug-logfile'
        # self._invoke_smart(cmd)
        # One smart channel per arch with a deploy subdir; priorities
        # descend from the value accumulated while writing "platform".
        ch_already_added = []
        for canonical_arch in platform_extra:
            arch = canonical_arch.split('-')[0]
            arch_channel = os.path.join(self.deploy_dir, arch)
            if os.path.exists(arch_channel) and not arch in ch_already_added:
                bb.note('Note: adding Smart channel %s (%s)' %
                        (arch, channel_priority))
                self._invoke_smart('channel --add %s type=rpm-md baseurl=%s -y'
                                   % (arch, arch_channel))
                self._invoke_smart('channel --set %s priority=%d' %
                                   (arch, channel_priority))
                channel_priority -= 5

                ch_already_added.append(arch)

        bb.note('adding Smart RPM DB channel')
        self._invoke_smart('channel --add rpmsys type=rpm-sys -y')

        # Construct install scriptlet wrapper.
        # Scripts need to be ordered when executed, this ensures numeric order.
        # If we ever run into needing more the 899 scripts, we'll have to.
        # change num to start with 1000.
        #
        # Arguments received by the wrapper: $1 rootfs, $2 interpreter,
        # $3 scriptlet path, $4 rpm scriptlet arg; failed postinstalls
        # ($4 == 1) are saved under etc/rpm-postinsts for first boot.
        SCRIPTLET_FORMAT = "#!/bin/bash\n" \
            "\n" \
            "export PATH=%s\n" \
            "export D=%s\n" \
            'export OFFLINE_ROOT="$D"\n' \
            'export IPKG_OFFLINE_ROOT="$D"\n' \
            'export OPKG_OFFLINE_ROOT="$D"\n' \
            "export INTERCEPT_DIR=%s\n" \
            "export NATIVE_ROOT=%s\n" \
            "\n" \
            "$2 $1/$3 $4\n" \
            "if [ $? -ne 0 ]; then\n" \
            "  if [ $4 -eq 1 ]; then\n" \
            "    mkdir -p $1/etc/rpm-postinsts\n" \
            "    num=100\n" \
            "    while [ -e $1/etc/rpm-postinsts/${num}-* ]; do num=$((num + 1)); done\n" \
            "    name=`head -1 $1/$3 | cut -d\' \' -f 2`\n" \
            '    echo "#!$2" > $1/etc/rpm-postinsts/${num}-${name}\n' \
            '    echo "# Arg: $4" >> $1/etc/rpm-postinsts/${num}-${name}\n' \
            "    cat $1/$3 >> $1/etc/rpm-postinsts/${num}-${name}\n" \
            "    chmod +x $1/etc/rpm-postinsts/${num}-${name}\n" \
            "  else\n" \
            '    echo "Error: pre/post remove scriptlet failed"\n' \
            "  fi\n" \
            "fi\n"

        intercept_dir = self.d.expand('${WORKDIR}/intercept_scripts')
        native_root = self.d.getVar('STAGING_DIR_NATIVE', True)
        scriptlet_content = SCRIPTLET_FORMAT % (os.environ['PATH'],
                                                self.target_rootfs,
                                                intercept_dir,
                                                native_root)
        open(self.scriptlet_wrapper, 'w+').write(scriptlet_content)

        bb.note("Note: configuring RPM cross-install scriptlet_wrapper")
        os.chmod(self.scriptlet_wrapper, 0755)
        cmd = 'config --set rpm-extra-macros._cross_scriptlet_wrapper=%s' % \
              self.scriptlet_wrapper
        self._invoke_smart(cmd)

        # Debug to show smart config info
        # bb.note(self._invoke_smart('config --show'))
| 897 | |||
| 898 | def update(self): | ||
| 899 | self._invoke_smart('update rpmsys') | ||
| 900 | |||
| 901 | ''' | ||
| 902 | Install pkgs with smart, the pkg name is oe format | ||
| 903 | ''' | ||
| 904 | def install(self, pkgs, attempt_only=False): | ||
| 905 | |||
| 906 | bb.note("Installing the following packages: %s" % ' '.join(pkgs)) | ||
| 907 | if attempt_only and len(pkgs) == 0: | ||
| 908 | return | ||
| 909 | pkgs = self._pkg_translate_oe_to_smart(pkgs, attempt_only) | ||
| 910 | |||
| 911 | if not attempt_only: | ||
| 912 | bb.note('to be installed: %s' % ' '.join(pkgs)) | ||
| 913 | cmd = "%s %s install -y %s" % \ | ||
| 914 | (self.smart_cmd, self.smart_opt, ' '.join(pkgs)) | ||
| 915 | bb.note(cmd) | ||
| 916 | else: | ||
| 917 | bb.note('installing attempt only packages...') | ||
| 918 | bb.note('Attempting %s' % ' '.join(pkgs)) | ||
| 919 | cmd = "%s %s install --attempt -y %s" % \ | ||
| 920 | (self.smart_cmd, self.smart_opt, ' '.join(pkgs)) | ||
| 921 | try: | ||
| 922 | output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) | ||
| 923 | bb.note(output) | ||
| 924 | except subprocess.CalledProcessError as e: | ||
| 925 | bb.fatal("Unable to install packages. Command '%s' " | ||
| 926 | "returned %d:\n%s" % (cmd, e.returncode, e.output)) | ||
| 927 | |||
| 928 | ''' | ||
| 929 | Remove pkgs with smart, the pkg name is smart/rpm format | ||
| 930 | ''' | ||
| 931 | def remove(self, pkgs, with_dependencies=True): | ||
| 932 | bb.note('to be removed: ' + ' '.join(pkgs)) | ||
| 933 | |||
| 934 | if not with_dependencies: | ||
| 935 | cmd = "%s -e --nodeps " % self.rpm_cmd | ||
| 936 | cmd += "--root=%s " % self.target_rootfs | ||
| 937 | cmd += "--dbpath=/var/lib/rpm " | ||
| 938 | cmd += "--define='_cross_scriptlet_wrapper %s' " % \ | ||
| 939 | self.scriptlet_wrapper | ||
| 940 | cmd += "--define='_tmppath /install/tmp' %s" % ' '.join(pkgs) | ||
| 941 | else: | ||
| 942 | # for pkg in pkgs: | ||
| 943 | # bb.note('Debug: What required: %s' % pkg) | ||
| 944 | # bb.note(self._invoke_smart('query %s --show-requiredby' % pkg)) | ||
| 945 | |||
| 946 | cmd = "%s %s remove -y %s" % (self.smart_cmd, | ||
| 947 | self.smart_opt, | ||
| 948 | ' '.join(pkgs)) | ||
| 949 | |||
| 950 | try: | ||
| 951 | bb.note(cmd) | ||
| 952 | output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) | ||
| 953 | bb.note(output) | ||
| 954 | except subprocess.CalledProcessError as e: | ||
| 955 | bb.note("Unable to remove packages. Command '%s' " | ||
| 956 | "returned %d:\n%s" % (cmd, e.returncode, e.output)) | ||
| 957 | |||
| 958 | def upgrade(self): | ||
| 959 | bb.note('smart upgrade') | ||
| 960 | self._invoke_smart('upgrade') | ||
| 961 | |||
| 962 | def write_index(self): | ||
| 963 | result = self.indexer.write_index() | ||
| 964 | |||
| 965 | if result is not None: | ||
| 966 | bb.fatal(result) | ||
| 967 | |||
| 968 | def remove_packaging_data(self): | ||
| 969 | bb.utils.remove(self.image_rpmlib, True) | ||
| 970 | bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'), | ||
| 971 | True) | ||
| 972 | bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/opkg'), True) | ||
| 973 | |||
| 974 | # remove temp directory | ||
| 975 | bb.utils.remove(self.d.expand('${IMAGE_ROOTFS}/install'), True) | ||
| 976 | |||
| 977 | def backup_packaging_data(self): | ||
| 978 | # Save the rpmlib for increment rpm image generation | ||
| 979 | if os.path.exists(self.saved_rpmlib): | ||
| 980 | bb.utils.remove(self.saved_rpmlib, True) | ||
| 981 | shutil.copytree(self.image_rpmlib, | ||
| 982 | self.saved_rpmlib, | ||
| 983 | symlinks=True) | ||
| 984 | |||
| 985 | def recovery_packaging_data(self): | ||
| 986 | # Move the rpmlib back | ||
| 987 | if os.path.exists(self.saved_rpmlib): | ||
| 988 | if os.path.exists(self.image_rpmlib): | ||
| 989 | bb.utils.remove(self.image_rpmlib, True) | ||
| 990 | |||
| 991 | bb.note('Recovery packaging data') | ||
| 992 | shutil.copytree(self.saved_rpmlib, | ||
| 993 | self.image_rpmlib, | ||
| 994 | symlinks=True) | ||
| 995 | |||
| 996 | def list_installed(self, format=None): | ||
| 997 | return self.pkgs_list.list(format) | ||
| 998 | |||
| 999 | ''' | ||
| 1000 | If incremental install, we need to determine what we've got, | ||
| 1001 | what we need to add, and what to remove... | ||
| 1002 | The dump_install_solution will dump and save the new install | ||
| 1003 | solution. | ||
| 1004 | ''' | ||
| 1005 | def dump_install_solution(self, pkgs): | ||
| 1006 | bb.note('creating new install solution for incremental install') | ||
| 1007 | if len(pkgs) == 0: | ||
| 1008 | return | ||
| 1009 | |||
| 1010 | pkgs = self._pkg_translate_oe_to_smart(pkgs, False) | ||
| 1011 | install_pkgs = list() | ||
| 1012 | |||
| 1013 | cmd = "%s %s install -y --dump %s 2>%s" % \ | ||
| 1014 | (self.smart_cmd, | ||
| 1015 | self.smart_opt, | ||
| 1016 | ' '.join(pkgs), | ||
| 1017 | self.solution_manifest) | ||
| 1018 | try: | ||
| 1019 | # Disable rpmsys channel for the fake install | ||
| 1020 | self._invoke_smart('channel --disable rpmsys') | ||
| 1021 | |||
| 1022 | subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) | ||
| 1023 | with open(self.solution_manifest, 'r') as manifest: | ||
| 1024 | for pkg in manifest.read().split('\n'): | ||
| 1025 | if '@' in pkg: | ||
| 1026 | install_pkgs.append(pkg) | ||
| 1027 | except subprocess.CalledProcessError as e: | ||
| 1028 | bb.note("Unable to dump install packages. Command '%s' " | ||
| 1029 | "returned %d:\n%s" % (cmd, e.returncode, e.output)) | ||
| 1030 | # Recovery rpmsys channel | ||
| 1031 | self._invoke_smart('channel --enable rpmsys') | ||
| 1032 | return install_pkgs | ||
| 1033 | |||
| 1034 | ''' | ||
| 1035 | If incremental install, we need to determine what we've got, | ||
| 1036 | what we need to add, and what to remove... | ||
| 1037 | The load_old_install_solution will load the previous install | ||
| 1038 | solution | ||
| 1039 | ''' | ||
| 1040 | def load_old_install_solution(self): | ||
| 1041 | bb.note('load old install solution for incremental install') | ||
| 1042 | installed_pkgs = list() | ||
| 1043 | if not os.path.exists(self.solution_manifest): | ||
| 1044 | bb.note('old install solution not exist') | ||
| 1045 | return installed_pkgs | ||
| 1046 | |||
| 1047 | with open(self.solution_manifest, 'r') as manifest: | ||
| 1048 | for pkg in manifest.read().split('\n'): | ||
| 1049 | if '@' in pkg: | ||
| 1050 | installed_pkgs.append(pkg.strip()) | ||
| 1051 | |||
| 1052 | return installed_pkgs | ||
| 1053 | |||
| 1054 | ''' | ||
| 1055 | Dump all available packages in feeds, it should be invoked after the | ||
| 1056 | newest rpm index was created | ||
| 1057 | ''' | ||
| 1058 | def dump_all_available_pkgs(self): | ||
| 1059 | available_manifest = self.d.expand('${T}/saved/available_pkgs.txt') | ||
| 1060 | available_pkgs = list() | ||
| 1061 | cmd = "%s %s query --output %s" % \ | ||
| 1062 | (self.smart_cmd, self.smart_opt, available_manifest) | ||
| 1063 | try: | ||
| 1064 | subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) | ||
| 1065 | with open(available_manifest, 'r') as manifest: | ||
| 1066 | for pkg in manifest.read().split('\n'): | ||
| 1067 | if '@' in pkg: | ||
| 1068 | available_pkgs.append(pkg.strip()) | ||
| 1069 | except subprocess.CalledProcessError as e: | ||
| 1070 | bb.note("Unable to list all available packages. Command '%s' " | ||
| 1071 | "returned %d:\n%s" % (cmd, e.returncode, e.output)) | ||
| 1072 | |||
| 1073 | self.fullpkglist = available_pkgs | ||
| 1074 | |||
| 1075 | return | ||
| 1076 | |||
| 1077 | def save_rpmpostinst(self, pkg): | ||
| 1078 | mlibs = (self.d.getVar('MULTILIB_GLOBAL_VARIANTS') or "").split() | ||
| 1079 | |||
| 1080 | new_pkg = pkg | ||
| 1081 | # Remove any multilib prefix from the package name | ||
| 1082 | for mlib in mlibs: | ||
| 1083 | if mlib in pkg: | ||
| 1084 | new_pkg = pkg.replace(mlib + '-', '') | ||
| 1085 | break | ||
| 1086 | |||
| 1087 | bb.note(' * postponing %s' % new_pkg) | ||
| 1088 | saved_dir = self.target_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/') + new_pkg | ||
| 1089 | |||
| 1090 | cmd = self.rpm_cmd + ' -q --scripts --root ' + self.target_rootfs | ||
| 1091 | cmd += ' --dbpath=/var/lib/rpm ' + new_pkg | ||
| 1092 | cmd += ' | sed -n -e "/^postinstall scriptlet (using .*):$/,/^.* scriptlet (using .*):$/ {/.*/p}"' | ||
| 1093 | cmd += ' | sed -e "/postinstall scriptlet (using \(.*\)):$/d"' | ||
| 1094 | cmd += ' -e "/^.* scriptlet (using .*):$/d" > %s' % saved_dir | ||
| 1095 | |||
| 1096 | try: | ||
| 1097 | bb.note(cmd) | ||
| 1098 | output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip() | ||
| 1099 | bb.note(output) | ||
| 1100 | os.chmod(saved_dir, 0755) | ||
| 1101 | except subprocess.CalledProcessError as e: | ||
| 1102 | bb.fatal("Invoke save_rpmpostinst failed. Command '%s' " | ||
| 1103 | "returned %d:\n%s" % (cmd, e.returncode, e.output)) | ||
| 1104 | |||
| 1105 | '''Write common configuration for target usage''' | ||
| 1106 | def rpm_setup_smart_target_config(self): | ||
| 1107 | bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'), | ||
| 1108 | True) | ||
| 1109 | |||
| 1110 | self._invoke_smart('config --set rpm-nolinktos=1') | ||
| 1111 | self._invoke_smart('config --set rpm-noparentdirs=1') | ||
| 1112 | for i in self.d.getVar('BAD_RECOMMENDATIONS', True).split(): | ||
| 1113 | self._invoke_smart('flag --set ignore-recommends %s' % i) | ||
| 1114 | self._invoke_smart('channel --add rpmsys type=rpm-sys -y') | ||
| 1115 | |||
| 1116 | ''' | ||
| 1117 | The rpm db lock files were produced after invoking rpm to query on | ||
| 1118 | build system, and they caused the rpm on target didn't work, so we | ||
| 1119 | need to unlock the rpm db by removing the lock files. | ||
| 1120 | ''' | ||
| 1121 | def unlock_rpm_db(self): | ||
| 1122 | # Remove rpm db lock files | ||
| 1123 | rpm_db_locks = glob.glob('%s/var/lib/rpm/__db.*' % self.target_rootfs) | ||
| 1124 | for f in rpm_db_locks: | ||
| 1125 | bb.utils.remove(f, True) | ||
| 1126 | |||
| 1127 | |||
class OpkgPM(PackageManager):
    """opkg-based PackageManager backend operating on a target rootfs.

    Generates the opkg configuration (from the local deploy directory
    or, with BUILD_IMAGES_FROM_FEEDS, from external feeds) and wraps
    install/remove/update plus the incremental-build state helpers.
    """

    def __init__(self, d, target_rootfs, config_file, archs, task_name='target'):
        """d -- bitbake datastore
        target_rootfs -- root of the filesystem being populated
        config_file -- path of the opkg config file to generate and use
        archs -- space-separated package architectures, priority order
        task_name -- namespaces the saved opkg state under ${T}/saved
        """
        super(OpkgPM, self).__init__(d)

        self.target_rootfs = target_rootfs
        self.config_file = config_file
        self.pkg_archs = archs
        self.task_name = task_name

        self.deploy_dir = self.d.getVar("DEPLOY_DIR_IPK", True)
        self.deploy_lock_file = os.path.join(self.deploy_dir, "deploy.lock")
        self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg-cl")
        self.opkg_args = "-f %s -o %s " % (self.config_file, target_rootfs)
        self.opkg_args += self.d.getVar("OPKG_ARGS", True)

        # OPKGLIBDIR is joined under the rootfs, so drop a leading "/".
        opkg_lib_dir = self.d.getVar('OPKGLIBDIR', True)
        if opkg_lib_dir[0] == "/":
            opkg_lib_dir = opkg_lib_dir[1:]

        self.opkg_dir = os.path.join(target_rootfs, opkg_lib_dir, "opkg")

        bb.utils.mkdirhier(self.opkg_dir)

        self.saved_opkg_dir = self.d.expand('${T}/saved/%s' % self.task_name)
        if not os.path.exists(self.d.expand('${T}/saved')):
            bb.utils.mkdirhier(self.d.expand('${T}/saved'))

        if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") != "1":
            self._create_config()
        else:
            self._create_custom_config()

        self.indexer = OpkgIndexer(self.d, self.deploy_dir)

    def mark_packages(self, status_tag, packages=None):
        """This function will change a package's status in
        /var/lib/opkg/status file.  If 'packages' is None then the
        new_status will be applied to all packages.
        """
        status_file = os.path.join(self.opkg_dir, "status")

        # Rewrite into a temp file, then atomically rename over the original.
        with open(status_file, "r") as sf:
            with open(status_file + ".tmp", "w+") as tmp_sf:
                if packages is None:
                    tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
                                        r"Package: \1\n\2Status: \3%s" % status_tag,
                                        sf.read()))
                else:
                    if type(packages).__name__ != "list":
                        raise TypeError("'packages' should be a list object")

                    status = sf.read()
                    for pkg in packages:
                        status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
                                        r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
                                        status)

                    tmp_sf.write(status)

        os.rename(status_file + ".tmp", status_file)

    def _create_custom_config(self):
        """Write an opkg config built from IPK_FEED_URIS (feed builds)."""
        bb.note("Building from feeds activated!")

        with open(self.config_file, "w+") as config_file:
            priority = 1
            for arch in self.pkg_archs.split():
                config_file.write("arch %s %d\n" % (arch, priority))
                priority += 5

            # IPK_FEED_URIS entries look like "<name>##<uri>".
            for line in (self.d.getVar('IPK_FEED_URIS', True) or "").split():
                feed_match = re.match("^[ \t]*(.*)##([^ \t]*)[ \t]*$", line)

                if feed_match is not None:
                    feed_name = feed_match.group(1)
                    feed_uri = feed_match.group(2)

                    bb.note("Add %s feed with URL %s" % (feed_name, feed_uri))

                    config_file.write("src/gz %s %s\n" % (feed_name, feed_uri))

        # Allow to use package deploy directory contents as quick
        # devel-testing feed. This creates individual feed configs for
        # each arch subdir of those specified as compatible for the
        # current machine.
        # NOTE: Development-helper feature, NOT a full-fledged feed.
        if (self.d.getVar('FEED_DEPLOYDIR_BASE_URI', True) or "") != "":
            for arch in self.pkg_archs.split():
                cfg_file_name = os.path.join(self.target_rootfs,
                                             self.d.getVar("sysconfdir", True),
                                             "opkg",
                                             "local-%s-feed.conf" % arch)

                with open(cfg_file_name, "w+") as cfg_file:
                    # Bug fix: the format arguments must be a tuple --
                    # without the parentheses this line raised
                    # "TypeError: not enough arguments for format string"
                    # whenever FEED_DEPLOYDIR_BASE_URI was set.
                    cfg_file.write("src/gz local-%s %s/%s" %
                                   (arch,
                                    self.d.getVar('FEED_DEPLOYDIR_BASE_URI', True),
                                    arch))

    def _create_config(self):
        """Write the default opkg config pointing at DEPLOY_DIR_IPK feeds."""
        with open(self.config_file, "w+") as config_file:
            priority = 1
            for arch in self.pkg_archs.split():
                config_file.write("arch %s %d\n" % (arch, priority))
                priority += 5

            config_file.write("src oe file:%s\n" % self.deploy_dir)

            # One src entry per arch subdir that actually exists.
            for arch in self.pkg_archs.split():
                pkgs_dir = os.path.join(self.deploy_dir, arch)
                if os.path.isdir(pkgs_dir):
                    config_file.write("src oe-%s file:%s\n" %
                                      (arch, pkgs_dir))

    def insert_feeds_uris(self):
        """Write self.feed_uris channels into etc/opkg/base-feeds.conf
        inside the rootfs (no-op when no feed URIs are configured)."""
        if self.feed_uris == "":
            return

        rootfs_config = os.path.join('%s/etc/opkg/base-feeds.conf'
                                  % self.target_rootfs)

        with open(rootfs_config, "w+") as config_file:
            uri_iterator = 0
            for uri in self.feed_uris.split():
                config_file.write("src/gz url-%d %s/ipk\n" %
                                  (uri_iterator, uri))

                for arch in self.pkg_archs.split():
                    if not os.path.exists(os.path.join(self.deploy_dir, arch)):
                        continue
                    bb.note('Note: adding opkg channel url-%s-%d (%s)' %
                            (arch, uri_iterator, uri))

                    # NOTE(review): the written channel name uses "uri-"
                    # while the log above says "url-"; kept as-is to avoid
                    # changing generated feed names -- confirm intent.
                    config_file.write("src/gz uri-%s-%d %s/ipk/%s\n" %
                                      (arch, uri_iterator, uri, arch))
                uri_iterator += 1

    def update(self):
        """Refresh opkg's package lists from the configured feeds."""
        self.deploy_dir_lock()

        cmd = "%s %s update" % (self.opkg_cmd, self.opkg_args)

        try:
            subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            self.deploy_dir_unlock()
            bb.fatal("Unable to update the package index files. Command '%s' "
                     "returned %d:\n%s" % (cmd, e.returncode, e.output))

        self.deploy_dir_unlock()

    def install(self, pkgs, attempt_only=False):
        """Install *pkgs* into the rootfs.

        attempt_only -- when True, a failed install is logged rather
        than fatal.
        """
        if attempt_only and len(pkgs) == 0:
            return

        cmd = "%s %s install %s" % (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))

        # Environment consumed by package scriptlets running against the
        # offline rootfs.
        os.environ['D'] = self.target_rootfs
        os.environ['OFFLINE_ROOT'] = self.target_rootfs
        os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
        os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
        os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR', True),
                                                   "intercept_scripts")
        os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE', True)

        try:
            bb.note("Installing the following packages: %s" % ' '.join(pkgs))
            bb.note(cmd)
            output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
            bb.note(output)
        except subprocess.CalledProcessError as e:
            # fatal for a real install, just a note when attempt_only
            (bb.fatal, bb.note)[attempt_only]("Unable to install packages. "
                                              "Command '%s' returned %d:\n%s" %
                                              (cmd, e.returncode, e.output))

    def remove(self, pkgs, with_dependencies=True):
        """Remove *pkgs*; with_dependencies also force-removes packages
        depending on them."""
        if with_dependencies:
            cmd = "%s %s --force-depends --force-remove --force-removal-of-dependent-packages remove %s" % \
                (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
        else:
            cmd = "%s %s --force-depends remove %s" % \
                (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))

        try:
            bb.note(cmd)
            output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
            bb.note(output)
        except subprocess.CalledProcessError as e:
            bb.fatal("Unable to remove packages. Command '%s' "
                     "returned %d:\n%s" % (e.cmd, e.returncode, e.output))

    def write_index(self):
        """Build the ipk feed index while holding the deploy-dir lock."""
        self.deploy_dir_lock()

        result = self.indexer.write_index()

        self.deploy_dir_unlock()

        if result is not None:
            bb.fatal(result)

    def remove_packaging_data(self):
        """Drop opkg state from the rootfs, keeping the (empty) dir."""
        bb.utils.remove(self.opkg_dir, True)
        # create the directory back, it's needed by PM lock
        bb.utils.mkdirhier(self.opkg_dir)

    def list_installed(self, format=None):
        """Return the installed-package listing in the requested format."""
        return OpkgPkgsList(self.d, self.target_rootfs, self.config_file).list(format)

    def handle_bad_recommendations(self):
        """Pre-seed the opkg status file so that BAD_RECOMMENDATIONS
        packages are held as "deinstall hold not-installed"."""
        bad_recommendations = self.d.getVar("BAD_RECOMMENDATIONS", True) or ""
        if bad_recommendations.strip() == "":
            return

        status_file = os.path.join(self.opkg_dir, "status")

        # If status file existed, it means the bad recommendations has already
        # been handled
        if os.path.exists(status_file):
            return

        cmd = "%s %s info " % (self.opkg_cmd, self.opkg_args)

        with open(status_file, "w+") as status:
            for pkg in bad_recommendations.split():
                pkg_info = cmd + pkg

                try:
                    output = subprocess.check_output(pkg_info.split(), stderr=subprocess.STDOUT).strip()
                except subprocess.CalledProcessError as e:
                    bb.fatal("Cannot get package info. Command '%s' "
                             "returned %d:\n%s" % (pkg_info, e.returncode, e.output))

                if output == "":
                    bb.note("Ignored bad recommendation: '%s' is "
                            "not a package" % pkg)
                    continue

                # Copy the info block, overriding its Status line.
                for line in output.split('\n'):
                    if line.startswith("Status:"):
                        status.write("Status: deinstall hold not-installed\n")
                    else:
                        status.write(line + "\n")

    def dummy_install(self, pkgs):
        """Dummy-install (--noaction) *pkgs* against a throwaway rootfs
        and return opkg's output log (None for an empty list)."""
        if len(pkgs) == 0:
            return

        # Create an temp dir as opkg root for dummy installation
        temp_rootfs = self.d.expand('${T}/opkg')
        temp_opkg_dir = os.path.join(temp_rootfs, 'var/lib/opkg')
        bb.utils.mkdirhier(temp_opkg_dir)

        opkg_args = "-f %s -o %s " % (self.config_file, temp_rootfs)
        opkg_args += self.d.getVar("OPKG_ARGS", True)

        cmd = "%s %s update" % (self.opkg_cmd, opkg_args)
        try:
            subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
        except subprocess.CalledProcessError as e:
            bb.fatal("Unable to update. Command '%s' "
                     "returned %d:\n%s" % (cmd, e.returncode, e.output))

        # Dummy installation
        cmd = "%s %s --noaction install %s " % (self.opkg_cmd,
                                                opkg_args,
                                                ' '.join(pkgs))
        try:
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
        except subprocess.CalledProcessError as e:
            bb.fatal("Unable to dummy install packages. Command '%s' "
                     "returned %d:\n%s" % (cmd, e.returncode, e.output))

        bb.utils.remove(temp_rootfs, True)

        return output

    def backup_packaging_data(self):
        """Snapshot the opkg state dir into ${T}/saved for incremental
        ipk image generation."""
        if os.path.exists(self.saved_opkg_dir):
            bb.utils.remove(self.saved_opkg_dir, True)
        shutil.copytree(self.opkg_dir,
                        self.saved_opkg_dir,
                        symlinks=True)

    def recover_packaging_data(self):
        """Restore the opkg state snapshot taken by backup_packaging_data()."""
        if os.path.exists(self.saved_opkg_dir):
            if os.path.exists(self.opkg_dir):
                bb.utils.remove(self.opkg_dir, True)

            bb.note('Recover packaging data')
            shutil.copytree(self.saved_opkg_dir,
                            self.opkg_dir,
                            symlinks=True)
| 1428 | |||
| 1429 | |||
class DpkgPM(PackageManager):
    # PackageManager implementation backed by apt-get/dpkg, operating on a
    # target rootfs with its own private apt configuration directory.
    def __init__(self, d, target_rootfs, archs, base_archs, apt_conf_dir=None):
        """d: bitbake datastore; target_rootfs: rootfs to install into;
        archs: space-separated package architecture list; base_archs: dpkg
        base architecture; apt_conf_dir: optional override for the apt
        configuration location (defaults to ${APTCONF_TARGET}/apt)."""
        super(DpkgPM, self).__init__(d)
        self.target_rootfs = target_rootfs
        self.deploy_dir = self.d.getVar('DEPLOY_DIR_DEB', True)
        if apt_conf_dir is None:
            self.apt_conf_dir = self.d.expand("${APTCONF_TARGET}/apt")
        else:
            self.apt_conf_dir = apt_conf_dir
        self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf")
        self.apt_get_cmd = bb.utils.which(os.getenv('PATH'), "apt-get")

        # NOTE(review): may be None when APT_ARGS is unset, which would put
        # the literal "None" into command lines built below -- confirm.
        self.apt_args = d.getVar("APT_ARGS", True)

        self._create_configs(archs, base_archs)

        self.indexer = DpkgIndexer(self.d, self.deploy_dir)

    """
    This function will change a package's status in /var/lib/dpkg/status file.
    If 'packages' is None then the new_status will be applied to all
    packages
    """
    def mark_packages(self, status_tag, packages=None):
        status_file = self.target_rootfs + "/var/lib/dpkg/status"

        with open(status_file, "r") as sf:
            with open(status_file + ".tmp", "w+") as tmp_sf:
                if packages is None:
                    # Replace the trailing 'unpacked'/'installed' word of
                    # every Status: line with the supplied tag.
                    tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
                                        r"Package: \1\n\2Status: \3%s" % status_tag,
                                        sf.read()))
                else:
                    if type(packages).__name__ != "list":
                        raise TypeError("'packages' should be a list object")

                    status = sf.read()
                    for pkg in packages:
                        # Same rewrite, restricted to one named package.
                        status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
                                        r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
                                        status)

                    tmp_sf.write(status)

        # Swap the rewritten copy into place.
        os.rename(status_file + ".tmp", status_file)

    """
    Run the pre/post installs for package "package_name". If package_name is
    None, then run all pre/post install scriptlets.
    """
    def run_pre_post_installs(self, package_name=None):
        info_dir = self.target_rootfs + "/var/lib/dpkg/info"
        suffixes = [(".preinst", "Preinstall"), (".postinst", "Postinstall")]
        status_file = self.target_rootfs + "/var/lib/dpkg/status"
        installed_pkgs = []

        # Collect every installed package name from the dpkg status file.
        with open(status_file, "r") as status:
            for line in status.read().split('\n'):
                m = re.match("^Package: (.*)", line)
                if m is not None:
                    installed_pkgs.append(m.group(1))

        # When a specific package was requested but is not installed, there
        # is nothing to do.  NOTE(review): when package_name IS installed the
        # loop below still runs scriptlets for every installed package, not
        # just the requested one -- confirm this is intended.
        if package_name is not None and not package_name in installed_pkgs:
            return

        # Environment expected by postinst scripts running offline.
        os.environ['D'] = self.target_rootfs
        os.environ['OFFLINE_ROOT'] = self.target_rootfs
        os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
        os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
        os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR', True),
                                                   "intercept_scripts")
        os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE', True)

        failed_pkgs = []
        for pkg_name in installed_pkgs:
            for suffix in suffixes:
                p_full = os.path.join(info_dir, pkg_name + suffix[0])
                if os.path.exists(p_full):
                    try:
                        bb.note("Executing %s for package: %s ..." %
                                 (suffix[1].lower(), pkg_name))
                        subprocess.check_output(p_full, stderr=subprocess.STDOUT)
                    except subprocess.CalledProcessError as e:
                        bb.note("%s for package %s failed with %d:\n%s" %
                                (suffix[1], pkg_name, e.returncode, e.output))
                        failed_pkgs.append(pkg_name)
                        # A failed preinst skips the package's postinst too.
                        break

        if len(failed_pkgs):
            # Mark failures as merely unpacked so they can be retried.
            self.mark_packages("unpacked", failed_pkgs)

    def update(self):
        """Run 'apt-get update' against this rootfs' configuration."""
        os.environ['APT_CONFIG'] = self.apt_conf_file

        self.deploy_dir_lock()

        cmd = "%s update" % self.apt_get_cmd

        try:
            subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            bb.fatal("Unable to update the package index files. Command '%s' "
                     "returned %d:\n%s" % (e.cmd, e.returncode, e.output))

        self.deploy_dir_unlock()

    def install(self, pkgs, attempt_only=False):
        """Install *pkgs*; when attempt_only is set, failure is non-fatal."""
        if attempt_only and len(pkgs) == 0:
            return

        os.environ['APT_CONFIG'] = self.apt_conf_file

        cmd = "%s %s install --force-yes --allow-unauthenticated %s" % \
              (self.apt_get_cmd, self.apt_args, ' '.join(pkgs))

        try:
            bb.note("Installing the following packages: %s" % ' '.join(pkgs))
            subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            # attempt_only selects bb.note (warn-and-continue) over bb.fatal.
            (bb.fatal, bb.note)[attempt_only]("Unable to install packages. "
                                              "Command '%s' returned %d:\n%s" %
                                              (cmd, e.returncode, e.output))

        # rename *.dpkg-new files/dirs
        for root, dirs, files in os.walk(self.target_rootfs):
            for dir in dirs:
                new_dir = re.sub("\.dpkg-new", "", dir)
                if dir != new_dir:
                    os.rename(os.path.join(root, dir),
                              os.path.join(root, new_dir))

            for file in files:
                new_file = re.sub("\.dpkg-new", "", file)
                if file != new_file:
                    os.rename(os.path.join(root, file),
                              os.path.join(root, new_file))


    def remove(self, pkgs, with_dependencies=True):
        """Remove *pkgs*, via apt-get (with deps) or raw dpkg (without)."""
        if with_dependencies:
            os.environ['APT_CONFIG'] = self.apt_conf_file
            cmd = "%s remove %s" % (self.apt_get_cmd, ' '.join(pkgs))
        else:
            # Bypass apt and force-remove just the named packages.
            cmd = "%s --admindir=%s/var/lib/dpkg --instdir=%s" \
                  " -r --force-depends %s" % \
                  (bb.utils.which(os.getenv('PATH'), "dpkg"),
                   self.target_rootfs, self.target_rootfs, ' '.join(pkgs))

        try:
            subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            bb.fatal("Unable to remove packages. Command '%s' "
                     "returned %d:\n%s" % (e.cmd, e.returncode, e.output))

    def write_index(self):
        """Regenerate the deb package index under the deploy dir lock."""
        self.deploy_dir_lock()

        result = self.indexer.write_index()

        self.deploy_dir_unlock()

        if result is not None:
            bb.fatal(result)

    def insert_feeds_uris(self):
        """Write /etc/apt/sources.list entries for each configured feed URI."""
        if self.feed_uris == "":
            return

        sources_conf = os.path.join("%s/etc/apt/sources.list"
                                    % self.target_rootfs)
        arch_list = []
        archs = self.d.getVar('PACKAGE_ARCHS', True)
        for arch in archs.split():
            # Only advertise architectures that actually have a deploy dir.
            if not os.path.exists(os.path.join(self.deploy_dir, arch)):
                continue
            arch_list.append(arch)

        with open(sources_conf, "w+") as sources_file:
            for uri in self.feed_uris.split():
                for arch in arch_list:
                    bb.note('Note: adding dpkg channel at (%s)' % uri)
                    sources_file.write("deb %s/deb/%s ./\n" %
                                       (uri, arch))

    def _create_configs(self, archs, base_archs):
        """Build the private apt config tree (apt.conf, preferences,
        sources.list) and a skeleton dpkg database in the rootfs."""
        # dpkg architectures use '-' where bitbake uses '_'.
        base_archs = re.sub("_", "-", base_archs)

        if os.path.exists(self.apt_conf_dir):
            bb.utils.remove(self.apt_conf_dir, True)

        bb.utils.mkdirhier(self.apt_conf_dir)
        bb.utils.mkdirhier(self.apt_conf_dir + "/lists/partial/")
        bb.utils.mkdirhier(self.apt_conf_dir + "/apt.conf.d/")

        arch_list = []
        for arch in archs.split():
            if not os.path.exists(os.path.join(self.deploy_dir, arch)):
                continue
            arch_list.append(arch)

        with open(os.path.join(self.apt_conf_dir, "preferences"), "w+") as prefs_file:
            # Later architectures in PACKAGE_ARCHS get higher pin priority.
            priority = 801
            for arch in arch_list:
                prefs_file.write(
                    "Package: *\n"
                    "Pin: release l=%s\n"
                    "Pin-Priority: %d\n\n" % (arch, priority))

                priority += 5

            # Pin excluded packages below 0 so apt never installs them.
            for pkg in self.d.getVar('PACKAGE_EXCLUDE', True).split():
                prefs_file.write(
                    "Package: %s\n"
                    "Pin: release *\n"
                    "Pin-Priority: -1\n\n" % pkg)

        arch_list.reverse()

        with open(os.path.join(self.apt_conf_dir, "sources.list"), "w+") as sources_file:
            for arch in arch_list:
                sources_file.write("deb file:%s/ ./\n" %
                                   os.path.join(self.deploy_dir, arch))

        # Instantiate apt.conf from the staged sample, substituting the
        # architecture, rootfs and config-dir placeholders.
        with open(self.apt_conf_file, "w+") as apt_conf:
            with open(self.d.expand("${STAGING_ETCDIR_NATIVE}/apt/apt.conf.sample")) as apt_conf_sample:
                for line in apt_conf_sample.read().split("\n"):
                    line = re.sub("Architecture \".*\";",
                                  "Architecture \"%s\";" % base_archs, line)
                    line = re.sub("#ROOTFS#", self.target_rootfs, line)
                    line = re.sub("#APTCONF#", self.apt_conf_dir, line)

                    apt_conf.write(line + "\n")

        target_dpkg_dir = "%s/var/lib/dpkg" % self.target_rootfs
        bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "info"))

        bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "updates"))

        # dpkg refuses to run without these database files existing.
        if not os.path.exists(os.path.join(target_dpkg_dir, "status")):
            open(os.path.join(target_dpkg_dir, "status"), "w+").close()
        if not os.path.exists(os.path.join(target_dpkg_dir, "available")):
            open(os.path.join(target_dpkg_dir, "available"), "w+").close()

    def remove_packaging_data(self):
        """Delete packaging metadata from the rootfs."""
        # NOTE(review): removing 'opkglibdir' here looks copied from the
        # opkg backend -- confirm it is intentional for the dpkg one.
        bb.utils.remove(os.path.join(self.target_rootfs,
                                     self.d.getVar('opkglibdir', True)), True)
        bb.utils.remove(self.target_rootfs + "/var/lib/dpkg/", True)

    def fix_broken_dependencies(self):
        """Run 'apt-get -f install' to repair dependency breakage."""
        os.environ['APT_CONFIG'] = self.apt_conf_file

        cmd = "%s %s -f install" % (self.apt_get_cmd, self.apt_args)

        try:
            subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            bb.fatal("Cannot fix broken dependencies. Command '%s' "
                     "returned %d:\n%s" % (cmd, e.returncode, e.output))

    def list_installed(self, format=None):
        """Return the installed-package listing for this rootfs."""
        return DpkgPkgsList(self.d, self.target_rootfs).list()
| 1691 | |||
| 1692 | |||
def generate_index_files(d):
    """Write package index files for every enabled PACKAGE_CLASSES backend."""
    classes = d.getVar('PACKAGE_CLASSES', True).replace("package_", "").split()

    indexer_map = {
        "rpm": (RpmIndexer, d.getVar('DEPLOY_DIR_RPM', True)),
        "ipk": (OpkgIndexer, d.getVar('DEPLOY_DIR_IPK', True)),
        "deb": (DpkgIndexer, d.getVar('DEPLOY_DIR_DEB', True))
    }

    result = None

    for pkg_class in classes:
        if pkg_class in indexer_map:
            indexer_cls, deploy_dir = indexer_map[pkg_class]
            if os.path.exists(deploy_dir):
                # Only the last indexer's failure message survives the loop,
                # matching the original behaviour.
                result = indexer_cls(d, deploy_dir).write_index()

    if result is not None:
        bb.fatal(result)
| 1713 | |||
if __name__ == "__main__":
    # We should be able to run this as a standalone script, from outside the
    # bitbake environment -- the implementation is still TBD.
    pass
diff --git a/meta/lib/oe/packagedata.py b/meta/lib/oe/packagedata.py new file mode 100644 index 0000000000..cd5f0445f5 --- /dev/null +++ b/meta/lib/oe/packagedata.py | |||
| @@ -0,0 +1,94 @@ | |||
import codecs
import os
| 2 | |||
def packaged(pkg, d):
    """Return True if *pkg* was packaged (its .packaged marker is readable)."""
    marker = get_subpkgedata_fn(pkg, d) + '.packaged'
    return os.access(marker, os.R_OK)
| 5 | |||
def read_pkgdatafile(fn):
    """Parse a pkgdata file into a {field: value} dict.

    Each line has the form 'NAME: value'; stored values have their
    string-escape sequences decoded.  An unreadable or missing file
    yields an empty dict.
    """
    pkgdata = {}

    def decode(value):
        # Values are written string-escaped by the packaging code; undo that.
        c = codecs.getdecoder("string_escape")
        return c(value)[0]

    if os.access(fn, os.R_OK):
        import re
        # Raw string (the original "\s" relied on a lenient escape) and a
        # single compile hoisted out of the per-line loop.
        field_re = re.compile(r"([^:]+):\s*(.*)")
        # Context manager guarantees the handle is closed even on error.
        with open(fn, 'r') as f:
            lines = f.readlines()
        for l in lines:
            m = field_re.match(l)
            if m:
                pkgdata[m.group(1)] = decode(m.group(2))

    return pkgdata
| 25 | |||
def get_subpkgedata_fn(pkg, d):
    """Return the expanded path of the runtime pkgdata file for *pkg*."""
    runtime_entry = '${PKGDATA_DIR}/runtime/%s' % pkg
    return d.expand(runtime_entry)
| 28 | |||
def has_subpkgdata(pkg, d):
    """Return True if the runtime pkgdata file for *pkg* is readable."""
    fn = get_subpkgedata_fn(pkg, d)
    return os.access(fn, os.R_OK)
| 31 | |||
def read_subpkgdata(pkg, d):
    """Parse and return the runtime pkgdata for *pkg* as a dict."""
    fn = get_subpkgedata_fn(pkg, d)
    return read_pkgdatafile(fn)
| 34 | |||
def has_pkgdata(pn, d):
    """Return True if recipe-level pkgdata exists for recipe *pn*."""
    return os.access(d.expand('${PKGDATA_DIR}/%s' % pn), os.R_OK)
| 38 | |||
def read_pkgdata(pn, d):
    """Parse and return the recipe-level pkgdata for *pn* as a dict."""
    return read_pkgdatafile(d.expand('${PKGDATA_DIR}/%s' % pn))
| 42 | |||
#
# Collapse FOO_pkg variables into FOO
#
def read_subpkgdata_dict(pkg, d):
    """Return pkgdata for *pkg* with the per-package suffix stripped off keys."""
    raw = read_pkgdatafile(get_subpkgedata_fn(pkg, d))
    collapsed = {}
    for key in raw:
        stripped = key.replace("_" + pkg, "")
        # Skip the unsuffixed variable when a suffixed override also exists.
        if stripped == key and key + "_" + pkg in raw:
            continue
        collapsed[stripped] = raw[key]
    return collapsed
| 55 | |||
def _pkgmap(d):
    """Return a dictionary mapping package to recipe name."""

    pkgdatadir = d.getVar("PKGDATA_DIR", True)

    try:
        entries = os.listdir(pkgdatadir)
    except OSError:
        bb.warn("No files in %s?" % pkgdatadir)
        entries = []

    pkgmap = {}
    for pn in entries:
        path = os.path.join(pkgdatadir, pn)
        # Recipe-level pkgdata are plain files; skip subdirectories.
        if os.path.isdir(path):
            continue
        try:
            pkgdata = read_pkgdatafile(path)
        except OSError:
            continue
        for pkg in (pkgdata.get("PACKAGES") or "").split():
            pkgmap[pkg] = pn

    return pkgmap
| 79 | |||
def pkgmap(d):
    """Return a dictionary mapping package to recipe name.
    Cache the mapping in the metadata"""

    cached = d.getVar("__pkgmap_data", False)
    if cached is None:
        cached = _pkgmap(d)
        d.setVar("__pkgmap_data", cached)

    return cached
| 90 | |||
def recipename(pkg, d):
    """Return the recipe name for the given binary package name, or None."""
    mapping = pkgmap(d)
    return mapping.get(pkg)
diff --git a/meta/lib/oe/packagegroup.py b/meta/lib/oe/packagegroup.py new file mode 100644 index 0000000000..12eb4212ff --- /dev/null +++ b/meta/lib/oe/packagegroup.py | |||
| @@ -0,0 +1,36 @@ | |||
| 1 | import itertools | ||
| 2 | |||
def is_optional(feature, d):
    """Return True if *feature*'s package list carries the 'optional' flag."""
    var = "FEATURE_PACKAGES_%s" % feature
    # Fall back to the legacy PACKAGE_GROUP_* name when FEATURE_PACKAGES_*
    # is unset or empty.
    if not d.getVar(var, True):
        var = "PACKAGE_GROUP_%s" % feature
    return bool(d.getVarFlag(var, "optional"))
| 9 | |||
def packages(features, d):
    """Yield every package name belonging to the given features."""
    for feature in features:
        pkglist = d.getVar("FEATURE_PACKAGES_%s" % feature, True)
        if not pkglist:
            # Legacy variable name, kept for backward compatibility.
            pkglist = d.getVar("PACKAGE_GROUP_%s" % feature, True)
        for pkg in (pkglist or "").split():
            yield pkg
| 17 | |||
def required_packages(features, d):
    """Return the packages of every feature that is not marked optional."""
    mandatory = [feature for feature in features if not is_optional(feature, d)]
    return packages(mandatory, d)
| 21 | |||
def optional_packages(features, d):
    """Return the packages of every feature that is marked optional."""
    opt = [feature for feature in features if is_optional(feature, d)]
    return packages(opt, d)
| 25 | |||
def active_packages(features, d):
    """Iterate all packages for *features*: required ones first, then optional."""
    for pkg in required_packages(features, d):
        yield pkg
    for pkg in optional_packages(features, d):
        yield pkg
| 29 | |||
def active_recipes(features, d):
    """Yield the recipe name of each active package that maps to one."""
    import oe.packagedata

    for pkg in active_packages(features, d):
        pn = oe.packagedata.recipename(pkg, d)
        if pn:
            yield pn
diff --git a/meta/lib/oe/patch.py b/meta/lib/oe/patch.py new file mode 100644 index 0000000000..b085c9d6b5 --- /dev/null +++ b/meta/lib/oe/patch.py | |||
| @@ -0,0 +1,447 @@ | |||
| 1 | import oe.path | ||
| 2 | |||
class NotFoundError(bb.BBHandledException):
    """Raised when a required path does not exist."""

    def __init__(self, path):
        # Path that could not be found; echoed back in the message.
        self.path = path

    def __str__(self):
        return "Error: %s not found." % self.path
| 9 | |||
class CmdError(bb.BBHandledException):
    """Raised when an external command exits with a non-zero status."""

    def __init__(self, exitstatus, output):
        # Exit status of the failed command and its captured output.
        self.status = exitstatus
        self.output = output

    def __str__(self):
        return "Command Error: exit status: %d Output:\n%s" % (self.status, self.output)
| 17 | |||
| 18 | |||
def runcmd(args, dir = None):
    """Run a shell command assembled from *args*, optionally inside *dir*.

    Returns the command's output.  Raises NotFoundError if *dir* does not
    exist and CmdError when the command exits non-zero.
    """
    import pipes

    if dir:
        if not os.path.exists(dir):
            raise NotFoundError(dir)
        olddir = os.path.abspath(os.curdir)
        os.chdir(dir)

    try:
        cmd = " ".join(pipes.quote(str(arg)) for arg in args)
        (exitstatus, output) = oe.utils.getstatusoutput(cmd)
        if exitstatus != 0:
            # NOTE(review): assumes getstatusoutput returns a raw wait
            # status, hence the >> 8 to extract the exit code -- confirm.
            raise CmdError(exitstatus >> 8, output)
        return output
    finally:
        # Always restore the original working directory if we changed it.
        if dir:
            os.chdir(olddir)
| 41 | |||
class PatchError(Exception):
    """Generic patch-handling failure carrying a human-readable message."""

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return "Patch Error: %s" % self.msg
| 48 | |||
class PatchSet(object):
    """Abstract base for an ordered collection of patches over a source tree."""

    defaults = {
        "strippath": 1
    }

    def __init__(self, dir, d):
        self.dir = dir
        self.d = d
        self.patches = []
        # Index of the topmost applied patch, or None when nothing is applied.
        self._current = None

    def current(self):
        """Return the index of the topmost applied patch (None if none)."""
        return self._current

    def Clean(self):
        """
        Clean out the patch set. Generally includes unapplying all
        patches and wiping out all associated metadata.
        """
        raise NotImplementedError()

    def Import(self, patch, force):
        """Resolve the patch's local file, fill in defaults and checksum it."""
        if not patch.get("file"):
            if not patch.get("remote"):
                raise PatchError("Patch file must be specified in patch import.")
            patch["file"] = bb.fetch2.localpath(patch["remote"], self.d)

        for param, value in PatchSet.defaults.items():
            if not patch.get(param):
                patch[param] = value

        # Remote patches get their fetched local path expanded again.
        if patch.get("remote"):
            patch["file"] = bb.data.expand(bb.fetch2.localpath(patch["remote"], self.d), self.d)

        patch["filemd5"] = bb.utils.md5_file(patch["file"])

    def Push(self, force):
        raise NotImplementedError()

    def Pop(self, force):
        raise NotImplementedError()

    def Refresh(self, remote = None, all = None):
        raise NotImplementedError()
| 94 | |||
| 95 | |||
class PatchTree(PatchSet):
    # PatchSet implementation that applies patches with plain patch(1) and
    # records them in a patches/series file inside the source tree.
    def __init__(self, dir, d):
        PatchSet.__init__(self, dir, d)
        self.patchdir = os.path.join(self.dir, 'patches')
        self.seriespath = os.path.join(self.dir, 'patches', 'series')
        bb.utils.mkdirhier(self.patchdir)

    def _appendPatchFile(self, patch, strippath):
        # Record "<name>,<strippath>" in the series file and copy the patch
        # itself into the patches directory.
        with open(self.seriespath, 'a') as f:
            f.write(os.path.basename(patch) + "," + strippath + "\n")
        shellcmd = ["cat", patch, ">" , self.patchdir + "/" + os.path.basename(patch)]
        runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)

    def _removePatch(self, p):
        # Reverse-apply one series entry of the form "<file>,<strippath>".
        patch = {}
        patch['file'] = p.split(",")[0]
        patch['strippath'] = p.split(",")[1]
        self._applypatch(patch, False, True)

    def _removePatchFile(self, all = False):
        # Unapply the topmost patch (or all of them) and rewrite the series
        # file accordingly.  No-op when there is no series file.
        if not os.path.exists(self.seriespath):
            return
        patches = open(self.seriespath, 'r+').readlines()
        if all:
            for p in reversed(patches):
                self._removePatch(os.path.join(self.patchdir, p.strip()))
            patches = []
        else:
            self._removePatch(os.path.join(self.patchdir, patches[-1].strip()))
            patches.pop()
        with open(self.seriespath, 'w') as f:
            for p in patches:
                f.write(p)

    def Import(self, patch, force = None):
        """Insert *patch* into the list just after the current position."""
        PatchSet.Import(self, patch, force)

        if self._current is not None:
            i = self._current + 1
        else:
            i = 0
        self.patches.insert(i, patch)

    def _applypatch(self, patch, force = False, reverse = False, run = True):
        # Build "cat <file> | patch -p <n> [-R]"; with run=False only the
        # command string is returned.
        shellcmd = ["cat", patch['file'], "|", "patch", "-p", patch['strippath']]
        if reverse:
            shellcmd.append('-R')

        if not run:
            return "sh" + "-c" + " ".join(shellcmd)

        if not force:
            shellcmd.append('--dry-run')

        # First pass: dry-run to verify the patch applies cleanly.
        output = runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)

        if force:
            return

        # Second pass: drop --dry-run (the last argument) and really apply.
        shellcmd.pop(len(shellcmd) - 1)
        output = runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)

        if not reverse:
            self._appendPatchFile(patch['file'], patch['strippath'])

        return output

    def Push(self, force = False, all = False, run = True):
        """Apply the next patch, or every patch when *all* is set."""
        bb.note("self._current is %s" % self._current)
        bb.note("patches is %s" % self.patches)
        if all:
            for i in self.patches:
                bb.note("applying patch %s" % i)
                self._applypatch(i, force)
                # NOTE(review): _current is set to the patch dict here, not
                # an index as elsewhere -- confirm this asymmetry is intended.
                self._current = i
        else:
            if self._current is not None:
                next = self._current + 1
            else:
                next = 0

            bb.note("applying patch %s" % self.patches[next])
            ret = self._applypatch(self.patches[next], force)

            self._current = next
            return ret

    def Pop(self, force = None, all = None):
        """Unapply the topmost patch, or every patch when *all* is set."""
        if all:
            self._removePatchFile(True)
            self._current = None
        else:
            self._removePatchFile(False)

        if self._current == 0:
            self._current = None

        if self._current is not None:
            self._current = self._current - 1

    def Clean(self):
        """Unapply everything."""
        self.Pop(all=True)
| 200 | |||
class GitApplyTree(PatchTree):
    """PatchTree variant that applies patches with git instead of patch(1)."""

    def __init__(self, dir, d):
        PatchTree.__init__(self, dir, d)

    def _applypatch(self, patch, force = False, reverse = False, run = True):
        def _run(base_cmd):
            # Finish building the command, then either return it (run=False)
            # or execute it inside the work tree.
            if reverse:
                base_cmd.append('-R')
            base_cmd.append(patch['file'])
            if not run:
                return "sh" + "-c" + " ".join(base_cmd)
            return runcmd(["sh", "-c", " ".join(base_cmd)], self.dir)

        # Prefer a 3-way 'git am'; fall back to 'git apply' if that fails.
        try:
            return _run(["git", "--work-tree=.", "am", "-3", "-p%s" % patch['strippath']])
        except CmdError:
            return _run(["git", "--git-dir=.", "apply", "-p%s" % patch['strippath']])
| 223 | |||
| 224 | |||
class QuiltTree(PatchSet):
    """PatchSet implementation driven by the quilt tool."""

    def _runcmd(self, args, run = True):
        # Every quilt invocation uses the distro-provided quiltrc so user
        # configuration cannot interfere with the build.
        quiltrc = self.d.getVar('QUILTRCFILE', True)
        if not run:
            return ["quilt"] + ["--quiltrc"] + [quiltrc] + args
        runcmd(["quilt"] + ["--quiltrc"] + [quiltrc] + args, self.dir)

    def _quiltpatchpath(self, file):
        # Location of a patch inside the tree's patches/ directory.
        return os.path.join(self.dir, "patches", os.path.basename(file))


    def __init__(self, dir, d):
        PatchSet.__init__(self, dir, d)
        self.initialized = False
        p = os.path.join(self.dir, 'patches')
        if not os.path.exists(p):
            os.makedirs(p)

    def Clean(self):
        """Unapply all patches and remove the series file (best effort)."""
        try:
            self._runcmd(["pop", "-a", "-f"])
            oe.path.remove(os.path.join(self.dir, "patches","series"))
        except Exception:
            pass
        self.initialized = True

    def InitFromDir(self):
        """Populate self.patches and self._current from an existing quilt tree."""
        # read series -> self.patches
        seriespath = os.path.join(self.dir, 'patches', 'series')
        if not os.path.exists(self.dir):
            raise NotFoundError(self.dir)
        if os.path.exists(seriespath):
            # FIX: use open() in a context manager instead of the
            # Python-2-only file() builtin, and always close the handle.
            with open(seriespath, 'r') as series:
                for line in series.readlines():
                    patch = {}
                    parts = line.strip().split()
                    patch["quiltfile"] = self._quiltpatchpath(parts[0])
                    patch["quiltfilemd5"] = bb.utils.md5_file(patch["quiltfile"])
                    if len(parts) > 1:
                        patch["strippath"] = parts[1][2:]
                    self.patches.append(patch)

        # determine which patches are applied -> self._current
        try:
            output = runcmd(["quilt", "applied"], self.dir)
        except CmdError as e:
            # FIX: bind the exception object instead of reading the
            # long-deprecated (and Python-3-removed) sys.exc_value.
            if e.output.strip() == "No patches applied":
                return
            else:
                raise
        output = [val for val in output.split('\n') if not val.startswith('#')]
        for patch in self.patches:
            if os.path.basename(patch["quiltfile"]) == output[-1]:
                self._current = self.patches.index(patch)
        self.initialized = True

    def Import(self, patch, force = None):
        """Link the patch into patches/, register it in the series file and
        insert it into the in-memory patch list."""
        if not self.initialized:
            self.InitFromDir()
        PatchSet.Import(self, patch, force)
        oe.path.symlink(patch["file"], self._quiltpatchpath(patch["file"]), force=True)
        # FIX: context manager instead of a manually closed handle.
        with open(os.path.join(self.dir, "patches","series"), "a") as f:
            f.write(os.path.basename(patch["file"]) + " -p" + patch["strippath"]+"\n")
        patch["quiltfile"] = self._quiltpatchpath(patch["file"])
        patch["quiltfilemd5"] = bb.utils.md5_file(patch["quiltfile"])

        # TODO: determine if the file being imported:
        #      1) is already imported, and is the same
        #      2) is already imported, but differs

        self.patches.insert(self._current or 0, patch)


    def Push(self, force = False, all = False, run = True):
        """Apply the next patch (quilt push), advancing self._current."""
        # quilt push [-f]

        args = ["push"]
        if force:
            args.append("-f")
        if all:
            args.append("-a")
        if not run:
            return self._runcmd(args, run)

        self._runcmd(args)

        if self._current is not None:
            self._current = self._current + 1
        else:
            self._current = 0

    def Pop(self, force = None, all = None):
        """Unapply the topmost patch (quilt pop), rewinding self._current."""
        # quilt pop [-f]
        args = ["pop"]
        if force:
            args.append("-f")
        if all:
            args.append("-a")

        self._runcmd(args)

        if self._current == 0:
            self._current = None

        if self._current is not None:
            self._current = self._current - 1

    def Refresh(self, **kwargs):
        """Refresh a patch: with remote=True copy the refreshed quilt patch
        back over the local copy of its remote url, otherwise run
        'quilt refresh' for the named patch."""
        if kwargs.get("remote"):
            patch = self.patches[kwargs["patch"]]
            if not patch:
                raise PatchError("No patch found at index %s in patchset." % kwargs["patch"])
            (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(patch["remote"])
            if type == "file":
                import shutil
                if not patch.get("file") and patch.get("remote"):
                    patch["file"] = bb.fetch2.localpath(patch["remote"], self.d)

                shutil.copyfile(patch["quiltfile"], patch["file"])
            else:
                raise PatchError("Unable to do a remote refresh of %s, unsupported remote url scheme %s." % (os.path.basename(patch["quiltfile"]), type))
        else:
            # quilt refresh
            args = ["refresh"]
            if kwargs.get("quiltfile"):
                args.append(os.path.basename(kwargs["quiltfile"]))
            elif kwargs.get("patch"):
                args.append(os.path.basename(self.patches[kwargs["patch"]]["quiltfile"]))
            self._runcmd(args)
| 357 | |||
class Resolver(object):
    """Interface for strategies that resolve failed patch applications."""

    def __init__(self, patchset, terminal):
        raise NotImplementedError()

    def Resolve(self):
        raise NotImplementedError()

    def Revert(self):
        raise NotImplementedError()

    def Finalize(self):
        raise NotImplementedError()
| 370 | |||
class NOOPResolver(Resolver):
    """Resolver that simply pushes the patchset and propagates any failure."""

    def __init__(self, patchset, terminal):
        self.patchset = patchset
        self.terminal = terminal

    def Resolve(self):
        # NOTE(review): on success the working directory is left at
        # self.patchset.dir; only the failure path restores it.
        saved_cwd = os.path.abspath(os.curdir)
        os.chdir(self.patchset.dir)
        try:
            self.patchset.Push()
        except Exception:
            import sys
            os.chdir(saved_cwd)
            raise
| 385 | |||
# Patch resolver which relies on the user doing all the work involved in the
# resolution, with the exception of refreshing the remote copy of the patch
# files (the urls).
class UserResolver(Resolver):
    def __init__(self, patchset, terminal):
        self.patchset = patchset
        self.terminal = terminal

    # Force a push in the patchset, then drop to a shell for the user to
    # resolve any rejected hunks
    def Resolve(self):
        olddir = os.path.abspath(os.curdir)
        os.chdir(self.patchset.dir)
        try:
            self.patchset.Push(False)
        except CmdError as v:
            # Patch application failed
            patchcmd = self.patchset.Push(True, False, False)

            t = self.patchset.d.getVar('T', True)
            if not t:
                bb.msg.fatal("Build", "T not set")
            bb.utils.mkdirhier(t)
            import random
            rcfile = "%s/bashrc.%s.%s" % (t, str(os.getpid()), random.random())
            # Write a bashrc that explains the situation and re-runs the
            # failing patch command, then drop the user into a shell
            # sourcing it.  Use a context manager so the file is flushed
            # and closed before the shell reads it.
            with open(rcfile, "w") as f:
                f.write("echo '*** Manual patch resolution mode ***'\n")
                f.write("echo 'Dropping to a shell, so patch rejects can be fixed manually.'\n")
                f.write("echo 'Run \"quilt refresh\" when patch is corrected, press CTRL+D to exit.'\n")
                f.write("echo ''\n")
                f.write(" ".join(patchcmd) + "\n")
            # 0o775 instead of the Python-2-only literal 0775 (the 0o form
            # is valid from Python 2.6 onwards and on Python 3).
            os.chmod(rcfile, 0o775)

            self.terminal("bash --rcfile " + rcfile, 'Patch Rejects: Please fix patch rejects manually', self.patchset.d)

            # Construct a new PatchSet after the user's changes, compare the
            # sets, checking patches for modifications, and doing a remote
            # refresh on each.
            oldpatchset = self.patchset
            self.patchset = oldpatchset.__class__(self.patchset.dir, self.patchset.d)

            for patch in self.patchset.patches:
                oldpatch = None
                for opatch in oldpatchset.patches:
                    if opatch["quiltfile"] == patch["quiltfile"]:
                        oldpatch = opatch

                if oldpatch:
                    patch["remote"] = oldpatch["remote"]
                    if patch["quiltfile"] == oldpatch["quiltfile"]:
                        if patch["quiltfilemd5"] != oldpatch["quiltfilemd5"]:
                            bb.note("Patch %s has changed, updating remote url %s" % (os.path.basename(patch["quiltfile"]), patch["remote"]))
                            # user change? remote refresh
                            self.patchset.Refresh(remote=True, patch=self.patchset.patches.index(patch))
                        else:
                            # User did not fix the problem. Abort.
                            raise PatchError("Patch application failed, and user did not fix and refresh the patch.")
        except Exception:
            os.chdir(olddir)
            raise
        os.chdir(olddir)
diff --git a/meta/lib/oe/path.py b/meta/lib/oe/path.py new file mode 100644 index 0000000000..413ebfb395 --- /dev/null +++ b/meta/lib/oe/path.py | |||
| @@ -0,0 +1,243 @@ | |||
| 1 | import errno | ||
| 2 | import glob | ||
| 3 | import shutil | ||
| 4 | import subprocess | ||
| 5 | import os.path | ||
| 6 | |||
def join(*paths):
    """Join *paths* with '/' and normalize the result.

    Unlike os.path.join, an absolute right-hand component does not
    discard the components that precede it."""
    combined = "/".join(paths)
    return os.path.normpath(combined)
| 10 | |||
def relative(src, dest):
    """Return a relative path leading from *src* to *dest*.

    >>> relative("/usr/bin", "/tmp/foo/bar")
    ../../tmp/foo/bar

    >>> relative("/usr/bin", "/usr/lib")
    ../lib

    >>> relative("/tmp", "/tmp/foo/bar")
    foo/bar
    """
    # os.path.relpath interprets its second argument as the start point.
    return os.path.relpath(dest, src)
| 25 | |||
def make_relative_symlink(path):
    """ Convert an absolute symlink to a relative one """
    if not os.path.islink(path):
        return
    target = os.readlink(path)
    if not os.path.isabs(target):
        return

    # Walk up from the link's own path until we reach a prefix shared
    # with the link target, counting how many levels we climbed.
    common = path
    levels = 0
    while common and not target.startswith(common):
        common = common.rpartition('/')[0]
        levels += 1

    if not common:
        print("make_relative_symlink() Error: unable to find the common ancestor of %s and its target" % path)
        return

    # Portion of the target beyond the shared prefix, prefixed by one
    # "../" per directory level climbed (minus the link's own name).
    relpath = target.partition(common)[2].strip('/')
    while levels > 1:
        relpath = "../" + relpath
        levels -= 1

    os.remove(path)
    os.symlink(relpath, path)
| 52 | |||
def format_display(path, metadata):
    """ Prepare a path for display to the user. """
    # Prefer the TOPDIR-relative form, but never show something longer
    # than the absolute path itself.
    rel = relative(metadata.getVar("TOPDIR", True), path)
    return rel if len(rel) <= len(path) else path
| 60 | |||
def copytree(src, dst):
    """Copy the tree at *src* into *dst*, preserving permissions and
    hardlinks between files inside the tree.

    shutil.copytree is deliberately avoided: it is twice as slow when
    copying into an empty directory, up to 15x slower when dst already
    has contents, and does not preserve hardlinks.
    """
    bb.utils.mkdirhier(dst)
    # Stream the tree through tar instead of copying file by file.
    cmd = 'tar -cf - -C %s -p . | tar -xf - -C %s' % (src, dst)
    check_output(cmd, shell=True, stderr=subprocess.STDOUT)
| 70 | |||
def copyhardlinktree(src, dst):
    """ Make the hard link when possible, otherwise copy. """
    bb.utils.mkdirhier(dst)
    # An empty source directory needs no work at all.
    if os.path.isdir(src) and not os.listdir(src):
        return

    same_fs = os.stat(src).st_dev == os.stat(dst).st_dev
    if same_fs:
        # Need to copy directories only with tar first since cp will error if two
        # writers try and create a directory at the same time
        cmd = 'cd %s; find . -type d -print | tar -cf - -C %s -p --files-from - --no-recursion | tar -xf - -C %s' % (src, src, dst)
        check_output(cmd, shell=True, stderr=subprocess.STDOUT)
        cmd = 'cd %s; find . -print0 | cpio --null -pdlu %s' % (src, dst)
        check_output(cmd, shell=True, stderr=subprocess.STDOUT)
    else:
        # Hardlinks cannot cross filesystems; fall back to a plain copy.
        copytree(src, dst)
| 86 | |||
def remove(path, recurse=True):
    """Equivalent to rm -f or rm -rf.

    *path* may contain glob wildcards; every match is removed.  Missing
    files are silently ignored; directories are removed only when
    *recurse* is true.
    """
    for entry in glob.glob(path):
        try:
            os.unlink(entry)
        except OSError as exc:
            if exc.errno == errno.EISDIR and recurse:
                # unlink() refuses directories; fall back to rmtree.
                shutil.rmtree(entry)
            elif exc.errno == errno.ENOENT:
                # Already gone - fine for "rm -f" semantics.
                pass
            else:
                raise
| 97 | |||
def symlink(source, destination, force=False):
    """Create a symbolic link at *destination* pointing to *source*.

    With *force*, any existing file at *destination* is removed first.
    A pre-existing link that already points at *source* is not an error.
    """
    try:
        if force:
            remove(destination)
        os.symlink(source, destination)
    except OSError as err:
        # Tolerate "already exists" only when the existing link already
        # points at the requested source.
        if err.errno == errno.EEXIST and os.readlink(destination) == source:
            return
        raise
| 107 | |||
class CalledProcessError(Exception):
    """Raised by check_output() when the command exits non-zero."""

    def __init__(self, retcode, cmd, output=None):
        self.retcode = retcode  # integer exit status
        self.cmd = cmd          # the command that was run
        self.output = output    # captured stdout, if any

    def __str__(self):
        return "Command '%s' returned non-zero exit status %d with output %s" % (self.cmd, self.retcode, self.output)
| 115 | |||
# Not needed when we move to python 2.7
def check_output(*popenargs, **kwargs):
    r"""Run command with arguments and return its output as a byte string.

    If the exit code was non-zero it raises a CalledProcessError.  The
    CalledProcessError object carries the exit status in its retcode
    attribute and any captured output in its output attribute.

    The arguments are the same as for the Popen constructor.  Example:

    >>> check_output(["ls", "-l", "/dev/null"])
    'crw-rw-rw- 1 root root 1, 3 Oct 18  2007 /dev/null\n'

    The stdout argument is not allowed as it is used internally.
    To capture standard error in the result, use stderr=STDOUT.

    >>> check_output(["/bin/sh", "-c",
    ...               "ls -l non_existent_file ; exit 0"],
    ...              stderr=STDOUT)
    'ls: non_existent_file: No such file or directory\n'
    """
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    proc = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
    out, _ = proc.communicate()
    status = proc.poll()
    if not status:
        return out
    failed_cmd = kwargs.get("args")
    if failed_cmd is None:
        failed_cmd = popenargs[0]
    raise CalledProcessError(status, failed_cmd, output=out)
| 148 | |||
def find(dir, **walkoptions):
    """Recursively yield the path of every file below *dir*.

    Extra keyword arguments are forwarded to os.walk()."""
    for root, _, filenames in os.walk(dir, **walkoptions):
        for filename in filenames:
            yield os.path.join(root, filename)
| 156 | |||
| 157 | |||
| 158 | ## realpath() related functions | ||
def __is_path_below(file, root):
    # realpath() guarantees root ends with os.sep, so appending a
    # separator to file makes a plain prefix test sufficient (and stops
    # "/usr/lib2" matching a root of "/usr/lib/").
    candidate = file + os.path.sep
    return candidate.startswith(root)
| 161 | |||
def __realpath_rel(start, rel_path, root, loop_cnt, assume_dir):
    """Calculates real path of symlink 'start' + 'rel_path' below
    'root'; no part of 'start' below 'root' must contain symlinks.

    'loop_cnt' bounds the total number of symlinks that may be
    followed; 'assume_dir' suppresses ENOENT for path components whose
    parent did not resolve to an existing directory."""
    # Whether the path resolved so far refers to an existing directory
    # (updated from each __realpath() call below).
    have_dir = True

    for d in rel_path.split(os.path.sep):
        if not have_dir and not assume_dir:
            # The previous component is not a directory, so it cannot
            # have children unless we were told to assume they exist.
            raise OSError(errno.ENOENT, "no such directory %s" % start)

        if d == os.path.pardir: # '..'
            if len(start) >= len(root):
                # do not follow '..' before root
                start = os.path.dirname(start)
            else:
                # emit warning?
                pass
        else:
            # Resolve the next component, which may itself be a symlink.
            (start, have_dir) = __realpath(os.path.join(start, d),
                                           root, loop_cnt, assume_dir)

    assert(__is_path_below(start, root))

    return start
| 185 | |||
def __realpath(file, root, loop_cnt, assume_dir):
    """Resolve symlinks in 'file' (assumed to lie below 'root'),
    following at most 'loop_cnt' links in total.  Returns a tuple
    (resolved_path, is_directory)."""
    while os.path.islink(file) and len(file) >= len(root):
        if loop_cnt == 0:
            raise OSError(errno.ELOOP, file)

        loop_cnt -= 1
        target = os.path.normpath(os.readlink(file))

        if not os.path.isabs(target):
            tdir = os.path.dirname(file)
            assert(__is_path_below(tdir, root))
        else:
            # Absolute link targets are re-rooted at 'root'.
            tdir = root

        file = __realpath_rel(tdir, target, root, loop_cnt, assume_dir)

    try:
        is_dir = os.path.isdir(file)
    except Exception:
        # Treat any failure to stat as "not a directory".  Fixes the
        # original "is_dir = false" (undefined name) which would have
        # raised NameError instead of taking this fallback.
        is_dir = False

    return (file, is_dir)
| 208 | |||
def realpath(file, root, use_physdir = True, loop_cnt = 100, assume_dir = False):
    """ Returns the canonical path of 'file' with assuming a
    toplevel 'root' directory. When 'use_physdir' is set, all
    preceding path components of 'file' will be resolved first;
    this flag should be set unless it is guaranteed that there is
    no symlink in the path. When 'assume_dir' is not set, missing
    path components will raise an ENOENT error"""

    root = os.path.normpath(root)
    file = os.path.normpath(file)

    if not root.endswith(os.path.sep):
        # letting root end with '/' makes some things easier
        root = root + os.path.sep

    if not __is_path_below(file, root):
        raise OSError(errno.EINVAL, "file '%s' is not below root" % file)

    try:
        if use_physdir:
            # Strip the root prefix (keeping the leading separator) and
            # resolve the remainder component by component.
            file = __realpath_rel(root, file[(len(root) - 1):], root, loop_cnt, assume_dir)
        else:
            # Resolve only 'file' itself; ancestors are trusted.
            file = __realpath(file, root, loop_cnt, assume_dir)[0]
    except OSError as e:
        if e.errno == errno.ELOOP:
            # make ELOOP more readable; without catching it, there will
            # be printed a backtrace with 100s of OSError exceptions
            # else
            raise OSError(errno.ELOOP,
                          "too much recursions while resolving '%s'; loop in '%s'" %
                          (file, e.strerror))

        raise

    return file
diff --git a/meta/lib/oe/prservice.py b/meta/lib/oe/prservice.py new file mode 100644 index 0000000000..b0cbcb1fbc --- /dev/null +++ b/meta/lib/oe/prservice.py | |||
| @@ -0,0 +1,126 @@ | |||
| 1 | |||
def prserv_make_conn(d, check = False):
    """Open a connection to the PR service named by PRSERV_HOST
    ("host:port") and cache it in the datastore as __PRSERV_CONN.

    When 'check' is set, ping the server and treat an unreachable
    service as fatal.  Returns the connection object."""
    import prserv.serv
    # List comprehension instead of filter() so the result is indexable
    # under Python 3 as well as Python 2.
    host_params = [h for h in (d.getVar("PRSERV_HOST", True) or '').split(':') if h]
    try:
        conn = None
        conn = prserv.serv.PRServerConnection(host_params[0], int(host_params[1]))
        if check:
            if not conn.ping():
                raise Exception('service not available')
        d.setVar("__PRSERV_CONN",conn)
    # "except X as y" instead of the Python-2-only "except X, y" syntax,
    # matching the style used elsewhere in this tree.
    except Exception as exc:
        bb.fatal("Connecting to PR service %s:%s failed: %s" % (host_params[0], host_params[1], str(exc)))

    return conn
| 16 | |||
def prserv_dump_db(d):
    """Export the PR database from the network PR service.

    Returns whatever conn.export() yields, or None when no network PR
    service is configured or no connection can be established."""
    if not d.getVar('PRSERV_HOST', True):
        bb.error("Not using network based PR service")
        return None

    conn = d.getVar("__PRSERV_CONN", True)
    if conn is None:
        conn = prserv_make_conn(d)
    if conn is None:
        bb.error("Making connection failed to remote PR service")
        return None

    # Dump options selecting which rows/columns to export.
    opts = [d.getVar('PRSERV_DUMPOPT_' + name, True)
            for name in ('VERSION', 'PKGARCH', 'CHECKSUM')]
    dump_columns = ("1" == d.getVar('PRSERV_DUMPOPT_COL', True))
    return conn.export(opts[0], opts[1], opts[2], dump_columns)
| 35 | |||
def prserv_import_db(d, filter_version=None, filter_pkgarch=None, filter_checksum=None):
    """Push PRAUTO$version$pkgarch$checksum entries from the datastore
    into the PR service, optionally filtered by version, pkgarch and/or
    checksum.  Returns the list of (version, pkgarch, checksum, value)
    tuples successfully imported, or None if no service is available."""
    if not d.getVar('PRSERV_HOST', True):
        bb.error("Not using network based PR service")
        return None

    conn = d.getVar("__PRSERV_CONN", True)
    if conn is None:
        conn = prserv_make_conn(d)
        if conn is None:
            bb.error("Making connection failed to remote PR service")
            return None
    #get the entry values
    imported = []
    prefix = "PRAUTO$"
    for v in d.keys():
        if v.startswith(prefix):
            # Split the key from the right: the last three '$'-separated
            # fields are checksum, pkgarch and version respectively.
            (remain, sep, checksum) = v.rpartition('$')
            (remain, sep, pkgarch) = remain.rpartition('$')
            (remain, sep, version) = remain.rpartition('$')
            # Skip keys with extra '$' fields and entries rejected by
            # the caller-supplied filters.
            if (remain + '$' != prefix) or \
               (filter_version and filter_version != version) or \
               (filter_pkgarch and filter_pkgarch != pkgarch) or \
               (filter_checksum and filter_checksum != checksum):
                continue
            try:
                value = int(d.getVar(remain + '$' + version + '$' + pkgarch + '$' + checksum, True))
            except BaseException as exc:
                # Non-integer (or missing) value: log and skip the entry.
                bb.debug("Not valid value of %s:%s" % (v,str(exc)))
                continue
            ret = conn.importone(version,pkgarch,checksum,value)
            if ret != value:
                bb.error("importing(%s,%s,%s,%d) failed. DB may have larger value %d" % (version,pkgarch,checksum,value,ret))
            else:
                imported.append((version,pkgarch,checksum,value))
    return imported
| 71 | |||
def prserv_export_tofile(d, metainfo, datainfo, lockdown, nomax=False):
    """Append a PR database dump to PRSERV_DUMPFILE.

    'metainfo' (optional) describes the table columns and is written as
    comments; 'lockdown' adds a PRSERV_LOCKDOWN marker; 'datainfo' rows
    are written as PRAUTO$... assignments.  Unless 'nomax' is set, a
    PRAUTO_<version>_<pkgarch> line records the highest value seen for
    each pkgarch.  The dump file is protected by a lock file."""
    import bb.utils
    # initialize the output file
    bb.utils.mkdirhier(d.getVar('PRSERV_DUMPDIR', True))
    df = d.getVar('PRSERV_DUMPFILE', True)
    #write data
    lf = bb.utils.lockfile("%s.lock" % df)
    f = open(df, "a")
    if metainfo:
        #dump column info
        f.write("#PR_core_ver = \"%s\"\n\n" % metainfo['core_ver']);
        f.write("#Table: %s\n" % metainfo['tbl_name'])
        f.write("#Columns:\n")
        f.write("#name \t type \t notn \t dflt \t pk\n")
        f.write("#----------\t --------\t --------\t --------\t ----\n")
        for i in range(len(metainfo['col_info'])):
            f.write("#%10s\t %8s\t %8s\t %8s\t %4s\n" %
                    (metainfo['col_info'][i]['name'],
                     metainfo['col_info'][i]['type'],
                     metainfo['col_info'][i]['notnull'],
                     metainfo['col_info'][i]['dflt_value'],
                     metainfo['col_info'][i]['pk']))
        f.write("\n")

    if lockdown:
        f.write("PRSERV_LOCKDOWN = \"1\"\n\n")

    if datainfo:
        # idx maps each pkgarch to the index of its highest-valued row.
        idx = {}
        for i in range(len(datainfo)):
            pkgarch = datainfo[i]['pkgarch']
            value = datainfo[i]['value']
            if pkgarch not in idx:
                idx[pkgarch] = i
            elif value > datainfo[idx[pkgarch]]['value']:
                idx[pkgarch] = i
            f.write("PRAUTO$%s$%s$%s = \"%s\"\n" %
                    (str(datainfo[i]['version']), pkgarch, str(datainfo[i]['checksum']), str(value)))
        if not nomax:
            for i in idx:
                f.write("PRAUTO_%s_%s = \"%s\"\n" % (str(datainfo[idx[i]]['version']),str(datainfo[idx[i]]['pkgarch']),str(datainfo[idx[i]]['value'])))
    f.close()
    bb.utils.unlockfile(lf)
| 115 | |||
def prserv_check_avail(d):
    """Validate the PRSERV_HOST setting ("host:port"); abort fatally if
    it is malformed, otherwise open (and ping) a connection."""
    # List comprehension instead of filter() so len() works under
    # Python 3 as well as Python 2.
    host_params = [h for h in (d.getVar("PRSERV_HOST", True) or '').split(':') if h]
    try:
        if len(host_params) != 2:
            raise TypeError
        else:
            int(host_params[1])
    except (TypeError, ValueError):
        # int() raises ValueError (not TypeError) for a non-numeric
        # port; previously that escaped this handler as a raw traceback.
        bb.fatal('Undefined/incorrect PRSERV_HOST value. Format: "host:port"')
    else:
        prserv_make_conn(d, True)
diff --git a/meta/lib/oe/qa.py b/meta/lib/oe/qa.py new file mode 100644 index 0000000000..d5cdaa0fcd --- /dev/null +++ b/meta/lib/oe/qa.py | |||
| @@ -0,0 +1,111 @@ | |||
class ELFFile:
    """Minimal parser for the start of an ELF binary.

    Reads only the e_ident identification block (plus enough bytes to
    reach e_machine) to answer questions about word size, endianness,
    OS ABI and machine type, and can run/cache objdump output for
    deeper inspection.  Written for Python 2: it uses file() and treats
    the header as a byte *string*, comparing bytes via chr()/ord().
    """
    # Size of the e_ident identification block.
    EI_NIDENT = 16

    # Byte offsets within e_ident.
    EI_CLASS = 4
    EI_DATA = 5
    EI_VERSION = 6
    EI_OSABI = 7
    EI_ABIVERSION = 8

    # possible values for EI_CLASS
    ELFCLASSNONE = 0
    ELFCLASS32 = 1
    ELFCLASS64 = 2

    # possible value for EI_VERSION
    EV_CURRENT = 1

    # possible values for EI_DATA
    ELFDATANONE = 0
    ELFDATA2LSB = 1
    ELFDATA2MSB = 2

    def my_assert(self, expectation, result):
        # Raise when a header byte does not have the required value.
        if not expectation == result:
            #print "'%x','%x' %s" % (ord(expectation), ord(result), self.name)
            raise Exception("This does not work as expected")

    def __init__(self, name, bits = 0):
        # name: path of the candidate ELF file
        # bits: expected word size, 0 = autodetect from EI_CLASS
        self.name = name
        self.bits = bits
        self.objdump_output = {}

    def open(self):
        """Read and validate the ELF identification header."""
        self.file = file(self.name, "r")
        # e_ident plus 4 more bytes - enough to cover e_type/e_machine.
        self.data = self.file.read(ELFFile.EI_NIDENT+4)

        # Magic number check: 0x7f 'E' 'L' 'F'.
        self.my_assert(len(self.data), ELFFile.EI_NIDENT+4)
        self.my_assert(self.data[0], chr(0x7f) )
        self.my_assert(self.data[1], 'E')
        self.my_assert(self.data[2], 'L')
        self.my_assert(self.data[3], 'F')
        if self.bits == 0:
            # Autodetect word size from the EI_CLASS byte.
            if self.data[ELFFile.EI_CLASS] == chr(ELFFile.ELFCLASS32):
                self.bits = 32
            elif self.data[ELFFile.EI_CLASS] == chr(ELFFile.ELFCLASS64):
                self.bits = 64
            else:
                # Not 32-bit or 64.. lets assert
                raise Exception("ELF but not 32 or 64 bit.")
        elif self.bits == 32:
            self.my_assert(self.data[ELFFile.EI_CLASS], chr(ELFFile.ELFCLASS32))
        elif self.bits == 64:
            self.my_assert(self.data[ELFFile.EI_CLASS], chr(ELFFile.ELFCLASS64))
        else:
            raise Exception("Must specify unknown, 32 or 64 bit size.")
        self.my_assert(self.data[ELFFile.EI_VERSION], chr(ELFFile.EV_CURRENT) )

        # Record byte order as a struct format prefix: '<' little,
        # '>' big endian.
        self.sex = self.data[ELFFile.EI_DATA]
        if self.sex == chr(ELFFile.ELFDATANONE):
            raise Exception("self.sex == ELFDATANONE")
        elif self.sex == chr(ELFFile.ELFDATA2LSB):
            self.sex = "<"
        elif self.sex == chr(ELFFile.ELFDATA2MSB):
            self.sex = ">"
        else:
            raise Exception("Unknown self.sex")

    def osAbi(self):
        """Return the EI_OSABI byte as an int."""
        return ord(self.data[ELFFile.EI_OSABI])

    def abiVersion(self):
        """Return the EI_ABIVERSION byte as an int."""
        return ord(self.data[ELFFile.EI_ABIVERSION])

    def abiSize(self):
        # Word size determined (or verified) by open(): 32 or 64.
        return self.bits

    def isLittleEndian(self):
        return self.sex == "<"

    def isBigEngian(self):
        # NOTE(review): "Engian" is a typo, but the name is part of the
        # public API so it is kept as-is.
        return self.sex == ">"

    def machine(self):
        """
        We know the sex stored in self.sex and we
        know the position
        """
        import struct
        # Bytes 18-19 hold e_machine; decode with the detected byte order.
        (a,) = struct.unpack(self.sex+"H", self.data[18:20])
        return a

    def run_objdump(self, cmd, d):
        """Run '$OBJDUMP <cmd> <file>' and return its stdout, caching
        the result per cmd.  Returns "" on any failure."""
        import bb.process
        import sys

        if cmd in self.objdump_output:
            return self.objdump_output[cmd]

        objdump = d.getVar('OBJDUMP', True)

        # Force a predictable locale for parseable output.
        # NOTE(review): 'os' is used here but no "import os" is visible
        # at the top of this module - verify it resolves at runtime.
        env = os.environ.copy()
        env["LC_ALL"] = "C"
        env["PATH"] = d.getVar('PATH', True)

        try:
            bb.note("%s %s %s" % (objdump, cmd, self.name))
            self.objdump_output[cmd] = bb.process.run([objdump, cmd, self.name], env=env, shell=False)[0]
            return self.objdump_output[cmd]
        except Exception as e:
            bb.note("%s %s %s failed: %s" % (objdump, cmd, self.name, e))
            return ""
diff --git a/meta/lib/oe/rootfs.py b/meta/lib/oe/rootfs.py new file mode 100644 index 0000000000..dddbef4d64 --- /dev/null +++ b/meta/lib/oe/rootfs.py | |||
| @@ -0,0 +1,757 @@ | |||
| 1 | from abc import ABCMeta, abstractmethod | ||
| 2 | from oe.utils import execute_pre_post_process | ||
| 3 | from oe.utils import contains as base_contains | ||
| 4 | from oe.package_manager import * | ||
| 5 | from oe.manifest import * | ||
| 6 | import oe.path | ||
| 7 | import filecmp | ||
| 8 | import shutil | ||
| 9 | import os | ||
| 10 | import subprocess | ||
| 11 | import re | ||
| 12 | |||
| 13 | |||
class Rootfs(object):
    """
    Abstract base class driving rootfs construction; backend-specific
    behaviour (RPM/ipk/deb) is supplied by subclasses.  Do not
    instantiate this directly.
    """
    __metaclass__ = ABCMeta

    def __init__(self, d):
        self.d = d
        self.pm = None  # package manager instance, set by subclasses
        self.image_rootfs = self.d.getVar('IMAGE_ROOTFS', True)
        self.deploy_dir_image = self.d.getVar('DEPLOY_DIR_IMAGE', True)

        self.install_order = Manifest.INSTALL_ORDER

    @abstractmethod
    def _create(self):
        """Backend-specific rootfs population."""
        pass

    @abstractmethod
    def _get_delayed_postinsts(self):
        """Return postinstall scripts deferred to first boot, or None."""
        pass

    @abstractmethod
    def _save_postinsts(self):
        """Persist delayed postinstall scripts into the image."""
        pass

    @abstractmethod
    def _log_check(self):
        pass

    def _insert_feed_uris(self):
        # Feed configuration only makes sense for images that keep
        # package management.
        if base_contains("IMAGE_FEATURES", "package-management",
                         True, False, self.d):
            self.pm.insert_feeds_uris()

    @abstractmethod
    def _handle_intercept_failure(self, failed_script):
        pass

    # The _cleanup() method should be used to clean-up stuff that we don't
    # really want to end up on target. For example, in the case of RPM, the
    # DB locks. The method is called, once, at the end of create() method.
    @abstractmethod
    def _cleanup(self):
        pass

    def _exec_shell_cmd(self, cmd):
        """Run *cmd* (wrapped by FAKEROOT when configured), returning an
        error-message string on failure and None on success."""
        fakerootcmd = self.d.getVar('FAKEROOT', True)
        if fakerootcmd is not None:
            exec_cmd = [fakerootcmd, cmd]
        else:
            exec_cmd = cmd

        try:
            subprocess.check_output(exec_cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            return("Command '%s' returned %d:\n%s" % (e.cmd, e.returncode, e.output))

        return None

    def create(self):
        """Drive the full rootfs generation sequence."""
        bb.note("###### Generate rootfs #######")
        pre_process_cmds = self.d.getVar("ROOTFS_PREPROCESS_COMMAND", True)
        post_process_cmds = self.d.getVar("ROOTFS_POSTPROCESS_COMMAND", True)

        intercepts_dir = os.path.join(self.d.getVar('WORKDIR', True),
                                      "intercept_scripts")

        bb.utils.remove(intercepts_dir, True)

        bb.utils.mkdirhier(self.image_rootfs)

        bb.utils.mkdirhier(self.deploy_dir_image)

        shutil.copytree(self.d.expand("${COREBASE}/scripts/postinst-intercepts"),
                        intercepts_dir)

        shutil.copy(self.d.expand("${COREBASE}/meta/files/deploydir_readme.txt"),
                    self.deploy_dir_image +
                    "/README_-_DO_NOT_DELETE_FILES_IN_THIS_DIRECTORY.txt")

        execute_pre_post_process(self.d, pre_process_cmds)

        # call the package manager dependent create method
        self._create()

        sysconfdir = self.image_rootfs + self.d.getVar('sysconfdir', True)
        bb.utils.mkdirhier(sysconfdir)
        with open(sysconfdir + "/version", "w+") as ver:
            ver.write(self.d.getVar('BUILDNAME', True) + "\n")

        self._run_intercepts()

        execute_pre_post_process(self.d, post_process_cmds)

        if base_contains("IMAGE_FEATURES", "read-only-rootfs",
                         True, False, self.d):
            delayed_postinsts = self._get_delayed_postinsts()
            if delayed_postinsts is not None:
                # Note: the adjacent string literals previously lacked a
                # separating space, producing "configuredoffline".
                bb.fatal("The following packages could not be configured "
                         "offline and rootfs is read-only: %s" %
                         delayed_postinsts)

        if self.d.getVar('USE_DEVFS', True) != "1":
            self._create_devfs()

        self._uninstall_uneeded()

        self._insert_feed_uris()

        self._run_ldconfig()

        self._generate_kernel_module_deps()

        self._cleanup()

    def _uninstall_uneeded(self):
        """Remove bootstrap-only packages (and packaging data) from
        images without package management.

        (Method name typo "uneeded" is kept for compatibility with
        existing callers and subclasses.)"""
        if base_contains("IMAGE_FEATURES", "package-management",
                         True, False, self.d):
            return

        delayed_postinsts = self._get_delayed_postinsts()
        if delayed_postinsts is None:
            installed_pkgs_dir = self.d.expand('${WORKDIR}/installed_pkgs.txt')
            pkgs_to_remove = list()
            with open(installed_pkgs_dir, "r+") as installed_pkgs:
                pkgs_installed = installed_pkgs.read().split('\n')
                for pkg_installed in pkgs_installed[:]:
                    pkg = pkg_installed.split()[0]
                    if pkg in ["update-rc.d",
                               "base-passwd",
                               self.d.getVar("ROOTFS_BOOTSTRAP_INSTALL", True)
                               ]:
                        pkgs_to_remove.append(pkg)
                        pkgs_installed.remove(pkg_installed)

            if len(pkgs_to_remove) > 0:
                self.pm.remove(pkgs_to_remove, False)
                # Update installed_pkgs.txt; a context manager ensures the
                # handle is closed (the original leaked it).
                with open(installed_pkgs_dir, "w+") as installed_pkgs:
                    installed_pkgs.write('\n'.join(pkgs_installed))

            if os.path.exists(self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/init.d/run-postinsts")):
                self._exec_shell_cmd(["update-rc.d", "-f", "-r",
                                      self.d.getVar('IMAGE_ROOTFS', True),
                                      "run-postinsts", "remove"])
        else:
            self._save_postinsts()

        self.pm.remove_packaging_data()

    def _run_intercepts(self):
        """Execute the postinst intercept hooks copied into WORKDIR."""
        intercepts_dir = os.path.join(self.d.getVar('WORKDIR', True),
                                      "intercept_scripts")

        bb.note("Running intercept scripts:")
        os.environ['D'] = self.image_rootfs
        for script in os.listdir(intercepts_dir):
            script_full = os.path.join(intercepts_dir, script)

            if script == "postinst_intercept" or not os.access(script_full, os.X_OK):
                continue

            bb.note("> Executing %s intercept ..." % script)

            try:
                subprocess.check_output(script_full)
            except subprocess.CalledProcessError as e:
                bb.warn("The postinstall intercept hook '%s' failed (exit code: %d)! See log for details!" %
                        (script, e.returncode))

                # A failed hook may declare (via a ##PKGS: line) which
                # packages should have their postinstalls deferred.
                with open(script_full) as intercept:
                    registered_pkgs = None
                    for line in intercept.read().split("\n"):
                        m = re.match("^##PKGS:(.*)", line)
                        if m is not None:
                            registered_pkgs = m.group(1).strip()
                            break

                    if registered_pkgs is not None:
                        bb.warn("The postinstalls for the following packages "
                                "will be postponed for first boot: %s" %
                                registered_pkgs)

                    # call the backend dependent handler
                    self._handle_intercept_failure(registered_pkgs)

    def _run_ldconfig(self):
        """Regenerate the dynamic linker cache inside the rootfs."""
        if self.d.getVar('LDCONFIGDEPEND', True):
            bb.note("Executing: ldconfig -r" + self.image_rootfs + "-c new -v")
            self._exec_shell_cmd(['ldconfig', '-r', self.image_rootfs, '-c',
                                  'new', '-v'])

    def _generate_kernel_module_deps(self):
        """Run depmod for the image's kernel, if its ABI version is known."""
        kernel_abi_ver_file = os.path.join(self.d.getVar('STAGING_KERNEL_DIR', True),
                                           'kernel-abiversion')
        if os.path.exists(kernel_abi_ver_file):
            kernel_ver = open(kernel_abi_ver_file).read().strip(' \n')
            modules_dir = os.path.join(self.image_rootfs, 'lib', 'modules', kernel_ver)

            bb.utils.mkdirhier(modules_dir)

            self._exec_shell_cmd(['depmodwrapper', '-a', '-b', self.image_rootfs,
                                  kernel_ver])

    def _create_devfs(self):
        """
        Create devfs:
        * IMAGE_DEVICE_TABLE is the old name to an absolute path to a device table file
        * IMAGE_DEVICE_TABLES is a new name for a file, or list of files, seached
          for in the BBPATH
        If neither are specified then the default name of files/device_table-minimal.txt
        is searched for in the BBPATH (same as the old version.)
        """
        devtable_list = []
        devtable = self.d.getVar('IMAGE_DEVICE_TABLE', True)
        if devtable is not None:
            devtable_list.append(devtable)
        else:
            devtables = self.d.getVar('IMAGE_DEVICE_TABLES', True)
            if devtables is None:
                devtables = 'files/device_table-minimal.txt'
            for devtable in devtables.split():
                devtable_list.append("%s" % bb.utils.which(self.d.getVar('BBPATH', True), devtable))

        for devtable in devtable_list:
            self._exec_shell_cmd(["makedevs", "-r",
                                  self.image_rootfs, "-D", devtable])
| 243 | |||
| 244 | |||
class RpmRootfs(Rootfs):
    """Rootfs backend for RPM-based images.

    Drives RpmPM to install the packages from the initial manifest into
    IMAGE_ROOTFS.  When INC_RPM_IMAGE_GEN == "1" the previous rootfs and
    its saved packaging data are reused (incremental generation) instead
    of starting from an empty tree.
    """
    def __init__(self, d, manifest_dir):
        super(RpmRootfs, self).__init__(d)

        self.manifest = RpmManifest(d, manifest_dir)

        self.pm = RpmPM(d,
                        d.getVar('IMAGE_ROOTFS', True),
                        self.d.getVar('TARGET_VENDOR', True)
                        )

        # Incremental mode keeps the old rootfs and restores its saved
        # packaging data; otherwise wipe the rootfs and start clean.
        self.inc_rpm_image_gen = self.d.getVar('INC_RPM_IMAGE_GEN', True)
        if self.inc_rpm_image_gen != "1":
            bb.utils.remove(self.image_rootfs, True)
        else:
            self.pm.recovery_packaging_data()
        bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True)

        self.pm.create_configs()

    '''
    While rpm incremental image generation is enabled, it will remove the
    unneeded pkgs by comparing the new install solution manifest and the
    old installed manifest.
    '''
    def _create_incremental(self, pkgs_initial_install):
        if self.inc_rpm_image_gen == "1":

            # Flatten the per-type manifest into one package list
            pkgs_to_install = list()
            for pkg_type in pkgs_initial_install:
                pkgs_to_install += pkgs_initial_install[pkg_type]

            installed_manifest = self.pm.load_old_install_solution()
            solution_manifest = self.pm.dump_install_solution(pkgs_to_install)

            # Packages in the old image but not in the new install
            # solution are scheduled for removal.
            pkg_to_remove = list()
            for pkg in installed_manifest:
                if pkg not in solution_manifest:
                    pkg_to_remove.append(pkg)

            self.pm.update()

            bb.note('incremental update -- upgrade packages in place ')
            self.pm.upgrade()
            if pkg_to_remove != []:
                bb.note('incremental removed: %s' % ' '.join(pkg_to_remove))
                self.pm.remove(pkg_to_remove)

    def _create(self):
        """Install all manifest packages into the rootfs (backend hook
        called by the Rootfs base class)."""
        pkgs_to_install = self.manifest.parse_initial_manifest()

        # update PM index files
        self.pm.write_index()

        self.pm.dump_all_available_pkgs()

        if self.inc_rpm_image_gen == "1":
            self._create_incremental(pkgs_to_install)

        self.pm.update()

        # Split mandatory packages from attempt-only packages (which are
        # allowed to fail installation).
        pkgs = []
        pkgs_attempt = []
        for pkg_type in pkgs_to_install:
            if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY:
                pkgs_attempt += pkgs_to_install[pkg_type]
            else:
                pkgs += pkgs_to_install[pkg_type]

        self.pm.install(pkgs)

        self.pm.install(pkgs_attempt, True)

        self.pm.install_complementary()

        self._log_check()

        if self.inc_rpm_image_gen == "1":
            self.pm.backup_packaging_data()

        self.pm.rpm_setup_smart_target_config()

    def _get_delayed_postinsts(self):
        """Return the scriptlets deferred to first boot (files under
        ${sysconfdir}/rpm-postinsts in the image), or None if none."""
        postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/rpm-postinsts")
        if os.path.isdir(postinst_dir):
            files = os.listdir(postinst_dir)
            for f in files:
                bb.note('Delayed package scriptlet: %s' % f)
            return files

        return None

    def _save_postinsts(self):
        # this is just a stub. For RPM, the failed postinstalls are
        # already saved in /etc/rpm-postinsts
        pass

    def _log_check(self):
        """Scan ${T}/log.do_rootfs for error keywords; warn on each match
        and abort with the first match plus a few lines of context."""
        r = re.compile('(unpacking of archive failed|Cannot find package|exit 1|ERR|Fail)')
        log_path = self.d.expand("${T}/log.do_rootfs")
        with open(log_path, 'r') as log:
            found_error = 0
            message = "\n"
            for line in log.read().split('\n'):
                # skip this function's own output echoed into the log
                if 'log_check' in line:
                    continue

                m = r.search(line)
                if m:
                    found_error = 1
                    bb.warn('log_check: There were error messages in the logfile')
                    bb.warn('log_check: Matched keyword: [%s]\n\n' % m.group())

                # Collect the matched line and the following lines (the
                # counter runs 1..5, so five lines total), then abort.
                # NOTE(review): if the log ends before five lines are
                # collected, bb.fatal() is never reached -- confirm that
                # this fall-through is intended.
                if found_error >= 1 and found_error <= 5:
                    message += line + '\n'
                    found_error += 1

                if found_error == 6:
                    bb.fatal(message)

    def _handle_intercept_failure(self, registered_pkgs):
        rpm_postinsts_dir = self.image_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/')
        bb.utils.mkdirhier(rpm_postinsts_dir)

        # Save the package postinstalls in /etc/rpm-postinsts
        for pkg in registered_pkgs.split():
            self.pm.save_rpmpostinst(pkg)

    def _cleanup(self):
        # during the execution of postprocess commands, rpm is called several
        # times to get the files installed, dependencies, etc. This creates the
        # __db.00* (Berkeley DB files that hold locks, rpm specific environment
        # settings, etc.), that should not get into the final rootfs
        self.pm.unlock_rpm_db()
| 379 | |||
| 380 | |||
class DpkgRootfs(Rootfs):
    """Rootfs backend for deb-based images, driven by DpkgPM (apt/dpkg).

    The rootfs is always rebuilt from scratch; there is no incremental
    mode for this backend.
    """
    def __init__(self, d, manifest_dir):
        super(DpkgRootfs, self).__init__(d)

        bb.utils.remove(self.image_rootfs, True)
        bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True)
        self.manifest = DpkgManifest(d, manifest_dir)
        self.pm = DpkgPM(d, d.getVar('IMAGE_ROOTFS', True),
                         d.getVar('PACKAGE_ARCHS', True),
                         d.getVar('DPKG_ARCH', True))


    def _create(self):
        """Install all manifest packages into the rootfs (backend hook
        called by the Rootfs base class)."""
        pkgs_to_install = self.manifest.parse_initial_manifest()

        # dpkg expects this directory to exist before installing anything
        alt_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/alternatives")
        bb.utils.mkdirhier(alt_dir)

        # update PM index files
        self.pm.write_index()

        self.pm.update()

        for pkg_type in self.install_order:
            if pkg_type in pkgs_to_install:
                # attempt-only packages may fail without aborting the build
                self.pm.install(pkgs_to_install[pkg_type],
                                [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])

        self.pm.install_complementary()

        self.pm.fix_broken_dependencies()

        self.pm.mark_packages("installed")

        self.pm.run_pre_post_installs()

    def _get_delayed_postinsts(self):
        """Parse dpkg's status file and return the names of packages left
        in the 'unpacked' state (postinst deferred), or None if none."""
        pkg_list = []
        with open(self.image_rootfs + "/var/lib/dpkg/status") as status:
            for line in status:
                m_pkg = re.match("^Package: (.*)", line)
                m_status = re.match("^Status:.*unpacked", line)
                # NOTE(review): assumes every 'Status:' line is preceded by
                # its 'Package:' line, otherwise pkg_name is unbound --
                # this matches the dpkg status-file layout.
                if m_pkg is not None:
                    pkg_name = m_pkg.group(1)
                elif m_status is not None:
                    pkg_list.append(pkg_name)

        if len(pkg_list) == 0:
            return None

        return pkg_list

    def _save_postinsts(self):
        """Copy postinst scripts of still-unpacked packages into
        ${sysconfdir}/deb-postinsts, numbered to preserve install order."""
        num = 0
        for p in self._get_delayed_postinsts():
            dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/deb-postinsts")
            src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/info")

            bb.utils.mkdirhier(dst_postinst_dir)

            if os.path.exists(os.path.join(src_postinst_dir, p + ".postinst")):
                shutil.copy(os.path.join(src_postinst_dir, p + ".postinst"),
                            os.path.join(dst_postinst_dir, "%03d-%s" % (num, p)))

            num += 1

    def _handle_intercept_failure(self, registered_pkgs):
        # Re-mark the packages as merely unpacked so their postinsts are
        # retried (on target) instead of being considered done.
        self.pm.mark_packages("unpacked", registered_pkgs.split())

    def _log_check(self):
        pass

    def _cleanup(self):
        pass
| 455 | |||
| 456 | |||
class OpkgRootfs(Rootfs):
    """Rootfs backend for ipk-based images, driven by OpkgPM.

    Supports incremental image generation (INC_IPK_IMAGE_GEN == "1"):
    the previous rootfs and opkg packaging data are reused unless one of
    the variables affecting package selection changed, and packages no
    longer requested are removed from the old image.
    """
    def __init__(self, d, manifest_dir):
        super(OpkgRootfs, self).__init__(d)

        self.manifest = OpkgManifest(d, manifest_dir)
        self.opkg_conf = self.d.getVar("IPKGCONF_TARGET", True)
        self.pkg_archs = self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS", True)

        self.inc_opkg_image_gen = self.d.getVar('INC_IPK_IMAGE_GEN', True) or ""
        if self._remove_old_rootfs():
            # Start clean: remove the rootfs *before* constructing OpkgPM
            bb.utils.remove(self.image_rootfs, True)
            self.pm = OpkgPM(d,
                             self.image_rootfs,
                             self.opkg_conf,
                             self.pkg_archs)
        else:
            # Reuse the previous rootfs and restore its packaging data
            self.pm = OpkgPM(d,
                             self.image_rootfs,
                             self.opkg_conf,
                             self.pkg_archs)
            self.pm.recover_packaging_data()

        bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True)

    def _prelink_file(self, root_dir, filename):
        """Prelink *filename* in place, seeding prelink.conf from the
        native sysroot if the tree does not have one yet."""
        bb.note('prelink %s in %s' % (filename, root_dir))
        prelink_cfg = oe.path.join(root_dir,
                                   self.d.expand('${sysconfdir}/prelink.conf'))
        if not os.path.exists(prelink_cfg):
            shutil.copy(self.d.expand('${STAGING_DIR_NATIVE}${sysconfdir_native}/prelink.conf'),
                        prelink_cfg)

        cmd_prelink = self.d.expand('${STAGING_DIR_NATIVE}${sbindir_native}/prelink')
        self._exec_shell_cmd([cmd_prelink,
                              '--root',
                              root_dir,
                              '-amR',
                              '-N',
                              '-c',
                              self.d.expand('${sysconfdir}/prelink.conf')])

    '''
    Compare two files with the same key twice to see if they are equal.
    If they are not equal, it means they are duplicated and come from
    different packages.
    1st: Compare them directly;
    2nd: While incremental image creation is enabled, one of the
         files could probably be prelinked in the previous image
         creation and the file has been changed, so we need to
         prelink the other one and compare them.
    '''
    def _file_equal(self, key, f1, f2):

        # Both of them are not prelinked
        if filecmp.cmp(f1, f2):
            return True

        # Prelink whichever copy lives outside the current image rootfs,
        # then compare again.
        if self.image_rootfs not in f1:
            self._prelink_file(f1.replace(key, ''), f1)

        if self.image_rootfs not in f2:
            self._prelink_file(f2.replace(key, ''), f2)

        # Both of them are prelinked
        if filecmp.cmp(f1, f2):
            return True

        # Not equal
        return False

    """
    This function was reused from the old implementation.
    See commit: "image.bbclass: Added variables for multilib support." by
    Lianhao Lu.
    """
    def _multilib_sanity_test(self, dirs):
        """Abort the build when the same path in two rootfs trees has
        different contents, unless MULTILIBRE_ALLOW_REP matches it."""

        allow_replace = self.d.getVar("MULTILIBRE_ALLOW_REP", True)
        if allow_replace is None:
            allow_replace = ""

        allow_rep = re.compile(re.sub("\|$", "", allow_replace))
        error_prompt = "Multilib check error:"

        files = {}
        for dir in dirs:
            for root, subfolders, subfiles in os.walk(dir):
                for file in subfiles:
                    item = os.path.join(root, file)
                    key = str(os.path.join("/", os.path.relpath(item, dir)))

                    valid = True
                    if key in files:
                        # check whether the file is allowed to be replaced
                        if allow_rep.match(key):
                            valid = True
                        else:
                            if os.path.exists(files[key]) and \
                               os.path.exists(item) and \
                               not self._file_equal(key, files[key], item):
                                valid = False
                                bb.fatal("%s duplicate files %s %s is not the same\n" %
                                         (error_prompt, item, files[key]))

                    # pass the check, add to list
                    if valid:
                        files[key] = item

    def _multilib_test_install(self, pkgs):
        """Install *pkgs* into a throwaway per-variant rootfs and run the
        multilib duplicate-file sanity check against the real rootfs."""
        ml_temp = self.d.getVar("MULTILIB_TEMP_ROOTFS", True)
        bb.utils.mkdirhier(ml_temp)

        dirs = [self.image_rootfs]

        for variant in self.d.getVar("MULTILIB_VARIANTS", True).split():
            ml_target_rootfs = os.path.join(ml_temp, variant)

            bb.utils.remove(ml_target_rootfs, True)

            ml_opkg_conf = os.path.join(ml_temp,
                                        variant + "-" + os.path.basename(self.opkg_conf))

            ml_pm = OpkgPM(self.d, ml_target_rootfs, ml_opkg_conf, self.pkg_archs)

            ml_pm.update()
            ml_pm.install(pkgs)

            dirs.append(ml_target_rootfs)

        self._multilib_sanity_test(dirs)

    '''
    While ipk incremental image generation is enabled, it will remove the
    unneeded pkgs by comparing the old full manifest in previous existing
    image and the new full manifest in the current image.
    '''
    def _remove_extra_packages(self, pkgs_initial_install):
        if self.inc_opkg_image_gen == "1":
            # Parse full manifest in previous existing image creation session
            old_full_manifest = self.manifest.parse_full_manifest()

            # Create full manifest for the current image session, the old one
            # will be replaced by the new one.
            self.manifest.create_full(self.pm)

            # Parse full manifest in current image creation session
            new_full_manifest = self.manifest.parse_full_manifest()

            pkg_to_remove = list()
            for pkg in old_full_manifest:
                if pkg not in new_full_manifest:
                    pkg_to_remove.append(pkg)

            if pkg_to_remove != []:
                bb.note('decremental removed: %s' % ' '.join(pkg_to_remove))
                self.pm.remove(pkg_to_remove)

    '''
    Compare with previous existing image creation, if some conditions
    triggered, the previous old image should be removed.
    The conditions include any of 'PACKAGE_EXCLUDE, NO_RECOMMENDATIONS
    and BAD_RECOMMENDATIONS' has been changed.
    '''
    def _remove_old_rootfs(self):
        if self.inc_opkg_image_gen != "1":
            return True

        vars_list_file = self.d.expand('${T}/vars_list')

        old_vars_list = ""
        if os.path.exists(vars_list_file):
            # BUGFIX: close the file handle (the original leaked an
            # open(..., 'r+') handle)
            with open(vars_list_file, 'r') as vars_list:
                old_vars_list = vars_list.read()

        new_vars_list = '%s:%s:%s\n' % \
                ((self.d.getVar('BAD_RECOMMENDATIONS', True) or '').strip(),
                 (self.d.getVar('NO_RECOMMENDATIONS', True) or '').strip(),
                 (self.d.getVar('PACKAGE_EXCLUDE', True) or '').strip())
        # BUGFIX: close the file handle (the original leaked an
        # open(..., 'w+') handle)
        with open(vars_list_file, 'w') as vars_list:
            vars_list.write(new_vars_list)

        if old_vars_list != new_vars_list:
            return True

        return False

    def _create(self):
        """Install all manifest packages into the rootfs (backend hook
        called by the Rootfs base class)."""
        pkgs_to_install = self.manifest.parse_initial_manifest()
        opkg_pre_process_cmds = self.d.getVar('OPKG_PREPROCESS_COMMANDS', True)
        opkg_post_process_cmds = self.d.getVar('OPKG_POSTPROCESS_COMMANDS', True)
        rootfs_post_install_cmds = self.d.getVar('ROOTFS_POSTINSTALL_COMMAND', True)

        # update PM index files, unless users provide their own feeds
        if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") != "1":
            self.pm.write_index()

        execute_pre_post_process(self.d, opkg_pre_process_cmds)

        self.pm.update()

        self.pm.handle_bad_recommendations()

        if self.inc_opkg_image_gen == "1":
            self._remove_extra_packages(pkgs_to_install)

        for pkg_type in self.install_order:
            if pkg_type in pkgs_to_install:
                # For multilib, we perform a sanity test before final install
                # If sanity test fails, it will automatically do a bb.fatal()
                # and the installation will stop
                if pkg_type == Manifest.PKG_TYPE_MULTILIB:
                    self._multilib_test_install(pkgs_to_install[pkg_type])

                self.pm.install(pkgs_to_install[pkg_type],
                                [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])

        self.pm.install_complementary()

        execute_pre_post_process(self.d, opkg_post_process_cmds)
        execute_pre_post_process(self.d, rootfs_post_install_cmds)

        if self.inc_opkg_image_gen == "1":
            self.pm.backup_packaging_data()

    def _get_delayed_postinsts(self):
        """Parse opkg's status file and return the names of packages left
        in the 'unpacked' state (postinst deferred), or None if none."""
        pkg_list = []
        status_file = os.path.join(self.image_rootfs,
                                   self.d.getVar('OPKGLIBDIR', True).strip('/'),
                                   "opkg", "status")

        with open(status_file) as status:
            for line in status:
                m_pkg = re.match("^Package: (.*)", line)
                m_status = re.match("^Status:.*unpacked", line)
                # NOTE(review): assumes every 'Status:' line is preceded by
                # its 'Package:' line, otherwise pkg_name is unbound.
                if m_pkg is not None:
                    pkg_name = m_pkg.group(1)
                elif m_status is not None:
                    pkg_list.append(pkg_name)

        if len(pkg_list) == 0:
            return None

        return pkg_list

    def _save_postinsts(self):
        """Copy postinst scripts of still-unpacked packages into
        ${sysconfdir}/ipk-postinsts, numbered to preserve install order."""
        num = 0
        for p in self._get_delayed_postinsts():
            dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/ipk-postinsts")
            src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${OPKGLIBDIR}/opkg/info")

            bb.utils.mkdirhier(dst_postinst_dir)

            if os.path.exists(os.path.join(src_postinst_dir, p + ".postinst")):
                shutil.copy(os.path.join(src_postinst_dir, p + ".postinst"),
                            os.path.join(dst_postinst_dir, "%03d-%s" % (num, p)))

            num += 1

    def _handle_intercept_failure(self, registered_pkgs):
        # Re-mark the packages as merely unpacked so their postinsts are
        # retried instead of being considered done.
        self.pm.mark_packages("unpacked", registered_pkgs.split())

    def _log_check(self):
        pass

    def _cleanup(self):
        pass
| 721 | |||
| 722 | |||
def create_rootfs(d, manifest_dir=None):
    """Create the image rootfs with the backend selected by IMAGE_PKGTYPE.

    The process environment is snapshotted before and restored after, so
    whatever the backend exports does not leak to later tasks.
    """
    saved_environ = os.environ.copy()

    backends = {
        "rpm": RpmRootfs,
        "ipk": OpkgRootfs,
        "deb": DpkgRootfs,
    }
    backend_cls = backends.get(d.getVar('IMAGE_PKGTYPE', True))
    if backend_cls is not None:
        backend_cls(d, manifest_dir).create()

    os.environ.clear()
    os.environ.update(saved_environ)
| 736 | |||
| 737 | |||
def image_list_installed_packages(d, format=None, rootfs_dir=None):
    """List the packages installed in *rootfs_dir* (defaults to
    IMAGE_ROOTFS) in the requested output format, using the pkg-list
    helper that matches IMAGE_PKGTYPE."""
    rootfs_dir = rootfs_dir or d.getVar('IMAGE_ROOTFS', True)

    img_type = d.getVar('IMAGE_PKGTYPE', True)
    if img_type == "rpm":
        lister = RpmPkgsList(d, rootfs_dir)
    elif img_type == "ipk":
        lister = OpkgPkgsList(d, rootfs_dir, d.getVar("IPKGCONF_TARGET", True))
    elif img_type == "deb":
        lister = DpkgPkgsList(d, rootfs_dir)
    else:
        return None
    return lister.list(format)
| 749 | |||
if __name__ == "__main__":
    # Placeholder entry point: no standalone behaviour is implemented yet.
    """
    We should be able to run this as a standalone script, from outside bitbake
    environment.
    """
    """
    TBD
    """
diff --git a/meta/lib/oe/sdk.py b/meta/lib/oe/sdk.py new file mode 100644 index 0000000000..564319965d --- /dev/null +++ b/meta/lib/oe/sdk.py | |||
| @@ -0,0 +1,325 @@ | |||
| 1 | from abc import ABCMeta, abstractmethod | ||
| 2 | from oe.utils import execute_pre_post_process | ||
| 3 | from oe.manifest import * | ||
| 4 | from oe.package_manager import * | ||
| 5 | import os | ||
| 6 | import shutil | ||
| 7 | import glob | ||
| 8 | |||
| 9 | |||
class Sdk(object):
    """Abstract base class for populating an SDK output tree.

    Concrete subclasses (RpmSdk, OpkgSdk, DpkgSdk) implement _populate()
    to fill SDK_OUTPUT with a target sysroot (under SDKTARGETSYSROOT)
    and a host/nativesdk sysroot using their package manager backend.
    """
    __metaclass__ = ABCMeta

    def __init__(self, d, manifest_dir):
        self.d = d
        self.sdk_output = self.d.getVar('SDK_OUTPUT', True)
        self.sdk_native_path = self.d.getVar('SDKPATHNATIVE', True).strip('/')
        self.target_path = self.d.getVar('SDKTARGETSYSROOT', True).strip('/')
        self.sysconfdir = self.d.getVar('sysconfdir', True).strip('/')

        self.sdk_target_sysroot = os.path.join(self.sdk_output, self.target_path)
        self.sdk_host_sysroot = self.sdk_output

        if manifest_dir is None:
            self.manifest_dir = self.d.getVar("SDK_DIR", True)
        else:
            self.manifest_dir = manifest_dir

        # Always start from an empty output tree
        bb.utils.remove(self.sdk_output, True)

        self.install_order = Manifest.INSTALL_ORDER

    @abstractmethod
    def _populate(self):
        """Backend-specific installation of target and host packages."""
        pass

    def populate(self):
        """Build the SDK output tree: run the backend's _populate(),
        strip unwanted files, and run SDK_POSTPROCESS_COMMAND."""
        bb.utils.mkdirhier(self.sdk_output)

        # call backend dependent implementation
        self._populate()

        # Don't ship any libGL in the SDK
        bb.utils.remove(os.path.join(self.sdk_output, self.sdk_native_path,
                                     self.d.getVar('libdir_nativesdk', True).strip('/'),
                                     "libGL*"))

        # Fix or remove broken .la files
        bb.utils.remove(os.path.join(self.sdk_output, self.sdk_native_path,
                                     self.d.getVar('libdir_nativesdk', True).strip('/'),
                                     "*.la"))

        # Link the ld.so.cache file into the hosts filesystem
        link_name = os.path.join(self.sdk_output, self.sdk_native_path,
                                 self.sysconfdir, "ld.so.cache")
        os.symlink("/etc/ld.so.cache", link_name)

        execute_pre_post_process(self.d, self.d.getVar('SDK_POSTPROCESS_COMMAND', True))
| 58 | |||
| 59 | |||
class RpmSdk(Sdk):
    """SDK backend using RPM packages: one RpmPM for the target sysroot
    and one for the host (nativesdk) sysroot."""
    def __init__(self, d, manifest_dir=None):
        super(RpmSdk, self).__init__(d, manifest_dir)

        self.target_manifest = RpmManifest(d, self.manifest_dir,
                                           Manifest.MANIFEST_TYPE_SDK_TARGET)
        self.host_manifest = RpmManifest(d, self.manifest_dir,
                                         Manifest.MANIFEST_TYPE_SDK_HOST)

        # Virtual provides the target dependency solver should treat as
        # satisfied.
        target_providename = ['/bin/sh',
                              '/bin/bash',
                              '/usr/bin/env',
                              '/usr/bin/perl',
                              'pkgconfig'
                              ]

        self.target_pm = RpmPM(d,
                               self.sdk_target_sysroot,
                               self.d.getVar('TARGET_VENDOR', True),
                               'target',
                               target_providename
                               )

        sdk_providename = ['/bin/sh',
                           '/bin/bash',
                           '/usr/bin/env',
                           '/usr/bin/perl',
                           'pkgconfig',
                           'libGL.so()(64bit)',
                           'libGL.so'
                           ]

        self.host_pm = RpmPM(d,
                             self.sdk_host_sysroot,
                             self.d.getVar('SDK_VENDOR', True),
                             'host',
                             sdk_providename,
                             "SDK_PACKAGE_ARCHS",
                             "SDK_OS"
                             )

    def _populate_sysroot(self, pm, manifest):
        """Install every package from *manifest* into *pm*'s sysroot,
        honouring Manifest.INSTALL_ORDER."""
        pkgs_to_install = manifest.parse_initial_manifest()

        pm.create_configs()
        pm.write_index()
        pm.dump_all_available_pkgs()
        pm.update()

        for pkg_type in self.install_order:
            if pkg_type in pkgs_to_install:
                # attempt-only packages may fail without aborting
                pm.install(pkgs_to_install[pkg_type],
                           [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])

    def _populate(self):
        """Fill both sysroots, then relocate the host RPM state and
        sysconfig data under the nativesdk prefix."""
        bb.note("Installing TARGET packages")
        self._populate_sysroot(self.target_pm, self.target_manifest)

        self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY', True))

        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND", True))

        self.target_pm.remove_packaging_data()

        bb.note("Installing NATIVESDK packages")
        self._populate_sysroot(self.host_pm, self.host_manifest)

        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND", True))

        self.host_pm.remove_packaging_data()

        # Move host RPM library data
        native_rpm_state_dir = os.path.join(self.sdk_output,
                                            self.sdk_native_path,
                                            self.d.getVar('localstatedir_nativesdk', True).strip('/'),
                                            "lib",
                                            "rpm"
                                            )
        bb.utils.mkdirhier(native_rpm_state_dir)
        for f in glob.glob(os.path.join(self.sdk_output,
                                        "var",
                                        "lib",
                                        "rpm",
                                        "*")):
            bb.utils.movefile(f, native_rpm_state_dir)

        bb.utils.remove(os.path.join(self.sdk_output, "var"), True)

        # Move host sysconfig data
        native_sysconf_dir = os.path.join(self.sdk_output,
                                          self.sdk_native_path,
                                          self.d.getVar('sysconfdir',
                                                        True).strip('/'),
                                          )
        bb.utils.mkdirhier(native_sysconf_dir)
        for f in glob.glob(os.path.join(self.sdk_output, "etc", "*")):
            bb.utils.movefile(f, native_sysconf_dir)
        bb.utils.remove(os.path.join(self.sdk_output, "etc"), True)
| 158 | |||
| 159 | |||
class OpkgSdk(Sdk):
    """SDK backend using ipk packages: one OpkgPM for the target sysroot
    and one for the host (nativesdk) sysroot."""
    def __init__(self, d, manifest_dir=None):
        super(OpkgSdk, self).__init__(d, manifest_dir)

        self.target_conf = self.d.getVar("IPKGCONF_TARGET", True)
        self.host_conf = self.d.getVar("IPKGCONF_SDK", True)

        self.target_manifest = OpkgManifest(d, self.manifest_dir,
                                            Manifest.MANIFEST_TYPE_SDK_TARGET)
        self.host_manifest = OpkgManifest(d, self.manifest_dir,
                                          Manifest.MANIFEST_TYPE_SDK_HOST)

        self.target_pm = OpkgPM(d, self.sdk_target_sysroot, self.target_conf,
                                self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS", True))

        self.host_pm = OpkgPM(d, self.sdk_host_sysroot, self.host_conf,
                              self.d.getVar("SDK_PACKAGE_ARCHS", True))

    def _populate_sysroot(self, pm, manifest):
        """Install every package from *manifest* into *pm*'s sysroot,
        honouring Manifest.INSTALL_ORDER."""
        pkgs_to_install = manifest.parse_initial_manifest()

        # skip index generation when users provide their own feeds
        if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") != "1":
            pm.write_index()

        pm.update()

        for pkg_type in self.install_order:
            if pkg_type in pkgs_to_install:
                # attempt-only packages may fail without aborting
                pm.install(pkgs_to_install[pkg_type],
                           [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])

    def _populate(self):
        """Fill both sysroots, ship the opkg config files into them, and
        relocate the host opkg state under the nativesdk prefix."""
        bb.note("Installing TARGET packages")
        self._populate_sysroot(self.target_pm, self.target_manifest)

        self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY', True))

        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND", True))

        bb.note("Installing NATIVESDK packages")
        self._populate_sysroot(self.host_pm, self.host_manifest)

        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND", True))

        target_sysconfdir = os.path.join(self.sdk_target_sysroot, self.sysconfdir)
        host_sysconfdir = os.path.join(self.sdk_host_sysroot, self.sysconfdir)

        bb.utils.mkdirhier(target_sysconfdir)
        shutil.copy(self.target_conf, target_sysconfdir)
        # 0644: world-readable config (Python 2 octal literal)
        os.chmod(os.path.join(target_sysconfdir,
                              os.path.basename(self.target_conf)), 0644)

        bb.utils.mkdirhier(host_sysconfdir)
        shutil.copy(self.host_conf, host_sysconfdir)
        os.chmod(os.path.join(host_sysconfdir,
                              os.path.basename(self.host_conf)), 0644)

        native_opkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
                                             self.d.getVar('localstatedir_nativesdk', True).strip('/'),
                                             "lib", "opkg")
        bb.utils.mkdirhier(native_opkg_state_dir)
        for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "opkg", "*")):
            bb.utils.movefile(f, native_opkg_state_dir)

        bb.utils.remove(os.path.join(self.sdk_output, "var"), True)
| 225 | |||
| 226 | |||
class DpkgSdk(Sdk):
    """SDK backend using deb packages: one DpkgPM for the target sysroot
    and one for the host (nativesdk) sysroot."""
    def __init__(self, d, manifest_dir=None):
        super(DpkgSdk, self).__init__(d, manifest_dir)

        apt_conf_base = self.d.getVar("APTCONF_TARGET", True)
        self.target_conf_dir = os.path.join(apt_conf_base, "apt")
        self.host_conf_dir = os.path.join(apt_conf_base, "apt-sdk")

        self.target_manifest = DpkgManifest(d, self.manifest_dir,
                                            Manifest.MANIFEST_TYPE_SDK_TARGET)
        self.host_manifest = DpkgManifest(d, self.manifest_dir,
                                          Manifest.MANIFEST_TYPE_SDK_HOST)

        self.target_pm = DpkgPM(d, self.sdk_target_sysroot,
                                self.d.getVar("PACKAGE_ARCHS", True),
                                self.d.getVar("DPKG_ARCH", True),
                                self.target_conf_dir)

        self.host_pm = DpkgPM(d, self.sdk_host_sysroot,
                              self.d.getVar("SDK_PACKAGE_ARCHS", True),
                              self.d.getVar("DEB_SDK_ARCH", True),
                              self.host_conf_dir)

    def _copy_apt_dir_to(self, dst_dir):
        """Replace *dst_dir* with a copy of the native sysroot's apt
        configuration directory."""
        staging_etcdir_native = self.d.getVar("STAGING_ETCDIR_NATIVE", True)

        bb.utils.remove(dst_dir, True)

        shutil.copytree(os.path.join(staging_etcdir_native, "apt"), dst_dir)

    def _populate_sysroot(self, pm, manifest):
        """Install every package from *manifest* into *pm*'s sysroot,
        honouring Manifest.INSTALL_ORDER."""
        pkgs_to_install = manifest.parse_initial_manifest()

        pm.write_index()
        pm.update()

        for pkg_type in self.install_order:
            if pkg_type not in pkgs_to_install:
                continue
            # attempt-only packages may fail without aborting
            attempt_only = (pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY)
            pm.install(pkgs_to_install[pkg_type], attempt_only)

    def _populate(self):
        """Fill both sysroots, ship the apt configuration into them, and
        relocate the host dpkg state under the nativesdk prefix."""
        bb.note("Installing TARGET packages")
        self._populate_sysroot(self.target_pm, self.target_manifest)

        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND", True))

        self._copy_apt_dir_to(os.path.join(self.sdk_target_sysroot, "etc", "apt"))

        bb.note("Installing NATIVESDK packages")
        self._populate_sysroot(self.host_pm, self.host_manifest)

        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND", True))

        self._copy_apt_dir_to(os.path.join(self.sdk_output, self.sdk_native_path,
                                           "etc", "apt"))

        native_dpkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
                                             "var", "lib", "dpkg")
        bb.utils.mkdirhier(native_dpkg_state_dir)
        for entry in glob.glob(os.path.join(self.sdk_output, "var", "lib", "dpkg", "*")):
            bb.utils.movefile(entry, native_dpkg_state_dir)

        bb.utils.remove(os.path.join(self.sdk_output, "var"), True)
| 290 | |||
| 291 | |||
def sdk_list_installed_packages(d, target, format=None, rootfs_dir=None):
    """List the packages installed in an SDK sysroot.

    target=True selects the target sysroot, otherwise the host/native one.
    rootfs_dir may be given explicitly; if not, it is derived from
    SDK_OUTPUT (plus SDKTARGETSYSROOT for the target case). The backend
    is chosen via IMAGE_PKGTYPE.
    """
    if rootfs_dir is None:
        sdk_output = d.getVar('SDK_OUTPUT', True)
        target_path = d.getVar('SDKTARGETSYSROOT', True).strip('/')
        if target is True:
            rootfs_dir = os.path.join(sdk_output, target_path)
        else:
            rootfs_dir = sdk_output

    img_type = d.getVar('IMAGE_PKGTYPE', True)
    if img_type == "rpm":
        # The target case uses the defaults; the host case overrides the
        # arch/os variables with the SDK ones.
        if target is True:
            arch_var, os_var = None, None
        else:
            arch_var, os_var = "SDK_PACKAGE_ARCHS", "SDK_OS"
        return RpmPkgsList(d, rootfs_dir, arch_var, os_var).list(format)
    elif img_type == "ipk":
        conf_file_var = "IPKGCONF_Target" if target is True else "IPKGCONF_SDK"
        return OpkgPkgsList(d, rootfs_dir, d.getVar(conf_file_var, True)).list(format)
    elif img_type == "deb":
        return DpkgPkgsList(d, rootfs_dir).list(format)
| 309 | |||
def populate_sdk(d, manifest_dir=None):
    """Populate the SDK with the packaging backend given by IMAGE_PKGTYPE.

    The process environment is snapshotted first and restored afterwards,
    since the package-manager backends may modify it.
    """
    saved_env = os.environ.copy()

    backends = {
        "rpm": RpmSdk,
        "ipk": OpkgSdk,
        "deb": DpkgSdk,
    }
    sdk_cls = backends.get(d.getVar('IMAGE_PKGTYPE', True))
    if sdk_cls is not None:
        sdk_cls(d, manifest_dir).populate()

    os.environ.clear()
    os.environ.update(saved_env)
| 323 | |||
# This module is meant to be imported by the sdk bbclasses; there is
# nothing to do when it is executed directly.
if __name__ == "__main__":
    pass
diff --git a/meta/lib/oe/sstatesig.py b/meta/lib/oe/sstatesig.py new file mode 100644 index 0000000000..aa25c3a10e --- /dev/null +++ b/meta/lib/oe/sstatesig.py | |||
| @@ -0,0 +1,166 @@ | |||
| 1 | import bb.siggen | ||
| 2 | |||
def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache):
    """Decide whether a runtime dependency should influence task signatures.

    Returns True to keep the dependency in the signature computation and
    False to drop it.
    """
    def _is_native(name):
        return name.endswith("-native")

    def _is_cross(name):
        return (name.endswith("-cross")
                or name.endswith("-cross-initial")
                or name.endswith("-cross-intermediate"))

    def _is_nativesdk(name):
        return name.startswith("nativesdk-")

    def _inherited(recipe_fn):
        return " ".join(dataCache.inherits[recipe_fn])

    def _is_kernel(recipe_fn):
        classes = _inherited(recipe_fn)
        return ("/module-base.bbclass" in classes
                or "/linux-kernel-base.bbclass" in classes)

    def _is_packagegroup(recipe_fn):
        return "/packagegroup.bbclass" in _inherited(recipe_fn)

    def _is_image(recipe_fn):
        return "/image.bbclass" in _inherited(recipe_fn)

    # Always include our own inter-task dependencies.
    if recipename == depname:
        return True

    # Quilt (patch application) changing isn't likely to affect anything.
    excludelist = ['quilt-native', 'subversion-native', 'git-native']
    if depname in excludelist and recipename != depname:
        return False

    # Don't change native/cross/nativesdk recipe dependencies any further.
    if _is_native(recipename) or _is_cross(recipename) or _is_nativesdk(recipename):
        return True

    # Only target packages beyond here.

    # packagegroups are assumed to have well behaved names which don't
    # change between architectures/tunes.
    if _is_packagegroup(fn):
        return False

    # Exclude well defined machine specific configurations which don't
    # change ABI (unless we are building an image).
    if depname in siggen.abisaferecipes and not _is_image(fn):
        return False

    # Exclude well defined recipe->dependency pairs.
    if "%s->%s" % (recipename, depname) in siggen.saferecipedeps:
        return False

    # Kernel modules are well namespaced. We don't want to depend on the
    # kernel's checksum if we're just doing an RRECOMMENDS_xxx =
    # "kernel-module-*", not least because the checksum is machine specific.
    # Therefore if we're not a kernel or a module recipe (inheriting the
    # kernel classes) and we recommend a kernel-module, drop the dependency.
    depfn = dep.rsplit(".", 1)[0]
    if dataCache and _is_kernel(depfn) and not _is_kernel(fn):
        for pkg in dataCache.runrecs[fn]:
            if "kernel-module-" in " ".join(dataCache.runrecs[fn][pkg]):
                return False

    # Default to keeping the dependency.
    return True
| 60 | |||
class SignatureGeneratorOEBasic(bb.siggen.SignatureGeneratorBasic):
    """OE variant of bitbake's basic signature generator which filters out
    runtime dependencies that should not affect task signatures."""
    name = "OEBasic"

    def init_rundepcheck(self, data):
        # ABI-safe machine-specific recipes and explicitly excluded
        # recipe->dependency pairs, both space-separated lists.
        # (FIX: dropped the dead trailing 'pass' statement.)
        self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE", True) or "").split()
        self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", True) or "").split()

    def rundep_check(self, fn, recipename, task, dep, depname, dataCache = None):
        return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache)
| 69 | |||
class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
    """OE variant of bitbake's hashing signature generator which filters out
    runtime dependencies that should not affect task signatures."""
    name = "OEBasicHash"

    def init_rundepcheck(self, data):
        # ABI-safe machine-specific recipes and explicitly excluded
        # recipe->dependency pairs, both space-separated lists.
        # (FIX: dropped the dead trailing 'pass' statement.)
        self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE", True) or "").split()
        self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", True) or "").split()

    def rundep_check(self, fn, recipename, task, dep, depname, dataCache = None):
        return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache)
| 78 | |||
# Insert these classes into siggen's namespace so it can see and select them
# (bitbake resolves the configured generator by name on the bb.siggen module).
bb.siggen.SignatureGeneratorOEBasic = SignatureGeneratorOEBasic
bb.siggen.SignatureGeneratorOEBasicHash = SignatureGeneratorOEBasicHash
| 82 | |||
| 83 | |||
def find_siginfo(pn, taskname, taskhashlist, d):
    """Find signature data files for comparison purposes.

    If taskhashlist is supplied, returns a dict mapping each task hash to
    the matching sigdata/siginfo file. Otherwise returns a dict mapping
    file paths to their modification times.

    pn may also be a full task key ("/path/to/recipe.bb.do_task", possibly
    prefixed with "virtual:native:") when taskname is empty, in which case
    pn and taskname are derived from it.
    """

    import fnmatch
    import glob
    import os   # FIX: the module never imports os, which this function uses

    if taskhashlist:
        hashfiles = {}

    if not taskname:
        # We have to derive pn and taskname from the task key.
        key = pn
        splitit = key.split('.bb.')
        taskname = splitit[1]
        pn = os.path.basename(splitit[0]).split('_')[0]
        if key.startswith('virtual:native:'):
            pn = pn + '-native'

    if taskname in ['do_fetch', 'do_unpack', 'do_patch', 'do_populate_lic']:
        # These tasks are shared between target and -native variants.
        # FIX: the original called pn.replace() and discarded the result,
        # making this a no-op.
        pn = pn.replace("-native", "")

    filedates = {}

    # First search in the stamps dir.
    localdata = d.createCopy()
    localdata.setVar('MULTIMACH_TARGET_SYS', '*')
    localdata.setVar('PN', pn)
    localdata.setVar('PV', '*')
    localdata.setVar('PR', '*')
    localdata.setVar('EXTENDPE', '')
    stamp = localdata.getVar('STAMP', True)
    filespec = '%s.%s.sigdata.*' % (stamp, taskname)
    foundall = False
    for fullpath in glob.glob(filespec):
        if taskhashlist:
            for taskhash in taskhashlist:
                if fullpath.endswith('.%s' % taskhash):
                    hashfiles[taskhash] = fullpath
                    if len(hashfiles) == len(taskhashlist):
                        foundall = True
                        break
        else:
            filedates[fullpath] = os.stat(fullpath).st_mtime

    # NOTE(review): the len(filedates) < 2 guard looks like it is meant to
    # trigger the sstate-cache fallback when too few stamp files were
    # found; confirm whether hashfiles should be consulted here instead
    # when taskhashlist is set.
    if not taskhashlist or (len(filedates) < 2 and not foundall):
        # That didn't work, look in the sstate-cache.
        hashes = taskhashlist or ['*']
        localdata = bb.data.createCopy(d)
        for hashval in hashes:
            localdata.setVar('PACKAGE_ARCH', '*')
            localdata.setVar('TARGET_VENDOR', '*')
            localdata.setVar('TARGET_OS', '*')
            localdata.setVar('PN', pn)
            localdata.setVar('PV', '*')
            localdata.setVar('PR', '*')
            localdata.setVar('BB_TASKHASH', hashval)
            if pn.endswith('-native') or pn.endswith('-crosssdk') or pn.endswith('-cross'):
                localdata.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
            sstatename = taskname[3:]
            filespec = '%s_%s.*.siginfo' % (localdata.getVar('SSTATE_PKG', True), sstatename)

            # Sstate files live under a two-character hash prefix directory
            # when we know the hash; otherwise walk the whole cache.
            if hashval != '*':
                sstatedir = "%s/%s" % (d.getVar('SSTATE_DIR', True), hashval[:2])
            else:
                sstatedir = d.getVar('SSTATE_DIR', True)

            for root, dirs, files in os.walk(sstatedir):
                for fn in files:
                    fullpath = os.path.join(root, fn)
                    if fnmatch.fnmatch(fullpath, filespec):
                        if taskhashlist:
                            hashfiles[hashval] = fullpath
                        else:
                            filedates[fullpath] = os.stat(fullpath).st_mtime

    if taskhashlist:
        return hashfiles
    else:
        return filedates
| 165 | |||
| 166 | bb.siggen.find_siginfo = find_siginfo | ||
diff --git a/meta/lib/oe/terminal.py b/meta/lib/oe/terminal.py new file mode 100644 index 0000000000..a33abd733d --- /dev/null +++ b/meta/lib/oe/terminal.py | |||
| @@ -0,0 +1,218 @@ | |||
import logging
import os
import oe.classutils
import shlex
from bb.process import Popen, ExecutionError
| 5 | |||
| 6 | logger = logging.getLogger('BitBake.OE.Terminal') | ||
| 7 | |||
| 8 | |||
class UnsupportedTerminal(Exception):
    """Raised when a terminal class cannot be used in this environment."""
    pass
| 11 | |||
class NoSupportedTerminals(Exception):
    """Raised when none of the registered terminals can be spawned."""
    pass
| 14 | |||
| 15 | |||
class Registry(oe.classutils.ClassRegistry):
    """Metaclass that registers Terminal subclasses under their
    lower-cased class name."""

    # Command template for the terminal; subclasses override this.
    # A falsy value marks the class as not implemented/selectable.
    command = None

    def __init__(cls, name, bases, attrs):
        super(Registry, cls).__init__(name.lower(), bases, attrs)

    @property
    def implemented(cls):
        # Only classes that define a concrete command are usable.
        return bool(cls.command)
| 25 | |||
| 26 | |||
class Terminal(Popen):
    """Base class for spawning an interactive terminal running a shell
    command. Subclasses supply a 'command' template with {title} and
    {command} placeholders."""
    __metaclass__ = Registry

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        argv = self.format_command(sh_cmd, title)
        try:
            Popen.__init__(self, argv, env=env)
        except OSError as exc:
            import errno
            # A missing terminal binary just means this terminal type is
            # unavailable; anything else is a real error.
            if exc.errno != errno.ENOENT:
                raise
            raise UnsupportedTerminal(self.name)

    def format_command(self, sh_cmd, title):
        """Expand the {title}/{command} placeholders in self.command and
        return an argv list suitable for Popen."""
        substitutions = {'title': title or 'Terminal', 'command': sh_cmd}
        if isinstance(self.command, basestring):
            return shlex.split(self.command.format(**substitutions))
        return [part.format(**substitutions) for part in self.command]
| 47 | |||
class XTerminal(Terminal):
    """Base class for X11 terminals; unusable without a DISPLAY."""

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        # FIX: check DISPLAY *before* spawning. The original spawned the
        # terminal first and only then raised UnsupportedTerminal, leaving
        # a doomed child process behind when no X display was available.
        if not os.environ.get('DISPLAY'):
            raise UnsupportedTerminal(self.name)
        Terminal.__init__(self, sh_cmd, title, env, d)
| 53 | |||
class Gnome(XTerminal):
    # -t sets the window title; -x executes the rest of the line as the command.
    command = 'gnome-terminal -t "{title}" -x {command}'
    priority = 2
| 57 | |||
class Mate(XTerminal):
    # Same CLI as gnome-terminal (mate-terminal is a fork of it).
    command = 'mate-terminal -t "{title}" -x {command}'
    priority = 2
| 61 | |||
class Xfce(XTerminal):
    """Xfce Terminal. Upstream installs the binary as 'Terminal', while
    Debian/Ubuntu ship it as 'xfce4-terminal' to avoid possible(?)
    conflicts."""
    command = 'Terminal -T "{title}" -e "{command}"'
    priority = 2

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        # BUGFIX: the original passed the xfce4-terminal command *template*
        # as the shell command to run, so on Debian/Ubuntu it launched
        # 'Terminal' with the template string as its -e argument. Override
        # self.command with the distro-specific template instead and pass
        # the real shell command through unchanged.
        if distro_name() in ('ubuntu', 'debian'):
            self.command = 'xfce4-terminal -T "{title}" -e "{command}"'
        XTerminal.__init__(self, sh_cmd, title, env, d)
| 75 | |||
class Konsole(XTerminal):
    """KDE's konsole; KDE 4.x versions are rejected."""
    command = 'konsole -T "{title}" -e {command}'
    priority = 2

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        # Konsole 2.x (KDE 4) cannot host a devshell; detect it by the
        # major version number and bail out early.
        version = check_konsole_version("konsole")
        if version and version.split('.')[0] == "2":
            logger.debug(1, 'Konsole from KDE 4.x will not work as devshell, skipping')
            raise UnsupportedTerminal(self.name)
        XTerminal.__init__(self, sh_cmd, title, env, d)
| 88 | |||
class XTerm(XTerminal):
    # Plain xterm; lower priority than the desktop-specific terminals.
    command = 'xterm -T "{title}" -e {command}'
    priority = 1
| 92 | |||
class Rxvt(XTerminal):
    # rxvt shares xterm's -T/-e CLI; same low priority.
    command = 'rxvt -T "{title}" -e {command}'
    priority = 1
| 96 | |||
class Screen(Terminal):
    """Detached GNU screen session; the user attaches from another tty."""
    command = 'screen -D -m -t "{title}" -S devshell {command}'

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        # Use a pid-unique session name so concurrent devshells don't clash.
        session = "devshell_%i" % os.getpid()
        self.command = "screen -D -m -t \"{title}\" -S %s {command}" % session
        Terminal.__init__(self, sh_cmd, title, env, d)

        attach_cmd = "screen -r %s" % session
        msg = 'Screen started. Please connect in another terminal with "%s"' % attach_cmd
        # Inside a build, tell the UI how to attach; otherwise just log it.
        if (d):
            bb.event.fire(bb.event.LogExecTTY(msg, attach_cmd, 0.5, 10), d)
        else:
            logger.warn(msg)
| 111 | |||
class TmuxRunning(Terminal):
    """Open a new pane in the current running tmux window"""
    name = 'tmux-running'
    command = 'tmux split-window "{command}"'
    priority = 2.75

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        # Requires the tmux binary on PATH...
        if not bb.utils.which(os.getenv('PATH'), 'tmux'):
            raise UnsupportedTerminal('tmux is not installed')

        # ...and an already-running session ($TMUX is set inside tmux).
        if not os.getenv('TMUX'):
            raise UnsupportedTerminal('tmux is not running')

        Terminal.__init__(self, sh_cmd, title, env, d)
| 126 | |||
class Tmux(Terminal):
    """Start a new tmux session and window"""
    command = 'tmux new -d -s devshell -n devshell "{command}"'
    priority = 0.75

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        if not bb.utils.which(os.getenv('PATH'), 'tmux'):
            raise UnsupportedTerminal('tmux is not installed')

        # TODO: consider using a 'devshell' session shared amongst all
        # devshells, if it's already there, add a new window to it.
        window = 'devshell-%i' % os.getpid()
        self.command = 'tmux new -d -s {0} -n {0} "{{command}}"'.format(window)
        Terminal.__init__(self, sh_cmd, title, env, d)

        attach_cmd = 'tmux att -t {0}'.format(window)
        note = 'Tmux started. Please connect in another terminal with `%s`' % attach_cmd
        # Inside a build, tell the UI how to attach; otherwise just log it.
        if d:
            bb.event.fire(bb.event.LogExecTTY(note, attach_cmd, 0.5, 10), d)
        else:
            logger.warn(note)
| 148 | logger.warn(msg) | ||
| 149 | |||
class Custom(Terminal):
    """User-supplied terminal from the OE_TERMINAL_CUSTOMCMD variable."""
    command = 'false' # This is a placeholder
    priority = 3

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        custom_cmd = d and d.getVar('OE_TERMINAL_CUSTOMCMD', True)
        if not custom_cmd:
            logger.debug(1, 'No custom terminal (OE_TERMINAL_CUSTOMCMD) set')
            raise UnsupportedTerminal('OE_TERMINAL_CUSTOMCMD not set')

        # Make sure the user's command actually receives the shell command.
        if '{command}' not in custom_cmd:
            custom_cmd += ' {command}'
        self.command = custom_cmd
        Terminal.__init__(self, sh_cmd, title, env, d)
        logger.warn('Custom terminal was started.')
| 163 | raise UnsupportedTerminal('OE_TERMINAL_CUSTOMCMD not set') | ||
| 164 | |||
| 165 | |||
def prioritized():
    """Return the registered terminal classes in priority order
    (see oe.classutils.ClassRegistry)."""
    return Registry.prioritized()
| 168 | |||
def spawn_preferred(sh_cmd, title=None, env=None, d=None):
    """Spawn the first supported terminal, by priority"""
    for terminal_cls in prioritized():
        try:
            spawn(terminal_cls.name, sh_cmd, title, env, d)
        except UnsupportedTerminal:
            continue
        else:
            # A terminal was successfully spawned; we're done.
            return
    raise NoSupportedTerminals()
| 178 | raise NoSupportedTerminals() | ||
| 179 | |||
def spawn(name, sh_cmd, title=None, env=None, d=None):
    """Spawn the specified terminal, by name"""
    logger.debug(1, 'Attempting to spawn terminal "%s"', name)
    if name not in Registry.registry:
        raise UnsupportedTerminal(name)
    terminal_cls = Registry.registry[name]

    pipe = terminal_cls(sh_cmd, title, env, d)
    stdout = pipe.communicate()[0]
    # A non-zero exit from the terminal is reported as an execution error.
    if pipe.returncode != 0:
        raise ExecutionError(sh_cmd, pipe.returncode, stdout)
| 192 | |||
def check_konsole_version(konsole):
    """Return the version string reported by `<konsole> --version`, or
    None when the binary is missing or no 'Konsole ...' line is printed."""
    import subprocess as sub
    try:
        p = sub.Popen(['sh', '-c', '%s --version' % konsole],
                      stdout=sub.PIPE, stderr=sub.PIPE)
        out, err = p.communicate()
    except OSError as exc:
        import errno
        if exc.errno == errno.ENOENT:
            return None
        else:
            raise
    # FIX: on python 3 communicate() returns bytes, and bytes.split('\n')
    # raises TypeError; decode defensively (no-op on python 2's str).
    if not isinstance(out, str):
        out = out.decode('utf-8', 'replace')
    vernum = None
    for line in out.rstrip().split('\n'):
        # e.g. "Konsole 2.13.2" -> "2.13.2" (last whitespace-separated token)
        if line.startswith('Konsole'):
            vernum = line.split(' ')[-1]
    return vernum
| 209 | return vernum | ||
| 210 | |||
def distro_name():
    """Return the lower-cased distributor ID from `lsb_release -i`, or
    "unknown" when it cannot be determined."""
    try:
        p = Popen(['lsb_release', '-i'])
        out, err = p.communicate()
        # Output looks like "Distributor ID:\tUbuntu".
        distro = out.split(':')[1].strip().lower()
    except Exception:
        # FIX: narrowed from a bare 'except:' so SystemExit and
        # KeyboardInterrupt are no longer swallowed; detection stays
        # best-effort for missing lsb_release or unexpected output.
        distro = "unknown"
    return distro
diff --git a/meta/lib/oe/tests/__init__.py b/meta/lib/oe/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/meta/lib/oe/tests/__init__.py | |||
diff --git a/meta/lib/oe/tests/test_license.py b/meta/lib/oe/tests/test_license.py new file mode 100644 index 0000000000..c388886184 --- /dev/null +++ b/meta/lib/oe/tests/test_license.py | |||
| @@ -0,0 +1,68 @@ | |||
| 1 | import unittest | ||
| 2 | import oe.license | ||
| 3 | |||
class SeenVisitor(oe.license.LicenseVisitor):
    """License-expression visitor that records every license string node
    it encounters, in visit order."""
    def __init__(self):
        # Flat list of license names seen while visiting.
        self.seen = []
        oe.license.LicenseVisitor.__init__(self)

    def visit_Str(self, node):
        self.seen.append(node.s)
| 11 | |||
class TestSingleLicense(unittest.TestCase):
    """Parsing of single-license strings, valid and invalid."""
    licenses = [
        "GPLv2",
        "LGPL-2.0",
        "Artistic",
        "MIT",
        "GPLv3+",
        "FOO_BAR",
    ]
    # '/' is not a valid separator in a license expression.
    invalid_licenses = ["GPL/BSD"]

    @staticmethod
    def parse(licensestr):
        """Parse licensestr and return the list of license names seen."""
        visitor = SeenVisitor()
        visitor.visit_string(licensestr)
        return visitor.seen

    def test_single_licenses(self):
        # Each single license should parse to exactly itself.
        for license in self.licenses:
            licenses = self.parse(license)
            self.assertListEqual(licenses, [license])

    def test_invalid_licenses(self):
        # Invalid strings raise InvalidLicense carrying the offending text.
        for license in self.invalid_licenses:
            with self.assertRaises(oe.license.InvalidLicense) as cm:
                self.parse(license)
            self.assertEqual(cm.exception.license, license)
| 39 | |||
class TestSimpleCombinations(unittest.TestCase):
    """Flattening of simple &/| license expressions with a chooser that
    prefers the licenses listed in 'preferred'."""
    # Maps input expression -> expected flattened license list.
    tests = {
        "FOO&BAR": ["FOO", "BAR"],
        "BAZ & MOO": ["BAZ", "MOO"],
        "ALPHA|BETA": ["ALPHA"],
        "BAZ&MOO|FOO": ["FOO"],
        "FOO&BAR|BAZ": ["FOO", "BAR"],
    }
    preferred = ["ALPHA", "FOO", "BAR"]

    def test_tests(self):
        # Choose alternative b only when all of its licenses are preferred.
        def choose(a, b):
            if all(lic in self.preferred for lic in b):
                return b
            else:
                return a

        for license, expected in self.tests.items():
            licenses = oe.license.flattened_licenses(license, choose)
            self.assertListEqual(licenses, expected)
| 60 | |||
class TestComplexCombinations(TestSimpleCombinations):
    """Same chooser logic as TestSimpleCombinations, applied to nested,
    parenthesised license expressions."""
    tests = {
        "FOO & (BAR | BAZ)&MOO": ["FOO", "BAR", "MOO"],
        "(ALPHA|(BETA&THETA)|OMEGA)&DELTA": ["OMEGA", "DELTA"],
        "((ALPHA|BETA)&FOO)|BAZ": ["BETA", "FOO"],
        "(GPL-2.0|Proprietary)&BSD-4-clause&MIT": ["GPL-2.0", "BSD-4-clause", "MIT"],
    }
    preferred = ["BAR", "OMEGA", "BETA", "GPL-2.0"]
diff --git a/meta/lib/oe/tests/test_path.py b/meta/lib/oe/tests/test_path.py new file mode 100644 index 0000000000..3d41ce157a --- /dev/null +++ b/meta/lib/oe/tests/test_path.py | |||
| @@ -0,0 +1,89 @@ | |||
| 1 | import unittest | ||
| 2 | import oe, oe.path | ||
| 3 | import tempfile | ||
| 4 | import os | ||
| 5 | import errno | ||
| 6 | import shutil | ||
| 7 | |||
class TestRealPath(unittest.TestCase):
    """Exercise oe.path.realpath against a constructed tree of
    directories, files and symlinks rooted at a temp directory."""

    DIRS = [ "a", "b", "etc", "sbin", "usr", "usr/bin", "usr/binX", "usr/sbin", "usr/include", "usr/include/gdbm" ]
    FILES = [ "etc/passwd", "b/file" ]
    # (link path, link target, expected resolution); None = unresolvable.
    LINKS = [
        ( "bin",             "/usr/bin",             "/usr/bin" ),
        ( "binX",            "usr/binX",             "/usr/binX" ),
        ( "c",               "broken",               "/broken" ),
        ( "etc/passwd-1",    "passwd",               "/etc/passwd" ),
        ( "etc/passwd-2",    "passwd-1",             "/etc/passwd" ),
        ( "etc/passwd-3",    "/etc/passwd-1",        "/etc/passwd" ),
        ( "etc/shadow-1",    "/etc/shadow",          "/etc/shadow" ),
        ( "etc/shadow-2",    "/etc/shadow-1",        "/etc/shadow" ),
        ( "prog-A",          "bin/prog-A",           "/usr/bin/prog-A" ),
        ( "prog-B",          "/bin/prog-B",          "/usr/bin/prog-B" ),
        ( "usr/bin/prog-C",  "../../sbin/prog-C",    "/sbin/prog-C" ),
        ( "usr/bin/prog-D",  "/sbin/prog-D",         "/sbin/prog-D" ),
        ( "usr/binX/prog-E", "../sbin/prog-E",       None ),
        ( "usr/bin/prog-F",  "../../../sbin/prog-F", "/sbin/prog-F" ),
        ( "loop",            "a/loop",               None ),
        ( "a/loop",          "../loop",              None ),
        ( "b/test",          "file/foo",             "/b/file/foo" ),
    ]

    LINKS_PHYS = [
        ( "./",              "/",                    "" ),
        ( "binX/prog-E",     "/usr/sbin/prog-E",     "/sbin/prog-E" ),
    ]

    EXCEPTIONS = [
        ( "loop",   errno.ELOOP ),
        ( "b/test", errno.ENOENT ),
    ]

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix = "oe-test_path")
        self.root = os.path.join(self.tmpdir, "R")

        # The root itself is a symlink, to exercise resolution through it.
        os.mkdir(os.path.join(self.tmpdir, "_real"))
        os.symlink("_real", self.root)

        for d in self.DIRS:
            os.mkdir(os.path.join(self.root, d))
        for f in self.FILES:
            # FIX: use open().close() instead of the python2-only file()
            # builtin, which also leaked an open file object.
            open(os.path.join(self.root, f), "w").close()
        for l in self.LINKS:
            os.symlink(l[1], os.path.join(self.root, l[0]))

    def tearDown(self):
        # FIX: cleanup previously lived in __del__, whose invocation time
        # (if ever) is not guaranteed; tearDown runs deterministically
        # after each test.
        #os.system("tree -F %s" % self.tmpdir)
        shutil.rmtree(self.tmpdir, ignore_errors=True)

    def __realpath(self, file, use_physdir, assume_dir = True):
        return oe.path.realpath(os.path.join(self.root, file), self.root,
                                use_physdir, assume_dir = assume_dir)

    def test_norm(self):
        for l in self.LINKS:
            if l[2] == None:
                continue

            target_p = self.__realpath(l[0], True)
            target_l = self.__realpath(l[0], False)

            if l[2] != False:
                self.assertEqual(target_p, target_l)
                self.assertEqual(l[2], target_p[len(self.root):])

    def test_phys(self):
        for l in self.LINKS_PHYS:
            target_p = self.__realpath(l[0], True)
            target_l = self.__realpath(l[0], False)

            self.assertEqual(l[1], target_p[len(self.root):])
            self.assertEqual(l[2], target_l[len(self.root):])

    def test_loop(self):
        # Symlink loops and dangling paths must raise the matching errno.
        for e in self.EXCEPTIONS:
            self.assertRaisesRegexp(OSError, r'\[Errno %u\]' % e[1],
                                    self.__realpath, e[0], False, False)
diff --git a/meta/lib/oe/tests/test_types.py b/meta/lib/oe/tests/test_types.py new file mode 100644 index 0000000000..367cc30e45 --- /dev/null +++ b/meta/lib/oe/tests/test_types.py | |||
| @@ -0,0 +1,62 @@ | |||
| 1 | import unittest | ||
| 2 | from oe.maketype import create, factory | ||
| 3 | |||
class TestTypes(unittest.TestCase):
    """Shared helpers for the oe.maketype tests.

    NOTE(review): assertIsInstance/assertIsNot are re-implemented here,
    presumably for compatibility with older unittest versions that lack
    them — confirm before removing."""
    def assertIsInstance(self, obj, cls):
        return self.assertTrue(isinstance(obj, cls))

    def assertIsNot(self, obj, other):
        return self.assertFalse(obj is other)

    def assertFactoryCreated(self, value, type, **flags):
        # The type must have a registered factory, and create() must
        # return an instance of the class that factory maps to.
        cls = factory(type)
        self.assertIsNot(cls, None)
        self.assertIsInstance(create(value, type, **flags), cls)
| 15 | |||
class TestBooleanType(TestTypes):
    """Behaviour of the 'boolean' variable type."""

    def test_invalid(self):
        # Empty and unrecognized strings are rejected; non-strings raise
        # TypeError.
        self.assertRaises(ValueError, create, '', 'boolean')
        self.assertRaises(ValueError, create, 'foo', 'boolean')
        self.assertRaises(TypeError, create, object(), 'boolean')

    def test_true(self):
        # Truthy spellings are matched case-insensitively.
        for text in ('y', 'yes', '1', 't', 'true', 'TRUE', 'truE'):
            self.assertTrue(create(text, 'boolean'))

    def test_false(self):
        # Falsy spellings are matched case-insensitively.
        for text in ('n', 'no', '0', 'f', 'false', 'FALSE', 'faLse'):
            self.assertFalse(create(text, 'boolean'))

    def test_bool_equality(self):
        # Created booleans compare equal to the builtin bools.
        self.assertEqual(create('n', 'boolean'), False)
        self.assertNotEqual(create('n', 'boolean'), True)
        self.assertEqual(create('y', 'boolean'), True)
        self.assertNotEqual(create('y', 'boolean'), False)
| 45 | |||
class TestList(TestTypes):
    """Behaviour of the 'list' variable type and its separator flag."""

    def assertListEqual(self, value, valid, sep=None):
        # Creation, equality, separator round-trip and str() re-joining.
        obj = create(value, 'list', separator=sep)
        self.assertEqual(obj, valid)
        if sep is not None:
            self.assertEqual(obj.separator, sep)
        self.assertEqual(str(obj), obj.separator.join(obj))

    def test_list_nosep(self):
        # Default separator splits on any whitespace.
        testlist = ['alpha', 'beta', 'theta']
        self.assertListEqual('alpha beta theta', testlist)
        self.assertListEqual('alpha beta\ttheta', testlist)
        self.assertListEqual('alpha', ['alpha'])

    def test_list_usersep(self):
        # Explicit separator is honoured for both split and re-join.
        self.assertListEqual('foo:bar', ['foo', 'bar'], ':')
        self.assertListEqual('foo:bar:baz', ['foo', 'bar', 'baz'], ':')
diff --git a/meta/lib/oe/tests/test_utils.py b/meta/lib/oe/tests/test_utils.py new file mode 100644 index 0000000000..5d9ac52e7d --- /dev/null +++ b/meta/lib/oe/tests/test_utils.py | |||
| @@ -0,0 +1,51 @@ | |||
| 1 | import unittest | ||
| 2 | from oe.utils import packages_filter_out_system | ||
| 3 | |||
class TestPackagesFilterOutSystem(unittest.TestCase):
    def test_filter(self):
        """
        Test that oe.utils.packages_filter_out_system works.
        """
        try:
            import bb
        except ImportError:
            self.skipTest("Cannot import bb")

        d = bb.data_smart.DataSmart()
        d.setVar("PN", "foo")

        # The standard ${PN}, -doc, -dev (and -locale-*) packages are
        # filtered out; anything else is returned.
        d.setVar("PACKAGES", "foo foo-doc foo-dev")
        pkgs = packages_filter_out_system(d)
        self.assertEqual(pkgs, [])

        d.setVar("PACKAGES", "foo foo-doc foo-data foo-dev")
        pkgs = packages_filter_out_system(d)
        self.assertEqual(pkgs, ["foo-data"])

        d.setVar("PACKAGES", "foo foo-locale-en-gb")
        pkgs = packages_filter_out_system(d)
        self.assertEqual(pkgs, [])

        d.setVar("PACKAGES", "foo foo-data foo-locale-en-gb")
        pkgs = packages_filter_out_system(d)
        self.assertEqual(pkgs, ["foo-data"])
| 32 | |||
| 33 | |||
class TestTrimVersion(unittest.TestCase):
    """Tests for oe.utils.trim_version.

    FIX: trim_version was never imported (the module only imports
    packages_filter_out_system), so every test method died with a
    NameError; import it locally in each test.
    """
    def test_version_exception(self):
        from oe.utils import trim_version
        # Non-string versions are rejected.
        with self.assertRaises(TypeError):
            trim_version(None, 2)
        with self.assertRaises(TypeError):
            trim_version((1, 2, 3), 2)

    def test_num_exception(self):
        from oe.utils import trim_version
        # The number of components to keep must be positive.
        with self.assertRaises(ValueError):
            trim_version("1.2.3", 0)
        with self.assertRaises(ValueError):
            trim_version("1.2.3", -1)

    def test_valid(self):
        from oe.utils import trim_version
        # Keeping more components than exist returns the full version.
        self.assertEqual(trim_version("1.2.3", 1), "1")
        self.assertEqual(trim_version("1.2.3", 2), "1.2")
        self.assertEqual(trim_version("1.2.3", 3), "1.2.3")
        self.assertEqual(trim_version("1.2.3", 4), "1.2.3")
diff --git a/meta/lib/oe/types.py b/meta/lib/oe/types.py new file mode 100644 index 0000000000..7f47c17d0e --- /dev/null +++ b/meta/lib/oe/types.py | |||
| @@ -0,0 +1,153 @@ | |||
| 1 | import errno | ||
| 2 | import re | ||
| 3 | import os | ||
| 4 | |||
| 5 | |||
class OEList(list):
    """OpenEmbedded 'list' type

    Acts as an ordinary list, but is constructed from a string value and a
    separator (optional), and re-joins itself when converted to a string
    with str(). Set the variable type flag to 'list' to use this type, and
    the 'separator' flag may be specified (defaulting to whitespace)."""

    name = "list"

    def __init__(self, value, separator = None):
        items = [] if value is None else value.split(separator)
        list.__init__(self, items)

        # Remember the join character for __str__. A None separator means
        # "split on any whitespace", so re-join with a single space.
        self.separator = " " if separator is None else separator

    def __str__(self):
        return self.separator.join(self)
| 29 | |||
def choice(value, choices):
    """OpenEmbedded 'choice' type

    Acts as a multiple choice for the user. To use this, set the variable
    type flag to 'choice', and set the 'choices' flag to a space separated
    list of valid values. Matching is case-insensitive and the lower-cased
    value is returned.

    Raises TypeError for non-string input and ValueError for a value that
    is not one of the choices."""
    # FIX: basestring does not exist on python 3; fall back to str so the
    # check works on both python versions (py2 behaviour unchanged).
    try:
        string_types = basestring
    except NameError:
        string_types = str
    if not isinstance(value, string_types):
        raise TypeError("choice accepts a string, not '%s'" % type(value))

    value = value.lower()
    choices = choices.lower()
    if value not in choices.split():
        raise ValueError("Invalid choice '%s'. Valid choices: %s" %
                         (value, choices))
    return value
| 45 | |||
class NoMatch(object):
    """Stub python regex pattern object which never matches anything"""
    # Mirrors the re pattern-object surface so callers of regex() can use
    # the returned object uniformly; every method reports "no match".
    def findall(self, string, flags=0):
        return None

    def finditer(self, string, flags=0):
        return None

    def match(self, flags=0):
        return None

    def search(self, string, flags=0):
        return None

    def split(self, string, maxsplit=0):
        return None

    # NOTE(review): for sub/subn the first parameter acts as 'self' (it is
    # just named 'pattern'), so the effective call signature differs from
    # re's pattern.sub(repl, string, count) -- confirm no caller relies on
    # positional argument order here.
    def sub(pattern, repl, string, count=0):
        return None

    def subn(pattern, repl, string, count=0):
        return None

# Rebind the name to a shared singleton instance; regex() hands this out
# for empty/undefined values.
NoMatch = NoMatch()
| 70 | |||
def regex(value, regexflags=None):
    """OpenEmbedded 'regex' type

    Acts as a regular expression, returning the pre-compiled regular
    expression pattern object. To use this type, set the variable type flag
    to 'regex', and optionally, set the 'regexflags' type to a space separated
    list of the flags to control the regular expression matching (e.g.
    FOO[regexflags] += 'ignorecase'). See the python documentation on the
    're' module for a list of valid flags."""

    # Translate the space-separated flag names into a combined re flag value.
    flagval = 0
    for flagname in (regexflags or "").split():
        flagname = flagname.upper()
        try:
            flagval |= getattr(re, flagname)
        except AttributeError:
            raise ValueError("Invalid regex flag '%s'" % flagname)

    if not value:
        # Let's ensure that the default behavior for an undefined or empty
        # variable is to match nothing. If the user explicitly wants to match
        # anything, they can match '.*' instead.
        return NoMatch

    try:
        return re.compile(value, flagval)
    except re.error as exc:
        raise ValueError("Invalid regex value '%s': %s" %
                         (value, exc.args[0]))
| 101 | |||
def boolean(value):
    """OpenEmbedded 'boolean' type

    Valid values for true: 'yes', 'y', 'true', 't', '1'
    Valid values for false: 'no', 'n', 'false', 'f', '0'

    Raises TypeError for non-string input and ValueError for a string that
    is neither a true nor a false spelling."""

    # 'basestring' only exists on Python 2; fall back to 'str' on Python 3
    # so the type check works on either interpreter.
    try:
        string_types = basestring
    except NameError:
        string_types = str
    if not isinstance(value, string_types):
        raise TypeError("boolean accepts a string, not '%s'" % type(value))

    value = value.lower()
    if value in ('yes', 'y', 'true', 't', '1'):
        return True
    elif value in ('no', 'n', 'false', 'f', '0'):
        return False
    raise ValueError("Invalid boolean value '%s'" % value)
| 118 | |||
def integer(value, numberbase=10):
    """OpenEmbedded 'integer' type

    Defaults to base 10, but this can be specified using the optional
    'numberbase' flag."""
    # The base typically arrives as a string from the metadata flag, so
    # coerce it before handing both to int().
    base = int(numberbase)
    return int(value, base)
| 126 | |||
# Stash the builtin float before the definition below shadows the name.
_float = float
def float(value, fromhex='false'):
    """OpenEmbedded floating point type

    To use this type, set the type flag to 'float', and optionally set the
    'fromhex' flag to a true value (obeying the same rules as for the
    'boolean' type) if the value is in base 16 rather than base 10."""

    # Pick the parser up front: hex literal or plain decimal.
    parse = _float.fromhex if boolean(fromhex) else _float
    return parse(value)
| 139 | |||
def path(value, relativeto='', normalize='true', mustexist='false'):
    """OpenEmbedded 'path' type

    Joins *value* onto *relativeto*, normalizes the result when the
    'normalize' flag is true, and when 'mustexist' is true verifies the
    file can be opened, raising ValueError if it does not exist. Both
    flags are parsed with boolean()."""
    value = os.path.join(relativeto, value)

    if boolean(normalize):
        value = os.path.normpath(value)

    if boolean(mustexist):
        try:
            # Probe with a context manager so the handle is closed instead
            # of leaked (the original open() was never closed).
            with open(value, 'r'):
                pass
        except IOError as exc:
            # Only a missing file is a type error; other IO problems
            # (e.g. permissions) are deliberately not flagged here.
            if exc.errno == errno.ENOENT:
                raise ValueError("{0}: {1}".format(value, os.strerror(errno.ENOENT)))

    return value
diff --git a/meta/lib/oe/utils.py b/meta/lib/oe/utils.py new file mode 100644 index 0000000000..defa53679b --- /dev/null +++ b/meta/lib/oe/utils.py | |||
| @@ -0,0 +1,166 @@ | |||
| 1 | try: | ||
| 2 | # Python 2 | ||
| 3 | import commands as cmdstatus | ||
| 4 | except ImportError: | ||
| 5 | # Python 3 | ||
| 6 | import subprocess as cmdstatus | ||
| 7 | |||
def read_file(filename):
    """Return the stripped contents of *filename*, or "" when the file
    cannot be opened.

    WARNING: can't raise an error on open failure because of the new
    RDEPENDS handling. This is a bit ugly. :M:
    """
    try:
        f = open(filename, "r")
    except IOError:
        # Open failures are deliberately swallowed (see warning above).
        return ""
    # Context manager closes the handle; the original leaked it and also
    # had an unreachable trailing 'return None'.
    with f:
        return f.read().strip()
| 18 | |||
def ifelse(condition, iftrue = True, iffalse = False):
    """Return *iftrue* when the condition is truthy, otherwise *iffalse*."""
    return iftrue if condition else iffalse
| 24 | |||
def conditional(variable, checkvalue, truevalue, falsevalue, d):
    """Return *truevalue* when the datastore value of *variable* equals
    *checkvalue*, otherwise *falsevalue*."""
    current = d.getVar(variable, 1)
    return truevalue if current == checkvalue else falsevalue
| 30 | |||
def less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
    """Numeric comparison: return *truevalue* when the datastore value of
    *variable* is <= *checkvalue* (both parsed as floats)."""
    if float(d.getVar(variable, 1)) > float(checkvalue):
        return falsevalue
    return truevalue
| 36 | |||
def version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
    """Version-string comparison via bb.utils.vercmp_string: return
    *truevalue* when the datastore value of *variable* is <= *checkvalue*."""
    current = d.getVar(variable, True)
    if bb.utils.vercmp_string(current, checkvalue) <= 0:
        return truevalue
    return falsevalue
| 43 | |||
def contains(variable, checkvalues, truevalue, falsevalue, d):
    """Return *truevalue* when every item of *checkvalues* appears in the
    whitespace-separated datastore value of *variable*, else *falsevalue*.

    *checkvalues* may be a string (split on whitespace) or any iterable of
    strings. An unset/empty variable always yields *falsevalue*."""
    val = d.getVar(variable, True)
    if not val:
        return falsevalue
    val = set(val.split())
    # 'basestring' only exists on Python 2; fall back to 'str' on Python 3
    # so the type test works on either interpreter.
    try:
        string_types = basestring
    except NameError:
        string_types = str
    if isinstance(checkvalues, string_types):
        checkvalues = set(checkvalues.split())
    else:
        checkvalues = set(checkvalues)
    if checkvalues.issubset(val):
        return truevalue
    return falsevalue
| 56 | |||
def both_contain(variable1, variable2, checkvalue, d):
    """Return *checkvalue* when it occurs (as a substring) in the values of
    both variables, otherwise the empty string."""
    first = d.getVar(variable1, 1)
    second = d.getVar(variable2, 1)
    if checkvalue in first and checkvalue in second:
        return checkvalue
    return ""
| 62 | |||
def prune_suffix(var, suffixes, d):
    """Strip any listed suffix from the end of *var*, and a leading
    MLPREFIX (when set in the datastore) from its start.

    The original used str.replace(), which removed the suffix/prefix text
    from anywhere in the string (and every occurrence), not just the
    end/start; this version only trims at the boundaries."""
    for suffix in suffixes:
        # Guard against an empty suffix, which would otherwise slice the
        # whole string away (var[:-0] == "").
        if suffix and var.endswith(suffix):
            var = var[:-len(suffix)]

    prefix = d.getVar("MLPREFIX", True)
    if prefix and var.startswith(prefix):
        var = var[len(prefix):]

    return var
| 75 | |||
def str_filter(f, str, d):
    """Return the whitespace-separated words of *str* that match regex *f*
    (anchored at the start of each word), re-joined with single spaces."""
    from re import match
    kept = [word for word in str.split() if match(f, word, 0)]
    return " ".join(kept)
| 79 | |||
def str_filter_out(f, str, d):
    """Return the whitespace-separated words of *str* that do NOT match
    regex *f* (anchored at the start of each word), space-joined."""
    from re import match
    kept = [word for word in str.split() if not match(f, word, 0)]
    return " ".join(kept)
| 83 | |||
def param_bool(cfg, field, dflt = None):
    """Lookup <field> in <cfg> map and convert it to a boolean; take
    <dflt> when this <field> does not exist"""
    value = cfg.get(field, dflt)
    # Stringify so non-string defaults (True/False/None) compare too.
    strvalue = str(value).lower()
    if strvalue in ('yes', 'y', 'true', 't', '1'):
        return True
    if strvalue in ('no', 'n', 'false', 'f', '0'):
        return False
    raise ValueError("invalid value for boolean parameter '%s': '%s'" % (field, value))
| 94 | |||
def inherits(d, *classes):
    """Return True if the metadata inherits any of the specified classes"""
    # Delegates to bitbake's inherits_class() for each candidate class name.
    return any(bb.data.inherits_class(cls, d) for cls in classes)
| 98 | |||
def features_backfill(var,d):
    # This construct allows the addition of new features to variable specified
    # as var
    # Example for var = "DISTRO_FEATURES"
    # This construct allows the addition of new features to DISTRO_FEATURES
    # that if not present would disable existing functionality, without
    # disturbing distributions that have already set DISTRO_FEATURES.
    # Distributions wanting to elide a value in DISTRO_FEATURES_BACKFILL should
    # add the feature to DISTRO_FEATURES_BACKFILL_CONSIDERED
    features = (d.getVar(var, True) or "").split()
    backfill = (d.getVar(var+"_BACKFILL", True) or "").split()
    considered = (d.getVar(var+"_BACKFILL_CONSIDERED", True) or "").split()

    # Collect backfill candidates not already present and not explicitly
    # opted out via the _BACKFILL_CONSIDERED list.
    addfeatures = []
    for feature in backfill:
        if feature not in features and feature not in considered:
            addfeatures.append(feature)

    # Append in one go so the variable is only modified when needed.
    if addfeatures:
        d.appendVar(var, " " + " ".join(addfeatures))
| 119 | |||
| 120 | |||
def packages_filter_out_system(d):
    """
    Return a list of packages from PACKAGES with the "system" packages such as
    PN-dbg PN-doc PN-locale-en-gb removed.
    """
    pn = d.getVar('PN', True)
    # Build a real list rather than using map(): on Python 3 map() returns a
    # one-shot iterator, and the repeated 'in' tests below would silently
    # consume it after the first loop iteration.
    blacklist = [pn + suffix for suffix in ('', '-dbg', '-dev', '-doc', '-locale', '-staticdev')]
    localepkg = pn + "-locale-"
    pkgs = []

    for pkg in d.getVar('PACKAGES', True).split():
        # Drop exact blacklist matches and any per-language locale package.
        if pkg not in blacklist and localepkg not in pkg:
            pkgs.append(pkg)
    return pkgs
| 135 | |||
def getstatusoutput(cmd):
    # Thin wrapper over commands.getstatusoutput (Python 2) or
    # subprocess.getstatusoutput (Python 3), per the try-import at the top
    # of this file; returns (exitstatus, output).
    return cmdstatus.getstatusoutput(cmd)
| 138 | |||
| 139 | |||
def trim_version(version, num_parts=2):
    """
    Return just the first <num_parts> of <version>, split by periods. For
    example, trim_version("1.2.3", 2) will return "1.2".

    Raises TypeError when version is not a string and ValueError when
    num_parts is less than 1.
    """
    # isinstance is the idiomatic type check (and accepts str subclasses,
    # which the original 'type(...) is not str' rejected).
    if not isinstance(version, str):
        raise TypeError("Version should be a string")
    if num_parts < 1:
        raise ValueError("Cannot split to parts < 1")

    return ".".join(version.split(".")[:num_parts])
| 153 | |||
def cpu_count():
    """Return the number of CPUs reported by the multiprocessing module."""
    from multiprocessing import cpu_count as _count
    return _count()
| 157 | |||
def execute_pre_post_process(d, cmds):
    # Run each ';'-separated function name from cmds via bb.build.exec_func
    # against the datastore d; None means nothing to execute.
    if cmds is None:
        return

    for cmd in cmds.strip().split(';'):
        cmd = cmd.strip()
        # Skip empty entries produced by stray/trailing semicolons.
        if cmd != '':
            bb.note("Executing %s ..." % cmd)
            bb.build.exec_func(cmd, d)
diff --git a/meta/lib/oeqa/__init__.py b/meta/lib/oeqa/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/meta/lib/oeqa/__init__.py | |||
diff --git a/meta/lib/oeqa/controllers/__init__.py b/meta/lib/oeqa/controllers/__init__.py new file mode 100644 index 0000000000..8eda92763c --- /dev/null +++ b/meta/lib/oeqa/controllers/__init__.py | |||
| @@ -0,0 +1,3 @@ | |||
| 1 | # Enable other layers to have modules in the same named directory | ||
| 2 | from pkgutil import extend_path | ||
| 3 | __path__ = extend_path(__path__, __name__) | ||
diff --git a/meta/lib/oeqa/controllers/masterimage.py b/meta/lib/oeqa/controllers/masterimage.py new file mode 100644 index 0000000000..188c630bcd --- /dev/null +++ b/meta/lib/oeqa/controllers/masterimage.py | |||
| @@ -0,0 +1,133 @@ | |||
| 1 | import os | ||
| 2 | import bb | ||
| 3 | import traceback | ||
| 4 | import time | ||
| 5 | |||
| 6 | import oeqa.targetcontrol | ||
| 7 | import oeqa.utils.sshcontrol as sshcontrol | ||
| 8 | import oeqa.utils.commands as commands | ||
| 9 | |||
class GummibootTarget(oeqa.targetcontrol.SimpleRemoteTarget):
    """Test-target controller for a remote EFI/gummiboot machine running a
    'master' image: deploys a test rootfs+kernel over ssh, flips the
    LoaderEntryOneShot EFI variable to boot the 'test' entry, and can drive
    an external power-control command."""

    def __init__(self, d):
        # let our base class do the ip thing
        super(GummibootTarget, self).__init__(d)

        # test rootfs + kernel
        self.rootfs = os.path.join(d.getVar("DEPLOY_DIR_IMAGE", True), d.getVar("IMAGE_LINK_NAME", True) + '.tar.gz')
        # NOTE(review): no expand flag on this getVar, unlike the calls
        # above -- confirm KERNEL_IMAGETYPE never needs expansion here.
        self.kernel = os.path.join(d.getVar("DEPLOY_DIR_IMAGE", True), d.getVar("KERNEL_IMAGETYPE"))
        if not os.path.isfile(self.rootfs):
            # we could've checked that IMAGE_FSTYPES contains tar.gz but the config for running testimage might not be
            # the same as the config with which the image was build, ie
            # you bitbake core-image-sato with IMAGE_FSTYPES += "tar.gz"
            # and your autobuilder overwrites the config, adds the test bits and runs bitbake core-image-sato -c testimage
            bb.fatal("No rootfs found. Did you build the image ?\nIf yes, did you build it with IMAGE_FSTYPES += \"tar.gz\" ? \
\nExpected path: %s" % self.rootfs)
        if not os.path.isfile(self.kernel):
            bb.fatal("No kernel found. Expected path: %s" % self.kernel)

        # if the user knows what he's doing, then by all means...
        # test-rootfs.tar.gz and test-kernel are hardcoded names in other places
        # they really have to be used like that in commands though
        cmds = d.getVar("TEST_DEPLOY_CMDS", True)

        # this the value we need to set in the LoaderEntryOneShot EFI variable
        # so the system boots the 'test' bootloader label and not the default
        # The first four bytes are EFI bits, and the rest is an utf-16le string
        # (EFI vars values need to be utf-16)
        # $ echo -en "test\0" | iconv -f ascii -t utf-16le | hexdump -C
        # 00000000 74 00 65 00 73 00 74 00 00 00 |t.e.s.t...|
        self.efivarvalue = r'\x07\x00\x00\x00\x74\x00\x65\x00\x73\x00\x74\x00\x00\x00'

        if cmds:
            # User-supplied deploy steps, one shell command per line.
            self.deploy_cmds = cmds.split("\n")
        else:
            # Default steps: mount boot + test partitions, install the
            # kernel, unpack the rootfs, and arm the one-shot EFI boot entry.
            self.deploy_cmds = [
                'mount -L boot /boot',
                'mkdir -p /mnt/testrootfs',
                'mount -L testrootfs /mnt/testrootfs',
                'modprobe efivarfs',
                'mount -t efivarfs efivarfs /sys/firmware/efi/efivars',
                'cp ~/test-kernel /boot',
                'rm -rf /mnt/testrootfs/*',
                'tar xzvf ~/test-rootfs.tar.gz -C /mnt/testrootfs',
                'printf "%s" > /sys/firmware/efi/efivars/LoaderEntryOneShot-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f' % self.efivarvalue
                ]

        # master ssh connection
        self.master = None

        # this is the name of the command that controls the power for a board
        # e.g: TEST_POWERCONTROL_CMD = "/home/user/myscripts/powercontrol.py ${MACHINE} what-ever-other-args-the-script-wants"
        # the command should take as the last argument "off" and "on" and "cycle" (off, on)
        self.powercontrol_cmd = d.getVar("TEST_POWERCONTROL_CMD", True) or None
        self.powercontrol_args = d.getVar("TEST_POWERCONTROL_EXTRA_ARGS") or ""
        # NOTE(review): this aliases (does not copy) os.environ, so the keys
        # added below leak into the whole process environment -- confirm
        # that is intended.
        self.origenv = os.environ
        if self.powercontrol_cmd:
            if self.powercontrol_args:
                self.powercontrol_cmd = "%s %s" % (self.powercontrol_cmd, self.powercontrol_args)
            # the external script for controlling power might use ssh
            # ssh + keys means we need the original user env
            bborigenv = d.getVar("BB_ORIGENV", False) or {}
            for key in bborigenv:
                val = bborigenv.getVar(key, True)
                if val is not None:
                    self.origenv[key] = str(val)
            self.power_ctl("on")

    def power_ctl(self, msg):
        # Run the external power script with "on"/"off"/"cycle" appended;
        # silently a no-op when TEST_POWERCONTROL_CMD is not configured.
        if self.powercontrol_cmd:
            cmd = "%s %s" % (self.powercontrol_cmd, msg)
            commands.runCmd(cmd, preexec_fn=os.setsid, env=self.origenv)

    def power_cycle(self, conn):
        # Reboot the board: clean shutdown + external power cycle when we
        # have a power command, otherwise a remote "reboot" over ssh.
        if self.powercontrol_cmd:
            # be nice, don't just cut power
            conn.run("shutdown -h now")
            time.sleep(10)
            self.power_ctl("cycle")
        else:
            status, output = conn.run("reboot")
            if status != 0:
                bb.error("Failed rebooting target and no power control command defined. You need to manually reset the device.\n%s" % output)

    def deploy(self):
        # Open the master-image ssh connection and push the test image.
        bb.plain("%s - deploying image on target" % self.pn)
        # base class just sets the ssh log file for us
        super(GummibootTarget, self).deploy()
        self.master = sshcontrol.SSHControl(ip=self.ip, logfile=self.sshlog, timeout=600, port=self.port)
        try:
            self._deploy()
        except Exception as e:
            bb.fatal("Failed deploying test image: %s" % e)

    def _deploy(self):
        # make sure we are in the right image
        status, output = self.master.run("cat /etc/masterimage")
        if status != 0:
            raise Exception("No ssh connectivity or target isn't running a master image.\n%s" % output)

        # make sure these aren't mounted
        self.master.run("umount /boot; umount /mnt/testrootfs; umount /sys/firmware/efi/efivars;")

        # from now on, every deploy cmd should return 0
        # else an exception will be thrown by sshcontrol
        self.master.ignore_status = False
        self.master.copy_to(self.rootfs, "~/test-rootfs.tar.gz")
        self.master.copy_to(self.kernel, "~/test-kernel")
        for cmd in self.deploy_cmds:
            self.master.run(cmd)


    def start(self, params=None):
        # Power-cycle into the freshly deployed test image and open the
        # ssh connection used by the tests.
        bb.plain("%s - boot test image on target" % self.pn)
        self.power_cycle(self.master)
        # there are better ways than a timeout but this should work for now
        time.sleep(120)
        # set the ssh object for the target/test image
        self.connection = sshcontrol.SSHControl(self.ip, logfile=self.sshlog, port=self.port)
        bb.plain("%s - start running tests" % self.pn)

    def stop(self):
        # Leave the board rebooting back into the master image.
        bb.plain("%s - reboot/powercycle target" % self.pn)
        self.power_cycle(self.connection)
diff --git a/meta/lib/oeqa/controllers/testtargetloader.py b/meta/lib/oeqa/controllers/testtargetloader.py new file mode 100644 index 0000000000..019bbfd840 --- /dev/null +++ b/meta/lib/oeqa/controllers/testtargetloader.py | |||
| @@ -0,0 +1,69 @@ | |||
| 1 | import types | ||
| 2 | import bb | ||
| 3 | |||
# This class is responsible for loading a test target controller
class TestTargetLoader:

    # Search oeqa.controllers module directory for and return a controller
    # corresponding to the given target name.
    # AttributeError raised if not found.
    # ImportError raised if a provided module can not be imported.
    def get_controller_module(self, target, bbpath):
        controllerslist = self.get_controller_modulenames(bbpath)
        bb.note("Available controller modules: %s" % str(controllerslist))
        controller = self.load_controller_from_name(target, controllerslist)
        return controller

    # Return a list of all python modules in lib/oeqa/controllers for each
    # layer in bbpath
    def get_controller_modulenames(self, bbpath):
        # 'os' is used below but was never imported at module level; import
        # it locally so the lookup works without touching the file header.
        import os

        controllerslist = []

        def add_controller_list(path):
            if not os.path.exists(os.path.join(path, '__init__.py')):
                bb.fatal('Controllers directory %s exists but is missing __init__.py' % path)
            files = sorted([f for f in os.listdir(path) if f.endswith('.py') and not f.startswith('_')])
            for f in files:
                module = 'oeqa.controllers.' + f[:-3]
                if module not in controllerslist:
                    controllerslist.append(module)
                else:
                    bb.warn("Duplicate controller module found for %s, only one added. Layers should create unique controller module names" % module)

        for p in bbpath:
            controllerpath = os.path.join(p, 'lib', 'oeqa', 'controllers')
            bb.debug(2, 'Searching for target controllers in %s' % controllerpath)
            if os.path.exists(controllerpath):
                add_controller_list(controllerpath)
        return controllerslist

    # Search for and return a controller from given target name and
    # set of module names.
    # Raise AttributeError if not found.
    # Raise ImportError if a provided module can not be imported
    def load_controller_from_name(self, target, modulenames):
        for name in modulenames:
            obj = self.load_controller_from_module(target, name)
            if obj:
                return obj
        raise AttributeError("Unable to load {0} from available modules: {1}".format(target, str(modulenames)))

    # Search for and return a controller or None from given module name
    def load_controller_from_module(self, target, modulename):
        obj = None
        # import module, allowing it to raise import exception
        module = __import__(modulename, globals(), locals(), [target])
        # look for target class in the module, catching any exceptions as it
        # is valid that a module may not have the target class. Use
        # 'except Exception' rather than a bare except so that
        # KeyboardInterrupt/SystemExit are not swallowed.
        try:
            obj = getattr(module, target)
            if obj:
                from oeqa.targetcontrol import BaseTarget
                if (not isinstance(obj, (type, types.ClassType))):
                    bb.warn("Target {0} found, but not of type Class".format(target))
                if( not issubclass(obj, BaseTarget)):
                    bb.warn("Target {0} found, but subclass is not BaseTarget".format(target))
        except Exception:
            obj = None
        return obj
diff --git a/meta/lib/oeqa/oetest.py b/meta/lib/oeqa/oetest.py new file mode 100644 index 0000000000..0db6cb80a9 --- /dev/null +++ b/meta/lib/oeqa/oetest.py | |||
| @@ -0,0 +1,107 @@ | |||
| 1 | # Copyright (C) 2013 Intel Corporation | ||
| 2 | # | ||
| 3 | # Released under the MIT license (see COPYING.MIT) | ||
| 4 | |||
| 5 | # Main unittest module used by testimage.bbclass | ||
| 6 | # This provides the oeRuntimeTest base class which is inherited by all tests in meta/lib/oeqa/runtime. | ||
| 7 | |||
| 8 | # It also has some helper functions and it's responsible for actually starting the tests | ||
| 9 | |||
| 10 | import os, re, mmap | ||
| 11 | import unittest | ||
| 12 | import inspect | ||
| 13 | |||
| 14 | |||
def loadTests(tc):
    """Build a unittest TestSuite from the module names in tc.testslist."""

    # set the context object passed from the test class
    setattr(oeTest, "tc", tc)
    # set ps command to use
    setattr(oeRuntimeTest, "pscmd", "ps -ef" if oeTest.hasPackage("procps") else "ps")
    # prepare test suite, loader and runner
    suite = unittest.TestSuite()
    testloader = unittest.TestLoader()
    # keep test methods in definition order rather than alphabetical
    testloader.sortTestMethodsUsing = None
    suite = testloader.loadTestsFromNames(tc.testslist)

    return suite
| 28 | |||
def runTests(tc):
    """Load all tests from the context and run them; return the TestResult."""

    suite = loadTests(tc)
    print("Test modules %s" % tc.testslist)
    print("Found %s tests" % suite.countTestCases())
    runner = unittest.TextTestRunner(verbosity=2)
    result = runner.run(suite)

    return result
| 38 | |||
| 39 | |||
class oeTest(unittest.TestCase):
    """Base class for all OE QA tests; accumulates failure/skip/error names
    class-wide so decorators can inspect earlier outcomes."""

    longMessage = True
    # Class-level accumulators shared by every test instance in the run.
    testFailures = []
    testSkipped = []
    testErrors = []

    def run(self, result=None):
        super(oeTest, self).run(result)

        # we add to our own lists the results, we use those for decorators
        # (the result lists grew by one exactly when this test produced a
        # new failure/skip/error; record just the test's short name)
        if len(result.failures) > len(oeTest.testFailures):
            oeTest.testFailures.append(str(result.failures[-1][0]).split()[0])
        if len(result.skipped) > len(oeTest.testSkipped):
            oeTest.testSkipped.append(str(result.skipped[-1][0]).split()[0])
        if len(result.errors) > len(oeTest.testErrors):
            oeTest.testErrors.append(str(result.errors[-1][0]).split()[0])

    @classmethod
    def hasPackage(self, pkg):
        # NOTE(review): pkg is treated as a regex and searched anywhere in
        # the manifest text, so it can match substrings of other package
        # names -- confirm callers pass sufficiently specific patterns.
        if re.search(pkg, oeTest.tc.pkgmanifest):
            return True
        return False

    @classmethod
    def hasFeature(self,feature):
        # True when the feature appears in either IMAGE_FEATURES or
        # DISTRO_FEATURES as captured on the shared test context.
        if feature in oeTest.tc.imagefeatures or \
                feature in oeTest.tc.distrofeatures:
            return True
        else:
            return False
| 73 | |||
| 74 | |||
class oeRuntimeTest(oeTest):
    """Base class for runtime (on-target) tests; binds the target connection
    from the shared test context onto each test instance."""

    def __init__(self, methodName='runTest'):
        # tc is attached to oeTest by loadTests() before instantiation.
        self.target = oeRuntimeTest.tc.target
        super(oeRuntimeTest, self).__init__(methodName)
| 80 | |||
| 81 | |||
def getmodule(pos=2):
    """Return the module name of the frame *pos* levels up the call stack."""
    # inspect.stack() returns frame records: entry 0 is this frame, entry 1
    # its caller, and so on; element [1] of a record is the source filename.
    frameinfo = inspect.stack()[pos]
    return inspect.getmodulename(frameinfo[1])
| 89 | |||
def skipModule(reason, pos=2):
    """Skip the calling test module, unless it was explicitly required.

    When the module is listed in TEST_SUITES, skipping would hide a real
    problem, so a hard Exception is raised instead of a SkipTest.
    """
    modname = getmodule(pos)
    if modname not in oeTest.tc.testsrequired:
        raise unittest.SkipTest("%s: %s" % (modname, reason))
    else:
        raise Exception("\nTest %s wants to be skipped.\nReason is: %s" \
                "\nTest was required in TEST_SUITES, so either the condition for skipping is wrong" \
                "\nor the image really doesn't have the required feature/package when it should." % (modname, reason))
| 98 | |||
def skipModuleIf(cond, reason):
    """Skip the calling module when cond is true (see skipModule)."""

    if cond:
        # pos=3: report the module of our caller, not this helper.
        skipModule(reason, 3)
| 103 | |||
def skipModuleUnless(cond, reason):
    """Skip the calling module when cond is false (see skipModule)."""

    if not cond:
        # pos=3: report the module of our caller, not this helper.
        skipModule(reason, 3)
diff --git a/meta/lib/oeqa/runexported.py b/meta/lib/oeqa/runexported.py new file mode 100755 index 0000000000..e1b6642ec2 --- /dev/null +++ b/meta/lib/oeqa/runexported.py | |||
| @@ -0,0 +1,140 @@ | |||
| 1 | #!/usr/bin/env python | ||
| 2 | |||
| 3 | |||
| 4 | # Copyright (C) 2013 Intel Corporation | ||
| 5 | # | ||
| 6 | # Released under the MIT license (see COPYING.MIT) | ||
| 7 | |||
| 8 | # This script should be used outside of the build system to run image tests. | ||
| 9 | # It needs a json file as input as exported by the build. | ||
| 10 | # E.g for an already built image: | ||
| 11 | #- export the tests: | ||
| 12 | # TEST_EXPORT_ONLY = "1" | ||
| 13 | # TEST_TARGET = "simpleremote" | ||
| 14 | # TEST_TARGET_IP = "192.168.7.2" | ||
| 15 | # TEST_SERVER_IP = "192.168.7.1" | ||
| 16 | # bitbake core-image-sato -c testimage | ||
| 17 | # Setup your target, e.g for qemu: runqemu core-image-sato | ||
| 18 | # cd build/tmp/testimage/core-image-sato | ||
| 19 | # ./runexported.py testdata.json | ||
| 20 | |||
| 21 | import sys | ||
| 22 | import os | ||
| 23 | import time | ||
| 24 | from optparse import OptionParser | ||
| 25 | |||
| 26 | try: | ||
| 27 | import simplejson as json | ||
| 28 | except ImportError: | ||
| 29 | import json | ||
| 30 | |||
| 31 | sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "oeqa"))) | ||
| 32 | |||
| 33 | from oeqa.oetest import runTests | ||
| 34 | from oeqa.utils.sshcontrol import SSHControl | ||
| 35 | |||
| 36 | # this isn't pretty but we need a fake target object | ||
| 37 | # for running the tests externally as we don't care | ||
| 38 | # about deploy/start we only care about the connection methods (run, copy) | ||
class FakeTarget(object):
    """Minimal target stand-in for running exported tests outside the build:
    no deploy/start, only the ssh connection methods (run/copy)."""

    def __init__(self, d):
        # ssh connection, created later by exportStart()
        self.connection = None
        # ip/server_ip get filled in from the exported json by main()
        self.ip = None
        self.server_ip = None
        # timestamp giving each run's ssh log a unique name
        self.datetime = time.strftime('%Y%m%d%H%M%S',time.gmtime())
        self.testdir = d.getVar("TEST_LOG_DIR", True)
        self.pn = d.getVar("PN", True)

    def exportStart(self):
        # Create the per-run ssh log, refresh the convenience symlink, and
        # open the ssh control connection.
        self.sshlog = os.path.join(self.testdir, "ssh_target_log.%s" % self.datetime)
        sshloglink = os.path.join(self.testdir, "ssh_target_log")
        if os.path.islink(sshloglink):
            os.unlink(sshloglink)
        os.symlink(self.sshlog, sshloglink)
        print("SSH log file: %s" % self.sshlog)
        self.connection = SSHControl(self.ip, logfile=self.sshlog)

    def run(self, cmd, timeout=None):
        # Delegate to the ssh connection; returns (status, output).
        return self.connection.run(cmd, timeout)

    def copy_to(self, localpath, remotepath):
        return self.connection.copy_to(localpath, remotepath)

    def copy_from(self, remotepath, localpath):
        return self.connection.copy_from(remotepath, localpath)
| 65 | |||
| 66 | |||
class MyDataDict(dict):
    """Minimal stand-in for bitbake's datastore: getVar is a plain
    dictionary lookup that returns "" for missing keys."""

    def getVar(self, key, unused = None):
        # 'unused' mirrors the expand flag of the real getVar signature.
        return self.get(key, "")
| 70 | |||
class TestContext(object):
    """Bag of attributes handed to the test runner (datastore + target);
    main() attaches everything else from the exported json."""

    def __init__(self):
        self.d = None
        self.target = None
| 75 | |||
def main():
    """Parse options and the exported json file, rebuild a fake datastore
    and target, then run the exported tests. Returns 0 on success."""

    usage = "usage: %prog [options] <json file>"
    parser = OptionParser(usage=usage)
    parser.add_option("-t", "--target-ip", dest="ip", help="The IP address of the target machine. Use this to \
overwrite the value determined from TEST_TARGET_IP at build time")
    parser.add_option("-s", "--server-ip", dest="server_ip", help="The IP address of this machine. Use this to \
overwrite the value determined from TEST_SERVER_IP at build time.")
    parser.add_option("-d", "--deploy-dir", dest="deploy_dir", help="Full path to the package feeds, that this \
the contents of what used to be DEPLOY_DIR on the build machine. If not specified it will use the value \
specified in the json if that directory actually exists or it will error out.")
    parser.add_option("-l", "--log-dir", dest="log_dir", help="This sets the path for TEST_LOG_DIR. If not specified \
the current dir is used. This is used for usually creating a ssh log file and a scp test file.")

    (options, args) = parser.parse_args()
    if len(args) != 1:
        parser.error("Incorrect number of arguments. The one and only argument should be a json file exported by the build system")

    with open(args[0], "r") as f:
        loaded = json.load(f)

    # Command-line IPs override the values captured at build time.
    if options.ip:
        loaded["target"]["ip"] = options.ip
    if options.server_ip:
        loaded["target"]["server_ip"] = options.server_ip

    # Rebuild the (fake) datastore from the exported variables.
    d = MyDataDict()
    for key in loaded["d"].keys():
        d[key] = loaded["d"][key]

    if options.log_dir:
        d["TEST_LOG_DIR"] = options.log_dir
    else:
        # Default the log dir to the directory containing this script.
        d["TEST_LOG_DIR"] = os.path.abspath(os.path.dirname(__file__))
    if options.deploy_dir:
        d["DEPLOY_DIR"] = options.deploy_dir
    else:
        if not os.path.isdir(d["DEPLOY_DIR"]):
            raise Exception("The path to DEPLOY_DIR does not exists: %s" % d["DEPLOY_DIR"])


    # Rebuild the fake target from the exported attributes.
    target = FakeTarget(d)
    for key in loaded["target"].keys():
        setattr(target, key, loaded["target"][key])

    # Everything else in the json becomes a test-context attribute.
    tc = TestContext()
    setattr(tc, "d", d)
    setattr(tc, "target", target)
    for key in loaded.keys():
        if key != "d" and key != "target":
            setattr(tc, key, loaded[key])

    target.exportStart()
    runTests(tc)

    return 0
| 132 | |||
if __name__ == "__main__":
    # Run the exported test suite; any uncaught error is reported and
    # turned into a non-zero exit status.
    exit_code = 0
    try:
        exit_code = main()
    except Exception:
        import traceback
        traceback.print_exc(5)
        exit_code = 1
    sys.exit(exit_code)
diff --git a/meta/lib/oeqa/runtime/__init__.py b/meta/lib/oeqa/runtime/__init__.py new file mode 100644 index 0000000000..4cf3fa76b6 --- /dev/null +++ b/meta/lib/oeqa/runtime/__init__.py | |||
| @@ -0,0 +1,3 @@ | |||
| 1 | # Enable other layers to have tests in the same named directory | ||
| 2 | from pkgutil import extend_path | ||
| 3 | __path__ = extend_path(__path__, __name__) | ||
diff --git a/meta/lib/oeqa/runtime/buildcvs.py b/meta/lib/oeqa/runtime/buildcvs.py new file mode 100644 index 0000000000..f1fbf19c1f --- /dev/null +++ b/meta/lib/oeqa/runtime/buildcvs.py | |||
| @@ -0,0 +1,30 @@ | |||
| 1 | from oeqa.oetest import oeRuntimeTest | ||
| 2 | from oeqa.utils.decorators import * | ||
| 3 | from oeqa.utils.targetbuild import TargetBuildProject | ||
| 4 | |||
def setUpModule():
    """Skip every test in this module unless the image ships an on-target toolchain."""
    # skipModule lives in oeqa.oetest but this file only imports oeRuntimeTest
    # from it, so calling skipModule would raise a NameError; import it here.
    from oeqa.oetest import skipModule
    if not oeRuntimeTest.hasFeature("tools-sdk"):
        skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES")
| 8 | |||
class BuildCvsTest(oeRuntimeTest):
    """Build cvs from source on the target to exercise the on-target toolchain."""

    @classmethod
    def setUpClass(cls):
        # classmethods conventionally take cls, not self (PEP 8)
        cls.project = TargetBuildProject(oeRuntimeTest.tc.target, oeRuntimeTest.tc.d,
                        "http://ftp.gnu.org/non-gnu/cvs/source/feature/1.12.13/cvs-1.12.13.tar.bz2")
        cls.project.download_archive()

    @skipUnlessPassed("test_ssh")
    def test_cvs(self):
        # configure/make/make install must all succeed on the target
        self.assertEqual(self.project.run_configure(), 0,
                        msg="Running configure failed")

        self.assertEqual(self.project.run_make(), 0,
                        msg="Running make failed")

        self.assertEqual(self.project.run_install(), 0,
                        msg="Running make install failed")

    @classmethod
    def tearDownClass(cls):
        cls.project.clean()
diff --git a/meta/lib/oeqa/runtime/buildiptables.py b/meta/lib/oeqa/runtime/buildiptables.py new file mode 100644 index 0000000000..f6061a7f98 --- /dev/null +++ b/meta/lib/oeqa/runtime/buildiptables.py | |||
| @@ -0,0 +1,30 @@ | |||
| 1 | from oeqa.oetest import oeRuntimeTest | ||
| 2 | from oeqa.utils.decorators import * | ||
| 3 | from oeqa.utils.targetbuild import TargetBuildProject | ||
| 4 | |||
def setUpModule():
    """Skip every test in this module unless the image ships an on-target toolchain."""
    # skipModule lives in oeqa.oetest but this file only imports oeRuntimeTest
    # from it, so calling skipModule would raise a NameError; import it here.
    from oeqa.oetest import skipModule
    if not oeRuntimeTest.hasFeature("tools-sdk"):
        skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES")
| 8 | |||
class BuildIptablesTest(oeRuntimeTest):
    """Build iptables from source on the target to exercise the on-target toolchain."""

    @classmethod
    def setUpClass(cls):
        # classmethods conventionally take cls, not self (PEP 8)
        cls.project = TargetBuildProject(oeRuntimeTest.tc.target, oeRuntimeTest.tc.d,
                        "http://netfilter.org/projects/iptables/files/iptables-1.4.13.tar.bz2")
        cls.project.download_archive()

    @skipUnlessPassed("test_ssh")
    def test_iptables(self):
        # configure/make/make install must all succeed on the target
        self.assertEqual(self.project.run_configure(), 0,
                        msg="Running configure failed")

        self.assertEqual(self.project.run_make(), 0,
                        msg="Running make failed")

        self.assertEqual(self.project.run_install(), 0,
                        msg="Running make install failed")

    @classmethod
    def tearDownClass(cls):
        cls.project.clean()
diff --git a/meta/lib/oeqa/runtime/buildsudoku.py b/meta/lib/oeqa/runtime/buildsudoku.py new file mode 100644 index 0000000000..a754f1d9ea --- /dev/null +++ b/meta/lib/oeqa/runtime/buildsudoku.py | |||
| @@ -0,0 +1,27 @@ | |||
| 1 | from oeqa.oetest import oeRuntimeTest | ||
| 2 | from oeqa.utils.decorators import * | ||
| 3 | from oeqa.utils.targetbuild import TargetBuildProject | ||
| 4 | |||
def setUpModule():
    """Skip every test in this module unless the image ships an on-target toolchain."""
    # skipModule lives in oeqa.oetest but this file only imports oeRuntimeTest
    # from it, so calling skipModule would raise a NameError; import it here.
    from oeqa.oetest import skipModule
    if not oeRuntimeTest.hasFeature("tools-sdk"):
        skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES")
| 8 | |||
class SudokuTest(oeRuntimeTest):
    """Build sudoku-savant from source on the target (configure + make only)."""

    @classmethod
    def setUpClass(cls):
        # classmethods conventionally take cls, not self (PEP 8)
        cls.project = TargetBuildProject(oeRuntimeTest.tc.target, oeRuntimeTest.tc.d,
                        "http://downloads.sourceforge.net/project/sudoku-savant/sudoku-savant/sudoku-savant-1.3/sudoku-savant-1.3.tar.bz2")
        cls.project.download_archive()

    @skipUnlessPassed("test_ssh")
    def test_sudoku(self):
        self.assertEqual(self.project.run_configure(), 0,
                        msg="Running configure failed")

        self.assertEqual(self.project.run_make(), 0,
                        msg="Running make failed")

    @classmethod
    def tearDownClass(cls):
        cls.project.clean()
diff --git a/meta/lib/oeqa/runtime/connman.py b/meta/lib/oeqa/runtime/connman.py new file mode 100644 index 0000000000..c03688206f --- /dev/null +++ b/meta/lib/oeqa/runtime/connman.py | |||
| @@ -0,0 +1,30 @@ | |||
| 1 | import unittest | ||
| 2 | from oeqa.oetest import oeRuntimeTest, skipModule | ||
| 3 | from oeqa.utils.decorators import * | ||
| 4 | |||
def setUpModule():
    """Skip the whole module when the image does not ship connman."""
    if oeRuntimeTest.hasPackage("connman"):
        return
    skipModule("No connman package in image")
| 8 | |||
| 9 | |||
| 10 | class ConnmanTest(oeRuntimeTest): | ||
| 11 | |||
| 12 | def service_status(self, service): | ||
| 13 | if oeRuntimeTest.hasFeature("systemd"): | ||
| 14 | (status, output) = self.target.run('systemctl status -l %s' % service) | ||
| 15 | return output | ||
| 16 | else: | ||
| 17 | return "Unable to get status or logs for %s" % service | ||
| 18 | |||
| 19 | @skipUnlessPassed('test_ssh') | ||
| 20 | def test_connmand_help(self): | ||
| 21 | (status, output) = self.target.run('/usr/sbin/connmand --help') | ||
| 22 | self.assertEqual(status, 0, msg="status and output: %s and %s" % (status,output)) | ||
| 23 | |||
| 24 | |||
| 25 | @skipUnlessPassed('test_connmand_help') | ||
| 26 | def test_connmand_running(self): | ||
| 27 | (status, output) = self.target.run(oeRuntimeTest.pscmd + ' | grep [c]onnmand') | ||
| 28 | if status != 0: | ||
| 29 | print self.service_status("connman") | ||
| 30 | self.fail("No connmand process running") | ||
diff --git a/meta/lib/oeqa/runtime/date.py b/meta/lib/oeqa/runtime/date.py new file mode 100644 index 0000000000..a208e29ada --- /dev/null +++ b/meta/lib/oeqa/runtime/date.py | |||
| @@ -0,0 +1,22 @@ | |||
| 1 | from oeqa.oetest import oeRuntimeTest | ||
| 2 | from oeqa.utils.decorators import * | ||
| 3 | import re | ||
| 4 | |||
class DateTest(oeRuntimeTest):
    """Verify the target's date can be read, set to a known value, and restored."""

    @skipUnlessPassed("test_ssh")
    def test_date(self):
        # remember the current date so it can be restored afterwards
        (status, output) = self.target.run('date +"%Y-%m-%d %T"')
        self.assertEqual(status, 0, msg="Failed to get initial date, output: %s" % output)
        saved_date = output

        # set a fixed, known date...
        fixed_date = '"2016-08-09 10:00:00"'
        (status, output) = self.target.run("date -s %s" % fixed_date)
        self.assertEqual(status, 0, msg="Date set failed, output: %s" % output)

        # ...and confirm it took effect (seconds are allowed to tick)
        (status, output) = self.target.run("date -R")
        matched = re.match('Tue, 09 Aug 2016 10:00:.. \+0000', output)
        self.assertTrue(matched, msg="The date was not set correctly, output: %s" % output)

        (status, output) = self.target.run('date -s "%s"' % saved_date)
        self.assertEqual(status, 0, msg="Failed to reset date, output: %s" % output)
diff --git a/meta/lib/oeqa/runtime/df.py b/meta/lib/oeqa/runtime/df.py new file mode 100644 index 0000000000..b6da35027c --- /dev/null +++ b/meta/lib/oeqa/runtime/df.py | |||
| @@ -0,0 +1,11 @@ | |||
| 1 | import unittest | ||
| 2 | from oeqa.oetest import oeRuntimeTest | ||
| 3 | from oeqa.utils.decorators import * | ||
| 4 | |||
| 5 | |||
class DfTest(oeRuntimeTest):
    """Check the root filesystem still has free space left."""

    @skipUnlessPassed("test_ssh")
    def test_df(self):
        # column 4 of df's second line is the available space in KiB
        free_kb = self.target.run("df / | sed -n '2p' | awk '{print $4}'")[1]
        self.assertTrue(int(free_kb)>5120, msg="Not enough space on image. Current size is %s" % free_kb)
diff --git a/meta/lib/oeqa/runtime/dmesg.py b/meta/lib/oeqa/runtime/dmesg.py new file mode 100644 index 0000000000..64247ea704 --- /dev/null +++ b/meta/lib/oeqa/runtime/dmesg.py | |||
| @@ -0,0 +1,11 @@ | |||
| 1 | import unittest | ||
| 2 | from oeqa.oetest import oeRuntimeTest | ||
| 3 | from oeqa.utils.decorators import * | ||
| 4 | |||
| 5 | |||
class DmesgTest(oeRuntimeTest):
    """Scan the kernel log for unexpected error messages."""

    @skipUnlessPassed('test_ssh')
    def test_dmesg(self):
        # known-harmless messages are filtered out first; the final
        # "grep -i error" exits 1 when nothing is left, which is success here
        scan_cmd = 'dmesg | grep -v mmci-pl18x | grep -v "error changing net interface name" | grep -iv "dma timeout" | grep -i error'
        (status, output) = self.target.run(scan_cmd)
        self.assertEqual(status, 1, msg = "Error messages in dmesg log: %s" % output)
diff --git a/meta/lib/oeqa/runtime/files/hellomod.c b/meta/lib/oeqa/runtime/files/hellomod.c new file mode 100644 index 0000000000..a383397e93 --- /dev/null +++ b/meta/lib/oeqa/runtime/files/hellomod.c | |||
| @@ -0,0 +1,19 @@ | |||
/*
 * Minimal "hello world" Linux kernel module. Built on the target by the
 * oeqa kernelmodule runtime test to prove the image can compile, insert
 * and remove modules; the test greps dmesg for the printk strings below.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>

/* insmod entry point: log a greeting and report success */
static int __init hello_init(void)
{
	printk(KERN_INFO "Hello world!\n");
	return 0;
}

/* rmmod exit point: log the cleanup message the test greps for */
static void __exit hello_cleanup(void)
{
	printk(KERN_INFO "Cleaning up hellomod.\n");
}

module_init(hello_init);
module_exit(hello_cleanup);

MODULE_LICENSE("GPL");
diff --git a/meta/lib/oeqa/runtime/files/hellomod_makefile b/meta/lib/oeqa/runtime/files/hellomod_makefile new file mode 100644 index 0000000000..b92d5c8fe0 --- /dev/null +++ b/meta/lib/oeqa/runtime/files/hellomod_makefile | |||
| @@ -0,0 +1,8 @@ | |||
# Makefile for the hellomod test kernel module. It is copied to the target
# and built there against the kernel sources installed in /usr/src/kernel.
obj-m := hellomod.o
KDIR := /usr/src/kernel

# delegate the build to the kernel's own build system (kbuild)
all:
	$(MAKE) -C $(KDIR) M=$(PWD) modules

# remove generated objects and module files
clean:
	$(MAKE) -C $(KDIR) M=$(PWD) clean
diff --git a/meta/lib/oeqa/runtime/files/test.c b/meta/lib/oeqa/runtime/files/test.c new file mode 100644 index 0000000000..2d8389c92e --- /dev/null +++ b/meta/lib/oeqa/runtime/files/test.c | |||
| @@ -0,0 +1,26 @@ | |||
| 1 | #include <stdio.h> | ||
| 2 | #include <math.h> | ||
| 3 | #include <stdlib.h> | ||
| 4 | |||
/* Widen a 64-bit integer to double; exercises the toolchain's
 * integer-to-float conversion support. */
double convert(long long l)
{
    double widened = l;  /* implicit conversion, identical to (double)l */
    return widened;
}
| 9 | |||
/* Floating point sanity check run on the target by the gcc runtime test:
 * verifies long long -> double conversion and floorf(). Exits 1 on any
 * mismatch; the caller only checks the exit status. */
int main(int argc, char * argv[]) {

    long long l = 10;
    double f;
    double check = 10.0;

    /* 10 must survive the round trip through convert() exactly */
    f = convert(l);
    printf("convert: %lld => %f\n", l, f);
    if ( f != check ) exit(1);

    /* floorf takes float; the double argument is implicitly narrowed,
       and 1234.67 floors to 1234.0 either way */
    f = 1234.67;
    check = 1234.0;
    printf("floorf(%f) = %f\n", f, floorf(f));
    if ( floorf(f) != check) exit(1);

    return 0;
}
diff --git a/meta/lib/oeqa/runtime/files/test.pl b/meta/lib/oeqa/runtime/files/test.pl new file mode 100644 index 0000000000..689c8f1635 --- /dev/null +++ b/meta/lib/oeqa/runtime/files/test.pl | |||
| @@ -0,0 +1,2 @@ | |||
# Floating point sanity check for the on-target perl interpreter: the two
# large values cancel, so $a must come out as exactly 0.01. The oeqa perl
# runtime test compares the printed line verbatim.
$a = 9.01e+21 - 9.01e+21 + 0.01;
print ("the value of a is ", $a, "\n");
diff --git a/meta/lib/oeqa/runtime/files/test.py b/meta/lib/oeqa/runtime/files/test.py new file mode 100644 index 0000000000..f3a2273c52 --- /dev/null +++ b/meta/lib/oeqa/runtime/files/test.py | |||
| @@ -0,0 +1,6 @@ | |||
# Sanity check script for the on-target python interpreter (Python 2).
# The oeqa python runtime test verifies both the printed line and the
# marker file created below.
import os

# side effect checked by test_python_testfile
os.system('touch /tmp/testfile.python')

# the large values cancel, so a must print as exactly 0.01
a = 9.01e+21 - 9.01e+21 + 0.01
print "the value of a is %s" % a
diff --git a/meta/lib/oeqa/runtime/files/testmakefile b/meta/lib/oeqa/runtime/files/testmakefile new file mode 100644 index 0000000000..ca1844e930 --- /dev/null +++ b/meta/lib/oeqa/runtime/files/testmakefile | |||
| @@ -0,0 +1,5 @@ | |||
# Minimal makefile used by the gcc runtime test's test_make: builds the
# floating point sanity check from test.c, linking libm for floorf().
test: test.o
	gcc -o test test.o -lm
test.o: test.c
	gcc -c test.c
| 5 | |||
diff --git a/meta/lib/oeqa/runtime/gcc.py b/meta/lib/oeqa/runtime/gcc.py new file mode 100644 index 0000000000..b63badd3e4 --- /dev/null +++ b/meta/lib/oeqa/runtime/gcc.py | |||
| @@ -0,0 +1,36 @@ | |||
| 1 | import unittest | ||
| 2 | import os | ||
| 3 | from oeqa.oetest import oeRuntimeTest, skipModule | ||
| 4 | from oeqa.utils.decorators import * | ||
| 5 | |||
def setUpModule():
    """Skip the module unless the image provides an on-target toolchain."""
    if oeRuntimeTest.hasFeature("tools-sdk"):
        return
    skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES")
| 9 | |||
| 10 | |||
class GccCompileTest(oeRuntimeTest):
    """Compile and run a small C program on the target with gcc, g++ and make."""

    @classmethod
    def setUpClass(cls):
        # classmethods conventionally take cls, not self (PEP 8)
        oeRuntimeTest.tc.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "test.c"), "/tmp/test.c")
        oeRuntimeTest.tc.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "testmakefile"), "/tmp/testmakefile")

    def test_gcc_compile(self):
        (status, output) = self.target.run('gcc /tmp/test.c -o /tmp/test -lm')
        self.assertEqual(status, 0, msg="gcc compile failed, output: %s" % output)
        # the compiled program exits non-zero on FP miscompilation
        (status, output) = self.target.run('/tmp/test')
        self.assertEqual(status, 0, msg="running compiled file failed, output %s" % output)

    def test_gpp_compile(self):
        (status, output) = self.target.run('g++ /tmp/test.c -o /tmp/test -lm')
        self.assertEqual(status, 0, msg="g++ compile failed, output: %s" % output)
        (status, output) = self.target.run('/tmp/test')
        self.assertEqual(status, 0, msg="running compiled file failed, output %s" % output)

    def test_make(self):
        (status, output) = self.target.run('cd /tmp; make -f testmakefile')
        self.assertEqual(status, 0, msg="running make failed, output %s" % output)

    @classmethod
    def tearDownClass(cls):
        oeRuntimeTest.tc.target.run("rm /tmp/test.c /tmp/test.o /tmp/test /tmp/testmakefile")
diff --git a/meta/lib/oeqa/runtime/kernelmodule.py b/meta/lib/oeqa/runtime/kernelmodule.py new file mode 100644 index 0000000000..cbc5742eff --- /dev/null +++ b/meta/lib/oeqa/runtime/kernelmodule.py | |||
| @@ -0,0 +1,33 @@ | |||
| 1 | import unittest | ||
| 2 | import os | ||
| 3 | from oeqa.oetest import oeRuntimeTest, skipModule | ||
| 4 | from oeqa.utils.decorators import * | ||
| 5 | |||
def setUpModule():
    """Skip the module unless the image provides an on-target toolchain."""
    if oeRuntimeTest.hasFeature("tools-sdk"):
        return
    skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES")
| 9 | |||
| 10 | |||
class KernelModuleTest(oeRuntimeTest):
    """Build, insert and remove a hello-world kernel module on the target."""

    def setUp(self):
        self.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "hellomod.c"), "/tmp/hellomod.c")
        self.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "hellomod_makefile"), "/tmp/Makefile")

    @skipUnlessPassed('test_ssh')
    @skipUnlessPassed('test_gcc_compile')
    def test_kernel_module(self):
        # every step must succeed; the 900s timeout covers the module build
        for command in ('cd /usr/src/kernel && make scripts',
                        'cd /tmp && make',
                        'cd /tmp && insmod hellomod.ko',
                        'lsmod | grep hellomod',
                        'dmesg | grep Hello',
                        'rmmod hellomod',
                        'dmesg | grep "Cleaning up hellomod"'):
            (status, output) = self.target.run(command, 900)
            self.assertEqual(status, 0, msg="\n".join([command, output]))

    def tearDown(self):
        self.target.run('rm -f /tmp/Makefile /tmp/hellomod.c')
diff --git a/meta/lib/oeqa/runtime/ldd.py b/meta/lib/oeqa/runtime/ldd.py new file mode 100644 index 0000000000..4374530fc4 --- /dev/null +++ b/meta/lib/oeqa/runtime/ldd.py | |||
| @@ -0,0 +1,19 @@ | |||
| 1 | import unittest | ||
| 2 | from oeqa.oetest import oeRuntimeTest | ||
| 3 | from oeqa.utils.decorators import * | ||
| 4 | |||
def setUpModule():
    """Skip every test in this module unless the image ships an on-target toolchain."""
    # skipModule lives in oeqa.oetest but this file only imports oeRuntimeTest
    # from it, so calling skipModule would raise a NameError; import it here.
    from oeqa.oetest import skipModule
    if not oeRuntimeTest.hasFeature("tools-sdk"):
        skipModule("Image doesn't have tools-sdk in IMAGE_FEATURES")
| 8 | |||
class LddTest(oeRuntimeTest):
    """Check that ldd is installed and its RTLDLIST names a real dynamic loader."""

    @skipUnlessPassed('test_ssh')
    def test_ldd_exists(self):
        (rc, out) = self.target.run('which ldd')
        self.assertEqual(rc, 0, msg = "ldd does not exist in PATH: which ldd: %s" % out)

    @skipUnlessPassed('test_ldd_exists')
    def test_ldd_rtldlist_check(self):
        # ldd is a shell script embedding an RTLDLIST of loader paths;
        # at least one of those paths must exist on the image
        (rc, out) = self.target.run('for i in $(which ldd | xargs cat | grep "^RTLDLIST"|cut -d\'=\' -f2|tr -d \'"\'); do test -f $i && echo $i && break; done')
        self.assertEqual(rc, 0, msg = "ldd path not correct or RTLDLIST files don't exist. ")
diff --git a/meta/lib/oeqa/runtime/logrotate.py b/meta/lib/oeqa/runtime/logrotate.py new file mode 100644 index 0000000000..80489a3267 --- /dev/null +++ b/meta/lib/oeqa/runtime/logrotate.py | |||
| @@ -0,0 +1,27 @@ | |||
| 1 | # This test should cover https://bugzilla.yoctoproject.org/tr_show_case.cgi?case_id=289 testcase | ||
| 2 | # Note that the image under test must have logrotate installed | ||
| 3 | |||
| 4 | import unittest | ||
| 5 | from oeqa.oetest import oeRuntimeTest, skipModule | ||
| 6 | from oeqa.utils.decorators import * | ||
| 7 | |||
def setUpModule():
    """Skip the whole module when the image does not ship logrotate."""
    if oeRuntimeTest.hasPackage("logrotate"):
        return
    skipModule("No logrotate package in image")
| 11 | |||
| 12 | |||
class LogrotateTest(oeRuntimeTest):
    """Force a logrotate run into a fresh olddir and check a log was rotated.

    The numeric test name prefixes enforce execution order: setup first,
    then the actual rotation check.
    """

    @skipUnlessPassed("test_ssh")
    def test_1_logrotate_setup(self):
        # directory rotated logs will be moved into via "olddir"
        (status, output) = self.target.run('mkdir /home/root/logrotate_dir')
        self.assertEqual(status, 0, msg = "Could not create logrotate_dir. Output: %s" % output)
        # inject "olddir /home/root/logrotate_dir" into the wtmp stanza of
        # /etc/logrotate.conf (sed uses '#' as the s/// delimiter)
        (status, output) = self.target.run("sed -i 's#wtmp {#wtmp {\\n olddir /home/root/logrotate_dir#' /etc/logrotate.conf")
        self.assertEqual(status, 0, msg = "Could not write to logrotate.conf file. Status and output: %s and %s)" % (status, output))

    @skipUnlessPassed("test_1_logrotate_setup")
    def test_2_logrotate(self):
        # -f forces rotation even when size/time thresholds are not reached
        (status, output) = self.target.run('logrotate -f /etc/logrotate.conf')
        self.assertEqual(status, 0, msg = "logrotate service could not be reloaded. Status and output: %s and %s" % (status, output))
        # NOTE(review): "ls -la" prints a total line plus "." and "..", so an
        # empty directory already yields 3 lines and passes this >=3 check —
        # confirm the threshold is strict enough.
        output = self.target.run('ls -la /home/root/logrotate_dir/ | wc -l')[1]
        self.assertTrue(int(output)>=3, msg = "new logfile could not be created. List of files within log directory: %s" %(self.target.run('ls -la /home/root/logrotate_dir')[1]))
diff --git a/meta/lib/oeqa/runtime/multilib.py b/meta/lib/oeqa/runtime/multilib.py new file mode 100644 index 0000000000..13a3b54b18 --- /dev/null +++ b/meta/lib/oeqa/runtime/multilib.py | |||
| @@ -0,0 +1,17 @@ | |||
| 1 | import unittest | ||
| 2 | from oeqa.oetest import oeRuntimeTest, skipModule | ||
| 3 | from oeqa.utils.decorators import * | ||
| 4 | |||
def setUpModule():
    """Only run this module on images built with the lib32 multilib enabled."""
    configured_multilibs = oeRuntimeTest.tc.d.getVar("MULTILIBS", True) or ""
    if "multilib:lib32" not in configured_multilibs:
        skipModule("this isn't a multilib:lib32 image")
| 9 | |||
| 10 | |||
class MultilibTest(oeRuntimeTest):
    """Verify that a 32-bit multilib application is really installed as ELF32."""

    @skipUnlessPassed('test_ssh')
    def test_file_connman(self):
        self.assertTrue(oeRuntimeTest.hasPackage('connman-gnome'), msg="This test assumes connman-gnome is installed")
        # third line, second field of readelf -h is the ELF class
        elf_class = self.target.run("readelf -h /usr/bin/connman-applet | sed -n '3p' | awk '{print $2}'")[1]
        self.assertEqual(elf_class, "ELF32", msg="connman-applet isn't an ELF32 binary. readelf says: %s" % self.target.run("readelf -h /usr/bin/connman-applet")[1])
diff --git a/meta/lib/oeqa/runtime/pam.py b/meta/lib/oeqa/runtime/pam.py new file mode 100644 index 0000000000..52e1eb88e6 --- /dev/null +++ b/meta/lib/oeqa/runtime/pam.py | |||
| @@ -0,0 +1,24 @@ | |||
| 1 | # This test should cover https://bugzilla.yoctoproject.org/tr_show_case.cgi?case_id=287 testcase | ||
| 2 | # Note that the image under test must have "pam" in DISTRO_FEATURES | ||
| 3 | |||
| 4 | import unittest | ||
| 5 | from oeqa.oetest import oeRuntimeTest | ||
| 6 | from oeqa.utils.decorators import * | ||
| 7 | |||
def setUpModule():
    """Skip this module unless the image was built with 'pam' in DISTRO_FEATURES."""
    # skipModule lives in oeqa.oetest but this file only imports oeRuntimeTest
    # from it, so calling skipModule would raise a NameError; import it here.
    from oeqa.oetest import skipModule
    if not oeRuntimeTest.hasFeature("pam"):
        skipModule("target doesn't have 'pam' in DISTRO_FEATURES")
| 11 | |||
| 12 | |||
class PamBasicTest(oeRuntimeTest):
    """Smoke-test that the core login tools respond as their PAM-enabled builds do.

    Each tool is invoked with --help and its exit status compared against the
    value the PAM-enabled variant returns. NOTE(review): these exact codes
    (1/6/2/2) are implementation-specific — confirm against the util versions
    shipped in the image if this starts failing.
    """

    @skipUnlessPassed('test_ssh')
    def test_pam(self):
        (status, output) = self.target.run('login --help')
        self.assertEqual(status, 1, msg = "login command does not work as expected. Status and output:%s and %s" %(status, output))
        (status, output) = self.target.run('passwd --help')
        self.assertEqual(status, 6, msg = "passwd command does not work as expected. Status and output:%s and %s" %(status, output))
        (status, output) = self.target.run('su --help')
        self.assertEqual(status, 2, msg = "su command does not work as expected. Status and output:%s and %s" %(status, output))
        (status, output) = self.target.run('useradd --help')
        self.assertEqual(status, 2, msg = "useradd command does not work as expected. Status and output:%s and %s" %(status, output))
diff --git a/meta/lib/oeqa/runtime/perl.py b/meta/lib/oeqa/runtime/perl.py new file mode 100644 index 0000000000..c9bb684c11 --- /dev/null +++ b/meta/lib/oeqa/runtime/perl.py | |||
| @@ -0,0 +1,28 @@ | |||
| 1 | import unittest | ||
| 2 | import os | ||
| 3 | from oeqa.oetest import oeRuntimeTest, skipModule | ||
| 4 | from oeqa.utils.decorators import * | ||
| 5 | |||
def setUpModule():
    """Skip the whole module when the image does not ship perl."""
    if oeRuntimeTest.hasPackage("perl"):
        return
    skipModule("No perl package in the image")
| 9 | |||
| 10 | |||
class PerlTest(oeRuntimeTest):
    """Verify perl is installed and runs the test.pl floating point script."""

    @classmethod
    def setUpClass(cls):
        # classmethods conventionally take cls, not self (PEP 8)
        oeRuntimeTest.tc.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "test.pl"), "/tmp/test.pl")

    def test_perl_exists(self):
        (status, output) = self.target.run('which perl')
        self.assertEqual(status, 0, msg="Perl binary not in PATH or not on target.")

    def test_perl_works(self):
        # test.pl prints exactly this line when float math is sane
        (status, output) = self.target.run('perl /tmp/test.pl')
        self.assertEqual(status, 0, msg="Exit status was not 0. Output: %s" % output)
        self.assertEqual(output, "the value of a is 0.01", msg="Incorrect output: %s" % output)

    @classmethod
    def tearDownClass(cls):
        oeRuntimeTest.tc.target.run("rm /tmp/test.pl")
diff --git a/meta/lib/oeqa/runtime/ping.py b/meta/lib/oeqa/runtime/ping.py new file mode 100644 index 0000000000..a73c72402a --- /dev/null +++ b/meta/lib/oeqa/runtime/ping.py | |||
| @@ -0,0 +1,20 @@ | |||
| 1 | import subprocess | ||
| 2 | import unittest | ||
| 3 | import sys | ||
| 4 | import time | ||
| 5 | from oeqa.oetest import oeRuntimeTest | ||
| 6 | |||
class PingTest(oeRuntimeTest):
    """Require 5 consecutive successful pings to the target within 60 seconds."""

    def test_ping(self):
        collected_output = ''
        consecutive = 0
        deadline = time.time() + 60
        while consecutive < 5 and time.time() < deadline:
            proc = subprocess.Popen("ping -c 1 %s" % self.target.ip, shell=True, stdout=subprocess.PIPE)
            collected_output += proc.communicate()[0]
            if proc.poll() == 0:
                consecutive += 1
            else:
                # a lost reply resets the run of consecutive successes
                consecutive = 0
        self.assertEqual(consecutive, 5, msg = "Expected 5 consecutive replies, got %d.\nping output is:\n%s" % (consecutive,collected_output))
diff --git a/meta/lib/oeqa/runtime/python.py b/meta/lib/oeqa/runtime/python.py new file mode 100644 index 0000000000..c037ab2c18 --- /dev/null +++ b/meta/lib/oeqa/runtime/python.py | |||
| @@ -0,0 +1,33 @@ | |||
| 1 | import unittest | ||
| 2 | import os | ||
| 3 | from oeqa.oetest import oeRuntimeTest, skipModule | ||
| 4 | from oeqa.utils.decorators import * | ||
| 5 | |||
def setUpModule():
    """Skip the whole module when the image does not ship python."""
    if oeRuntimeTest.hasPackage("python"):
        return
    skipModule("No python package in the image")
| 9 | |||
| 10 | |||
class PythonTest(oeRuntimeTest):
    """Verify python is installed and runs the test.py sanity script."""

    @classmethod
    def setUpClass(cls):
        # classmethods conventionally take cls, not self (PEP 8)
        oeRuntimeTest.tc.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "test.py"), "/tmp/test.py")

    def test_python_exists(self):
        (status, output) = self.target.run('which python')
        self.assertEqual(status, 0, msg="Python binary not in PATH or not on target.")

    def test_python_stdout(self):
        # test.py prints exactly this line when float math is sane
        (status, output) = self.target.run('python /tmp/test.py')
        self.assertEqual(status, 0, msg="Exit status was not 0. Output: %s" % output)
        self.assertEqual(output, "the value of a is 0.01", msg="Incorrect output: %s" % output)

    def test_python_testfile(self):
        # test.py also touches this marker file as a side effect
        (status, output) = self.target.run('ls /tmp/testfile.python')
        self.assertEqual(status, 0, msg="Python test file generate failed.")

    @classmethod
    def tearDownClass(cls):
        oeRuntimeTest.tc.target.run("rm /tmp/test.py /tmp/testfile.python")
diff --git a/meta/lib/oeqa/runtime/rpm.py b/meta/lib/oeqa/runtime/rpm.py new file mode 100644 index 0000000000..084d22f96b --- /dev/null +++ b/meta/lib/oeqa/runtime/rpm.py | |||
| @@ -0,0 +1,50 @@ | |||
| 1 | import unittest | ||
| 2 | import os | ||
| 3 | import fnmatch | ||
| 4 | from oeqa.oetest import oeRuntimeTest, skipModule | ||
| 5 | from oeqa.utils.decorators import * | ||
| 6 | |||
def setUpModule():
    """Run only on package-management images whose primary packaging is rpm."""
    if not oeRuntimeTest.hasFeature("package-management"):
        skipModule("rpm module skipped: target doesn't have package-management in IMAGE_FEATURES")
    # the first entry in PACKAGE_CLASSES is the primary package manager
    primary_pm = oeRuntimeTest.tc.d.getVar("PACKAGE_CLASSES", True).split()[0]
    if primary_pm != "package_rpm":
        skipModule("rpm module skipped: target doesn't have rpm as primary package manager")
| 12 | |||
| 13 | |||
class RpmBasicTest(oeRuntimeTest):
    """Basic sanity checks for the rpm tool on the target."""

    @skipUnlessPassed('test_ssh')
    def test_rpm_help(self):
        (rc, out) = self.target.run('rpm --help')
        self.assertEqual(rc, 0, msg="status and output: %s and %s" % (rc,out))

    @skipUnlessPassed('test_rpm_help')
    def test_rpm_query(self):
        # querying rpm's own package proves the rpm database is usable
        (rc, out) = self.target.run('rpm -q rpm')
        self.assertEqual(rc, 0, msg="status and output: %s and %s" % (rc,out))
class RpmInstallRemoveTest(oeRuntimeTest):
    """Install and remove the rpm-doc package copied from the build's deploy dir."""

    @classmethod
    def setUpClass(cls):
        # classmethods conventionally take cls, not self (PEP 8)
        pkgarch = oeRuntimeTest.tc.d.getVar('TUNE_PKGARCH', True).replace("-", "_")
        rpmdir = os.path.join(oeRuntimeTest.tc.d.getVar('DEPLOY_DIR', True), "rpm", pkgarch)
        # pick rpm-doc as a test file to get installed, because it's small
        # and it will always be built for standard targets
        # NOTE(review): if no rpm-doc-*.rpm exists, testrpmfile is never
        # assigned and the copy_to below raises a NameError — confirm whether
        # an explicit skip would be preferable.
        for f in fnmatch.filter(os.listdir(rpmdir), "rpm-doc-*.%s.rpm" % pkgarch):
            testrpmfile = f
        oeRuntimeTest.tc.target.copy_to(os.path.join(rpmdir,testrpmfile), "/tmp/rpm-doc.rpm")

    @skipUnlessPassed('test_rpm_help')
    def test_rpm_install(self):
        (status, output) = self.target.run('rpm -ivh /tmp/rpm-doc.rpm')
        self.assertEqual(status, 0, msg="Failed to install rpm-doc package: %s" % output)

    @skipUnlessPassed('test_rpm_install')
    def test_rpm_remove(self):
        (status,output) = self.target.run('rpm -e rpm-doc')
        self.assertEqual(status, 0, msg="Failed to remove rpm-doc package: %s" % output)

    @classmethod
    def tearDownClass(cls):
        oeRuntimeTest.tc.target.run('rm -f /tmp/rpm-doc.rpm')
| 50 | |||
diff --git a/meta/lib/oeqa/runtime/scanelf.py b/meta/lib/oeqa/runtime/scanelf.py new file mode 100644 index 0000000000..b9abf24640 --- /dev/null +++ b/meta/lib/oeqa/runtime/scanelf.py | |||
| @@ -0,0 +1,26 @@ | |||
| 1 | import unittest | ||
| 2 | from oeqa.oetest import oeRuntimeTest, skipModule | ||
| 3 | from oeqa.utils.decorators import * | ||
| 4 | |||
def setUpModule():
    """Skip the whole module when pax-utils (which provides scanelf) is absent."""
    if oeRuntimeTest.hasPackage("pax-utils"):
        return
    skipModule("pax-utils package not installed")
| 8 | |||
class ScanelfTest(oeRuntimeTest):
    """Use pax-utils' scanelf to check the image for TEXTREL/RPATH offenders."""

    def setUp(self):
        self.scancmd = 'scanelf --quiet --recursive --mount --ldpath --path'

    def _assert_scan_clean(self):
        # scanelf lists offending files only, so a clean image prints nothing
        (status, output) = self.target.run(self.scancmd)
        self.assertEqual(output.strip(), "", "\n".join([self.scancmd, output]))

    @skipUnlessPassed('test_ssh')
    def test_scanelf_textrel(self):
        # report binaries with text relocations
        self.scancmd += " --textrel"
        self._assert_scan_clean()

    @skipUnlessPassed('test_ssh')
    def test_scanelf_rpath(self):
        # report binaries carrying an RPATH
        self.scancmd += " --rpath"
        self._assert_scan_clean()
diff --git a/meta/lib/oeqa/runtime/scp.py b/meta/lib/oeqa/runtime/scp.py new file mode 100644 index 0000000000..03095bf966 --- /dev/null +++ b/meta/lib/oeqa/runtime/scp.py | |||
| @@ -0,0 +1,21 @@ | |||
| 1 | import os | ||
| 2 | from oeqa.oetest import oeRuntimeTest, skipModule | ||
| 3 | from oeqa.utils.decorators import skipUnlessPassed | ||
| 4 | |||
def setUpModule():
    """Skip unless an ssh server (dropbear or openssh) is in the image."""
    has_ssh_server = oeRuntimeTest.hasPackage("dropbear") or oeRuntimeTest.hasPackage("openssh-sshd")
    if not has_ssh_server:
        skipModule("No ssh package in image")
| 8 | |||
class ScpTest(oeRuntimeTest):
    """Copy a 4MB sparse file to the target to exercise scp."""

    @skipUnlessPassed('test_ssh')
    def test_scp_file(self):
        test_log_dir = oeRuntimeTest.tc.d.getVar("TEST_LOG_DIR", True)
        local_path = os.path.join(test_log_dir, 'test_scp_file')
        # seek past 4MB-1 and write a newline to create a sparse test file
        with open(local_path, 'w') as sparse_file:
            sparse_file.seek(2 ** 22 - 1)
            sparse_file.write(os.linesep)
        (status, output) = self.target.copy_to(local_path, '/tmp/test_scp_file')
        self.assertEqual(status, 0, msg = "File could not be copied. Output: %s" % output)
        (status, output) = self.target.run("ls -la /tmp/test_scp_file")
        self.assertEqual(status, 0, msg = "SCP test failed")
diff --git a/meta/lib/oeqa/runtime/skeletoninit.py b/meta/lib/oeqa/runtime/skeletoninit.py new file mode 100644 index 0000000000..557e715a3e --- /dev/null +++ b/meta/lib/oeqa/runtime/skeletoninit.py | |||
| @@ -0,0 +1,28 @@ | |||
| 1 | # This test should cover https://bugzilla.yoctoproject.org/tr_show_case.cgi?case_id=284 testcase | ||
| 2 | # Note that the image under test must have meta-skeleton layer in bblayers and IMAGE_INSTALL_append = " service" in local.conf | ||
| 3 | |||
| 4 | import unittest | ||
| 5 | from oeqa.oetest import oeRuntimeTest | ||
| 6 | from oeqa.utils.decorators import * | ||
| 7 | |||
def setUpModule():
    """Skip the whole module unless the meta-skeleton 'service' package is installed."""
    # skipModule is defined in oeqa.oetest, but this module's header only
    # imports oeRuntimeTest from there -- import it locally so the skip
    # actually fires instead of raising NameError.
    from oeqa.oetest import skipModule
    if not oeRuntimeTest.hasPackage("service"):
        skipModule("No service package in image")
| 11 | |||
| 12 | |||
class SkeletonBasicTest(oeRuntimeTest):
    """Sanity checks for the meta-skeleton 'service' recipe (sysvinit images only)."""

    @skipUnlessPassed('test_ssh')
    # Pass True so the variable is expanded before comparison, consistent
    # with the identical init-manager check in systemd.py.
    @unittest.skipIf("systemd" == oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager", True), "Not appropiate for systemd image")
    def test_skeleton_availability(self):
        """Both the init script and the test binary must be installed."""
        (status, output) = self.target.run('ls /etc/init.d/skeleton')
        self.assertEqual(status, 0, msg = "skeleton init script not found. Output:\n%s " % output)
        (status, output) = self.target.run('ls /usr/sbin/skeleton-test')
        self.assertEqual(status, 0, msg = "skeleton-test not found. Output:\n%s" % output)

    @skipUnlessPassed('test_skeleton_availability')
    @unittest.skipIf("systemd" == oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager", True), "Not appropiate for systemd image")
    def test_skeleton_script(self):
        """Starting the init script must leave skeleton-test running."""
        output1 = self.target.run("/etc/init.d/skeleton start")[1]
        # [s]keleton-test keeps the grep itself out of the process listing.
        (status, output2) = self.target.run(oeRuntimeTest.pscmd + ' | grep [s]keleton-test')
        self.assertEqual(status, 0, msg = "Skeleton script could not be started:\n%s\n%s" % (output1, output2))
diff --git a/meta/lib/oeqa/runtime/smart.py b/meta/lib/oeqa/runtime/smart.py new file mode 100644 index 0000000000..195f1170c6 --- /dev/null +++ b/meta/lib/oeqa/runtime/smart.py | |||
| @@ -0,0 +1,110 @@ | |||
| 1 | import unittest | ||
| 2 | import re | ||
| 3 | from oeqa.oetest import oeRuntimeTest | ||
| 4 | from oeqa.utils.decorators import * | ||
| 5 | from oeqa.utils.httpserver import HTTPService | ||
| 6 | |||
def setUpModule():
    """Skip all smart tests unless the image uses rpm packaging with smart installed."""
    # skipModule lives in oeqa.oetest, but the module header imports only
    # oeRuntimeTest from it; bring it into scope so the skips below don't
    # raise NameError when they trigger.
    from oeqa.oetest import skipModule
    if not oeRuntimeTest.hasFeature("package-management"):
        skipModule("Image doesn't have package management feature")
    if not oeRuntimeTest.hasPackage("smart"):
        skipModule("Image doesn't have smart installed")
    if "package_rpm" != oeRuntimeTest.tc.d.getVar("PACKAGE_CLASSES", True).split()[0]:
        skipModule("Rpm is not the primary package manager")
| 14 | |||
class SmartTest(oeRuntimeTest):
    """Base class: run a smart command on the target and assert its exit status."""

    @skipUnlessPassed('test_smart_help')
    def smart(self, command, expected = 0):
        """Run 'smart <command>' on the target; assert status == expected.

        Also fails if smart reports its out-of-memory symptom. Returns the
        command output so callers can inspect it.
        """
        # 'os' is not imported at module level in this file; import it here
        # so building the failure message doesn't raise NameError.
        import os
        command = 'smart %s' % command
        # Generous timeout -- package operations on the target are slow.
        status, output = self.target.run(command, 1500)
        message = os.linesep.join([command, output])
        self.assertEqual(status, expected, message)
        self.assertFalse("Cannot allocate memory" in output, message)
        return output
| 25 | |||
class SmartBasicTest(SmartTest):
    """Smoke tests for basic, read-only smart subcommands."""

    @skipUnlessPassed('test_ssh')
    def test_smart_help(self):
        """smart --help must succeed; everything else gates on this."""
        self.smart("--help")

    def test_smart_version(self):
        self.smart("--version")

    def test_smart_info(self):
        self.smart("info python-smartpm")

    def test_smart_query(self):
        self.smart("query python-smartpm")

    def test_smart_search(self):
        self.smart("search python-smartpm")

    def test_smart_stats(self):
        self.smart("stats")
| 46 | |||
class SmartRepoTest(SmartTest):
    """smart channel/install tests against an HTTP rpm feed served from DEPLOY_DIR."""

    @classmethod
    def setUpClass(cls):
        # Serve the build host's deploy directory over HTTP so the target
        # can reach the rpm feeds.
        cls.repo_server = HTTPService(oeRuntimeTest.tc.d.getVar('DEPLOY_DIR', True), oeRuntimeTest.tc.target.server_ip)
        cls.repo_server.start()

    @classmethod
    def tearDownClass(cls):
        cls.repo_server.stop()

    def test_smart_channel(self):
        # Bare 'channel' with no arguments is a usage error -> expect exit 1.
        self.smart('channel', 1)

    def test_smart_channel_add(self):
        """Register one rpm-md channel per package arch present in the feed."""
        # 'os' is not imported at module level in this file; needed for listdir.
        import os
        image_pkgtype = self.tc.d.getVar('IMAGE_PKGTYPE', True)
        deploy_url = 'http://%s:%s/%s' %(self.target.server_ip, self.repo_server.port, image_pkgtype)
        # Feed directories use '_' where PACKAGE_ARCHS uses '-'.
        pkgarchs = self.tc.d.getVar('PACKAGE_ARCHS', True).replace("-","_").split()
        for arch in os.listdir('%s/%s' % (self.repo_server.root_dir, image_pkgtype)):
            if arch in pkgarchs:
                self.smart('channel -y --add {a} type=rpm-md baseurl={u}/{a}'.format(a=arch, u=deploy_url))
        self.smart('update')

    def test_smart_channel_help(self):
        self.smart('channel --help')

    def test_smart_channel_list(self):
        self.smart('channel --list')

    def test_smart_channel_show(self):
        self.smart('channel --show')

    def test_smart_channel_rpmsys(self):
        self.smart('channel --show rpmsys')
        self.smart('channel --disable rpmsys')
        self.smart('channel --enable rpmsys')

    @skipUnlessPassed('test_smart_channel_add')
    def test_smart_install(self):
        self.smart('remove -y psplash-default')
        self.smart('install -y psplash-default')

    @skipUnlessPassed('test_smart_install')
    def test_smart_install_dependency(self):
        # Removing psplash drags psplash-default out with it; reinstalling
        # the latter must pull the dependency back in.
        self.smart('remove -y psplash')
        self.smart('install -y psplash-default')

    @skipUnlessPassed('test_smart_channel_add')
    def test_smart_install_from_disk(self):
        self.smart('remove -y psplash-default')
        self.smart('download psplash-default')
        self.smart('install -y ./psplash-default*')

    @skipUnlessPassed('test_smart_channel_add')
    def test_smart_install_from_http(self):
        """Install a package by its direct feed URL."""
        output = self.smart('download --urls psplash-default')
        url = re.search('(http://.*/psplash-default.*\.rpm)', output)
        # Message typo fixed: "Couln't" -> "Couldn't".
        self.assertTrue(url, msg="Couldn't find download url in %s" % output)
        self.smart('remove -y psplash-default')
        self.smart('install -y %s' % url.group(0))

    @skipUnlessPassed('test_smart_install')
    def test_smart_reinstall(self):
        self.smart('reinstall -y psplash-default')
diff --git a/meta/lib/oeqa/runtime/ssh.py b/meta/lib/oeqa/runtime/ssh.py new file mode 100644 index 0000000000..e64866019f --- /dev/null +++ b/meta/lib/oeqa/runtime/ssh.py | |||
| @@ -0,0 +1,18 @@ | |||
| 1 | import subprocess | ||
| 2 | import unittest | ||
| 3 | import sys | ||
| 4 | from oeqa.oetest import oeRuntimeTest, skipModule | ||
| 5 | from oeqa.utils.decorators import * | ||
| 6 | |||
def setUpModule():
    """Skip every test in this module when neither SSH server package is present."""
    ssh_servers = ("dropbear", "openssh")
    if not any(oeRuntimeTest.hasPackage(pkg) for pkg in ssh_servers):
        skipModule("No ssh package in image")
| 10 | |||
class SshTest(oeRuntimeTest):
    """Verify a working SSH connection to the image under test."""

    @skipUnlessPassed('test_ping')
    def test_ssh(self):
        # A trivial remote command proves the ssh transport works end to end.
        status, output = self.target.run('uname -a')
        self.assertEqual(status, 0, msg="SSH Test failed: %s" % output)
        # /etc/masterimage exists only on the master image, so 'cat' is
        # expected to fail (exit 1) on the image actually being tested.
        status, output = self.target.run('cat /etc/masterimage')
        self.assertEqual(status, 1, msg="This isn't the right image - /etc/masterimage shouldn't be here %s" % output)
diff --git a/meta/lib/oeqa/runtime/syslog.py b/meta/lib/oeqa/runtime/syslog.py new file mode 100644 index 0000000000..b95b36175a --- /dev/null +++ b/meta/lib/oeqa/runtime/syslog.py | |||
| @@ -0,0 +1,46 @@ | |||
| 1 | import unittest | ||
| 2 | from oeqa.oetest import oeRuntimeTest, skipModule | ||
| 3 | from oeqa.utils.decorators import * | ||
| 4 | |||
def setUpModule():
    """The whole module is only meaningful when a syslog package is installed."""
    if oeRuntimeTest.hasPackage("syslog"):
        return
    skipModule("No syslog package in image")
| 8 | |||
class SyslogTest(oeRuntimeTest):
    """Basic syslogd availability checks."""

    @skipUnlessPassed("test_ssh")
    def test_syslog_help(self):
        status, output = self.target.run('/sbin/syslogd --help')
        self.assertEqual(status, 0, msg="status and output: %s and %s" % (status,output))

    @skipUnlessPassed("test_syslog_help")
    def test_syslog_running(self):
        # The [s]yslogd bracket trick keeps the grep itself out of ps output.
        status, output = self.target.run(oeRuntimeTest.pscmd + ' | grep -i [s]yslogd')
        self.assertEqual(status, 0, msg="no syslogd process, ps output: %s" % self.target.run(oeRuntimeTest.pscmd)[1])
| 21 | |||
class SyslogTestConfig(oeRuntimeTest):
    """Exercise logger(1) and syslog restart/reconfiguration."""

    @skipUnlessPassed("test_syslog_running")
    def test_syslog_logger(self):
        # Check /var/log/messages when it exists; otherwise fall back to
        # logread (busybox syslogd keeps an in-memory buffer instead).
        (status,output) = self.target.run('logger foobar && test -e /var/log/messages && grep foobar /var/log/messages || logread | grep foobar')
        self.assertEqual(status, 0, msg="Test log string not found in /var/log/messages. Output: %s " % output)

    @skipUnlessPassed("test_syslog_running")
    def test_syslog_restart(self):
        # Pass True so the variable is expanded before comparing -- this
        # matches how the same init-manager check is written in systemd.py.
        if "systemd" != oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager", True):
            (status,output) = self.target.run('/etc/init.d/syslog restart')
        else:
            (status,output) = self.target.run('systemctl restart syslog.service')

    @skipUnlessPassed("test_syslog_restart")
    @skipUnlessPassed("test_syslog_logger")
    @unittest.skipIf("systemd" == oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager", True), "Not appropiate for systemd image")
    def test_syslog_startup_config(self):
        """Point syslog at a different logfile via syslog-startup.conf and verify it is used."""
        self.target.run('echo "LOGFILE=/var/log/test" >> /etc/syslog-startup.conf')
        (status,output) = self.target.run('/etc/init.d/syslog restart')
        self.assertEqual(status, 0, msg="Could not restart syslog service. Status and output: %s and %s" % (status,output))
        (status,output) = self.target.run('logger foobar && grep foobar /var/log/test')
        self.assertEqual(status, 0, msg="Test log string not found. Output: %s " % output)
        # Undo the config change and restore the service afterwards.
        self.target.run("sed -i 's#LOGFILE=/var/log/test##' /etc/syslog-startup.conf")
        self.target.run('/etc/init.d/syslog restart')
diff --git a/meta/lib/oeqa/runtime/systemd.py b/meta/lib/oeqa/runtime/systemd.py new file mode 100644 index 0000000000..6de84f891b --- /dev/null +++ b/meta/lib/oeqa/runtime/systemd.py | |||
| @@ -0,0 +1,84 @@ | |||
| 1 | import unittest | ||
| 2 | import re | ||
| 3 | from oeqa.oetest import oeRuntimeTest, skipModule | ||
| 4 | from oeqa.utils.decorators import * | ||
| 5 | |||
def setUpModule():
    """Only run when systemd is both a distro feature and the active init manager."""
    if not oeRuntimeTest.hasFeature("systemd"):
        skipModule("target doesn't have systemd in DISTRO_FEATURES")
    init_manager = oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager", True)
    if init_manager != "systemd":
        skipModule("systemd is not the init manager for this image")
| 11 | |||
| 12 | |||
class SystemdTest(oeRuntimeTest):
    """Shared helper for invoking systemctl on the target."""

    def systemctl(self, action = '', target = '', expected = 0, verbose = False):
        """Run 'systemctl <action> <target>'; assert exit status == expected.

        With verbose=True, an unexpected status appends the unit's full
        'systemctl status' output to the failure message. Returns the
        command output.
        """
        command = 'systemctl %s %s' % (action, target)
        status, output = self.target.run(command)
        message = '\n'.join([command, output])
        unexpected = status != expected
        if unexpected and verbose:
            message += self.target.run('systemctl status --full %s' % target)[1]
        self.assertEqual(status, expected, message)
        return output
| 23 | |||
| 24 | |||
class SystemdBasicTests(SystemdTest):
    """Basic systemd health: version, unit listing, settled failed-unit check."""

    @skipUnlessPassed('test_ssh')
    def test_systemd_basic(self):
        self.systemctl('--version')

    # Fixed dependency name: was 'test_system_basic' (typo), which named a
    # test that does not exist, so the skipUnlessPassed guard never matched
    # the actual prerequisite above.
    @skipUnlessPassed('test_systemd_basic')
    def test_systemd_list(self):
        self.systemctl('list-unit-files')

    def settle(self):
        """
        Block until systemd has finished activating any units being activated,
        or until two minutes has elapsed.

        Returns a tuple, either (True, '') if all units have finished
        activating, or (False, message string) if there are still units
        activating (generally, failing units that restart).
        """
        import time
        endtime = time.time() + (60 * 2)
        while True:
            status, output = self.target.run('systemctl --state=activating')
            if "0 loaded units listed" in output:
                return (True, '')
            if time.time() >= endtime:
                return (False, output)
            time.sleep(10)

    @skipUnlessPassed('test_systemd_basic')
    def test_systemd_failed(self):
        """No unit may be in the failed state once the system has settled."""
        settled, output = self.settle()
        self.assertTrue(settled, msg="Timed out waiting for systemd to settle:\n" + output)

        output = self.systemctl('list-units', '--failed')
        match = re.search("0 loaded units listed", output)
        if not match:
            # Include full status of the failed units in the assertion message.
            output += self.systemctl('status --full --failed')
        self.assertTrue(match, msg="Some systemd units failed:\n%s" % output)
| 64 | |||
| 65 | |||
class SystemdServiceTests(SystemdTest):
    """Start/stop and enable/disable cycling of a known unit (avahi-daemon)."""

    @skipUnlessPassed('test_systemd_basic')
    def test_systemd_status(self):
        self.systemctl('status --full', 'avahi-daemon.service')

    @skipUnlessPassed('test_systemd_status')
    def test_systemd_stop_start(self):
        unit = 'avahi-daemon.service'
        self.systemctl('stop', unit)
        # 'is-active' exits 3 for an inactive unit.
        self.systemctl('is-active', unit, expected=3, verbose=True)
        self.systemctl('start', unit)
        self.systemctl('is-active', unit, verbose=True)

    @skipUnlessPassed('test_systemd_basic')
    def test_systemd_disable_enable(self):
        unit = 'avahi-daemon.service'
        self.systemctl('disable', unit)
        # 'is-enabled' exits 1 for a disabled unit.
        self.systemctl('is-enabled', unit, expected=1)
        self.systemctl('enable', unit)
        self.systemctl('is-enabled', unit)
diff --git a/meta/lib/oeqa/runtime/vnc.py b/meta/lib/oeqa/runtime/vnc.py new file mode 100644 index 0000000000..5ed10727bc --- /dev/null +++ b/meta/lib/oeqa/runtime/vnc.py | |||
| @@ -0,0 +1,19 @@ | |||
| 1 | from oeqa.oetest import oeRuntimeTest | ||
| 2 | from oeqa.utils.decorators import * | ||
| 3 | import re | ||
| 4 | |||
def setUpModule():
    # Skip the whole module when x11vnc is not in the image.
    # NOTE(review): skipModuleUnless is expected to come in via the wildcard
    # decorator import above -- confirm it is actually exported from there,
    # since this module does not import it from oeqa.oetest explicitly.
    skipModuleUnless(oeRuntimeTest.hasPackage('x11vnc'), "No x11vnc package in image")
| 7 | |||
class VNCTest(oeRuntimeTest):
    """Start x11vnc on the target and verify it is listening."""

    @skipUnlessPassed('test_ssh')
    def test_vnc(self):
        status, output = self.target.run('x11vnc -display :0 -bg -o x11vnc.log')
        self.assertEqual(status, 0, msg="x11vnc server failed to start: %s" % output)
        # x11vnc announces its chosen port as PORT=<n> on stdout.
        port = re.search('PORT=[0-9]*', output)
        self.assertTrue(port, msg="Listening port not specified in command output: %s" %output)

        vncport = port.group(0).split('=')[1]
        listen_check = 'netstat -ntl | grep ":%s"' % vncport
        status, output = self.target.run(listen_check)
        self.assertEqual(status, 0, msg="x11vnc server not running on port %s\n\n%s" % (vncport, self.target.run('netstat -ntl; cat x11vnc.log')[1]))
diff --git a/meta/lib/oeqa/runtime/x32lib.py b/meta/lib/oeqa/runtime/x32lib.py new file mode 100644 index 0000000000..6bad201b12 --- /dev/null +++ b/meta/lib/oeqa/runtime/x32lib.py | |||
| @@ -0,0 +1,17 @@ | |||
| 1 | import unittest | ||
| 2 | from oeqa.oetest import oeRuntimeTest, skipModule | ||
| 3 | from oeqa.utils.decorators import * | ||
| 4 | |||
def setUpModule():
    """Skip unless the image was built with the x86-64-x32 tune."""
    #check if DEFAULTTUNE is set and it's value is: x86-64-x32
    defaulttune = oeRuntimeTest.tc.d.getVar("DEFAULTTUNE", True)
    # getVar returns None when the variable is unset; guard against that so
    # the 'in' test skips cleanly instead of raising TypeError.
    if not defaulttune or "x86-64-x32" not in defaulttune:
        skipModule("DEFAULTTUNE is not set to x86-64-x32")
| 10 | |||
class X32libTest(oeRuntimeTest):
    """Check that userspace binaries really are x32 (32-bit ELF on X86-64)."""

    @skipUnlessPassed("test_ssh")
    def test_x32_file(self):
        # An x32 binary is a 32-bit ELF whose machine type is X86-64.
        is_elf32 = self.target.run("readelf -h /bin/ls | grep Class | grep ELF32")[0]
        is_x86_64 = self.target.run("readelf -h /bin/ls | grep Machine | grep X86-64")[0]
        self.assertTrue(is_elf32 == 0 and is_x86_64 == 0, msg="/bin/ls isn't an X86-64 ELF32 binary. readelf says: %s" % self.target.run("readelf -h /bin/ls")[1])
diff --git a/meta/lib/oeqa/runtime/xorg.py b/meta/lib/oeqa/runtime/xorg.py new file mode 100644 index 0000000000..12dccd8198 --- /dev/null +++ b/meta/lib/oeqa/runtime/xorg.py | |||
| @@ -0,0 +1,21 @@ | |||
| 1 | import unittest | ||
| 2 | from oeqa.oetest import oeRuntimeTest, skipModule | ||
| 3 | from oeqa.utils.decorators import * | ||
| 4 | |||
def setUpModule():
    """X tests require an image built with the x11-base feature."""
    if oeRuntimeTest.hasFeature("x11-base"):
        return
    skipModule("target doesn't have x11 in IMAGE_FEATURES")
| 9 | |||
class XorgTest(oeRuntimeTest):
    """Check the X server is running and its log contains no real errors."""

    @skipUnlessPassed('test_ssh')
    def test_xorg_running(self):
        # Exclude xinit, whose command line can also match 'Xorg'; the
        # [X]org bracket trick keeps the grep itself out of the listing.
        status, output = self.target.run(oeRuntimeTest.pscmd + ' | grep -v xinit | grep [X]org')
        self.assertEqual(status, 0, msg="Xorg does not appear to be running %s" % self.target.run(oeRuntimeTest.pscmd)[1])

    @skipUnlessPassed('test_ssh')
    def test_xorg_error(self):
        # Filter out known-noisy (EE) lines; any remaining (EE) entry is a
        # real error, so the final grep is expected to find nothing (exit 1).
        status, output = self.target.run('cat /var/log/Xorg.0.log | grep -v "(EE) error," | grep -v "PreInit" | grep -v "evdev:" | grep -v "glx" | grep "(EE)"')
        self.assertEqual(status, 1, msg="Errors in Xorg log: %s" % output)
| 21 | |||
diff --git a/meta/lib/oeqa/selftest/__init__.py b/meta/lib/oeqa/selftest/__init__.py new file mode 100644 index 0000000000..3ad9513f40 --- /dev/null +++ b/meta/lib/oeqa/selftest/__init__.py | |||
| @@ -0,0 +1,2 @@ | |||
# Make oeqa.selftest behave as a namespace-style package so other layers
# can contribute selftest modules under the same package name.
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
diff --git a/meta/lib/oeqa/selftest/_sstatetests_noauto.py b/meta/lib/oeqa/selftest/_sstatetests_noauto.py new file mode 100644 index 0000000000..fc9ae7efb9 --- /dev/null +++ b/meta/lib/oeqa/selftest/_sstatetests_noauto.py | |||
| @@ -0,0 +1,95 @@ | |||
| 1 | import datetime | ||
| 2 | import unittest | ||
| 3 | import os | ||
| 4 | import re | ||
| 5 | import shutil | ||
| 6 | |||
| 7 | import oeqa.utils.ftools as ftools | ||
| 8 | from oeqa.selftest.base import oeSelfTest | ||
| 9 | from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_test_layer | ||
| 10 | from oeqa.selftest.sstate import SStateBase | ||
| 11 | |||
| 12 | |||
class RebuildFromSState(SStateBase):
    """Build targets, then verify each one can be rebuilt purely from sstate."""

    @classmethod
    def setUpClass(cls):
        cls.builddir = os.path.join(os.environ.get('BUILDDIR'))

    def get_dep_targets(self, primary_targets):
        """Return the full dependency target list (pn-buildlist) for primary_targets."""
        found_targets = []
        bitbake("-g " + ' '.join(map(str, primary_targets)))
        with open(os.path.join(self.builddir, 'pn-buildlist'), 'r') as pnfile:
            found_targets = pnfile.read().splitlines()
        return found_targets

    def configure_builddir(self, builddir):
        """Create a fresh build directory seeded from the current build's config."""
        os.mkdir(builddir)
        self.track_for_cleanup(builddir)
        os.mkdir(os.path.join(builddir, 'conf'))
        shutil.copyfile(os.path.join(os.environ.get('BUILDDIR'), 'conf/local.conf'), os.path.join(builddir, 'conf/local.conf'))
        config = {}
        config['default_sstate_dir'] = "SSTATE_DIR ?= \"${TOPDIR}/sstate-cache\""
        config['null_sstate_mirrors'] = "SSTATE_MIRRORS = \"\""
        config['default_tmp_dir'] = "TMPDIR = \"${TOPDIR}/tmp\""
        for key in config:
            ftools.append_file(os.path.join(builddir, 'conf/selftest.inc'), config[key])
        shutil.copyfile(os.path.join(os.environ.get('BUILDDIR'), 'conf/bblayers.conf'), os.path.join(builddir, 'conf/bblayers.conf'))
        try:
            shutil.copyfile(os.path.join(os.environ.get('BUILDDIR'), 'conf/auto.conf'), os.path.join(builddir, 'conf/auto.conf'))
        except (IOError, OSError):
            # auto.conf is optional; only swallow file-access errors rather
            # than the previous bare except, which hid every failure.
            pass

    def hardlink_tree(self, src, dst):
        """Mirror the directory tree src into dst using hardlinks (cheap copy)."""
        os.mkdir(dst)
        self.track_for_cleanup(dst)
        for root, dirs, files in os.walk(src):
            if root == src:
                continue
            os.mkdir(os.path.join(dst, root.split(src)[1][1:]))
            for sstate_file in files:
                os.link(os.path.join(root, sstate_file), os.path.join(dst, root.split(src)[1][1:], sstate_file))

    def run_test_sstate_rebuild(self, primary_targets, relocate=False, rebuild_dependencies=False):
        """Build primary_targets in buildA, then rebuild each target from the
        preserved sstate cache (in a relocated buildB when relocate=True),
        asserting that both cleansstate and the rebuild succeed."""
        buildA = os.path.join(self.builddir, 'buildA')
        if relocate:
            buildB = os.path.join(self.builddir, 'buildB')
        else:
            buildB = buildA

        if rebuild_dependencies:
            rebuild_targets = self.get_dep_targets(primary_targets)
        else:
            rebuild_targets = primary_targets

        self.configure_builddir(buildA)
        runCmd((". %s/oe-init-build-env %s && " % (get_bb_var('COREBASE'), buildA)) + 'bitbake ' + ' '.join(map(str, primary_targets)), shell=True, executable='/bin/bash')
        # Preserve the sstate cache via hardlinks before wiping buildA.
        self.hardlink_tree(os.path.join(buildA, 'sstate-cache'), os.path.join(self.builddir, 'sstate-cache-buildA'))
        shutil.rmtree(buildA)

        failed_rebuild = []
        failed_cleansstate = []
        for target in rebuild_targets:
            self.configure_builddir(buildB)
            self.hardlink_tree(os.path.join(self.builddir, 'sstate-cache-buildA'), os.path.join(buildB, 'sstate-cache'))

            result_cleansstate = runCmd((". %s/oe-init-build-env %s && " % (get_bb_var('COREBASE'), buildB)) + 'bitbake -ccleansstate ' + target, ignore_status=True, shell=True, executable='/bin/bash')
            if not result_cleansstate.status == 0:
                failed_cleansstate.append(target)
                shutil.rmtree(buildB)
                continue

            result_build = runCmd((". %s/oe-init-build-env %s && " % (get_bb_var('COREBASE'), buildB)) + 'bitbake ' + target, ignore_status=True, shell=True, executable='/bin/bash')
            if not result_build.status == 0:
                failed_rebuild.append(target)

            shutil.rmtree(buildB)

        self.assertFalse(failed_rebuild, msg="The following recipes have failed to rebuild: %s" % ' '.join(map(str, failed_rebuild)))
        self.assertFalse(failed_cleansstate, msg="The following recipes have failed cleansstate(all others have passed both cleansstate and rebuild from sstate tests): %s" % ' '.join(map(str, failed_cleansstate)))

    def test_sstate_relocation(self):
        self.run_test_sstate_rebuild(['core-image-sato-sdk'], relocate=True, rebuild_dependencies=True)

    def test_sstate_rebuild(self):
        self.run_test_sstate_rebuild(['core-image-sato-sdk'], relocate=False, rebuild_dependencies=True)
diff --git a/meta/lib/oeqa/selftest/base.py b/meta/lib/oeqa/selftest/base.py new file mode 100644 index 0000000000..fc880e9d26 --- /dev/null +++ b/meta/lib/oeqa/selftest/base.py | |||
| @@ -0,0 +1,129 @@ | |||
| 1 | # Copyright (c) 2013 Intel Corporation | ||
| 2 | # | ||
| 3 | # Released under the MIT license (see COPYING.MIT) | ||
| 4 | |||
| 5 | |||
| 6 | # DESCRIPTION | ||
| 7 | # Base class inherited by test classes in meta/lib/selftest | ||
| 8 | |||
| 9 | import unittest | ||
| 10 | import os | ||
| 11 | import sys | ||
| 12 | import shutil | ||
| 13 | import logging | ||
| 14 | import errno | ||
| 15 | |||
| 16 | import oeqa.utils.ftools as ftools | ||
| 17 | from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_test_layer | ||
| 18 | |||
class oeSelfTest(unittest.TestCase):
    """Base class for oe-selftest test cases.

    Manages a per-test scratch config (<builddir>/conf/selftest.inc) and
    test_recipe.inc files in the test layer, plus cleanup hooks for
    commands and paths registered by individual tests.
    """

    log = logging.getLogger("selftest.base")
    longMessage = True

    def __init__(self, methodName="runTest"):
        # BUILDDIR must be set (the selftest runner sources the build env).
        self.builddir = os.environ.get("BUILDDIR")
        self.localconf_path = os.path.join(self.builddir, "conf/local.conf")
        self.testinc_path = os.path.join(self.builddir, "conf/selftest.inc")
        # testlayer_path is assigned on the class by the test runner before
        # instantiation -- presumably points at meta-selftest; TODO confirm.
        self.testlayer_path = oeSelfTest.testlayer_path
        self._extra_tear_down_commands = []
        self._track_for_cleanup = []
        super(oeSelfTest, self).__init__(methodName)

    def setUp(self):
        """Reset the build environment to a clean state before each test."""
        os.chdir(self.builddir)
        # we don't know what the previous test left around in config or inc files
        # if it failed so we need a fresh start
        try:
            os.remove(self.testinc_path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        for root, _, files in os.walk(self.testlayer_path):
            for f in files:
                if f == 'test_recipe.inc':
                    os.remove(os.path.join(root, f))
        # tests might need their own setup
        # but if they overwrite this one they have to call
        # super each time, so let's give them an alternative
        self.setUpLocal()

    def setUpLocal(self):
        """Per-test hook; subclasses override instead of setUp()."""
        pass

    def tearDown(self):
        """Run registered cleanup commands, then remove tracked paths."""
        if self._extra_tear_down_commands:
            failed_extra_commands = []
            for command in self._extra_tear_down_commands:
                result = runCmd(command, ignore_status=True)
                if not result.status == 0:
                    failed_extra_commands.append(command)
            if failed_extra_commands:
                # Cleanup failures are logged, not fatal -- later tests
                # re-reset state in setUp anyway.
                self.log.warning("tearDown commands have failed: %s" % ', '.join(map(str, failed_extra_commands)))
                self.log.debug("Trying to move on.")
            self._extra_tear_down_commands = []

        if self._track_for_cleanup:
            for path in self._track_for_cleanup:
                if os.path.isdir(path):
                    shutil.rmtree(path)
                if os.path.isfile(path):
                    os.remove(path)
            self._track_for_cleanup = []

        self.tearDownLocal()

    def tearDownLocal(self):
        """Per-test hook; subclasses override instead of tearDown()."""
        pass

    # add test specific commands to the tearDown method.
    def add_command_to_tearDown(self, command):
        self.log.debug("Adding command '%s' to tearDown for this test." % command)
        self._extra_tear_down_commands.append(command)
    # add test specific files or directories to be removed in the tearDown method
    def track_for_cleanup(self, path):
        self.log.debug("Adding path '%s' to be cleaned up when test is over" % path)
        self._track_for_cleanup.append(path)

    # write to <builddir>/conf/selftest.inc
    def write_config(self, data):
        self.log.debug("Writing to: %s\n%s\n" % (self.testinc_path, data))
        ftools.write_file(self.testinc_path, data)

    # append to <builddir>/conf/selftest.inc
    def append_config(self, data):
        self.log.debug("Appending to: %s\n%s\n" % (self.testinc_path, data))
        ftools.append_file(self.testinc_path, data)

    # remove data from <builddir>/conf/selftest.inc
    def remove_config(self, data):
        self.log.debug("Removing from: %s\n\%s\n" % (self.testinc_path, data))
        ftools.remove_from_file(self.testinc_path, data)

    # write to meta-sefltest/recipes-test/<recipe>/test_recipe.inc
    def write_recipeinc(self, recipe, data):
        inc_file = os.path.join(self.testlayer_path, 'recipes-test', recipe, 'test_recipe.inc')
        self.log.debug("Writing to: %s\n%s\n" % (inc_file, data))
        ftools.write_file(inc_file, data)

    # append data to meta-sefltest/recipes-test/<recipe>/test_recipe.inc
    def append_recipeinc(self, recipe, data):
        inc_file = os.path.join(self.testlayer_path, 'recipes-test', recipe, 'test_recipe.inc')
        self.log.debug("Appending to: %s\n%s\n" % (inc_file, data))
        ftools.append_file(inc_file, data)

    # remove data from meta-sefltest/recipes-test/<recipe>/test_recipe.inc
    def remove_recipeinc(self, recipe, data):
        inc_file = os.path.join(self.testlayer_path, 'recipes-test', recipe, 'test_recipe.inc')
        self.log.debug("Removing from: %s\n%s\n" % (inc_file, data))
        ftools.remove_from_file(inc_file, data)

    # delete meta-sefltest/recipes-test/<recipe>/test_recipe.inc file
    def delete_recipeinc(self, recipe):
        inc_file = os.path.join(self.testlayer_path, 'recipes-test', recipe, 'test_recipe.inc')
        self.log.debug("Deleting file: %s" % inc_file)
        try:
            os.remove(inc_file)
        except OSError as e:
            # Already absent is fine; anything else is a real error.
            if e.errno != errno.ENOENT:
                raise
diff --git a/meta/lib/oeqa/selftest/bblayers.py b/meta/lib/oeqa/selftest/bblayers.py new file mode 100644 index 0000000000..52aa4f8112 --- /dev/null +++ b/meta/lib/oeqa/selftest/bblayers.py | |||
| @@ -0,0 +1,37 @@ | |||
| 1 | import unittest | ||
| 2 | import os | ||
| 3 | import logging | ||
| 4 | import re | ||
| 5 | import shutil | ||
| 6 | |||
| 7 | import oeqa.utils.ftools as ftools | ||
| 8 | from oeqa.selftest.base import oeSelfTest | ||
| 9 | from oeqa.utils.commands import runCmd | ||
| 10 | |||
class BitbakeLayers(oeSelfTest):
    """Tests for the bitbake-layers command line tool."""

    def test_bitbakelayers_showcrossdepends(self):
        result = runCmd('bitbake-layers show-cross-depends')
        self.assertTrue('aspell' in result.output)

    def test_bitbakelayers_showlayers(self):
        result = runCmd('bitbake-layers show_layers')
        self.assertTrue('meta-selftest' in result.output)

    def test_bitbakelayers_showappends(self):
        result = runCmd('bitbake-layers show_appends')
        self.assertTrue('xcursor-transparent-theme_0.1.1.bbappend' in result.output, msg='xcursor-transparent-theme_0.1.1.bbappend file was not recognised')

    def test_bitbakelayers_showoverlayed(self):
        result = runCmd('bitbake-layers show_overlayed')
        # Failure message fixed: it was copy-pasted from the show_appends
        # test and referred to the bbappend instead of the overlayed recipe.
        self.assertTrue('aspell' in result.output, msg='aspell overlayed recipe was not recognised')

    def test_bitbakelayers_flatten(self):
        """Flatten all layers into ./test and check the bbappend was merged in."""
        self.assertFalse(os.path.isdir(os.path.join(self.builddir, 'test')))
        result = runCmd('bitbake-layers flatten test')
        bb_file = os.path.join(self.builddir, 'test/recipes-graphics/xcursor-transparent-theme/xcursor-transparent-theme_0.1.1.bb')
        self.assertTrue(os.path.isfile(bb_file))
        contents = ftools.read_file(bb_file)
        find_in_contents = re.search("##### bbappended from meta-selftest #####\n(.*\n)*include test_recipe.inc", contents)
        # Remove the flattened tree before asserting so a failure doesn't
        # leave ./test behind for the next test.
        shutil.rmtree(os.path.join(self.builddir, 'test'))
        self.assertTrue(find_in_contents)
diff --git a/meta/lib/oeqa/selftest/bbtests.py b/meta/lib/oeqa/selftest/bbtests.py new file mode 100644 index 0000000000..6815ecfe0b --- /dev/null +++ b/meta/lib/oeqa/selftest/bbtests.py | |||
| @@ -0,0 +1,104 @@ | |||
| 1 | import unittest | ||
| 2 | import os | ||
| 3 | import logging | ||
| 4 | import re | ||
| 5 | import shutil | ||
| 6 | |||
| 7 | import oeqa.utils.ftools as ftools | ||
| 8 | from oeqa.selftest.base import oeSelfTest | ||
| 9 | from oeqa.utils.commands import runCmd, bitbake, get_bb_var | ||
| 10 | |||
class BitbakeTests(oeSelfTest):
    """Basic sanity tests for the bitbake command-line interface."""

    def test_run_bitbake_from_dir_1(self):
        """bitbake must be runnable from a subdirectory of the build directory."""
        os.chdir(os.path.join(self.builddir, 'conf'))
        bitbake('-e')

    def test_run_bitbake_from_dir_2(self):
        """bitbake must be runnable from outside the build dir when BBPATH is set."""
        my_env = os.environ.copy()
        my_env['BBPATH'] = my_env['BUILDDIR']
        os.chdir(os.path.dirname(os.environ['BUILDDIR']))
        bitbake('-e', env=my_env)

    def test_event_handler(self):
        """A class handler enabled via INHERIT must observe build events."""
        self.write_config("INHERIT += \"test_events\"")
        result = bitbake('m4-native')
        find_build_started = re.search("NOTE: Test for bb\.event\.BuildStarted(\n.*)*NOTE: Preparing runqueue", result.output)
        find_build_completed = re.search("Tasks Summary:.*(\n.*)*NOTE: Test for bb\.event\.BuildCompleted", result.output)
        self.assertTrue(find_build_started, msg="Match failed in:\n%s" % result.output)
        self.assertTrue(find_build_completed, msg="Match failed in:\n%s" % result.output)
        self.assertFalse('Test for bb.event.InvalidEvent' in result.output)

    def test_local_sstate(self):
        """After a clean, a rebuild must restore from local sstate (setscene tasks)."""
        bitbake('m4-native -ccleansstate')
        bitbake('m4-native')
        bitbake('m4-native -cclean')
        result = bitbake('m4-native')
        find_setscene = re.search("m4-native.*do_.*_setscene", result.output)
        self.assertTrue(find_setscene)

    def test_bitbake_invalid_recipe(self):
        """bitbake -b with an unknown recipe must fail with a clear error."""
        result = bitbake('-b asdf', ignore_status=True)
        self.assertTrue("ERROR: Unable to find any recipe file matching 'asdf'" in result.output)

    def test_bitbake_invalid_target(self):
        """bitbake with an unknown target must fail with a PROVIDES error."""
        result = bitbake('asdf', ignore_status=True)
        self.assertTrue("ERROR: Nothing PROVIDES 'asdf'" in result.output)

    def test_warnings_errors(self):
        """A failed build must print warning and error counts in its summary."""
        result = bitbake('-b asdf', ignore_status=True)
        find_warnings = re.search("Summary: There w.{2,3}? [1-9][0-9]* WARNING messages* shown", result.output)
        find_errors = re.search("Summary: There w.{2,3}? [1-9][0-9]* ERROR messages* shown", result.output)
        # Fixed typo in the failure messages: "mumber" -> "number".
        self.assertTrue(find_warnings, msg="Did not find the number of warnings at the end of the build:\n" + result.output)
        self.assertTrue(find_errors, msg="Did not find the number of errors at the end of the build:\n" + result.output)

    def test_invalid_patch(self):
        """A SRC_URI patch that cannot apply must fail do_patch."""
        self.write_recipeinc('man', 'SRC_URI += "file://man-1.5h1-make.patch"')
        result = bitbake('man -c patch', ignore_status=True)
        self.delete_recipeinc('man')
        bitbake('-cclean man')
        self.assertTrue("ERROR: Function failed: patch_do_patch" in result.output)

    def test_force_task(self):
        """bitbake -C must re-run the forced task and everything after it."""
        bitbake('m4-native')
        result = bitbake('-C compile m4-native')
        look_for_tasks = ['do_compile', 'do_install', 'do_populate_sysroot']
        for task in look_for_tasks:
            find_task = re.search("m4-native.*%s" % task, result.output)
            self.assertTrue(find_task)

    def test_bitbake_g(self):
        """bitbake -g must emit the dependency graph files."""
        result = bitbake('-g core-image-full-cmdline')
        self.assertTrue('NOTE: PN build list saved to \'pn-buildlist\'' in result.output)
        self.assertTrue('openssh' in ftools.read_file(os.path.join(self.builddir, 'pn-buildlist')))
        for f in ['pn-buildlist', 'pn-depends.dot', 'package-depends.dot', 'task-depends.dot']:
            os.remove(f)

    def test_image_manifest(self):
        """An image build must create a .manifest link in the deploy directory."""
        bitbake('core-image-minimal')
        deploydir = get_bb_var("DEPLOY_DIR_IMAGE", target="core-image-minimal")
        imagename = get_bb_var("IMAGE_LINK_NAME", target="core-image-minimal")
        manifest = os.path.join(deploydir, imagename + ".manifest")
        self.assertTrue(os.path.islink(manifest), msg="No manifest file created for image")

    def test_invalid_recipe_src_uri(self):
        """A nonexistent file:// SRC_URI must fail do_fetch with fetcher errors."""
        data = 'SRC_URI = "file://invalid"'
        self.write_recipeinc('man', data)
        bitbake('-ccleanall man')
        result = bitbake('-c fetch man', ignore_status=True)
        bitbake('-ccleanall man')
        self.delete_recipeinc('man')
        # Fixed typo in the failure message: "succeded" -> "succeeded".
        self.assertEqual(result.status, 1, msg='Command succeeded when it should have failed')
        self.assertTrue('ERROR: Fetcher failure: Unable to find file file://invalid anywhere. The paths that were searched were:' in result.output)
        self.assertTrue('ERROR: Function failed: Fetcher failure for URL: \'file://invalid\'. Unable to fetch URL from any source.' in result.output)

    def test_rename_downloaded_file(self):
        """The downloadfilename SRC_URI parameter must rename the fetched file."""
        data = 'SRC_URI_append = ";downloadfilename=test-aspell.tar.gz"'
        self.write_recipeinc('aspell', data)
        bitbake('-ccleanall aspell')
        result = bitbake('-c fetch aspell', ignore_status=True)
        self.delete_recipeinc('aspell')
        self.assertEqual(result.status, 0)
        self.assertTrue(os.path.isfile(os.path.join(get_bb_var("DL_DIR"), 'test-aspell.tar.gz')))
        self.assertTrue(os.path.isfile(os.path.join(get_bb_var("DL_DIR"), 'test-aspell.tar.gz.done')))
        bitbake('-ccleanall aspell')
diff --git a/meta/lib/oeqa/selftest/buildhistory.py b/meta/lib/oeqa/selftest/buildhistory.py new file mode 100644 index 0000000000..d8cae4664b --- /dev/null +++ b/meta/lib/oeqa/selftest/buildhistory.py | |||
| @@ -0,0 +1,45 @@ | |||
| 1 | import unittest | ||
| 2 | import os | ||
| 3 | import re | ||
| 4 | import shutil | ||
| 5 | import datetime | ||
| 6 | |||
| 7 | import oeqa.utils.ftools as ftools | ||
| 8 | from oeqa.selftest.base import oeSelfTest | ||
| 9 | from oeqa.utils.commands import Command, runCmd, bitbake, get_bb_var, get_test_layer | ||
| 10 | |||
| 11 | |||
class BuildhistoryBase(oeSelfTest):
    """Shared helpers for tests exercising the buildhistory class."""

    def config_buildhistory(self, tmp_bh_location=False):
        """Enable buildhistory in the test config, optionally in a throwaway directory."""
        already_enabled = ('buildhistory' in get_bb_var('USER_CLASSES')) or ('buildhistory' in get_bb_var('INHERIT'))
        if not already_enabled:
            self.append_config('INHERIT += "buildhistory"\nBUILDHISTORY_COMMIT = "1"')

        if tmp_bh_location:
            # Point buildhistory at a unique temporary directory so the test
            # neither pollutes nor depends on any pre-existing history.
            tmp_bh_dir = os.path.join(self.builddir, "tmp_buildhistory_%s" % datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
            self.append_config("BUILDHISTORY_DIR = \"%s\"" % tmp_bh_dir)
            self.track_for_cleanup(tmp_bh_dir)

    def run_buildhistory_operation(self, target, global_config='', target_config='', change_bh_location=False, expect_error=False, error_regex=''):
        """Build target under the given config; assert success, or the expected failure."""
        self.config_buildhistory(tmp_bh_location=change_bh_location)

        self.append_config(global_config)
        self.append_recipeinc(target, target_config)
        bitbake("-cclean %s" % target)
        result = bitbake(target, ignore_status=True)
        self.remove_config(global_config)
        self.remove_recipeinc(target, target_config)

        if expect_error:
            self.assertEqual(result.status, 1, msg="Error expected for global config '%s' and target config '%s'" % (global_config, target_config))
            self.assertTrue(re.search(error_regex, result.output), msg="Could not find desired error in output: %s" % error_regex)
        else:
            self.assertEqual(result.status, 0, msg="Command 'bitbake %s' has failed unexpectedly: %s" % (target, result.output))
diff --git a/meta/lib/oeqa/selftest/buildoptions.py b/meta/lib/oeqa/selftest/buildoptions.py new file mode 100644 index 0000000000..8ff40baddc --- /dev/null +++ b/meta/lib/oeqa/selftest/buildoptions.py | |||
| @@ -0,0 +1,113 @@ | |||
| 1 | import unittest | ||
| 2 | import os | ||
| 3 | import logging | ||
| 4 | import re | ||
| 5 | |||
| 6 | from oeqa.selftest.base import oeSelfTest | ||
| 7 | from oeqa.selftest.buildhistory import BuildhistoryBase | ||
| 8 | from oeqa.utils.commands import runCmd, bitbake, get_bb_var | ||
| 9 | import oeqa.utils.ftools as ftools | ||
| 10 | |||
class ImageOptionsTests(oeSelfTest):
    """Tests for image-related build configuration options."""

    def test_incremental_image_generation(self):
        """INC_RPM_IMAGE_GEN must install and then remove packages incrementally."""
        bitbake("-c cleanall core-image-minimal")
        self.write_config('INC_RPM_IMAGE_GEN = "1"')
        self.append_config('IMAGE_FEATURES += "ssh-server-openssh"')
        bitbake("core-image-minimal")
        res = runCmd("grep 'Installing openssh-sshd' %s" % (os.path.join(get_bb_var("WORKDIR", "core-image-minimal"), "temp/log.do_rootfs")), ignore_status=True)
        self.remove_config('IMAGE_FEATURES += "ssh-server-openssh"')
        self.assertEqual(0, res.status, msg="No match for openssh-sshd in log.do_rootfs")
        bitbake("core-image-minimal")
        res = runCmd("grep 'Removing openssh-sshd' %s" % (os.path.join(get_bb_var("WORKDIR", "core-image-minimal"), "temp/log.do_rootfs")), ignore_status=True)
        self.assertEqual(0, res.status, msg="openssh-sshd was not removed from image")

    def test_rm_old_image(self):
        """RM_OLD_IMAGE must delete superseded image files when the rootfs is rebuilt."""
        bitbake("core-image-minimal")
        deploydir = get_bb_var("DEPLOY_DIR_IMAGE", target="core-image-minimal")
        imagename = get_bb_var("IMAGE_LINK_NAME", target="core-image-minimal")
        deploydir_files = os.listdir(deploydir)
        track_original_files = []
        for image_file in deploydir_files:
            if imagename in image_file and os.path.islink(os.path.join(deploydir, image_file)):
                track_original_files.append(os.path.realpath(os.path.join(deploydir, image_file)))
        self.append_config("RM_OLD_IMAGE = \"1\"")
        bitbake("-C rootfs core-image-minimal")
        deploydir_files = os.listdir(deploydir)
        remaining_not_expected = [path for path in track_original_files if os.path.basename(path) in deploydir_files]
        # Fixed typo in the failure message: "ware" -> "were".
        self.assertFalse(remaining_not_expected, msg="\nThe following image files were not removed: %s" % ', '.join(map(str, remaining_not_expected)))

    def test_ccache_tool(self):
        """Inheriting the ccache class must route compiles through ccache."""
        bitbake("ccache-native")
        self.assertTrue(os.path.isfile(os.path.join(get_bb_var('STAGING_BINDIR_NATIVE', 'ccache-native'), "ccache")))
        self.write_config('INHERIT += "ccache"')
        bitbake("m4 -c cleansstate")
        bitbake("m4 -c compile")
        res = runCmd("grep ccache %s" % (os.path.join(get_bb_var("WORKDIR", "m4"), "temp/log.do_compile")), ignore_status=True)
        self.assertEqual(0, res.status, msg="No match for ccache in m4 log.do_compile")
        bitbake("ccache-native -ccleansstate")
| 49 | |||
| 50 | |||
class DiskMonTest(oeSelfTest):
    """Tests for the BB_DISKMON_DIRS disk space monitor."""

    def test_stoptask_behavior(self):
        """Each monitor action (STOPTASKS/ABORT/WARN) must trigger as configured."""
        df_result = runCmd("df -Pk %s" % os.getcwd())
        # Fourth field of the second df line is the available space in KB;
        # using it as the threshold guarantees the monitor fires immediately.
        avail_kb = df_result.output.split("\n")[1].split()[3]
        failing_cases = (
            ('STOPTASKS', 'ERROR: No new tasks can be executed since the disk space monitor action is "STOPTASKS"!'),
            ('ABORT', 'ERROR: Immediately abort since the disk space monitor action is "ABORT"!'),
        )
        for action, expected_msg in failing_cases:
            self.write_config('BB_DISKMON_DIRS = "%s,${TMPDIR},%sK,4510K"' % (action, avail_kb))
            result = bitbake("m4", ignore_status=True)
            self.assertTrue(expected_msg in result.output)
            self.assertEqual(result.status, 1)
        # WARN must not fail the build, only emit a warning.
        self.write_config('BB_DISKMON_DIRS = "WARN,${TMPDIR},%sK,4510K"' % avail_kb)
        result = bitbake("m4")
        self.assertTrue('WARNING: The free space' in result.output)
| 67 | |||
class SanityOptionsTest(oeSelfTest):
    """Tests for switching QA issues between warnings and errors."""

    def test_options_warnqa_errorqa_switch(self):
        """A QA issue in ERROR_QA must fail the build; moving it to WARN_QA must only warn."""
        bitbake("xcursor-transparent-theme -ccleansstate")

        if "packages-list" not in get_bb_var("ERROR_QA"):
            self.write_config('ERROR_QA_append = " packages-list"')

        self.write_recipeinc('xcursor-transparent-theme', 'PACKAGES += "${PN}-dbg"')
        result = bitbake("xcursor-transparent-theme", ignore_status=True)
        self.delete_recipeinc('xcursor-transparent-theme')
        self.assertTrue("ERROR: QA Issue: xcursor-transparent-theme-dbg is listed in PACKAGES multiple times, this leads to packaging errors." in result.output)
        self.assertEqual(result.status, 1)
        # Re-create the bad recipe, then demote the issue from error to warning.
        self.write_recipeinc('xcursor-transparent-theme', 'PACKAGES += "${PN}-dbg"')
        self.append_config('ERROR_QA_remove = "packages-list"')
        self.append_config('WARN_QA_append = " packages-list"')
        result = bitbake("xcursor-transparent-theme")
        bitbake("xcursor-transparent-theme -ccleansstate")
        self.delete_recipeinc('xcursor-transparent-theme')
        self.assertTrue("WARNING: QA Issue: xcursor-transparent-theme-dbg is listed in PACKAGES multiple times, this leads to packaging errors." in result.output)

    def test_sanity_userspace_dependency(self):
        """The unsafe-references QA checks must warn for gzip and nfs-utils."""
        self.append_config('WARN_QA_append = " unsafe-references-in-binaries unsafe-references-in-scripts"')
        bitbake("-ccleansstate gzip nfs-utils")
        result = bitbake("gzip nfs-utils")
        self.assertTrue("WARNING: QA Issue: gzip" in result.output)
        self.assertTrue("WARNING: QA Issue: nfs-utils" in result.output)
| 95 | |||
class BuildhistoryTests(BuildhistoryBase):
    """End-to-end checks for the buildhistory class."""

    def test_buildhistory_basic(self):
        """A plain build with buildhistory enabled must create BUILDHISTORY_DIR."""
        self.run_buildhistory_operation('xcursor-transparent-theme')
        self.assertTrue(os.path.isdir(get_bb_var('BUILDHISTORY_DIR')))

    def test_buildhistory_buildtime_pr_backwards(self):
        """Rebuilding with a lower PR must raise the version-went-backwards QA error."""
        self.add_command_to_tearDown('cleanup-workdir')
        recipe = 'xcursor-transparent-theme'
        expected_error = "ERROR: QA Issue: Package version for package %s went backwards which would break package feeds from (.*-r1 to .*-r0)" % recipe
        self.run_buildhistory_operation(recipe, target_config="PR = \"r1\"", change_bh_location=True)
        self.run_buildhistory_operation(recipe, target_config="PR = \"r0\"", change_bh_location=False, expect_error=True, error_regex=expected_error)
| 108 | |||
| 109 | |||
| 110 | |||
| 111 | |||
| 112 | |||
| 113 | |||
diff --git a/meta/lib/oeqa/selftest/oescripts.py b/meta/lib/oeqa/selftest/oescripts.py new file mode 100644 index 0000000000..4aab2ed095 --- /dev/null +++ b/meta/lib/oeqa/selftest/oescripts.py | |||
| @@ -0,0 +1,60 @@ | |||
| 1 | import datetime | ||
| 2 | import unittest | ||
| 3 | import os | ||
| 4 | import re | ||
| 5 | import shutil | ||
| 6 | |||
| 7 | import oeqa.utils.ftools as ftools | ||
| 8 | from oeqa.selftest.base import oeSelfTest | ||
| 9 | from oeqa.selftest.buildhistory import BuildhistoryBase | ||
| 10 | from oeqa.utils.commands import Command, runCmd, bitbake, get_bb_var, get_test_layer | ||
| 11 | |||
class TestScripts(oeSelfTest):
    """Tests for helper scripts shipped alongside the build system."""

    def test_cleanup_workdir(self):
        """cleanup-workdir must remove exactly the workdirs created by this test's builds.

        Builds two versions of gzip, runs cleanup-workdir, then checks that the
        newly created workdir entries (and only those) are gone afterwards.
        """
        path = os.path.dirname(get_bb_var('WORKDIR', 'gzip'))
        old_version_recipe = os.path.join(get_bb_var('COREBASE'), 'meta/recipes-extended/gzip/gzip_1.3.12.bb')
        bitbake("-ccleansstate gzip")
        bitbake("-ccleansstate -b %s" % old_version_recipe)
        # Query each WORKDIR once (get_bb_var spawns a bitbake process each call;
        # the original queried the old-version WORKDIR twice back-to-back).
        old_version_workdir = get_bb_var('WORKDIR', "-b %s" % old_version_recipe)
        if os.path.exists(old_version_workdir):
            shutil.rmtree(old_version_workdir)
        current_workdir = get_bb_var('WORKDIR', 'gzip')
        if os.path.exists(current_workdir):
            shutil.rmtree(current_workdir)

        if os.path.exists(path):
            initial_contents = os.listdir(path)
        else:
            initial_contents = []

        bitbake('gzip')
        intermediary_contents = os.listdir(path)
        bitbake("-b %s" % old_version_recipe)
        runCmd('cleanup-workdir')
        remaining_contents = os.listdir(path)

        # Entries created by the two builds above are what the script should delete.
        expected_contents = [x for x in intermediary_contents if x not in initial_contents]
        remaining_not_expected = [x for x in remaining_contents if x not in expected_contents]
        self.assertFalse(remaining_not_expected, msg="Not all necessary content has been deleted from %s: %s" % (path, ', '.join(map(str, remaining_not_expected))))
        expected_not_remaining = [x for x in expected_contents if x not in remaining_contents]
        self.assertFalse(expected_not_remaining, msg="The script removed extra contents from %s: %s" % (path, ', '.join(map(str, expected_not_remaining))))
| 41 | |||
class BuildhistoryDiffTests(BuildhistoryBase):
    """Checks for the buildhistory-diff script."""

    def test_buildhistory_diff(self):
        """buildhistory-diff must report the PR change between two builds."""
        self.add_command_to_tearDown('cleanup-workdir')
        recipe = 'xcursor-transparent-theme'
        self.run_buildhistory_operation(recipe, target_config="PR = \"r1\"", change_bh_location=True)
        self.run_buildhistory_operation(recipe, target_config="PR = \"r0\"", change_bh_location=False, expect_error=True)
        result = runCmd("buildhistory-diff -p %s" % get_bb_var('BUILDHISTORY_DIR'))
        expected_output = 'PR changed from "r1" to "r0"'
        self.assertTrue(expected_output in result.output, msg="Did not find expected output: %s" % result.output)
| 52 | |||
| 53 | |||
| 54 | |||
| 55 | |||
| 56 | |||
| 57 | |||
| 58 | |||
| 59 | |||
| 60 | |||
diff --git a/meta/lib/oeqa/selftest/prservice.py b/meta/lib/oeqa/selftest/prservice.py new file mode 100644 index 0000000000..789c05f1e5 --- /dev/null +++ b/meta/lib/oeqa/selftest/prservice.py | |||
| @@ -0,0 +1,113 @@ | |||
| 1 | import unittest | ||
| 2 | import os | ||
| 3 | import logging | ||
| 4 | import re | ||
| 5 | import shutil | ||
| 6 | import datetime | ||
| 7 | |||
| 8 | import oeqa.utils.ftools as ftools | ||
| 9 | from oeqa.selftest.base import oeSelfTest | ||
| 10 | from oeqa.utils.commands import runCmd, bitbake, get_bb_var | ||
| 11 | |||
class BitbakePrTests(oeSelfTest):
    """Tests for the PR service: revision bumping and database export/import."""

    def get_pr_version(self, package_name):
        """Return the PR service revision (the .X suffix of PKGR) for package_name."""
        pkgdata_dir = get_bb_var('PKGDATA_DIR')
        package_data_file = os.path.join(pkgdata_dir, 'runtime', package_name)
        package_data = ftools.read_file(package_data_file)
        find_pr = re.search("PKGR: r[0-9]+\.([0-9]+)", package_data)
        self.assertTrue(find_pr)
        return int(find_pr.group(1))

    def get_task_stamp(self, package_name, recipe_task):
        """Return the signature hash of the single stamp for recipe_task of package_name."""
        stampdata = get_bb_var('STAMP', target=package_name).split('/')
        prefix = stampdata[-1]
        package_stamps_path = "/".join(stampdata[:-1])
        stamps = []
        for stamp in os.listdir(package_stamps_path):
            find_stamp = re.match("%s\.%s\.([a-z0-9]{32})" % (prefix, recipe_task), stamp)
            if find_stamp:
                stamps.append(find_stamp.group(1))
        # Fixed typo in the failure message: "Cound" -> "Could".
        self.assertFalse(len(stamps) == 0, msg="Could not find stamp for task %s for recipe %s" % (recipe_task, package_name))
        self.assertFalse(len(stamps) > 1, msg="Found multiple %s stamps for the %s recipe in the %s directory." % (recipe_task, package_name, package_stamps_path))
        return str(stamps[0])

    def increment_package_pr(self, package_name):
        """Touch do_package via a recipe .inc so the PR service bumps the revision."""
        inc_data = "do_package_append() {\nbb.build.exec_func('do_test_prserv', d)\n}\ndo_test_prserv() {\necho \"The current date is: %s\"\n}" % datetime.datetime.now()
        self.write_recipeinc(package_name, inc_data)
        bitbake("-ccleansstate %s" % package_name)
        res = bitbake(package_name, ignore_status=True)
        self.delete_recipeinc(package_name)
        self.assertEqual(res.status, 0, msg=res.output)
        self.assertTrue("NOTE: Started PRServer with DBfile" in res.output, msg=res.output)

    def config_pr_tests(self, package_name, package_type='rpm', pr_socket='localhost:0'):
        """Configure the packaging backend and PR service host for the test."""
        config_package_data = 'PACKAGE_CLASSES = "package_%s"' % package_type
        self.write_config(config_package_data)
        config_server_data = 'PRSERV_HOST = "%s"' % pr_socket
        self.append_config(config_server_data)

    def run_test_pr_service(self, package_name, package_type='rpm', track_task='do_package', pr_socket='localhost:0'):
        """Check two consecutive PR bumps increment PKGR by one and change the task stamp."""
        self.config_pr_tests(package_name, package_type, pr_socket)

        self.increment_package_pr(package_name)
        pr_1 = self.get_pr_version(package_name)
        stamp_1 = self.get_task_stamp(package_name, track_task)

        self.increment_package_pr(package_name)
        pr_2 = self.get_pr_version(package_name)
        stamp_2 = self.get_task_stamp(package_name, track_task)

        bitbake("-ccleansstate %s" % package_name)
        self.assertTrue(pr_2 - pr_1 == 1)
        self.assertTrue(stamp_1 != stamp_2)

    def run_test_pr_export_import(self, package_name, replace_current_db=True):
        """Check exporting and re-importing the PR database preserves the revision counter."""
        self.config_pr_tests(package_name)

        self.increment_package_pr(package_name)
        pr_1 = self.get_pr_version(package_name)

        exported_db_path = os.path.join(self.builddir, 'export.inc')
        export_result = runCmd("bitbake-prserv-tool export %s" % exported_db_path, ignore_status=True)
        self.assertEqual(export_result.status, 0, msg="PR Service database export failed: %s" % export_result.output)

        if replace_current_db:
            current_db_path = os.path.join(get_bb_var('PERSISTENT_DIR'), 'prserv.sqlite3')
            self.assertTrue(os.path.exists(current_db_path), msg="Path to current PR Service database is invalid: %s" % current_db_path)
            os.remove(current_db_path)

        import_result = runCmd("bitbake-prserv-tool import %s" % exported_db_path, ignore_status=True)
        os.remove(exported_db_path)
        self.assertEqual(import_result.status, 0, msg="PR Service database import failed: %s" % import_result.output)

        self.increment_package_pr(package_name)
        pr_2 = self.get_pr_version(package_name)

        bitbake("-ccleansstate %s" % package_name)
        self.assertTrue(pr_2 - pr_1 == 1)

    def test_import_export_replace_db(self):
        self.run_test_pr_export_import('m4')

    def test_import_export_override_db(self):
        self.run_test_pr_export_import('m4', replace_current_db=False)

    def test_pr_service_rpm_arch_dep(self):
        self.run_test_pr_service('m4', 'rpm', 'do_package')

    def test_pr_service_deb_arch_dep(self):
        self.run_test_pr_service('m4', 'deb', 'do_package')

    def test_pr_service_ipk_arch_dep(self):
        self.run_test_pr_service('m4', 'ipk', 'do_package')

    def test_pr_service_rpm_arch_indep(self):
        self.run_test_pr_service('xcursor-transparent-theme', 'rpm', 'do_package')

    def test_pr_service_deb_arch_indep(self):
        self.run_test_pr_service('xcursor-transparent-theme', 'deb', 'do_package')

    def test_pr_service_ipk_arch_indep(self):
        self.run_test_pr_service('xcursor-transparent-theme', 'ipk', 'do_package')
diff --git a/meta/lib/oeqa/selftest/sstate.py b/meta/lib/oeqa/selftest/sstate.py new file mode 100644 index 0000000000..5989724432 --- /dev/null +++ b/meta/lib/oeqa/selftest/sstate.py | |||
| @@ -0,0 +1,53 @@ | |||
| 1 | import datetime | ||
| 2 | import unittest | ||
| 3 | import os | ||
| 4 | import re | ||
| 5 | import shutil | ||
| 6 | |||
| 7 | import oeqa.utils.ftools as ftools | ||
| 8 | from oeqa.selftest.base import oeSelfTest | ||
| 9 | from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_test_layer | ||
| 10 | |||
| 11 | |||
class SStateBase(oeSelfTest):
    """Shared helpers for sstate tests: configuration setup and sstate file searching."""

    def setUpLocal(self):
        """Cache the current sstate location and distro-specific subdirectory."""
        self.temp_sstate_location = None
        self.sstate_path = get_bb_var('SSTATE_DIR')
        self.distro = get_bb_var('NATIVELSBSTRING')
        self.distro_specific_sstate = os.path.join(self.sstate_path, self.distro)

    # Creates a special sstate configuration with the option to add sstate mirrors
    def config_sstate(self, temp_sstate_location=False, add_local_mirrors=[]):
        """Optionally redirect SSTATE_DIR to a temp dir and register local mirrors.

        Note: add_local_mirrors uses a mutable default, but it is only read,
        never mutated, so the shared-default pitfall does not apply here.
        """
        self.temp_sstate_location = temp_sstate_location

        if self.temp_sstate_location:
            temp_sstate_path = os.path.join(self.builddir, "temp_sstate_%s" % datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
            config_temp_sstate = "SSTATE_DIR = \"%s\"" % temp_sstate_path
            self.append_config(config_temp_sstate)
            self.track_for_cleanup(temp_sstate_path)
            # Re-query so the cached values reflect the new configuration.
            self.sstate_path = get_bb_var('SSTATE_DIR')
            self.distro = get_bb_var('NATIVELSBSTRING')
            self.distro_specific_sstate = os.path.join(self.sstate_path, self.distro)

        if add_local_mirrors:
            config_set_sstate_if_not_set = 'SSTATE_MIRRORS ?= ""'
            self.append_config(config_set_sstate_if_not_set)
            for local_mirror in add_local_mirrors:
                self.assertFalse(os.path.join(local_mirror) == os.path.join(self.sstate_path), msg='Cannot add the current sstate path as a sstate mirror')
                config_sstate_mirror = "SSTATE_MIRRORS += \"file://.* file:///%s/PATH\"" % local_mirror
                self.append_config(config_sstate_mirror)

    # Returns a list containing sstate files
    def search_sstate(self, filename_regex, distro_specific=True, distro_nonspecific=True):
        """Return sstate file names under SSTATE_DIR matching filename_regex."""
        result = []
        for root, dirs, files in os.walk(self.sstate_path):
            # re.escape protects against regex metacharacters (e.g. '.') in the
            # distro string and sstate path, which were previously interpolated
            # into the pattern unescaped.
            if distro_specific and re.search("%s/[a-z0-9]{2}$" % re.escape(self.distro), root):
                for f in files:
                    if re.search(filename_regex, f):
                        result.append(f)
            if distro_nonspecific and re.search("%s/[a-z0-9]{2}$" % re.escape(self.sstate_path), root):
                for f in files:
                    if re.search(filename_regex, f):
                        result.append(f)
        return result
diff --git a/meta/lib/oeqa/selftest/sstatetests.py b/meta/lib/oeqa/selftest/sstatetests.py new file mode 100644 index 0000000000..35ff28b04a --- /dev/null +++ b/meta/lib/oeqa/selftest/sstatetests.py | |||
| @@ -0,0 +1,193 @@ | |||
| 1 | import datetime | ||
| 2 | import unittest | ||
| 3 | import os | ||
| 4 | import re | ||
| 5 | import shutil | ||
| 6 | |||
| 7 | import oeqa.utils.ftools as ftools | ||
| 8 | from oeqa.selftest.base import oeSelfTest | ||
| 9 | from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_test_layer | ||
| 10 | from oeqa.selftest.sstate import SStateBase | ||
| 11 | |||
| 12 | |||
| 13 | class SStateTests(SStateBase): | ||
| 14 | |||
| 15 | # Test sstate files creation and their location | ||
| 16 | def run_test_sstate_creation(self, targets, distro_specific=True, distro_nonspecific=True, temp_sstate_location=True, should_pass=True): | ||
| 17 | self.config_sstate(temp_sstate_location) | ||
| 18 | |||
| 19 | if self.temp_sstate_location: | ||
| 20 | bitbake(['-cclean'] + targets) | ||
| 21 | else: | ||
| 22 | bitbake(['-ccleansstate'] + targets) | ||
| 23 | |||
| 24 | bitbake(targets) | ||
| 25 | file_tracker = self.search_sstate('|'.join(map(str, targets)), distro_specific, distro_nonspecific) | ||
| 26 | if should_pass: | ||
| 27 | self.assertTrue(file_tracker , msg="Could not find sstate files for: %s" % ', '.join(map(str, targets))) | ||
| 28 | else: | ||
| 29 | self.assertTrue(not file_tracker , msg="Found sstate files in the wrong place for: %s" % ', '.join(map(str, targets))) | ||
| 30 | |||
    def test_sstate_creation_distro_specific_pass(self):
        # Cross/native recipes are distro specific: their sstate must appear
        # under the NATIVELSBSTRING subdirectory (see SStateBase.search_sstate).
        self.run_test_sstate_creation(['binutils-cross', 'binutils-native'], distro_specific=True, distro_nonspecific=False, temp_sstate_location=True)

    def test_sstate_creation_distro_specific_fail(self):
        # Negative check: the same files must NOT appear in the nonspecific area.
        self.run_test_sstate_creation(['binutils-cross', 'binutils-native'], distro_specific=False, distro_nonspecific=True, temp_sstate_location=True, should_pass=False)

    def test_sstate_creation_distro_nonspecific_pass(self):
        # eglibc-initial sstate is expected in the distro-nonspecific area.
        self.run_test_sstate_creation(['eglibc-initial'], distro_specific=False, distro_nonspecific=True, temp_sstate_location=True)

    def test_sstate_creation_distro_nonspecific_fail(self):
        # Negative check: it must NOT appear under the distro-specific directory.
        self.run_test_sstate_creation(['eglibc-initial'], distro_specific=True, distro_nonspecific=False, temp_sstate_location=True, should_pass=False)
| 42 | |||
| 43 | |||
| 44 | # Test the sstate files deletion part of the do_cleansstate task | ||
| 45 | def run_test_cleansstate_task(self, targets, distro_specific=True, distro_nonspecific=True, temp_sstate_location=True): | ||
| 46 | self.config_sstate(temp_sstate_location) | ||
| 47 | |||
| 48 | bitbake(['-ccleansstate'] + targets) | ||
| 49 | |||
| 50 | bitbake(targets) | ||
| 51 | tgz_created = self.search_sstate('|'.join(map(str, [s + '.*?\.tgz$' for s in targets])), distro_specific, distro_nonspecific) | ||
| 52 | self.assertTrue(tgz_created, msg="Could not find sstate .tgz files for: %s" % ', '.join(map(str, targets))) | ||
| 53 | |||
| 54 | siginfo_created = self.search_sstate('|'.join(map(str, [s + '.*?\.siginfo$' for s in targets])), distro_specific, distro_nonspecific) | ||
| 55 | self.assertTrue(siginfo_created, msg="Could not find sstate .siginfo files for: %s" % ', '.join(map(str, targets))) | ||
| 56 | |||
| 57 | bitbake(['-ccleansstate'] + targets) | ||
| 58 | tgz_removed = self.search_sstate('|'.join(map(str, [s + '.*?\.tgz$' for s in targets])), distro_specific, distro_nonspecific) | ||
| 59 | self.assertTrue(not tgz_removed, msg="do_cleansstate didn't remove .tgz sstate files for: %s" % ', '.join(map(str, targets))) | ||
| 60 | |||
    def test_cleansstate_task_distro_specific_nonspecific(self):
        # Mixed set: cross/native recipes plus eglibc-initial; search both areas.
        self.run_test_cleansstate_task(['binutils-cross', 'binutils-native', 'eglibc-initial'], distro_specific=True, distro_nonspecific=True, temp_sstate_location=True)

    def test_cleansstate_task_distro_nonspecific(self):
        # Only the distro-nonspecific sstate area is searched here.
        self.run_test_cleansstate_task(['eglibc-initial'], distro_specific=False, distro_nonspecific=True, temp_sstate_location=True)

    def test_cleansstate_task_distro_specific(self):
        # Only the distro-specific sstate area is searched here.
        self.run_test_cleansstate_task(['binutils-cross', 'binutils-native', 'eglibc-initial'], distro_specific=True, distro_nonspecific=False, temp_sstate_location=True)
| 69 | |||
| 70 | |||
| 71 | # Test rebuilding of distro-specific sstate files | ||
| 72 | def run_test_rebuild_distro_specific_sstate(self, targets, temp_sstate_location=True): | ||
| 73 | self.config_sstate(temp_sstate_location) | ||
| 74 | |||
| 75 | bitbake(['-ccleansstate'] + targets) | ||
| 76 | |||
| 77 | bitbake(targets) | ||
| 78 | self.assertTrue(self.search_sstate('|'.join(map(str, [s + '.*?\.tgz$' for s in targets])), distro_specific=False, distro_nonspecific=True) == [], msg="Found distro non-specific sstate for: %s" % ', '.join(map(str, targets))) | ||
| 79 | file_tracker_1 = self.search_sstate('|'.join(map(str, [s + '.*?\.tgz$' for s in targets])), distro_specific=True, distro_nonspecific=False) | ||
| 80 | self.assertTrue(len(file_tracker_1) >= len(targets), msg = "Not all sstate files ware created for: %s" % ', '.join(map(str, targets))) | ||
| 81 | |||
| 82 | self.track_for_cleanup(self.distro_specific_sstate + "_old") | ||
| 83 | shutil.copytree(self.distro_specific_sstate, self.distro_specific_sstate + "_old") | ||
| 84 | shutil.rmtree(self.distro_specific_sstate) | ||
| 85 | |||
| 86 | bitbake(['-cclean'] + targets) | ||
| 87 | bitbake(targets) | ||
| 88 | file_tracker_2 = self.search_sstate('|'.join(map(str, [s + '.*?\.tgz$' for s in targets])), distro_specific=True, distro_nonspecific=False) | ||
| 89 | self.assertTrue(len(file_tracker_2) >= len(targets), msg = "Not all sstate files ware created for: %s" % ', '.join(map(str, targets))) | ||
| 90 | |||
| 91 | not_recreated = [x for x in file_tracker_1 if x not in file_tracker_2] | ||
| 92 | self.assertTrue(not_recreated == [], msg="The following sstate files ware not recreated: %s" % ', '.join(map(str, not_recreated))) | ||
| 93 | |||
| 94 | created_once = [x for x in file_tracker_2 if x not in file_tracker_1] | ||
| 95 | self.assertTrue(created_once == [], msg="The following sstate files ware created only in the second run: %s" % ', '.join(map(str, created_once))) | ||
| 96 | |||
| 97 | def test_rebuild_distro_specific_sstate_cross_native_targets(self): | ||
| 98 | self.run_test_rebuild_distro_specific_sstate(['binutils-cross', 'binutils-native'], temp_sstate_location=True) | ||
| 99 | |||
| 100 | def test_rebuild_distro_specific_sstate_cross_target(self): | ||
| 101 | self.run_test_rebuild_distro_specific_sstate(['binutils-cross'], temp_sstate_location=True) | ||
| 102 | |||
| 103 | def test_rebuild_distro_specific_sstate_native_target(self): | ||
| 104 | self.run_test_rebuild_distro_specific_sstate(['binutils-native'], temp_sstate_location=True) | ||
| 105 | |||
| 106 | |||
| 107 | # Test the sstate-cache-management script. Each element in the global_config list is used with the corresponding element in the target_config list | ||
| 108 | # global_config elements are expected to not generate any sstate files that would be removed by sstate-cache-management.sh (such as changing the value of MACHINE) | ||
| 109 | def run_test_sstate_cache_management_script(self, target, global_config=[''], target_config=[''], ignore_patterns=[]): | ||
| 110 | self.assertTrue(global_config) | ||
| 111 | self.assertTrue(target_config) | ||
| 112 | self.assertTrue(len(global_config) == len(target_config), msg='Lists global_config and target_config should have the same number of elements') | ||
| 113 | self.config_sstate(temp_sstate_location=True, add_local_mirrors=[self.sstate_path]) | ||
| 114 | |||
| 115 | # If buildhistory is enabled, we need to disable version-going-backwards QA checks for this test. It may report errors otherwise. | ||
| 116 | if ('buildhistory' in get_bb_var('USER_CLASSES')) or ('buildhistory' in get_bb_var('INHERIT')): | ||
| 117 | remove_errors_config = 'ERROR_QA_remove = "version-going-backwards"' | ||
| 118 | self.append_config(remove_errors_config) | ||
| 119 | |||
| 120 | # For not this only checks if random sstate tasks are handled correctly as a group. | ||
| 121 | # In the future we should add control over what tasks we check for. | ||
| 122 | |||
| 123 | sstate_archs_list = [] | ||
| 124 | expected_remaining_sstate = [] | ||
| 125 | for idx in range(len(target_config)): | ||
| 126 | self.append_config(global_config[idx]) | ||
| 127 | self.append_recipeinc(target, target_config[idx]) | ||
| 128 | sstate_arch = get_bb_var('SSTATE_PKGARCH', target) | ||
| 129 | if not sstate_arch in sstate_archs_list: | ||
| 130 | sstate_archs_list.append(sstate_arch) | ||
| 131 | if target_config[idx] == target_config[-1]: | ||
| 132 | target_sstate_before_build = self.search_sstate(target + '.*?\.tgz$') | ||
| 133 | bitbake("-cclean %s" % target) | ||
| 134 | result = bitbake(target, ignore_status=True) | ||
| 135 | if target_config[idx] == target_config[-1]: | ||
| 136 | target_sstate_after_build = self.search_sstate(target + '.*?\.tgz$') | ||
| 137 | expected_remaining_sstate += [x for x in target_sstate_after_build if x not in target_sstate_before_build if not any(pattern in x for pattern in ignore_patterns)] | ||
| 138 | self.remove_config(global_config[idx]) | ||
| 139 | self.remove_recipeinc(target, target_config[idx]) | ||
| 140 | self.assertEqual(result.status, 0) | ||
| 141 | |||
| 142 | runCmd("sstate-cache-management.sh -y --cache-dir=%s --remove-duplicated --extra-archs=%s" % (self.sstate_path, ','.join(map(str, sstate_archs_list)))) | ||
| 143 | actual_remaining_sstate = [x for x in self.search_sstate(target + '.*?\.tgz$') if not any(pattern in x for pattern in ignore_patterns)] | ||
| 144 | |||
| 145 | actual_not_expected = [x for x in actual_remaining_sstate if x not in expected_remaining_sstate] | ||
| 146 | self.assertFalse(actual_not_expected, msg="Files should have been removed but ware not: %s" % ', '.join(map(str, actual_not_expected))) | ||
| 147 | expected_not_actual = [x for x in expected_remaining_sstate if x not in actual_remaining_sstate] | ||
| 148 | self.assertFalse(expected_not_actual, msg="Extra files ware removed: %s" ', '.join(map(str, expected_not_actual))) | ||
| 149 | |||
| 150 | |||
| 151 | def test_sstate_cache_management_script_using_pr_1(self): | ||
| 152 | global_config = [] | ||
| 153 | target_config = [] | ||
| 154 | global_config.append('') | ||
| 155 | target_config.append('PR = "0"') | ||
| 156 | self.run_test_sstate_cache_management_script('m4', global_config, target_config, ignore_patterns=['populate_lic']) | ||
| 157 | |||
| 158 | def test_sstate_cache_management_script_using_pr_2(self): | ||
| 159 | global_config = [] | ||
| 160 | target_config = [] | ||
| 161 | global_config.append('') | ||
| 162 | target_config.append('PR = "0"') | ||
| 163 | global_config.append('') | ||
| 164 | target_config.append('PR = "1"') | ||
| 165 | self.run_test_sstate_cache_management_script('m4', global_config, target_config, ignore_patterns=['populate_lic']) | ||
| 166 | |||
| 167 | def test_sstate_cache_management_script_using_pr_3(self): | ||
| 168 | global_config = [] | ||
| 169 | target_config = [] | ||
| 170 | global_config.append('MACHINE = "qemux86-64"') | ||
| 171 | target_config.append('PR = "0"') | ||
| 172 | global_config.append(global_config[0]) | ||
| 173 | target_config.append('PR = "1"') | ||
| 174 | global_config.append('MACHINE = "qemux86"') | ||
| 175 | target_config.append('PR = "1"') | ||
| 176 | self.run_test_sstate_cache_management_script('m4', global_config, target_config, ignore_patterns=['populate_lic']) | ||
| 177 | |||
| 178 | def test_sstate_cache_management_script_using_machine(self): | ||
| 179 | global_config = [] | ||
| 180 | target_config = [] | ||
| 181 | global_config.append('MACHINE = "qemux86-64"') | ||
| 182 | target_config.append('') | ||
| 183 | global_config.append('MACHINE = "qemux86"') | ||
| 184 | target_config.append('') | ||
| 185 | self.run_test_sstate_cache_management_script('m4', global_config, target_config, ignore_patterns=['populate_lic']) | ||
| 186 | |||
| 187 | |||
| 188 | |||
| 189 | |||
| 190 | |||
| 191 | |||
| 192 | |||
| 193 | |||
diff --git a/meta/lib/oeqa/targetcontrol.py b/meta/lib/oeqa/targetcontrol.py new file mode 100644 index 0000000000..873a66457a --- /dev/null +++ b/meta/lib/oeqa/targetcontrol.py | |||
| @@ -0,0 +1,175 @@ | |||
| 1 | # Copyright (C) 2013 Intel Corporation | ||
| 2 | # | ||
| 3 | # Released under the MIT license (see COPYING.MIT) | ||
| 4 | |||
| 5 | # This module is used by testimage.bbclass for setting up and controlling a target machine. | ||
| 6 | |||
| 7 | import os | ||
| 8 | import shutil | ||
| 9 | import subprocess | ||
| 10 | import bb | ||
| 11 | import traceback | ||
| 12 | import sys | ||
| 13 | from oeqa.utils.sshcontrol import SSHControl | ||
| 14 | from oeqa.utils.qemurunner import QemuRunner | ||
| 15 | from oeqa.controllers.testtargetloader import TestTargetLoader | ||
| 16 | from abc import ABCMeta, abstractmethod | ||
| 17 | |||
def get_target_controller(d):
    """Return a target controller instance for the configured TEST_TARGET.

    Resolution order: the two built-in shorthand names ("qemu",
    "simpleremote"), then a class of that name defined in this module, then
    a controller module provided by a layer on BBPATH. Unresolvable values
    abort the build via bb.fatal().
    """
    testtarget = d.getVar("TEST_TARGET", True)
    # old, simple names
    if testtarget == "qemu":
        return QemuTarget(d)
    elif testtarget == "simpleremote":
        return SimpleRemoteTarget(d)
    else:
        # use the class name
        try:
            # is it a core class defined here?
            controller = getattr(sys.modules[__name__], testtarget)
        except AttributeError:
            # nope, perhaps a layer defined one
            try:
                bbpath = d.getVar("BBPATH", True).split(':')
                testtargetloader = TestTargetLoader()
                controller = testtargetloader.get_controller_module(testtarget, bbpath)
            except ImportError as e:
                bb.fatal("Failed to import {0} from available controller modules:\n{1}".format(testtarget,traceback.format_exc()))
            except AttributeError as e:
                bb.fatal("Invalid TEST_TARGET - " + str(e))
        # bb.fatal() raises, so controller is always bound when we get here
        return controller(d)
| 41 | |||
| 42 | |||
class BaseTarget(object):
    """Abstract base class for test target controllers.

    Subclasses must implement deploy/start/stop/restart; start() is expected
    to populate self.connection with an SSHControl-like object which the
    run/copy helpers delegate to.
    """

    __metaclass__ = ABCMeta

    def __init__(self, d):
        # No connection or addresses until start() has been called.
        self.connection = None
        self.ip = None
        self.server_ip = None
        self.datetime = d.getVar('DATETIME', True)
        self.testdir = d.getVar("TEST_LOG_DIR", True)
        self.pn = d.getVar("PN", True)

    @abstractmethod
    def deploy(self):
        """Set up the per-run ssh log file and refresh its stable symlink."""
        self.sshlog = os.path.join(self.testdir, "ssh_target_log.%s" % self.datetime)
        link = os.path.join(self.testdir, "ssh_target_log")
        if os.path.islink(link):
            os.unlink(link)
        os.symlink(self.sshlog, link)
        bb.note("SSH log file: %s" % self.sshlog)

    @abstractmethod
    def start(self, params=None):
        pass

    @abstractmethod
    def stop(self):
        pass

    @abstractmethod
    def restart(self, params=None):
        pass

    def run(self, cmd, timeout=None):
        """Run *cmd* on the target via the active connection."""
        return self.connection.run(cmd, timeout)

    def copy_to(self, localpath, remotepath):
        """Copy a local file to the target."""
        return self.connection.copy_to(localpath, remotepath)

    def copy_from(self, remotepath, localpath):
        """Copy a file from the target to the host."""
        return self.connection.copy_from(remotepath, localpath)
| 85 | |||
| 86 | |||
| 87 | |||
class QemuTarget(BaseTarget):
    """Controls a qemu machine started via runqemu as the test target."""

    def __init__(self, d):

        super(QemuTarget, self).__init__(d)

        deploy_dir = d.getVar("DEPLOY_DIR_IMAGE", True)
        image_link = d.getVar("IMAGE_LINK_NAME", True)

        self.qemulog = os.path.join(self.testdir, "qemu_boot_log.%s" % self.datetime)
        # Work on a private copy of the rootfs so tests can't corrupt the
        # deployed image.
        self.origrootfs = os.path.join(deploy_dir, image_link + '.ext3')
        self.rootfs = os.path.join(self.testdir, image_link + '-testimage.ext3')

        self.runner = QemuRunner(machine=d.getVar("MACHINE", True),
                        rootfs=self.rootfs,
                        tmpdir = d.getVar("TMPDIR", True),
                        deploy_dir_image = deploy_dir,
                        display = d.getVar("BB_ORIGENV", False).getVar("DISPLAY", True),
                        logfile = self.qemulog,
                        boottime = int(d.getVar("TEST_QEMUBOOT_TIMEOUT", True)))

    def deploy(self):
        """Copy the rootfs into the test dir and set up the qemu boot log."""
        try:
            shutil.copyfile(self.origrootfs, self.rootfs)
        except Exception as e:
            bb.fatal("Error copying rootfs: %s" % e)

        link = os.path.join(self.testdir, "qemu_boot_log")
        if os.path.islink(link):
            os.unlink(link)
        os.symlink(self.qemulog, link)

        bb.note("rootfs file: %s" % self.rootfs)
        bb.note("Qemu log file: %s" % self.qemulog)
        super(QemuTarget, self).deploy()

    def _connect(self):
        # Cache the addresses reported by the runner and open the ssh channel.
        self.ip = self.runner.ip
        self.server_ip = self.runner.server_ip
        self.connection = SSHControl(ip=self.ip, logfile=self.sshlog)

    def start(self, params=None):
        if not self.runner.start(params):
            raise bb.build.FuncFailed("%s - FAILED to start qemu - check the task log and the boot log" % self.pn)
        self._connect()

    def stop(self):
        self.runner.stop()
        self.connection = None
        self.ip = None
        self.server_ip = None

    def restart(self, params=None):
        if not self.runner.restart(params):
            raise bb.build.FuncFailed("%s - FAILED to re-start qemu - check the task log and the boot log" % self.pn)
        self._connect()
| 142 | |||
| 143 | |||
class SimpleRemoteTarget(BaseTarget):
    """Runs tests against an already-running remote machine over ssh."""

    def __init__(self, d):
        super(SimpleRemoteTarget, self).__init__(d)
        addr = d.getVar("TEST_TARGET_IP", True) or bb.fatal('Please set TEST_TARGET_IP with the IP address of the machine you want to run the tests on.')
        # TEST_TARGET_IP may be "ip" or "ip:port"
        parts = addr.split(":")
        self.ip = parts[0]
        self.port = parts[1] if len(parts) > 1 else None
        bb.note("Target IP: %s" % self.ip)
        self.server_ip = d.getVar("TEST_SERVER_IP", True)
        if not self.server_ip:
            # Fall back to asking the kernel which source address would be
            # used to reach the target.
            try:
                route = subprocess.check_output(['ip', 'route', 'get', self.ip ])
                self.server_ip = route.split("\n")[0].split()[-1]
            except Exception as e:
                bb.fatal("Failed to determine the host IP address (alternatively you can set TEST_SERVER_IP with the IP address of this machine): %s" % e)
        bb.note("Server IP: %s" % self.server_ip)

    def deploy(self):
        # Nothing to deploy; just set up the ssh log.
        super(SimpleRemoteTarget, self).deploy()

    def start(self, params=None):
        self.connection = SSHControl(self.ip, logfile=self.sshlog, port=self.port)

    def stop(self):
        self.connection = None
        self.ip = None
        self.server_ip = None

    def restart(self, params=None):
        # Nothing to restart on a pre-existing remote machine.
        pass
diff --git a/meta/lib/oeqa/utils/__init__.py b/meta/lib/oeqa/utils/__init__.py new file mode 100644 index 0000000000..8eda92763c --- /dev/null +++ b/meta/lib/oeqa/utils/__init__.py | |||
| @@ -0,0 +1,3 @@ | |||
| 1 | # Enable other layers to have modules in the same named directory | ||
| 2 | from pkgutil import extend_path | ||
| 3 | __path__ = extend_path(__path__, __name__) | ||
diff --git a/meta/lib/oeqa/utils/commands.py b/meta/lib/oeqa/utils/commands.py new file mode 100644 index 0000000000..9b42620610 --- /dev/null +++ b/meta/lib/oeqa/utils/commands.py | |||
| @@ -0,0 +1,137 @@ | |||
| 1 | # Copyright (c) 2013 Intel Corporation | ||
| 2 | # | ||
| 3 | # Released under the MIT license (see COPYING.MIT) | ||
| 4 | |||
| 5 | # DESCRIPTION | ||
| 6 | # This module is mainly used by scripts/oe-selftest and modules under meta/oeqa/selftest | ||
| 7 | # It provides a class and methods for running commands on the host in a convienent way for tests. | ||
| 8 | |||
| 9 | |||
| 10 | |||
| 11 | import os | ||
| 12 | import sys | ||
| 13 | import signal | ||
| 14 | import subprocess | ||
| 15 | import threading | ||
| 16 | import logging | ||
| 17 | |||
class Command(object):
    """Run a host command in a subprocess, reading its output on a thread.

    A string command is run through the shell; a list is exec'd directly.
    With bg=False (the default) run() blocks for up to *timeout* seconds and
    then terminates/kills the process if it is still running. After run()
    (or stop()) completes, status/output hold the results.
    """
    def __init__(self, command, bg=False, timeout=None, data=None, **options):

        # stderr is merged into stdout so self.output captures everything
        self.defaultopts = {
            "stdout": subprocess.PIPE,
            "stderr": subprocess.STDOUT,
            "stdin": None,
            "shell": False,
            "bufsize": -1,
        }

        self.cmd = command
        self.bg = bg
        self.timeout = timeout
        self.data = data  # optional bytes to feed to the process's stdin

        self.options = dict(self.defaultopts)
        if isinstance(self.cmd, basestring):
            # a plain string command needs the shell to split/expand it
            self.options["shell"] = True
        if self.data:
            self.options['stdin'] = subprocess.PIPE
        self.options.update(options)

        self.status = None
        self.output = None
        self.error = None
        self.thread = None

        self.log = logging.getLogger("utils.commands")

    def run(self):
        """Start the process; block until done/timeout unless bg=True."""
        self.process = subprocess.Popen(self.cmd, **self.options)

        # communicate() blocks, so it runs on a thread; this lets us enforce
        # the timeout with thread.join() and still drain the pipes.
        def commThread():
            self.output, self.error = self.process.communicate(self.data)

        self.thread = threading.Thread(target=commThread)
        self.thread.start()

        self.log.debug("Running command '%s'" % self.cmd)

        if not self.bg:
            self.thread.join(self.timeout)
            self.stop()

    def stop(self):
        """Terminate the process if still running and collect status/output."""
        if self.thread.isAlive():
            self.process.terminate()
            # let's give it more time to terminate gracefully before killing it
            self.thread.join(5)
            if self.thread.isAlive():
                self.process.kill()
                self.thread.join()

        self.output = self.output.rstrip()
        self.status = self.process.poll()

        self.log.debug("Command '%s' returned %d as exit code." % (self.cmd, self.status))
        # logging the complete output is insane
        # bitbake -e output is really big
        # and makes the log file useless
        if self.status:
            lout = "\n".join(self.output.splitlines()[-20:])
            self.log.debug("Last 20 lines:\n%s" % lout)
| 83 | |||
class Result(object):
    """Plain attribute container for the outcome of runCmd()
    (command, status, output, pid are assigned by the caller)."""
    pass
| 86 | |||
def runCmd(command, ignore_status=False, timeout=None, **options):
    """Run *command* on the host and return a Result.

    The Result carries command, status, output and pid. Unless
    ignore_status is set, a non-zero exit status raises AssertionError.
    """
    cmd = Command(command, timeout=timeout, **options)
    cmd.run()

    result = Result()
    result.command = command
    result.status = cmd.status
    result.output = cmd.output
    result.pid = cmd.process.pid

    if result.status and not ignore_status:
        raise AssertionError("Command '%s' returned non-zero exit status %d:\n%s" % (command, result.status, result.output))

    return result
| 103 | |||
| 104 | |||
def bitbake(command, ignore_status=False, timeout=None, **options):
    """Run bitbake with *command*, which may be a string or a list of args."""
    prog = "bitbake"
    cmd = prog + " " + command if isinstance(command, basestring) else [prog] + command
    return runCmd(cmd, ignore_status, timeout, **options)
| 112 | |||
| 113 | |||
def get_bb_env(target=None):
    """Return 'bitbake -e' output, optionally restricted to one target."""
    cmd = "bitbake -e %s" % target if target else "bitbake -e"
    return runCmd(cmd).output
| 119 | |||
def get_bb_var(var, target=None):
    """Return the value of bitbake variable *var* (from 'bitbake -e' output),
    with double quotes stripped, or None if the variable is not set."""
    bbenv = get_bb_env(target)
    prefix = var + "="
    for line in bbenv.splitlines():
        if line.startswith(prefix):
            # Bug fix: split on the first '=' only. The original split on
            # every '=', truncating values that themselves contain '='
            # (e.g. FOO="a=b" used to come back as just 'a').
            val = line.split('=', 1)[1]
            return val.replace('\"', '')
    return None
| 129 | |||
def get_test_layer():
    """Return the path of the meta-selftest layer from BBLAYERS, or None."""
    for layer in get_bb_var("BBLAYERS").split():
        if "/meta-selftest" in layer and os.path.isdir(layer):
            return layer
    return None
diff --git a/meta/lib/oeqa/utils/decorators.py b/meta/lib/oeqa/utils/decorators.py new file mode 100644 index 0000000000..b99da8d76d --- /dev/null +++ b/meta/lib/oeqa/utils/decorators.py | |||
| @@ -0,0 +1,50 @@ | |||
| 1 | # Copyright (C) 2013 Intel Corporation | ||
| 2 | # | ||
| 3 | # Released under the MIT license (see COPYING.MIT) | ||
| 4 | |||
| 5 | # Some custom decorators that can be used by unittests | ||
| 6 | # Most useful is skipUnlessPassed which can be used for | ||
| 7 | # creating dependecies between two test methods. | ||
| 8 | |||
| 9 | from oeqa.oetest import * | ||
| 10 | |||
class skipIfFailure(object):
    """Decorator: skip the decorated test if *testcase* failed or errored."""

    def __init__(self,testcase):
        self.testcase = testcase

    def __call__(self,f):
        def wrapped_f(*args):
            # Bug fix: the original tested membership in
            # (oeTest.testFailures or oeTest.testErrors), which evaluates to
            # testFailures alone whenever that list is non-empty, so errored
            # dependencies were silently ignored. Check both lists.
            if self.testcase in oeTest.testFailures or \
                    self.testcase in oeTest.testErrors:
                raise unittest.SkipTest("Testcase dependency not met: %s" % self.testcase)
            return f(*args)
        wrapped_f.__name__ = f.__name__
        return wrapped_f
| 23 | |||
class skipIfSkipped(object):
    """Decorator: skip the decorated test if *testcase* was itself skipped."""

    def __init__(self, testcase):
        self.testcase = testcase

    def __call__(self, f):
        def wrapped_f(*args):
            if self.testcase not in oeTest.testSkipped:
                return f(*args)
            raise unittest.SkipTest("Testcase dependency not met: %s" % self.testcase)
        wrapped_f.__name__ = f.__name__
        return wrapped_f
| 36 | |||
class skipUnlessPassed(object):
    """Decorator: run the test only if *testcase* ran and passed
    (i.e. is in none of the skipped/failed/errored lists)."""

    def __init__(self, testcase):
        self.testcase = testcase

    def __call__(self, f):
        def wrapped_f(*args):
            unmet = (self.testcase in oeTest.testSkipped or
                     self.testcase in oeTest.testFailures or
                     self.testcase in oeTest.testErrors)
            if unmet:
                raise unittest.SkipTest("Testcase dependency not met: %s" % self.testcase)
            return f(*args)
        wrapped_f.__name__ = f.__name__
        return wrapped_f
diff --git a/meta/lib/oeqa/utils/ftools.py b/meta/lib/oeqa/utils/ftools.py new file mode 100644 index 0000000000..64ebe3d217 --- /dev/null +++ b/meta/lib/oeqa/utils/ftools.py | |||
| @@ -0,0 +1,27 @@ | |||
| 1 | import os | ||
| 2 | import re | ||
| 3 | |||
def write_file(path, data):
    """Write *data* to *path*, normalising trailing whitespace to one newline."""
    with open(path, "w") as handle:
        handle.write(data.rstrip() + "\n")
| 8 | |||
def append_file(path, data):
    """Append *data* to *path*, normalising trailing whitespace to one newline."""
    with open(path, "a") as handle:
        handle.write(data.rstrip() + "\n")
| 13 | |||
def read_file(path):
    """Return the entire contents of the file at *path* as a string."""
    with open(path) as handle:
        return handle.read()
| 19 | |||
def remove_from_file(path, data):
    """Remove every line of *data* (all occurrences) from the file at *path*."""
    doomed = set(data.strip().splitlines())
    # Single O(n) filtering pass; the original repeatedly called
    # list.count()/index()/del per removable line, which was quadratic.
    # Result and line order are identical: every occurrence of each line
    # in *data* is dropped, everything else is kept in order.
    lines = [l for l in read_file(path).splitlines() if l not in doomed]
    write_file(path, "\n".join(lines))
diff --git a/meta/lib/oeqa/utils/httpserver.py b/meta/lib/oeqa/utils/httpserver.py new file mode 100644 index 0000000000..f161a1bddd --- /dev/null +++ b/meta/lib/oeqa/utils/httpserver.py | |||
| @@ -0,0 +1,33 @@ | |||
| 1 | import SimpleHTTPServer | ||
| 2 | import multiprocessing | ||
| 3 | import os | ||
| 4 | |||
class HTTPServer(SimpleHTTPServer.BaseHTTPServer.HTTPServer):
    # HTTP server that serves files from a chosen directory.

    def server_start(self, root_dir):
        # Runs in the child process (see HTTPService.start); chdir so that
        # SimpleHTTPRequestHandler serves files relative to root_dir.
        os.chdir(root_dir)
        self.serve_forever()
| 10 | |||
class HTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    # Request handler with per-request logging silenced to keep test
    # output clean.

    def log_message(self, format_str, *args):
        pass
| 15 | |||
class HTTPService(object):
    """Serve *root_dir* over HTTP from a background process."""

    def __init__(self, root_dir, host=''):
        self.root_dir = root_dir
        self.host = host
        # 0 asks the OS for a free ephemeral port when start() binds.
        self.port = 0

    def start(self):
        """Bind the server and launch it in a separate process."""
        self.server = HTTPServer((self.host, self.port), HTTPRequestHandler)
        if not self.port:
            # Record the port the OS actually assigned.
            self.port = self.server.server_port
        self.process = multiprocessing.Process(target=self.server.server_start,
                                               args=[self.root_dir])
        self.process.start()

    def stop(self):
        """Close the listening socket and terminate the serving process."""
        self.server.server_close()
        self.process.terminate()
        self.process.join()
diff --git a/meta/lib/oeqa/utils/qemurunner.py b/meta/lib/oeqa/utils/qemurunner.py new file mode 100644 index 0000000000..f1a7e24ab7 --- /dev/null +++ b/meta/lib/oeqa/utils/qemurunner.py | |||
| @@ -0,0 +1,237 @@ | |||
| 1 | # Copyright (C) 2013 Intel Corporation | ||
| 2 | # | ||
| 3 | # Released under the MIT license (see COPYING.MIT) | ||
| 4 | |||
| 5 | # This module provides a class for starting qemu images using runqemu. | ||
| 6 | # It's used by testimage.bbclass. | ||
| 7 | |||
| 8 | import subprocess | ||
| 9 | import os | ||
| 10 | import time | ||
| 11 | import signal | ||
| 12 | import re | ||
| 13 | import socket | ||
| 14 | import select | ||
| 15 | import bb | ||
| 16 | |||
| 17 | class QemuRunner: | ||
| 18 | |||
    def __init__(self, machine, rootfs, display, tmpdir, deploy_dir_image, logfile, boottime):
        """Set up state for launching qemu via runqemu.

        boottime: seconds to wait for the login banner once qemu is running.
        logfile: file the serial console output is appended to (may be None).
        """

        # Popen object for runqemu
        self.runqemu = None
        # pid of the qemu process that runqemu will start
        self.qemupid = None
        # target ip - from the command line
        self.ip = None
        # host ip - where qemu is running
        self.server_ip = None

        self.machine = machine
        self.rootfs = rootfs
        self.display = display
        self.tmpdir = tmpdir
        self.deploy_dir_image = deploy_dir_image
        self.logfile = logfile
        self.boottime = boottime

        # seconds to wait for runqemu to spawn the actual qemu process
        self.runqemutime = 60

        # listening socket the qemu serial console connects back to
        self.create_socket()
| 41 | |||
| 42 | |||
    def create_socket(self):
        # Create the listening socket that qemu's serial console will connect
        # back to (wired up via "-serial tcp:127.0.0.1:<port>" in start()).

        self.bootlog = ''
        self.qemusock = None

        try:
            self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            # non-blocking: start() multiplexes it with select()
            self.server_socket.setblocking(0)
            # port 0 lets the OS pick a free ephemeral port
            self.server_socket.bind(("127.0.0.1",0))
            self.server_socket.listen(2)
            self.serverport = self.server_socket.getsockname()[1]
            bb.note("Created listening socket for qemu serial console on: 127.0.0.1:%s" % self.serverport)
        except socket.error, msg:
            self.server_socket.close()
            bb.fatal("Failed to create listening socket: %s" %msg[1])
| 58 | |||
| 59 | |||
| 60 | def log(self, msg): | ||
| 61 | if self.logfile: | ||
| 62 | with open(self.logfile, "a") as f: | ||
| 63 | f.write("%s" % msg) | ||
| 64 | |||
    def start(self, qemuparams = None):
        """Launch qemu via runqemu and wait for the login banner.

        Returns True once the qemu process is up and the serial console has
        shown a login prompt; returns False (after cleaning up) on any
        failure. Extra qemu arguments can be passed via *qemuparams*.
        """

        if self.display:
            os.environ["DISPLAY"] = self.display
        else:
            bb.error("To start qemu I need a X desktop, please set DISPLAY correctly (e.g. DISPLAY=:1)")
            return False
        if not os.path.exists(self.rootfs):
            bb.error("Invalid rootfs %s" % self.rootfs)
            return False
        if not os.path.exists(self.tmpdir):
            bb.error("Invalid TMPDIR path %s" % self.tmpdir)
            return False
        else:
            os.environ["OE_TMPDIR"] = self.tmpdir
        if not os.path.exists(self.deploy_dir_image):
            bb.error("Invalid DEPLOY_DIR_IMAGE path %s" % self.deploy_dir_image)
            return False
        else:
            os.environ["DEPLOY_DIR_IMAGE"] = self.deploy_dir_image

        # Set this flag so that Qemu doesn't do any grabs as SDL grabs interact
        # badly with screensavers.
        os.environ["QEMU_DONT_GRAB"] = "1"
        # Route the serial console to our listening socket (see create_socket)
        self.qemuparams = 'bootparams="console=tty1 console=ttyS0,115200n8" qemuparams="-serial tcp:127.0.0.1:%s"' % self.serverport
        if qemuparams:
            # splice extra args inside the trailing quote of qemuparams="..."
            self.qemuparams = self.qemuparams[:-1] + " " + qemuparams + " " + '\"'

        launch_cmd = 'runqemu %s %s %s' % (self.machine, self.rootfs, self.qemuparams)
        # own process group so stop() can killpg() runqemu and its children
        self.runqemu = subprocess.Popen(launch_cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT,preexec_fn=os.setpgrp)

        bb.note("runqemu started, pid is %s" % self.runqemu.pid)
        bb.note("waiting at most %s seconds for qemu pid" % self.runqemutime)
        endtime = time.time() + self.runqemutime
        while not self.is_alive() and time.time() < endtime:
            time.sleep(1)

        if self.is_alive():
            bb.note("qemu started - qemu procces pid is %s" % self.qemupid)
            # Pull the target/server IPs out of qemu's "ip=..." kernel arg
            cmdline = ''
            with open('/proc/%s/cmdline' % self.qemupid) as p:
                cmdline = p.read()
            # expects ip=<client>::<gateway>:... i.e. exactly three dotted quads
            ips = re.findall("((?:[0-9]{1,3}\.){3}[0-9]{1,3})", cmdline.split("ip=")[1])
            if not ips or len(ips) != 3:
                bb.note("Couldn't get ip from qemu process arguments! Here is the qemu command line used: %s" % cmdline)
                self.stop()
                return False
            else:
                self.ip = ips[0]
                self.server_ip = ips[1]
            bb.note("Target IP: %s" % self.ip)
            bb.note("Server IP: %s" % self.server_ip)
            bb.note("Waiting at most %d seconds for login banner" % self.boottime )
            endtime = time.time() + self.boottime
            socklist = [self.server_socket]
            reachedlogin = False
            stopread = False
            # Accept the serial-console connection, then read from it until
            # the login prompt appears, the peer disconnects, or we time out.
            while time.time() < endtime and not stopread:
                sread, swrite, serror = select.select(socklist, [], [], 5)
                for sock in sread:
                    if sock is self.server_socket:
                        self.qemusock, addr = self.server_socket.accept()
                        self.qemusock.setblocking(0)
                        socklist.append(self.qemusock)
                        # only one console connection is expected
                        socklist.remove(self.server_socket)
                        bb.note("Connection from %s:%s" % addr)
                    else:
                        data = sock.recv(1024)
                        if data:
                            self.log(data)
                            self.bootlog += data
                            if re.search("qemu.* login:", self.bootlog):
                                stopread = True
                                reachedlogin = True
                                bb.note("Reached login banner")
                        else:
                            # zero-byte read: console closed the connection
                            socklist.remove(sock)
                            sock.close()
                            stopread = True

            if not reachedlogin:
                bb.note("Target didn't reached login boot in %d seconds" % self.boottime)
                lines = "\n".join(self.bootlog.splitlines()[-5:])
                bb.note("Last 5 lines of text:\n%s" % lines)
                bb.note("Check full boot log: %s" % self.logfile)
                self.stop()
                return False
        else:
            bb.note("Qemu pid didn't appeared in %s seconds" % self.runqemutime)
            output = self.runqemu.stdout
            self.stop()
            bb.note("Output from runqemu:\n%s" % output.read())
            return False

        return self.is_alive()
| 160 | |||
    def stop(self):
        """Terminate runqemu (and its process group) and reset runtime state."""

        if self.runqemu:
            bb.note("Sending SIGTERM to runqemu")
            # kill the whole process group (runqemu + qemu), see os.setpgrp
            # in start()
            os.killpg(self.runqemu.pid, signal.SIGTERM)
            endtime = time.time() + self.runqemutime
            while self.runqemu.poll() is None and time.time() < endtime:
                time.sleep(1)
            if self.runqemu.poll() is None:
                # didn't exit gracefully within the grace period
                bb.note("Sending SIGKILL to runqemu")
                os.killpg(self.runqemu.pid, signal.SIGKILL)
            self.runqemu = None
        if self.server_socket:
            self.server_socket.close()
            self.server_socket = None
        self.qemupid = None
        self.ip = None
| 178 | |||
| 179 | def restart(self, qemuparams = None): | ||
| 180 | bb.note("Restarting qemu process") | ||
| 181 | if self.runqemu.poll() is None: | ||
| 182 | self.stop() | ||
| 183 | self.create_socket() | ||
| 184 | if self.start(qemuparams): | ||
| 185 | return True | ||
| 186 | return False | ||
| 187 | |||
| 188 | def is_alive(self): | ||
| 189 | qemu_child = self.find_child(str(self.runqemu.pid)) | ||
| 190 | if qemu_child: | ||
| 191 | self.qemupid = qemu_child[0] | ||
| 192 | if os.path.exists("/proc/" + str(self.qemupid)): | ||
| 193 | return True | ||
| 194 | return False | ||
| 195 | |||
| 196 | def find_child(self,parent_pid): | ||
| 197 | # | ||
| 198 | # Walk the process tree from the process specified looking for a qemu-system. Return its [pid'cmd] | ||
| 199 | # | ||
| 200 | ps = subprocess.Popen(['ps', 'axww', '-o', 'pid,ppid,command'], stdout=subprocess.PIPE).communicate()[0] | ||
| 201 | processes = ps.split('\n') | ||
| 202 | nfields = len(processes[0].split()) - 1 | ||
| 203 | pids = {} | ||
| 204 | commands = {} | ||
| 205 | for row in processes[1:]: | ||
| 206 | data = row.split(None, nfields) | ||
| 207 | if len(data) != 3: | ||
| 208 | continue | ||
| 209 | if data[1] not in pids: | ||
| 210 | pids[data[1]] = [] | ||
| 211 | |||
| 212 | pids[data[1]].append(data[0]) | ||
| 213 | commands[data[0]] = data[2] | ||
| 214 | |||
| 215 | if parent_pid not in pids: | ||
| 216 | return [] | ||
| 217 | |||
| 218 | parents = [] | ||
| 219 | newparents = pids[parent_pid] | ||
| 220 | while newparents: | ||
| 221 | next = [] | ||
| 222 | for p in newparents: | ||
| 223 | if p in pids: | ||
| 224 | for n in pids[p]: | ||
| 225 | if n not in parents and n not in next: | ||
| 226 | next.append(n) | ||
| 227 | if p not in parents: | ||
| 228 | parents.append(p) | ||
| 229 | newparents = next | ||
| 230 | #print "Children matching %s:" % str(parents) | ||
| 231 | for p in parents: | ||
| 232 | # Need to be careful here since runqemu-internal runs "ldd qemu-system-xxxx" | ||
| 233 | # Also, old versions of ldd (2.11) run "LD_XXXX qemu-system-xxxx" | ||
| 234 | basecmd = commands[p].split()[0] | ||
| 235 | basecmd = os.path.basename(basecmd) | ||
| 236 | if "qemu-system" in basecmd and "-serial tcp" in commands[p]: | ||
| 237 | return [int(p),commands[p]] | ||
diff --git a/meta/lib/oeqa/utils/sshcontrol.py b/meta/lib/oeqa/utils/sshcontrol.py new file mode 100644 index 0000000000..d355d5e8e9 --- /dev/null +++ b/meta/lib/oeqa/utils/sshcontrol.py | |||
| @@ -0,0 +1,127 @@ | |||
| 1 | # Copyright (C) 2013 Intel Corporation | ||
| 2 | # | ||
| 3 | # Released under the MIT license (see COPYING.MIT) | ||
| 4 | |||
| 5 | # Provides a class for setting up ssh connections, | ||
| 6 | # running commands and copying files to/from a target. | ||
| 7 | # It's used by testimage.bbclass and tests in lib/oeqa/runtime. | ||
| 8 | |||
| 9 | import subprocess | ||
| 10 | import time | ||
| 11 | import os | ||
| 12 | import select | ||
| 13 | |||
| 14 | |||
class SSHProcess(object):
    """Thin wrapper around subprocess.Popen that runs one command and can
    kill it once it has produced no output for a given number of seconds."""

    def __init__(self, **options):
        # Defaults: merge stderr into stdout, no stdin, and start the child
        # in its own session so the whole tree can be signalled at once.
        self.defaultopts = {
            "stdout": subprocess.PIPE,
            "stderr": subprocess.STDOUT,
            "stdin": None,
            "shell": False,
            "bufsize": -1,
            "preexec_fn": os.setsid,
        }
        # Caller-supplied Popen options override the defaults.
        self.options = dict(self.defaultopts)
        self.options.update(options)
        self.status = None
        self.output = None
        self.process = None
        self.starttime = None

    def run(self, command, timeout=None):
        """Run command; with a timeout, terminate it after `timeout` seconds
        of silence. Returns (exit status, output with trailing whitespace
        stripped) and caches both on self."""
        self.starttime = time.time()
        collected = ''
        self.process = subprocess.Popen(command, **self.options)
        if timeout:
            endtime = self.starttime + timeout
            eof = False
            while not eof and time.time() < endtime:
                # Wait up to 5s for output; every successful read pushes the
                # no-output deadline forward by `timeout` seconds.
                if select.select([self.process.stdout], [], [], 5)[0] != []:
                    data = os.read(self.process.stdout.fileno(), 1024)
                    if data:
                        collected += data
                        endtime = time.time() + timeout
                    else:
                        self.process.stdout.close()
                        eof = True

            # process hasn't returned yet
            if not eof:
                self.process.terminate()
                time.sleep(5)
                try:
                    self.process.kill()
                except OSError:
                    pass
                collected += "\nProcess killed - no output for %d seconds. Total running time: %d seconds." % (timeout, time.time() - self.starttime)
        else:
            collected = self.process.communicate()[0]

        self.status = self.process.wait()
        self.output = collected.rstrip()
        return (self.status, self.output)
| 65 | |||
| 66 | |||
class SSHControl(object):
    """Run commands on, and copy files to/from, a remote target via ssh/scp."""

    def __init__(self, ip, logfile=None, timeout=300, user='root', port=None):
        self.ip = ip
        self.defaulttimeout = timeout
        self.ignore_status = True
        self.logfile = logfile
        self.user = user
        # Skip host-key bookkeeping: test targets come and go and reuse IPs.
        self.ssh_options = [
                '-o', 'UserKnownHostsFile=/dev/null',
                '-o', 'StrictHostKeyChecking=no',
                '-o', 'LogLevel=ERROR'
                ]
        self.ssh = ['ssh', '-l', self.user ] + self.ssh_options
        self.scp = ['scp'] + self.ssh_options
        if port:
            self.ssh += [ '-p', port ]
            self.scp += [ '-P', port ]

    def log(self, msg):
        """Append msg (plus a newline) to the logfile, if one is configured."""
        if not self.logfile:
            return
        with open(self.logfile, "a") as f:
            f.write("%s\n" % msg)

    def _internal_run(self, command, timeout=None, ignore_status = True):
        """Run an already-assembled ssh/scp command line via SSHProcess.

        Raises AssertionError on non-zero exit unless ignore_status is set.
        Returns (status, output).
        """
        self.log("[Running]$ %s" % " ".join(command))

        proc = SSHProcess()
        status, output = proc.run(command, timeout)

        self.log("%s" % output)
        self.log("[Command returned '%d' after %.2f seconds]" % (status, time.time() - proc.starttime))

        if status and not ignore_status:
            raise AssertionError("Command '%s' returned non-zero exit status %d:\n%s" % (command, status, output))

        return (status, output)

    def run(self, command, timeout=None):
        """
        command - ssh command to run
        timeout=<val> - kill command if there is no output after <val> seconds
        timeout=None - kill command if there is no output after a default value seconds
        timeout=0 - no timeout, let command run until it returns
        """
        # We need to source /etc/profile for a proper PATH on the target
        command = self.ssh + [self.ip, ' . /etc/profile; ' + command]

        if timeout == 0:
            # Explicit zero disables the no-output watchdog entirely.
            effective = None
        elif timeout is None:
            effective = self.defaulttimeout
        else:
            effective = timeout
        return self._internal_run(command, effective, self.ignore_status)

    def copy_to(self, localpath, remotepath):
        """scp localpath to user@ip:remotepath; raises on failure."""
        dest = '%s@%s:%s' % (self.user, self.ip, remotepath)
        return self._internal_run(self.scp + [localpath, dest], ignore_status=False)

    def copy_from(self, remotepath, localpath):
        """scp user@ip:remotepath to localpath; raises on failure."""
        src = '%s@%s:%s' % (self.user, self.ip, remotepath)
        return self._internal_run(self.scp + [src, localpath], ignore_status=False)
diff --git a/meta/lib/oeqa/utils/targetbuild.py b/meta/lib/oeqa/utils/targetbuild.py new file mode 100644 index 0000000000..32296762c0 --- /dev/null +++ b/meta/lib/oeqa/utils/targetbuild.py | |||
| @@ -0,0 +1,68 @@ | |||
| 1 | # Copyright (C) 2013 Intel Corporation | ||
| 2 | # | ||
| 3 | # Released under the MIT license (see COPYING.MIT) | ||
| 4 | |||
| 5 | # Provides a class for automating build tests for projects | ||
| 6 | |||
| 7 | import os | ||
| 8 | import re | ||
| 9 | import subprocess | ||
| 10 | |||
| 11 | |||
class TargetBuildProject():
    """Download a source archive, copy it to a test target and drive
    configure/make/make install there over ssh."""

    def __init__(self, target, d, uri, foldername=None):
        """
        target     -- SSHControl-like object (run / copy_to methods)
        d          -- bitbake datastore, read for proxy variables
        uri        -- URL of the source archive (.tar.gz or .tar.bz2)
        foldername -- override for the directory the archive extracts into
        """
        self.target = target
        self.d = d
        self.uri = uri
        self.targetdir = "~/"
        self.archive = os.path.basename(uri)
        self.localarchive = "/tmp/" + self.archive
        # Strip the archive suffix to get the extracted directory name.
        # The dots must be escaped and both alternatives anchored: the
        # original r'.tar.bz2|tar.gz$' dropped only "tar.gz" (no dot), so
        # "proj-1.0.tar.gz" produced the broken name "proj-1.0.".
        self.fname = re.sub(r'\.tar\.bz2$|\.tar\.gz$', '', self.archive)
        if foldername:
            self.fname = foldername

    def download_archive(self):
        """Fetch the archive locally (honouring proxy settings from the
        datastore), copy it to the target and unpack it there."""

        # Proxy settings to propagate into the wget environment.
        exportvars = ['HTTP_PROXY', 'http_proxy',
                      'HTTPS_PROXY', 'https_proxy',
                      'FTP_PROXY', 'ftp_proxy',
                      'FTPS_PROXY', 'ftps_proxy',
                      'NO_PROXY', 'no_proxy',
                      'ALL_PROXY', 'all_proxy',
                      'SOCKS5_USER', 'SOCKS5_PASSWD']

        cmd = ''
        for var in exportvars:
            val = self.d.getVar(var, True)
            if val:
                cmd = 'export ' + var + '=\"%s\"; %s' % (val, cmd)

        cmd = cmd + "wget -O %s %s" % (self.localarchive, self.uri)
        subprocess.check_call(cmd, shell=True)

        (status, output) = self.target.copy_to(self.localarchive, self.targetdir)
        if status != 0:
            raise Exception("Failed to copy archive to target, output: %s" % output)

        (status, output) = self.target.run('tar xf %s%s -C %s' % (self.targetdir, self.archive, self.targetdir))
        if status != 0:
            raise Exception("Failed to extract archive, output: %s" % output)

        # Change targetdir to the project folder the archive extracted into.
        self.targetdir = self.targetdir + self.fname

    # The timeout parameter of target.run is set to 0 to make the ssh command
    # run with no timeout.
    def run_configure(self):
        return self.target.run('cd %s; ./configure' % self.targetdir, 0)[0]

    def run_make(self):
        return self.target.run('cd %s; make' % self.targetdir, 0)[0]

    def run_install(self):
        return self.target.run('cd %s; make install' % self.targetdir, 0)[0]

    def clean(self):
        """Remove the project directory on the target and the local archive."""
        self.target.run('rm -rf %s' % self.targetdir)
        subprocess.call('rm -f %s' % self.localarchive, shell=True)
